Dataset columns:
  hunk              dict
  file              string, length 0 to 11.8M
  file_path         string, length 2 to 234
  label             int64, values 0 to 1
  commit_url        string, length 74 to 103
  dependency_score  sequence of 5 floats
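The rows that follow pair each candidate file with the edit hunk under consideration. As a rough illustration only (not part of the original dump), the sketch below shows how rows with this schema could be loaded and inspected, assuming the dump is published as a Hugging Face dataset; the dataset identifier used here is a placeholder, not a real path.

from datasets import load_dataset

# Hypothetical dataset identifier; substitute the real one if it exists.
ds = load_dataset("org/edit-dependency-dump", split="train")

# Peek at a few rows: each carries the hunk metadata, the full file text,
# the binary relevance label, and the per-hunk dependency scores.
for row in ds.select(range(3)):
    hunk = row["hunk"]  # dict: id, code_window, labels, after_edit, file_path, type, edit_start_line_idx
    print(row["file_path"], row["label"], row["commit_url"])
    print(hunk["type"], hunk["edit_start_line_idx"], row["dependency_score"])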
{ "id": 4, "code_window": [ "\n", "\tfor now := range tick {\n", "\t\tnowSec := now.UTC().Unix() + cacheDurationSec\n", "\t\tfor idx, id := range us.validUserIds {\n", "\t\t\tus.generateNewHashes(lastSec, nowSec, idx, id)\n", "\t\t}\n", "\t\tlastSec = nowSec\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tnowSec := now.Unix() + cacheDurationSec\n" ], "file_path": "proxy/vmess/protocol/user/userset.go", "type": "replace", "edit_start_line_idx": 68 }
package user import ( "sync" "time" "github.com/v2ray/v2ray-core/common/collect" "github.com/v2ray/v2ray-core/proxy/vmess/config" ) const ( updateIntervalSec = 10 cacheDurationSec = 120 ) type UserSet interface { AddUser(user config.User) error GetUser(timeHash []byte) (*config.ID, int64, bool) } type TimedUserSet struct { validUserIds []*config.ID userHash map[string]indexTimePair userHashDeleteQueue *collect.TimedQueue access sync.RWMutex } type indexTimePair struct { index int timeSec int64 } func NewTimedUserSet() UserSet { tus := &TimedUserSet{ validUserIds: make([]*config.ID, 0, 16), userHash: make(map[string]indexTimePair, 512), userHashDeleteQueue: collect.NewTimedQueue(updateIntervalSec), access: sync.RWMutex{}, } go tus.updateUserHash(time.Tick(updateIntervalSec * time.Second)) go tus.removeEntries(tus.userHashDeleteQueue.RemovedEntries()) return tus } func (us *TimedUserSet) removeEntries(entries <-chan interface{}) { for entry := range entries { us.access.Lock() delete(us.userHash, entry.(string)) us.access.Unlock() } } func (us *TimedUserSet) generateNewHashes(lastSec, nowSec int64, idx int, id *config.ID) { idHash := NewTimeHash(HMACHash{}) for lastSec < nowSec { idHash := idHash.Hash(id.Bytes[:], lastSec) us.access.Lock() us.userHash[string(idHash)] = indexTimePair{idx, lastSec} us.access.Unlock() us.userHashDeleteQueue.Add(string(idHash), lastSec+2*cacheDurationSec) lastSec++ } } func (us *TimedUserSet) updateUserHash(tick <-chan time.Time) { lastSec := time.Now().UTC().Unix() - cacheDurationSec for now := range tick { nowSec := now.UTC().Unix() + cacheDurationSec for idx, id := range us.validUserIds { us.generateNewHashes(lastSec, nowSec, idx, id) } lastSec = nowSec } } func (us *TimedUserSet) AddUser(user config.User) error { id := user.ID() idx := len(us.validUserIds) us.validUserIds = append(us.validUserIds, id) nowSec := time.Now().UTC().Unix() lastSec := nowSec - cacheDurationSec us.generateNewHashes(lastSec, nowSec+cacheDurationSec, idx, id) return nil } func (us TimedUserSet) GetUser(userHash []byte) (*config.ID, int64, bool) { defer us.access.RUnlock() us.access.RLock() pair, found := us.userHash[string(userHash)] if found { return us.validUserIds[pair.index], pair.timeSec, true } return nil, 0, false }
proxy/vmess/protocol/user/userset.go
1
https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a
[ 0.9970882534980774, 0.3040297031402588, 0.0001670221536187455, 0.00789879634976387, 0.4535047709941864 ]
{ "id": 4, "code_window": [ "\n", "\tfor now := range tick {\n", "\t\tnowSec := now.UTC().Unix() + cacheDurationSec\n", "\t\tfor idx, id := range us.validUserIds {\n", "\t\t\tus.generateNewHashes(lastSec, nowSec, idx, id)\n", "\t\t}\n", "\t\tlastSec = nowSec\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tnowSec := now.Unix() + cacheDurationSec\n" ], "file_path": "proxy/vmess/protocol/user/userset.go", "type": "replace", "edit_start_line_idx": 68 }
# 开发指引 ## 基本 ### 版本控制 Git ### Branch 本项目只使用一个 Branch:master。所有更改全部提交进 master,并确保 master 在任一时刻都是可编译可使用的。 ### 引用其它项目 * golang * 产品代码只能使用 golang 的标准库,即名称不包含任何网址的包; * 测试代码可以使用 golang.org/x/... ; * 如需引用其它项目请事先创建 Issue 讨论; * 其它 * 只要不违反双方的协议(本项目为 MIT),且对项目有帮助的工具,都可以使用。 ## 开发流程 ### 写代码之前 发现任何问题,或对项目有任何想法,请立即[创建 Issue](https://github.com/V2Ray/v2ray-core/blob/master/spec/issue.md) 讨论之,以减少重复劳动和消耗在代码上的时间。 ### 修改代码 * golang * 请参考 [Effective Go](https://golang.org/doc/effective_go.html); * 每一次 commit 之前请运行: gofmt -w github.com/v2ray/v2ray-core/ * 每一次 commit 之前请确保测试通过: go test github.com/v2ray/v2ray-core/... * 提交 PR 之前请确保新增代码有超过 60% 的代码覆盖率(code coverage)。 * 其它 * 请注意代码的可读性 ### Pull Request 提交 PR 之前请先运行 git pull 以确保 merge 可顺利进行。 ## 对代码的修改 ### 功能性问题 请提交至少一个测试用例(test case)来验证对现有功能的改动。 ### 性能相关 请提交必要的测试数据来证明现有代码的性能缺陷,或是新增代码的性能提升。 ### 新功能 如果新增功能对已有功能不影响,请提供可以开启/关闭的开关(如 flag),并使新功能保持默认关闭的状态。 ### 其它 视具体情况而定。
spec/develop.md
0
https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a
[ 0.00016861256153788418, 0.00016576923371758312, 0.00016375862469431013, 0.00016482167120557278, 0.0000019262886326032458 ]
{ "id": 4, "code_window": [ "\n", "\tfor now := range tick {\n", "\t\tnowSec := now.UTC().Unix() + cacheDurationSec\n", "\t\tfor idx, id := range us.validUserIds {\n", "\t\t\tus.generateNewHashes(lastSec, nowSec, idx, id)\n", "\t\t}\n", "\t\tlastSec = nowSec\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tnowSec := now.Unix() + cacheDurationSec\n" ], "file_path": "proxy/vmess/protocol/user/userset.go", "type": "replace", "edit_start_line_idx": 68 }
package retry import ( "errors" "time" ) var ( RetryFailed = errors.New("All retry attempts failed.") ) type RetryStrategy interface { On(func() error) error } type retryer struct { NextDelay func(int) int } func (r *retryer) On(method func() error) error { attempt := 0 for { err := method() if err == nil { return nil } delay := r.NextDelay(attempt) if delay < 0 { return RetryFailed } <-time.After(time.Duration(delay) * time.Millisecond) attempt++ } } func Timed(attempts int, delay int) RetryStrategy { return &retryer{ NextDelay: func(attempt int) int { if attempt >= attempts { return -1 } return delay }, } }
common/retry/retry.go
0
https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a
[ 0.0001809952809708193, 0.0001737548445817083, 0.00016825761122163385, 0.0001727206981740892, 0.000004606364655046491 ]
{ "id": 4, "code_window": [ "\n", "\tfor now := range tick {\n", "\t\tnowSec := now.UTC().Unix() + cacheDurationSec\n", "\t\tfor idx, id := range us.validUserIds {\n", "\t\t\tus.generateNewHashes(lastSec, nowSec, idx, id)\n", "\t\t}\n", "\t\tlastSec = nowSec\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tnowSec := now.Unix() + cacheDurationSec\n" ], "file_path": "proxy/vmess/protocol/user/userset.go", "type": "replace", "edit_start_line_idx": 68 }
package json import ( "encoding/json" "github.com/v2ray/v2ray-core/proxy/vmess/config" ) // ConfigUser is an user account in VMess configuration. type ConfigUser struct { Id *config.ID Email string } func (u *ConfigUser) UnmarshalJSON(data []byte) error { type rawUser struct { IdString string `json:"id"` EmailString string `json:"email"` } var rawUserValue rawUser if err := json.Unmarshal(data, &rawUserValue); err != nil { return err } id, err := config.NewID(rawUserValue.IdString) if err != nil { return err } u.Id = id u.Email = rawUserValue.EmailString return nil } func (u *ConfigUser) ID() *config.ID { return u.Id }
proxy/vmess/config/json/user.go
0
https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a
[ 0.00030954243266023695, 0.0002136851690011099, 0.00016473288997076452, 0.0001902326912386343, 0.000059082867664983496 ]
{ "id": 5, "code_window": [ "func (us *TimedUserSet) AddUser(user config.User) error {\n", "\tid := user.ID()\n", "\tidx := len(us.validUserIds)\n", "\tus.validUserIds = append(us.validUserIds, id)\n", "\n", "\tnowSec := time.Now().UTC().Unix()\n", "\tlastSec := nowSec - cacheDurationSec\n", "\tus.generateNewHashes(lastSec, nowSec+cacheDurationSec, idx, id)\n", "\n", "\treturn nil\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tnowSec := time.Now().Unix()\n" ], "file_path": "proxy/vmess/protocol/user/userset.go", "type": "replace", "edit_start_line_idx": 81 }
// Package vmess contains protocol definition, io lib for VMess. package protocol import ( "crypto/aes" "crypto/cipher" "encoding/binary" "hash/fnv" "io" "time" "github.com/v2ray/v2ray-core/common/alloc" v2io "github.com/v2ray/v2ray-core/common/io" "github.com/v2ray/v2ray-core/common/log" v2net "github.com/v2ray/v2ray-core/common/net" "github.com/v2ray/v2ray-core/proxy" "github.com/v2ray/v2ray-core/proxy/vmess/config" "github.com/v2ray/v2ray-core/proxy/vmess/protocol/user" "github.com/v2ray/v2ray-core/transport" ) const ( addrTypeIPv4 = byte(0x01) addrTypeIPv6 = byte(0x03) addrTypeDomain = byte(0x02) CmdTCP = byte(0x01) CmdUDP = byte(0x02) Version = byte(0x01) blockSize = 16 ) // VMessRequest implements the request message of VMess protocol. It only contains the header of a // request message. The data part will be handled by conection handler directly, in favor of data // streaming. type VMessRequest struct { Version byte UserId config.ID RequestIV []byte RequestKey []byte ResponseHeader []byte Command byte Address v2net.Address } // Destination is the final destination of this request. func (request *VMessRequest) Destination() v2net.Destination { if request.Command == CmdTCP { return v2net.NewTCPDestination(request.Address) } else { return v2net.NewUDPDestination(request.Address) } } // VMessRequestReader is a parser to read VMessRequest from a byte stream. type VMessRequestReader struct { vUserSet user.UserSet } // NewVMessRequestReader creates a new VMessRequestReader with a given UserSet func NewVMessRequestReader(vUserSet user.UserSet) *VMessRequestReader { return &VMessRequestReader{ vUserSet: vUserSet, } } // Read reads a VMessRequest from a byte stream. func (r *VMessRequestReader) Read(reader io.Reader) (*VMessRequest, error) { buffer := alloc.NewSmallBuffer() nBytes, err := v2net.ReadAllBytes(reader, buffer.Value[:config.IDBytesLen]) if err != nil { return nil, err } userId, timeSec, valid := r.vUserSet.GetUser(buffer.Value[:nBytes]) if !valid { return nil, proxy.InvalidAuthentication } aesCipher, err := aes.NewCipher(userId.CmdKey()) if err != nil { return nil, err } aesStream := cipher.NewCFBDecrypter(aesCipher, user.Int64Hash(timeSec)) decryptor := v2io.NewCryptionReader(aesStream, reader) if err != nil { return nil, err } nBytes, err = v2net.ReadAllBytes(decryptor, buffer.Value[:41]) if err != nil { return nil, err } bufferLen := nBytes request := &VMessRequest{ UserId: *userId, Version: buffer.Value[0], } if request.Version != Version { log.Warning("Invalid protocol version %d", request.Version) return nil, proxy.InvalidProtocolVersion } request.RequestIV = buffer.Value[1:17] // 16 bytes request.RequestKey = buffer.Value[17:33] // 16 bytes request.ResponseHeader = buffer.Value[33:37] // 4 bytes request.Command = buffer.Value[37] port := binary.BigEndian.Uint16(buffer.Value[38:40]) switch buffer.Value[40] { case addrTypeIPv4: _, err = v2net.ReadAllBytes(decryptor, buffer.Value[41:45]) // 4 bytes bufferLen += 4 if err != nil { return nil, err } request.Address = v2net.IPAddress(buffer.Value[41:45], port) case addrTypeIPv6: _, err = v2net.ReadAllBytes(decryptor, buffer.Value[41:57]) // 16 bytes bufferLen += 16 if err != nil { return nil, err } request.Address = v2net.IPAddress(buffer.Value[41:57], port) case addrTypeDomain: _, err = v2net.ReadAllBytes(decryptor, buffer.Value[41:42]) if err != nil { return nil, err } domainLength := int(buffer.Value[41]) _, err = v2net.ReadAllBytes(decryptor, buffer.Value[42:42+domainLength]) if err != nil { return nil, err } bufferLen += 1 + 
domainLength request.Address = v2net.DomainAddress(string(buffer.Value[42:42+domainLength]), port) } _, err = v2net.ReadAllBytes(decryptor, buffer.Value[bufferLen:bufferLen+4]) if err != nil { return nil, err } fnv1a := fnv.New32a() fnv1a.Write(buffer.Value[:bufferLen]) actualHash := fnv1a.Sum32() expectedHash := binary.BigEndian.Uint32(buffer.Value[bufferLen : bufferLen+4]) if actualHash != expectedHash { return nil, transport.CorruptedPacket } return request, nil } // ToBytes returns a VMessRequest in the form of byte array. func (request *VMessRequest) ToBytes(idHash user.CounterHash, randomRangeInt64 user.RandomInt64InRange, buffer *alloc.Buffer) (*alloc.Buffer, error) { if buffer == nil { buffer = alloc.NewSmallBuffer().Clear() } counter := randomRangeInt64(time.Now().UTC().Unix(), 30) hash := idHash.Hash(request.UserId.Bytes[:], counter) buffer.Append(hash) encryptionBegin := buffer.Len() buffer.AppendBytes(request.Version) buffer.Append(request.RequestIV) buffer.Append(request.RequestKey) buffer.Append(request.ResponseHeader) buffer.AppendBytes(request.Command) buffer.Append(request.Address.PortBytes()) switch { case request.Address.IsIPv4(): buffer.AppendBytes(addrTypeIPv4) buffer.Append(request.Address.IP()) case request.Address.IsIPv6(): buffer.AppendBytes(addrTypeIPv6) buffer.Append(request.Address.IP()) case request.Address.IsDomain(): buffer.AppendBytes(addrTypeDomain, byte(len(request.Address.Domain()))) buffer.Append([]byte(request.Address.Domain())) } encryptionEnd := buffer.Len() fnv1a := fnv.New32a() fnv1a.Write(buffer.Value[encryptionBegin:encryptionEnd]) fnvHash := fnv1a.Sum32() buffer.AppendBytes(byte(fnvHash>>24), byte(fnvHash>>16), byte(fnvHash>>8), byte(fnvHash)) encryptionEnd += 4 aesCipher, err := aes.NewCipher(request.UserId.CmdKey()) if err != nil { return nil, err } aesStream := cipher.NewCFBEncrypter(aesCipher, user.Int64Hash(counter)) aesStream.XORKeyStream(buffer.Value[encryptionBegin:encryptionEnd], buffer.Value[encryptionBegin:encryptionEnd]) return buffer, nil }
proxy/vmess/protocol/vmess.go
1
https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a
[ 0.012807883322238922, 0.000788861361797899, 0.0001643110445002094, 0.00017339264741167426, 0.0026250588707625866 ]
{ "id": 5, "code_window": [ "func (us *TimedUserSet) AddUser(user config.User) error {\n", "\tid := user.ID()\n", "\tidx := len(us.validUserIds)\n", "\tus.validUserIds = append(us.validUserIds, id)\n", "\n", "\tnowSec := time.Now().UTC().Unix()\n", "\tlastSec := nowSec - cacheDurationSec\n", "\tus.generateNewHashes(lastSec, nowSec+cacheDurationSec, idx, id)\n", "\n", "\treturn nil\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tnowSec := time.Now().Unix()\n" ], "file_path": "proxy/vmess/protocol/user/userset.go", "type": "replace", "edit_start_line_idx": 81 }
package mocks import ( "github.com/v2ray/v2ray-core/proxy/vmess/config" ) type MockUserSet struct { UserIds []*config.ID UserHashes map[string]int Timestamps map[string]int64 } func (us *MockUserSet) AddUser(user config.User) error { us.UserIds = append(us.UserIds, user.ID()) return nil } func (us *MockUserSet) GetUser(userhash []byte) (*config.ID, int64, bool) { idx, found := us.UserHashes[string(userhash)] if found { return us.UserIds[idx], us.Timestamps[string(userhash)], true } return nil, 0, false }
testing/mocks/mockuserset.go
0
https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a
[ 0.9760235548019409, 0.33122268319129944, 0.0062319207936525345, 0.011412526480853558, 0.45594796538352966 ]
{ "id": 5, "code_window": [ "func (us *TimedUserSet) AddUser(user config.User) error {\n", "\tid := user.ID()\n", "\tidx := len(us.validUserIds)\n", "\tus.validUserIds = append(us.validUserIds, id)\n", "\n", "\tnowSec := time.Now().UTC().Unix()\n", "\tlastSec := nowSec - cacheDurationSec\n", "\tus.generateNewHashes(lastSec, nowSec+cacheDurationSec, idx, id)\n", "\n", "\treturn nil\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tnowSec := time.Now().Unix()\n" ], "file_path": "proxy/vmess/protocol/user/userset.go", "type": "replace", "edit_start_line_idx": 81 }
# Issue 指引 在新建 Issue 之前请先阅读下面的内容,以提高 Issue 的质量和效率。 ## 开放分类 在创建 Issue 的时候可标记为以下分类: ### Bug 代码的质量问题,可以是功能性(Functionality),稳定性(Reliability)或者性能(Performance)。在 Bug 的内容中请注明下列内容以方便重现: * 操作系统:如 Windows 10,Ubuntu 14.04 等,64 位 / 32 位 * V2Ray 版本:版本号或源码同步日期 * Golang 版本:如果有的话 * 问题描述:任何的错误信息,不正常的行为等 * 日志文件:如果有的话 如对软件使用有任何问题也请发到这个类别。 请在一个 Issue 中只描述一个问题,如果你遇到多个问题,请分别创建不同的 Issue,以方便讨论和解决。如果合在一起发,将有很高的机率被标记为 Chat 而降低解决问题的优先级。 ### Chat 聊天或其它相关性不强的内容。标记为 Chat 的 Issue 将在最后回复 7 天后关闭。 ### Enhancemet 有关新特性的建议,如果是针对现有代码的修改,请详细描述您的建议。 ## 以下分类仅供管理员使用 ### Announcement 新版本发布、最新动态等公告内容,将于发布之后的 7 日内关闭。。 ### Duplicate 此 Issue 与之前的某一个 Issue 重复。 ### Help Wanted 标记为 Help Wanted 的 Issue 接受 Pull Request,一般为新特性的实现,如果您对其中某一个感兴趣,欢迎供献代码。 ### Invalid 无法重现的 Bug 或没有意义的 Issue。 ### vLog 用于记录一些 V2Ray 的实现细节,想法等,将于发布之后的 30 日内关闭。 ### Won't fix 此 Issue 是一个合理的问题,但不影响软件的正常使用,故不修复。
spec/issue.md
0
https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a
[ 0.00017347051471006125, 0.0001692368823569268, 0.00016326173499692231, 0.00017110853514168411, 0.000003743624574781279 ]
{ "id": 5, "code_window": [ "func (us *TimedUserSet) AddUser(user config.User) error {\n", "\tid := user.ID()\n", "\tidx := len(us.validUserIds)\n", "\tus.validUserIds = append(us.validUserIds, id)\n", "\n", "\tnowSec := time.Now().UTC().Unix()\n", "\tlastSec := nowSec - cacheDurationSec\n", "\tus.generateNewHashes(lastSec, nowSec+cacheDurationSec, idx, id)\n", "\n", "\treturn nil\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tnowSec := time.Now().Unix()\n" ], "file_path": "proxy/vmess/protocol/user/userset.go", "type": "replace", "edit_start_line_idx": 81 }
The MIT License (MIT) Copyright (c) 2015 V2Ray Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
LICENSE
0
https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a
[ 0.00017666745407041162, 0.00017185870092362165, 0.00016861865879036486, 0.00017028998991008848, 0.000003468084742053179 ]
{ "id": 6, "code_window": [ "\t\tbuffer = alloc.NewSmallBuffer().Clear()\n", "\t}\n", "\n", "\tcounter := randomRangeInt64(time.Now().UTC().Unix(), 30)\n", "\thash := idHash.Hash(request.UserId.Bytes[:], counter)\n", "\n", "\tbuffer.Append(hash)\n", "\n", "\tencryptionBegin := buffer.Len()\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tcounter := randomRangeInt64(time.Now().Unix(), 30)\n" ], "file_path": "proxy/vmess/protocol/vmess.go", "type": "replace", "edit_start_line_idx": 168 }
package user import ( "sync" "time" "github.com/v2ray/v2ray-core/common/collect" "github.com/v2ray/v2ray-core/proxy/vmess/config" ) const ( updateIntervalSec = 10 cacheDurationSec = 120 ) type UserSet interface { AddUser(user config.User) error GetUser(timeHash []byte) (*config.ID, int64, bool) } type TimedUserSet struct { validUserIds []*config.ID userHash map[string]indexTimePair userHashDeleteQueue *collect.TimedQueue access sync.RWMutex } type indexTimePair struct { index int timeSec int64 } func NewTimedUserSet() UserSet { tus := &TimedUserSet{ validUserIds: make([]*config.ID, 0, 16), userHash: make(map[string]indexTimePair, 512), userHashDeleteQueue: collect.NewTimedQueue(updateIntervalSec), access: sync.RWMutex{}, } go tus.updateUserHash(time.Tick(updateIntervalSec * time.Second)) go tus.removeEntries(tus.userHashDeleteQueue.RemovedEntries()) return tus } func (us *TimedUserSet) removeEntries(entries <-chan interface{}) { for entry := range entries { us.access.Lock() delete(us.userHash, entry.(string)) us.access.Unlock() } } func (us *TimedUserSet) generateNewHashes(lastSec, nowSec int64, idx int, id *config.ID) { idHash := NewTimeHash(HMACHash{}) for lastSec < nowSec { idHash := idHash.Hash(id.Bytes[:], lastSec) us.access.Lock() us.userHash[string(idHash)] = indexTimePair{idx, lastSec} us.access.Unlock() us.userHashDeleteQueue.Add(string(idHash), lastSec+2*cacheDurationSec) lastSec++ } } func (us *TimedUserSet) updateUserHash(tick <-chan time.Time) { lastSec := time.Now().UTC().Unix() - cacheDurationSec for now := range tick { nowSec := now.UTC().Unix() + cacheDurationSec for idx, id := range us.validUserIds { us.generateNewHashes(lastSec, nowSec, idx, id) } lastSec = nowSec } } func (us *TimedUserSet) AddUser(user config.User) error { id := user.ID() idx := len(us.validUserIds) us.validUserIds = append(us.validUserIds, id) nowSec := time.Now().UTC().Unix() lastSec := nowSec - cacheDurationSec us.generateNewHashes(lastSec, nowSec+cacheDurationSec, idx, id) return nil } func (us TimedUserSet) GetUser(userHash []byte) (*config.ID, int64, bool) { defer us.access.RUnlock() us.access.RLock() pair, found := us.userHash[string(userHash)] if found { return us.validUserIds[pair.index], pair.timeSec, true } return nil, 0, false }
proxy/vmess/protocol/user/userset.go
1
https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a
[ 0.00163706683088094, 0.0004058672930113971, 0.00016379563021473587, 0.0002475320070516318, 0.0004286165058147162 ]
{ "id": 6, "code_window": [ "\t\tbuffer = alloc.NewSmallBuffer().Clear()\n", "\t}\n", "\n", "\tcounter := randomRangeInt64(time.Now().UTC().Unix(), 30)\n", "\thash := idHash.Hash(request.UserId.Bytes[:], counter)\n", "\n", "\tbuffer.Append(hash)\n", "\n", "\tencryptionBegin := buffer.Len()\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tcounter := randomRangeInt64(time.Now().Unix(), 30)\n" ], "file_path": "proxy/vmess/protocol/vmess.go", "type": "replace", "edit_start_line_idx": 168 }
package json import ( "encoding/json" "net" "strings" "github.com/v2ray/v2ray-core/common/log" v2net "github.com/v2ray/v2ray-core/common/net" "github.com/v2ray/v2ray-core/config" jsonconfig "github.com/v2ray/v2ray-core/config/json" vmessconfig "github.com/v2ray/v2ray-core/proxy/vmess/config" ) type RawConfigTarget struct { Address string `json:"address"` Port uint16 `json:"port"` Users []*ConfigUser `json:"users"` Network string `json:"network"` } func (config RawConfigTarget) HasNetwork(network string) bool { return strings.Contains(config.Network, network) } type ConfigTarget struct { Address v2net.Address Users []*ConfigUser TCPEnabled bool UDPEnabled bool } func (t *ConfigTarget) UnmarshalJSON(data []byte) error { var rawConfig RawConfigTarget if err := json.Unmarshal(data, &rawConfig); err != nil { return err } t.Users = rawConfig.Users ip := net.ParseIP(rawConfig.Address) if ip == nil { log.Error("Unable to parse IP: %s", rawConfig.Address) return config.BadConfiguration } t.Address = v2net.IPAddress(ip, rawConfig.Port) if rawConfig.HasNetwork("tcp") { t.TCPEnabled = true } if rawConfig.HasNetwork("udp") { t.UDPEnabled = true } return nil } type Outbound struct { TargetList []*ConfigTarget `json:"vnext"` } func (o *Outbound) Targets() []*vmessconfig.OutboundTarget { targets := make([]*vmessconfig.OutboundTarget, 0, 2*len(o.TargetList)) for _, rawTarget := range o.TargetList { users := make([]vmessconfig.User, 0, len(rawTarget.Users)) for _, rawUser := range rawTarget.Users { users = append(users, rawUser) } if rawTarget.TCPEnabled { targets = append(targets, &vmessconfig.OutboundTarget{ Destination: v2net.NewTCPDestination(rawTarget.Address), Accounts: users, }) } if rawTarget.UDPEnabled { targets = append(targets, &vmessconfig.OutboundTarget{ Destination: v2net.NewUDPDestination(rawTarget.Address), Accounts: users, }) } } return targets } func init() { jsonconfig.RegisterConfigType("vmess", config.TypeOutbound, func() interface{} { return new(Outbound) }) }
proxy/vmess/config/json/outbound.go
0
https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a
[ 0.00017353470320813358, 0.0001698164560366422, 0.00016235171642620116, 0.00017087195010390133, 0.0000035123082398058614 ]
{ "id": 6, "code_window": [ "\t\tbuffer = alloc.NewSmallBuffer().Clear()\n", "\t}\n", "\n", "\tcounter := randomRangeInt64(time.Now().UTC().Unix(), 30)\n", "\thash := idHash.Hash(request.UserId.Bytes[:], counter)\n", "\n", "\tbuffer.Append(hash)\n", "\n", "\tencryptionBegin := buffer.Len()\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tcounter := randomRangeInt64(time.Now().Unix(), 30)\n" ], "file_path": "proxy/vmess/protocol/vmess.go", "type": "replace", "edit_start_line_idx": 168 }
package json import ( "github.com/v2ray/v2ray-core/config" "github.com/v2ray/v2ray-core/config/json" ) type BlackHoleConfig struct { } func init() { json.RegisterConfigType("blackhole", config.TypeInbound, func() interface{} { return new(BlackHoleConfig) }) }
proxy/blackhole/config/json/json.go
0
https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a
[ 0.00016732596850488335, 0.00016702490393072367, 0.00016672382480464876, 0.00016702490393072367, 3.01071850117296e-7 ]
{ "id": 6, "code_window": [ "\t\tbuffer = alloc.NewSmallBuffer().Clear()\n", "\t}\n", "\n", "\tcounter := randomRangeInt64(time.Now().UTC().Unix(), 30)\n", "\thash := idHash.Hash(request.UserId.Bytes[:], counter)\n", "\n", "\tbuffer.Append(hash)\n", "\n", "\tencryptionBegin := buffer.Len()\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tcounter := randomRangeInt64(time.Now().Unix(), 30)\n" ], "file_path": "proxy/vmess/protocol/vmess.go", "type": "replace", "edit_start_line_idx": 168 }
package unit import ( "strconv" ) type IntSubject struct { *Subject value int } func NewIntSubject(base *Subject, value int) *IntSubject { return &IntSubject{ Subject: base, value: value, } } func (subject *IntSubject) Named(name string) *IntSubject { subject.Subject.Named(name) return subject } func (subject *IntSubject) Fail(verb string, other int) { subject.FailWithMessage("Not true that " + subject.DisplayString() + " " + verb + " <" + strconv.Itoa(other) + ">.") } func (subject *IntSubject) DisplayString() string { return subject.Subject.DisplayString(strconv.Itoa(subject.value)) } func (subject *IntSubject) Equals(expectation int) { if subject.value != expectation { subject.Fail("is equal to", expectation) } } func (subject *IntSubject) GreaterThan(expectation int) { if subject.value <= expectation { subject.Fail("is greater than", expectation) } } func (subject *IntSubject) LessThan(expectation int) { if subject.value >= expectation { subject.Fail("is less than", expectation) } }
testing/unit/intsubject.go
0
https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a
[ 0.00017592296353541315, 0.0001733635290293023, 0.00017049568123184144, 0.0001736778940539807, 0.000001780440470611211 ]
{ "id": 0, "code_window": [ "\tExternalNamespace namespace.Instance\n", "\n", "\t// Ingressgateway instance\n", "\tIngress ingress.Instance\n", "\t// Eastwest gateway instance\n", "\tEastWest ingress.Instance\n", "\n", "\t// Standard echo app to be used by tests\n", "\tPodA echo.Instances\n", "\t// Standard echo app to be used by tests\n", "\tPodB echo.Instances\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "tests/integration/pilot/common/apps.go", "type": "replace", "edit_start_line_idx": 39 }
// +build integ // Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package common import ( "strconv" "strings" "istio.io/istio/pkg/test/framework/components/echo" "istio.io/istio/pkg/test/framework/components/echo/common" "istio.io/istio/pkg/test/framework/components/echo/echoboot" "istio.io/istio/pkg/test/framework/components/istio" "istio.io/istio/pkg/test/framework/components/istio/ingress" "istio.io/istio/pkg/test/framework/components/namespace" "istio.io/istio/pkg/test/framework/resource" "istio.io/istio/pkg/test/util/tmpl" ) type EchoDeployments struct { // Namespace echo apps will be deployed Namespace namespace.Instance // Namespace where external echo app will be deployed ExternalNamespace namespace.Instance // Ingressgateway instance Ingress ingress.Instance // Eastwest gateway instance EastWest ingress.Instance // Standard echo app to be used by tests PodA echo.Instances // Standard echo app to be used by tests PodB echo.Instances // Standard echo app to be used by tests PodC echo.Instances // Standard echo app with TPROXY interception mode to be used by tests PodTproxy echo.Instances // Headless echo app to be used by tests Headless echo.Instances // StatefulSet echo app to be used by tests StatefulSet echo.Instances // Echo app to be used by tests, with no sidecar injected Naked echo.Instances // A virtual machine echo app (only deployed to one cluster) VM echo.Instances // Echo app to be used by tests, with no sidecar injected External echo.Instances All echo.Instances } const ( PodASvc = "a" PodBSvc = "b" PodCSvc = "c" PodTproxySvc = "tproxy" VMSvc = "vm" HeadlessSvc = "headless" StatefulSetSvc = "statefulset" NakedSvc = "naked" ExternalSvc = "external" externalHostname = "fake.external.com" ) func FindPortByName(name string) echo.Port { for _, p := range common.EchoPorts { if p.Name == name { return p } } return echo.Port{} } func serviceEntryPorts() []echo.Port { res := []echo.Port{} for _, p := range common.EchoPorts { if strings.HasPrefix(p.Name, "auto") { // The protocol needs to be set in common.EchoPorts to configure the echo deployment // But for service entry, we want to ensure we set it to "" which will use sniffing p.Protocol = "" } res = append(res, p) } return res } func SetupApps(t resource.Context, i istio.Instance, apps *EchoDeployments) error { var err error apps.Namespace, err = namespace.New(t, namespace.Config{ Prefix: "echo", Inject: true, }) if err != nil { return err } apps.ExternalNamespace, err = namespace.New(t, namespace.Config{ Prefix: "external", Inject: false, }) if err != nil { return err } apps.Ingress = i.IngressFor(t.Clusters().Default()) apps.EastWest = i.CustomIngressFor(t.Clusters().Default(), "istio-eastwestgateway", "eastwestgateway") // Headless services don't work with targetPort, set to same port headlessPorts := make([]echo.Port, len(common.EchoPorts)) for i, p := range common.EchoPorts { p.ServicePort = p.InstancePort headlessPorts[i] = p } builder := echoboot.NewBuilder(t). WithClusters(t.Clusters()...). 
WithConfig(echo.Config{ Service: PodASvc, Namespace: apps.Namespace, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{{}}, Locality: "region.zone.subzone", WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: PodBSvc, Namespace: apps.Namespace, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{{}}, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: PodCSvc, Namespace: apps.Namespace, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{{}}, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: HeadlessSvc, Headless: true, Namespace: apps.Namespace, Ports: headlessPorts, Subsets: []echo.SubsetConfig{{}}, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: StatefulSetSvc, Headless: true, StatefulSet: true, Namespace: apps.Namespace, Ports: headlessPorts, Subsets: []echo.SubsetConfig{{}}, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: NakedSvc, Namespace: apps.Namespace, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{ { Annotations: map[echo.Annotation]*echo.AnnotationValue{ echo.SidecarInject: { Value: strconv.FormatBool(false), }, }, }, }, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: ExternalSvc, Namespace: apps.ExternalNamespace, DefaultHostHeader: externalHostname, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{ { Annotations: map[echo.Annotation]*echo.AnnotationValue{ echo.SidecarInject: { Value: strconv.FormatBool(false), }, }, }, }, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: PodTproxySvc, Namespace: apps.Namespace, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{{ Annotations: echo.NewAnnotations().Set(echo.SidecarInterceptionMode, "TPROXY"), }}, WorkloadOnlyPorts: common.WorkloadPorts, }). 
WithConfig(echo.Config{ Service: VMSvc, Namespace: apps.Namespace, Ports: common.EchoPorts, DeployAsVM: true, AutoRegisterVM: true, Subsets: []echo.SubsetConfig{{}}, WorkloadOnlyPorts: common.WorkloadPorts, }) echos, err := builder.Build() if err != nil { return err } apps.All = echos apps.PodA = echos.Match(echo.Service(PodASvc)) apps.PodB = echos.Match(echo.Service(PodBSvc)) apps.PodC = echos.Match(echo.Service(PodCSvc)) apps.PodTproxy = echos.Match(echo.Service(PodTproxySvc)) apps.Headless = echos.Match(echo.Service(HeadlessSvc)) apps.StatefulSet = echos.Match(echo.Service(StatefulSetSvc)) apps.Naked = echos.Match(echo.Service(NakedSvc)) apps.External = echos.Match(echo.Service(ExternalSvc)) if !t.Settings().SkipVM { apps.VM = echos.Match(echo.Service(VMSvc)) } if err := t.Config().ApplyYAMLNoCleanup(apps.Namespace.Name(), ` apiVersion: networking.istio.io/v1alpha3 kind: Sidecar metadata: name: restrict-to-namespace spec: egress: - hosts: - "./*" - "istio-system/*" `); err != nil { return err } se, err := tmpl.Evaluate(`apiVersion: networking.istio.io/v1alpha3 kind: ServiceEntry metadata: name: external-service spec: hosts: - {{.Hostname}} location: MESH_EXTERNAL resolution: DNS endpoints: - address: external.{{.Namespace}}.svc.cluster.local ports: - name: http-tls-origination number: 8888 protocol: http targetPort: 443 - name: http2-tls-origination number: 8882 protocol: http2 targetPort: 443 {{- range $i, $p := .Ports }} - name: {{$p.Name}} number: {{$p.ServicePort}} protocol: "{{$p.Protocol}}" {{- end }} `, map[string]interface{}{"Namespace": apps.ExternalNamespace.Name(), "Hostname": externalHostname, "Ports": serviceEntryPorts()}) if err != nil { return err } if err := t.Config().ApplyYAML(apps.Namespace.Name(), se); err != nil { return err } return nil } func (d EchoDeployments) IsMulticluster() bool { return d.All.Clusters().IsMulticluster() }
tests/integration/pilot/common/apps.go
1
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.1049795150756836, 0.00526468874886632, 0.0001625813019927591, 0.00019538924971129745, 0.0199582576751709 ]
{ "id": 0, "code_window": [ "\tExternalNamespace namespace.Instance\n", "\n", "\t// Ingressgateway instance\n", "\tIngress ingress.Instance\n", "\t// Eastwest gateway instance\n", "\tEastWest ingress.Instance\n", "\n", "\t// Standard echo app to be used by tests\n", "\tPodA echo.Instances\n", "\t// Standard echo app to be used by tests\n", "\tPodB echo.Instances\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "tests/integration/pilot/common/apps.go", "type": "replace", "edit_start_line_idx": 39 }
// Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package route import ( "fmt" "sort" "strconv" "strings" core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" xdsfault "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3" xdshttpfault "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3" matcher "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" xdstype "github.com/envoyproxy/go-control-plane/envoy/type/v3" wellknown "github.com/envoyproxy/go-control-plane/pkg/wellknown" "github.com/golang/protobuf/ptypes/any" "github.com/golang/protobuf/ptypes/duration" "github.com/golang/protobuf/ptypes/wrappers" "google.golang.org/protobuf/types/known/durationpb" networking "istio.io/api/networking/v1alpha3" "istio.io/istio/pilot/pkg/features" "istio.io/istio/pilot/pkg/model" "istio.io/istio/pilot/pkg/networking/core/v1alpha3/route/retry" "istio.io/istio/pilot/pkg/networking/util" "istio.io/istio/pkg/config" "istio.io/istio/pkg/config/constants" "istio.io/istio/pkg/config/host" "istio.io/istio/pkg/config/labels" "istio.io/istio/pkg/util/gogo" "istio.io/pkg/log" ) // Headers with special meaning in Envoy const ( HeaderMethod = ":method" HeaderAuthority = ":authority" HeaderScheme = ":scheme" ) // DefaultRouteName is the name assigned to a route generated by default in absence of a virtual service. const DefaultRouteName = "default" var regexEngine = &matcher.RegexMatcher_GoogleRe2{GoogleRe2: &matcher.RegexMatcher_GoogleRE2{}} // VirtualHostWrapper is a context-dependent virtual host entry with guarded routes. // Note: Currently we are not fully utilizing this structure. We could invoke this logic // once for all sidecars in the cluster to compute all RDS for inside the mesh and arrange // it by listener port. However to properly use such an optimization, we need to have an // eventing subsystem to invalidate the computed routes if any service changes/virtual services change. type VirtualHostWrapper struct { // Port is the listener port for outbound sidecar (e.g. service port) Port int // Services are the services from the registry. Each service // in this list should have a virtual host entry Services []*model.Service // VirtualServiceHosts is a list of hosts defined in the virtual service // if virtual service hostname is same as a the service registry host, then // the host would appear in Services as we need to generate all variants of the // service's hostname within a platform (e.g., foo, foo.default, foo.default.svc, etc.) VirtualServiceHosts []string // Routes in the virtual host Routes []*route.Route } // BuildSidecarVirtualHostsFromConfigAndRegistry creates virtual hosts from // the given set of virtual services and a list of services from the // service registry. Services are indexed by FQDN hostnames. // The list of services is also passed to allow maintaining consistent ordering. 
func BuildSidecarVirtualHostsFromConfigAndRegistry(node *model.Proxy, push *model.PushContext, serviceRegistry map[host.Name]*model.Service, virtualServices []config.Config, listenPort int) []VirtualHostWrapper { out := make([]VirtualHostWrapper, 0) // translate all virtual service configs into virtual hosts for _, virtualService := range virtualServices { wrappers := buildSidecarVirtualHostsForVirtualService(node, push, virtualService, serviceRegistry, listenPort) if len(wrappers) == 0 { // If none of the routes matched by source (i.e. proxyLabels), then discard this entire virtual service continue } out = append(out, wrappers...) } // compute services missing virtual service configs missing := make(map[host.Name]struct{}) for fqdn := range serviceRegistry { missing[fqdn] = struct{}{} } for _, wrapper := range out { for _, service := range wrapper.Services { delete(missing, service.Hostname) } } // append default hosts for the service missing virtual services for hn := range missing { svc := serviceRegistry[hn] for _, port := range svc.Ports { if port.Protocol.IsHTTP() || util.IsProtocolSniffingEnabledForPort(port) { cluster := model.BuildSubsetKey(model.TrafficDirectionOutbound, "", svc.Hostname, port.Port) traceOperation := traceOperation(string(svc.Hostname), port.Port) httpRoute := BuildDefaultHTTPOutboundRoute(node, cluster, traceOperation) // if this host has no virtualservice, the consistentHash on its destinationRule will be useless if hashPolicy := getHashPolicyByService(node, push, svc, port); hashPolicy != nil { httpRoute.GetRoute().HashPolicy = []*route.RouteAction_HashPolicy{hashPolicy} } out = append(out, VirtualHostWrapper{ Port: port.Port, Services: []*model.Service{svc}, Routes: []*route.Route{httpRoute}, }) } } } return out } // separateVSHostsAndServices splits the virtual service hosts into services (if they are found in the registry) and // plain non-registry hostnames func separateVSHostsAndServices(virtualService config.Config, serviceRegistry map[host.Name]*model.Service) ([]string, []*model.Service) { rule := virtualService.Spec.(*networking.VirtualService) hosts := make([]string, 0) servicesInVirtualService := make([]*model.Service, 0) wchosts := make([]host.Name, 0) // As a performance optimization, process non wildcard hosts first, so that they can be // looked up directly in the service registry map. for _, hostname := range rule.Hosts { vshost := host.Name(hostname) if !vshost.IsWildCarded() { if svc, exists := serviceRegistry[vshost]; exists { servicesInVirtualService = append(servicesInVirtualService, svc) } else { hosts = append(hosts, hostname) } } else { // Add it to the wildcard hosts so that they can be processed later. wchosts = append(wchosts, vshost) } } // Now process wild card hosts as they need to follow the slow path of looping through all services in the registry. for _, hostname := range wchosts { // Say host is *.global foundSvcMatch := false // Say we have services *.foo.global, *.bar.global for svcHost, svc := range serviceRegistry { // *.foo.global matches *.global if svcHost.Matches(hostname) { servicesInVirtualService = append(servicesInVirtualService, svc) foundSvcMatch = true } } if !foundSvcMatch { hosts = append(hosts, string(hostname)) } } return hosts, servicesInVirtualService } // buildSidecarVirtualHostsForVirtualService creates virtual hosts corresponding to a virtual service. // Called for each port to determine the list of vhosts on the given port. 
// It may return an empty list if no VirtualService rule has a matching service. func buildSidecarVirtualHostsForVirtualService( node *model.Proxy, push *model.PushContext, virtualService config.Config, serviceRegistry map[host.Name]*model.Service, listenPort int) []VirtualHostWrapper { hosts, servicesInVirtualService := separateVSHostsAndServices(virtualService, serviceRegistry) // Now group these services by port so that we can infer the destination.port if the user // doesn't specify any port for a multiport service. We need to know the destination port in // order to build the cluster name (outbound|<port>|<subset>|<serviceFQDN>) // If the destination service is being accessed on port X, we set that as the default // destination port serviceByPort := make(map[int][]*model.Service) for _, svc := range servicesInVirtualService { for _, port := range svc.Ports { if port.Protocol.IsHTTP() || util.IsProtocolSniffingEnabledForPort(port) { serviceByPort[port.Port] = append(serviceByPort[port.Port], svc) } } } // We need to group the virtual hosts by port, because each http connection manager is // going to send a separate RDS request // Note that we need to build non-default HTTP routes only for the virtual services. // The services in the serviceRegistry will always have a default route (/) if len(serviceByPort) == 0 { // This is a gross HACK. Fix me. Its a much bigger surgery though, due to the way // the current code is written. serviceByPort[80] = nil } meshGateway := map[string]bool{constants.IstioMeshGateway: true} out := make([]VirtualHostWrapper, 0, len(serviceByPort)) routes, err := BuildHTTPRoutesForVirtualService(node, push, virtualService, serviceRegistry, listenPort, meshGateway) if err != nil || len(routes) == 0 { return out } for port, portServices := range serviceByPort { out = append(out, VirtualHostWrapper{ Port: port, Services: portServices, VirtualServiceHosts: hosts, Routes: routes, }) } return out } // GetDestinationCluster generates a cluster name for the route, or error if no cluster // can be found. Called by translateRule to determine if func GetDestinationCluster(destination *networking.Destination, service *model.Service, listenerPort int) string { port := listenerPort if destination.GetPort() != nil { port = int(destination.GetPort().GetNumber()) } else if service != nil && len(service.Ports) == 1 { // if service only has one port defined, use that as the port, otherwise use default listenerPort port = service.Ports[0].Port // Do not return blackhole cluster for service==nil case as there is a legitimate use case for // calling this function with nil service: to route to a pre-defined statically configured cluster // declared as part of the bootstrap. // If blackhole cluster is needed, do the check on the caller side. See gateway and tls.go for examples. } return model.BuildSubsetKey(model.TrafficDirectionOutbound, destination.Subset, host.Name(destination.Host), port) } // BuildHTTPRoutesForVirtualService creates data plane HTTP routes from the virtual service spec. // The rule should be adapted to destination names (outbound clusters). // Each rule is guarded by source labels. // // This is called for each port to compute virtual hosts. // Each VirtualService is tried, with a list of services that listen on the port. // Error indicates the given virtualService can't be used on the port. 
// This function is used by both the gateway and the sidecar func BuildHTTPRoutesForVirtualService( node *model.Proxy, push *model.PushContext, virtualService config.Config, serviceRegistry map[host.Name]*model.Service, listenPort int, gatewayNames map[string]bool) ([]*route.Route, error) { vs, ok := virtualService.Spec.(*networking.VirtualService) if !ok { // should never happen return nil, fmt.Errorf("in not a virtual service: %#v", virtualService) } out := make([]*route.Route, 0, len(vs.Http)) catchall := false for _, http := range vs.Http { if len(http.Match) == 0 { if r := translateRoute(push, node, http, nil, listenPort, virtualService, serviceRegistry, gatewayNames); r != nil { out = append(out, r) } catchall = true } else { for _, match := range http.Match { if r := translateRoute(push, node, http, match, listenPort, virtualService, serviceRegistry, gatewayNames); r != nil { out = append(out, r) // This is a catch all path. Routes are matched in order, so we will never go beyond this match // As an optimization, we can just top sending any more routes here. if isCatchAllMatch(match) { catchall = true break } } } } if catchall { break } } if len(out) == 0 { return nil, fmt.Errorf("no routes matched") } return out, nil } // sourceMatchHttp checks if the sourceLabels or the gateways in a match condition match with the // labels for the proxy or the gateway name for which we are generating a route func sourceMatchHTTP(match *networking.HTTPMatchRequest, proxyLabels labels.Collection, gatewayNames map[string]bool, proxyNamespace string) bool { if match == nil { return true } // Trim by source labels or mesh gateway if len(match.Gateways) > 0 { for _, g := range match.Gateways { if gatewayNames[g] { return true } } } else if proxyLabels.IsSupersetOf(match.GetSourceLabels()) { return match.SourceNamespace == "" || match.SourceNamespace == proxyNamespace } return false } // translateRoute translates HTTP routes func translateRoute(push *model.PushContext, node *model.Proxy, in *networking.HTTPRoute, match *networking.HTTPMatchRequest, port int, virtualService config.Config, serviceRegistry map[host.Name]*model.Service, gatewayNames map[string]bool) *route.Route { // When building routes, its okay if the target cluster cannot be // resolved Traffic to such clusters will blackhole. // Match by the destination port specified in the match condition if match != nil && match.Port != 0 && match.Port != uint32(port) { return nil } // Match by source labels/gateway names inside the match condition if !sourceMatchHTTP(match, labels.Collection{node.Metadata.Labels}, gatewayNames, node.Metadata.Namespace) { return nil } out := &route.Route{ Match: translateRouteMatch(match), Metadata: util.BuildConfigInfoMetadata(virtualService.Meta), } routeName := in.Name if match != nil && match.Name != "" { routeName = routeName + "." 
+ match.Name } // add a name to the route out.Name = routeName authority := "" if in.Headers != nil { operations := translateHeadersOperations(in.Headers) out.RequestHeadersToAdd = operations.requestHeadersToAdd out.ResponseHeadersToAdd = operations.responseHeadersToAdd out.RequestHeadersToRemove = operations.requestHeadersToRemove out.ResponseHeadersToRemove = operations.responseHeadersToRemove authority = operations.authority } if redirect := in.Redirect; redirect != nil { action := &route.Route_Redirect{ Redirect: &route.RedirectAction{ HostRedirect: redirect.Authority, PathRewriteSpecifier: &route.RedirectAction_PathRedirect{ PathRedirect: redirect.Uri, }, }, } switch in.Redirect.RedirectCode { case 0, 301: action.Redirect.ResponseCode = route.RedirectAction_MOVED_PERMANENTLY case 302: action.Redirect.ResponseCode = route.RedirectAction_FOUND case 303: action.Redirect.ResponseCode = route.RedirectAction_SEE_OTHER case 307: action.Redirect.ResponseCode = route.RedirectAction_TEMPORARY_REDIRECT case 308: action.Redirect.ResponseCode = route.RedirectAction_PERMANENT_REDIRECT default: log.Warnf("Redirect Code %d is not yet supported", in.Redirect.RedirectCode) action = nil } out.Action = action } else { action := &route.RouteAction{ Cors: translateCORSPolicy(in.CorsPolicy), RetryPolicy: retry.ConvertPolicy(in.Retries), } // Configure timeouts specified by Virtual Service if they are provided, otherwise set it to defaults. var d *duration.Duration if in.Timeout != nil { d = gogo.DurationToProtoDuration(in.Timeout) } else { d = features.DefaultRequestTimeout } action.Timeout = d // Use deprecated value for now as the replacement MaxStreamDuration has some regressions. // nolint: staticcheck action.MaxGrpcTimeout = d out.Action = &route.Route_Route{Route: action} action.PrefixRewrite = in.Rewrite.GetUri() if in.Rewrite.GetAuthority() != "" { authority = in.Rewrite.GetAuthority() } if authority != "" { action.HostRewriteSpecifier = &route.RouteAction_HostRewriteLiteral{ HostRewriteLiteral: authority, } } if in.Mirror != nil { if mp := mirrorPercent(in); mp != nil { action.RequestMirrorPolicies = []*route.RouteAction_RequestMirrorPolicy{{ Cluster: GetDestinationCluster(in.Mirror, serviceRegistry[host.Name(in.Mirror.Host)], port), RuntimeFraction: mp, TraceSampled: &wrappers.BoolValue{Value: false}, }} } } // TODO: eliminate this logic and use the total_weight option in envoy route weighted := make([]*route.WeightedCluster_ClusterWeight, 0) for _, dst := range in.Route { weight := &wrappers.UInt32Value{Value: uint32(dst.Weight)} if dst.Weight == 0 { // Ignore 0 weighted clusters if there are other clusters in the route. 
// But if this is the only cluster in the route, then add it as a cluster with weight 100 if len(in.Route) == 1 { weight.Value = uint32(100) } else { continue } } hostname := host.Name(dst.GetDestination().GetHost()) n := GetDestinationCluster(dst.Destination, serviceRegistry[hostname], port) clusterWeight := &route.WeightedCluster_ClusterWeight{ Name: n, Weight: weight, } if dst.Headers != nil { operations := translateHeadersOperations(dst.Headers) clusterWeight.RequestHeadersToAdd = operations.requestHeadersToAdd clusterWeight.RequestHeadersToRemove = operations.requestHeadersToRemove clusterWeight.ResponseHeadersToAdd = operations.responseHeadersToAdd clusterWeight.ResponseHeadersToRemove = operations.responseHeadersToRemove } weighted = append(weighted, clusterWeight) var configNamespace string if serviceRegistry[hostname] != nil { configNamespace = serviceRegistry[hostname].Attributes.Namespace } hashPolicy := getHashPolicy(push, node, dst, configNamespace) if hashPolicy != nil { action.HashPolicy = append(action.HashPolicy, hashPolicy) } } // rewrite to a single cluster if there is only weighted cluster if len(weighted) == 1 { action.ClusterSpecifier = &route.RouteAction_Cluster{Cluster: weighted[0].Name} out.RequestHeadersToAdd = append(out.RequestHeadersToAdd, weighted[0].RequestHeadersToAdd...) out.RequestHeadersToRemove = append(out.RequestHeadersToRemove, weighted[0].RequestHeadersToRemove...) out.ResponseHeadersToAdd = append(out.ResponseHeadersToAdd, weighted[0].ResponseHeadersToAdd...) out.ResponseHeadersToRemove = append(out.ResponseHeadersToRemove, weighted[0].ResponseHeadersToRemove...) } else { action.ClusterSpecifier = &route.RouteAction_WeightedClusters{ WeightedClusters: &route.WeightedCluster{ Clusters: weighted, }, } } } out.Decorator = &route.Decorator{ Operation: getRouteOperation(out, virtualService.Name, port), } if fault := in.Fault; fault != nil { out.TypedPerFilterConfig = make(map[string]*any.Any) out.TypedPerFilterConfig[wellknown.Fault] = util.MessageToAny(translateFault(in.Fault)) } return out } // SortHeaderValueOption type and the functions below (Len, Less and Swap) are for sort.Stable for type HeaderValueOption type SortHeaderValueOption []*core.HeaderValueOption // mirrorPercent computes the mirror percent to be used based on "Mirror" data in route. func mirrorPercent(in *networking.HTTPRoute) *core.RuntimeFractionalPercent { switch { case in.MirrorPercentage != nil: if in.MirrorPercentage.GetValue() > 0 { return &core.RuntimeFractionalPercent{ DefaultValue: translatePercentToFractionalPercent(in.MirrorPercentage), } } // If zero percent is provided explicitly, we should not mirror. return nil case in.MirrorPercent != nil: if in.MirrorPercent.GetValue() > 0 { return &core.RuntimeFractionalPercent{ DefaultValue: translateIntegerToFractionalPercent((int32(in.MirrorPercent.GetValue()))), } } // If zero percent is provided explicitly, we should not mirror. return nil default: // Default to 100 percent if percent is not given. 
return &core.RuntimeFractionalPercent{ DefaultValue: translateIntegerToFractionalPercent(100), } } } // Len is i the sort.Interface for SortHeaderValueOption func (b SortHeaderValueOption) Len() int { return len(b) } // Less is in the sort.Interface for SortHeaderValueOption func (b SortHeaderValueOption) Less(i, j int) bool { if b[i] == nil || b[i].Header == nil { return false } else if b[j] == nil || b[j].Header == nil { return true } return strings.Compare(b[i].Header.Key, b[j].Header.Key) < 0 } // Swap is in the sort.Interface for SortHeaderValueOption func (b SortHeaderValueOption) Swap(i, j int) { b[i], b[j] = b[j], b[i] } // translateAppendHeaders translates headers func translateAppendHeaders(headers map[string]string, appendFlag bool) ([]*core.HeaderValueOption, string) { if len(headers) == 0 { return nil, "" } authority := "" headerValueOptionList := make([]*core.HeaderValueOption, 0, len(headers)) for key, value := range headers { if isAuthorityHeader(key) { // If there are multiple, last one wins; validation will reject authority = value } if isInternalHeader(key) { continue } headerValueOptionList = append(headerValueOptionList, &core.HeaderValueOption{ Header: &core.HeaderValue{ Key: key, Value: value, }, Append: &wrappers.BoolValue{Value: appendFlag}, }) } sort.Stable(SortHeaderValueOption(headerValueOptionList)) return headerValueOptionList, authority } type headersOperations struct { requestHeadersToAdd []*core.HeaderValueOption responseHeadersToAdd []*core.HeaderValueOption requestHeadersToRemove []string responseHeadersToRemove []string authority string } // isInternalHeader returns true if a header refers to an internal value that cannot be modified by Envoy func isInternalHeader(headerKey string) bool { return strings.HasPrefix(headerKey, ":") || strings.EqualFold(headerKey, "host") } // isAuthorityHeader returns true if a header refers to the authority header func isAuthorityHeader(headerKey string) bool { return strings.EqualFold(headerKey, ":authority") || strings.EqualFold(headerKey, "host") } func dropInternal(keys []string) []string { result := make([]string, 0, len(keys)) for _, k := range keys { if isInternalHeader(k) { continue } result = append(result, k) } return result } // translateHeadersOperations translates headers operations func translateHeadersOperations(headers *networking.Headers) headersOperations { req := headers.GetRequest() resp := headers.GetResponse() requestHeadersToAdd, setAuthority := translateAppendHeaders(req.GetSet(), false) reqAdd, addAuthority := translateAppendHeaders(req.GetAdd(), true) requestHeadersToAdd = append(requestHeadersToAdd, reqAdd...) responseHeadersToAdd, _ := translateAppendHeaders(resp.GetSet(), false) respAdd, _ := translateAppendHeaders(resp.GetAdd(), true) responseHeadersToAdd = append(responseHeadersToAdd, respAdd...) 
auth := addAuthority if setAuthority != "" { // If authority is set in 'add' and 'set', pick the one from 'set' auth = setAuthority } return headersOperations{ requestHeadersToAdd: requestHeadersToAdd, responseHeadersToAdd: responseHeadersToAdd, requestHeadersToRemove: dropInternal(req.GetRemove()), responseHeadersToRemove: dropInternal(resp.GetRemove()), authority: auth, } } // translateRouteMatch translates match condition func translateRouteMatch(in *networking.HTTPMatchRequest) *route.RouteMatch { out := &route.RouteMatch{PathSpecifier: &route.RouteMatch_Prefix{Prefix: "/"}} if in == nil { return out } for name, stringMatch := range in.Headers { matcher := translateHeaderMatch(name, stringMatch) out.Headers = append(out.Headers, matcher) } for name, stringMatch := range in.WithoutHeaders { matcher := translateHeaderMatch(name, stringMatch) matcher.InvertMatch = true out.Headers = append(out.Headers, matcher) } // guarantee ordering of headers sort.Slice(out.Headers, func(i, j int) bool { return out.Headers[i].Name < out.Headers[j].Name }) if in.Uri != nil { switch m := in.Uri.MatchType.(type) { case *networking.StringMatch_Exact: out.PathSpecifier = &route.RouteMatch_Path{Path: m.Exact} case *networking.StringMatch_Prefix: out.PathSpecifier = &route.RouteMatch_Prefix{Prefix: m.Prefix} case *networking.StringMatch_Regex: out.PathSpecifier = &route.RouteMatch_SafeRegex{ SafeRegex: &matcher.RegexMatcher{ // nolint: staticcheck EngineType: regexEngine, Regex: m.Regex, }, } } } out.CaseSensitive = &wrappers.BoolValue{Value: !in.IgnoreUriCase} if in.Method != nil { matcher := translateHeaderMatch(HeaderMethod, in.Method) out.Headers = append(out.Headers, matcher) } if in.Authority != nil { matcher := translateHeaderMatch(HeaderAuthority, in.Authority) out.Headers = append(out.Headers, matcher) } if in.Scheme != nil { matcher := translateHeaderMatch(HeaderScheme, in.Scheme) out.Headers = append(out.Headers, matcher) } for name, stringMatch := range in.QueryParams { matcher := translateQueryParamMatch(name, stringMatch) out.QueryParameters = append(out.QueryParameters, matcher) } return out } // translateQueryParamMatch translates a StringMatch to a QueryParameterMatcher. func translateQueryParamMatch(name string, in *networking.StringMatch) *route.QueryParameterMatcher { out := &route.QueryParameterMatcher{ Name: name, } switch m := in.MatchType.(type) { case *networking.StringMatch_Exact: out.QueryParameterMatchSpecifier = &route.QueryParameterMatcher_StringMatch{ StringMatch: &matcher.StringMatcher{MatchPattern: &matcher.StringMatcher_Exact{Exact: m.Exact}}, } case *networking.StringMatch_Regex: out.QueryParameterMatchSpecifier = &route.QueryParameterMatcher_StringMatch{ StringMatch: &matcher.StringMatcher{ MatchPattern: &matcher.StringMatcher_SafeRegex{ SafeRegex: &matcher.RegexMatcher{ EngineType: regexEngine, Regex: m.Regex, }, }, }, } } return out } // isCatchAllHeaderMatch determines if the given header is matched with all strings or not. 
// Currently, if the regex has "*" value, it returns true func isCatchAllHeaderMatch(in *networking.StringMatch) bool { catchall := false if in == nil { return true } switch m := in.MatchType.(type) { case *networking.StringMatch_Regex: catchall = m.Regex == "*" } return catchall } // translateHeaderMatch translates to HeaderMatcher func translateHeaderMatch(name string, in *networking.StringMatch) *route.HeaderMatcher { out := &route.HeaderMatcher{ Name: name, } if isCatchAllHeaderMatch(in) { out.HeaderMatchSpecifier = &route.HeaderMatcher_PresentMatch{PresentMatch: true} return out } switch m := in.MatchType.(type) { case *networking.StringMatch_Exact: out.HeaderMatchSpecifier = &route.HeaderMatcher_ExactMatch{ExactMatch: m.Exact} case *networking.StringMatch_Prefix: // Envoy regex grammar is RE2 (https://github.com/google/re2/wiki/Syntax) // Golang has a slightly different regex grammar out.HeaderMatchSpecifier = &route.HeaderMatcher_PrefixMatch{PrefixMatch: m.Prefix} case *networking.StringMatch_Regex: out.HeaderMatchSpecifier = &route.HeaderMatcher_SafeRegexMatch{ SafeRegexMatch: &matcher.RegexMatcher{ EngineType: regexEngine, Regex: m.Regex, }, } } return out } func convertToExactEnvoyMatch(in []string) []*matcher.StringMatcher { res := make([]*matcher.StringMatcher, 0, len(in)) for _, istioMatcher := range in { res = append(res, &matcher.StringMatcher{MatchPattern: &matcher.StringMatcher_Exact{Exact: istioMatcher}}) } return res } func convertToEnvoyMatch(in []*networking.StringMatch) []*matcher.StringMatcher { res := make([]*matcher.StringMatcher, 0, len(in)) for _, istioMatcher := range in { switch m := istioMatcher.MatchType.(type) { case *networking.StringMatch_Exact: res = append(res, &matcher.StringMatcher{MatchPattern: &matcher.StringMatcher_Exact{Exact: m.Exact}}) case *networking.StringMatch_Prefix: res = append(res, &matcher.StringMatcher{MatchPattern: &matcher.StringMatcher_Prefix{Prefix: m.Prefix}}) case *networking.StringMatch_Regex: res = append(res, &matcher.StringMatcher{ MatchPattern: &matcher.StringMatcher_SafeRegex{ SafeRegex: &matcher.RegexMatcher{ EngineType: regexEngine, Regex: m.Regex, }, }, }) } } return res } // translateCORSPolicy translates CORS policy func translateCORSPolicy(in *networking.CorsPolicy) *route.CorsPolicy { if in == nil { return nil } // CORS filter is enabled by default out := route.CorsPolicy{} if in.AllowOrigins != nil { out.AllowOriginStringMatch = convertToEnvoyMatch(in.AllowOrigins) } else if in.AllowOrigin != nil { out.AllowOriginStringMatch = convertToExactEnvoyMatch(in.AllowOrigin) } out.EnabledSpecifier = &route.CorsPolicy_FilterEnabled{ FilterEnabled: &core.RuntimeFractionalPercent{ DefaultValue: &xdstype.FractionalPercent{ Numerator: 100, Denominator: xdstype.FractionalPercent_HUNDRED, }, }, } out.AllowCredentials = gogo.BoolToProtoBool(in.AllowCredentials) out.AllowHeaders = strings.Join(in.AllowHeaders, ",") out.AllowMethods = strings.Join(in.AllowMethods, ",") out.ExposeHeaders = strings.Join(in.ExposeHeaders, ",") if in.MaxAge != nil { out.MaxAge = strconv.FormatInt(in.MaxAge.GetSeconds(), 10) } return &out } // getRouteOperation returns readable route description for trace. 
func getRouteOperation(in *route.Route, vsName string, port int) string { path := "/*" m := in.GetMatch() ps := m.GetPathSpecifier() if ps != nil { switch ps.(type) { case *route.RouteMatch_Prefix: path = m.GetPrefix() + "*" case *route.RouteMatch_Path: path = m.GetPath() case *route.RouteMatch_SafeRegex: path = m.GetSafeRegex().GetRegex() } } // If there is only one destination cluster in route, return host:port/uri as description of route. // Otherwise there are multiple destination clusters and destination host is not clear. For that case // return virtual serivce name:port/uri as substitute. if c := in.GetRoute().GetCluster(); model.IsValidSubsetKey(c) { // Parse host and port from cluster name. _, _, h, p := model.ParseSubsetKey(c) return string(h) + ":" + strconv.Itoa(p) + path } return vsName + ":" + strconv.Itoa(port) + path } // BuildDefaultHTTPInboundRoute builds a default inbound route. func BuildDefaultHTTPInboundRoute(node *model.Proxy, clusterName string, operation string) *route.Route { notimeout := durationpb.New(0) routeAction := &route.RouteAction{ ClusterSpecifier: &route.RouteAction_Cluster{Cluster: clusterName}, Timeout: notimeout, } routeAction.MaxStreamDuration = &route.RouteAction_MaxStreamDuration{ MaxStreamDuration: notimeout, // If not configured at all, the grpc-timeout header is not used and // gRPC requests time out like any other requests using timeout or its default. GrpcTimeoutHeaderMax: notimeout, } val := &route.Route{ Match: translateRouteMatch(nil), Decorator: &route.Decorator{ Operation: operation, }, Action: &route.Route_Route{ Route: routeAction, }, } val.Name = DefaultRouteName return val } // BuildDefaultHTTPOutboundRoute builds a default outbound route, including a retry policy. func BuildDefaultHTTPOutboundRoute(node *model.Proxy, clusterName string, operation string) *route.Route { // Start with the same configuration as for inbound. out := BuildDefaultHTTPInboundRoute(node, clusterName, operation) // Add a default retry policy for outbound routes. out.GetRoute().RetryPolicy = retry.DefaultPolicy() return out } // translatePercentToFractionalPercent translates an v1alpha3 Percent instance // to an envoy.type.FractionalPercent instance. func translatePercentToFractionalPercent(p *networking.Percent) *xdstype.FractionalPercent { return &xdstype.FractionalPercent{ Numerator: uint32(p.Value * 10000), Denominator: xdstype.FractionalPercent_MILLION, } } // translateIntegerToFractionalPercent translates an int32 instance to an // envoy.type.FractionalPercent instance. 
func translateIntegerToFractionalPercent(p int32) *xdstype.FractionalPercent { return &xdstype.FractionalPercent{ Numerator: uint32(p), Denominator: xdstype.FractionalPercent_HUNDRED, } } // translateFault translates networking.HTTPFaultInjection into Envoy's HTTPFault func translateFault(in *networking.HTTPFaultInjection) *xdshttpfault.HTTPFault { if in == nil { return nil } out := xdshttpfault.HTTPFault{} if in.Delay != nil { out.Delay = &xdsfault.FaultDelay{} if in.Delay.Percentage != nil { out.Delay.Percentage = translatePercentToFractionalPercent(in.Delay.Percentage) } else { out.Delay.Percentage = translateIntegerToFractionalPercent(in.Delay.Percent) } switch d := in.Delay.HttpDelayType.(type) { case *networking.HTTPFaultInjection_Delay_FixedDelay: out.Delay.FaultDelaySecifier = &xdsfault.FaultDelay_FixedDelay{ FixedDelay: gogo.DurationToProtoDuration(d.FixedDelay), } default: log.Warnf("Exponential faults are not yet supported") out.Delay = nil } } if in.Abort != nil { out.Abort = &xdshttpfault.FaultAbort{} if in.Abort.Percentage != nil { out.Abort.Percentage = translatePercentToFractionalPercent(in.Abort.Percentage) } switch a := in.Abort.ErrorType.(type) { case *networking.HTTPFaultInjection_Abort_HttpStatus: out.Abort.ErrorType = &xdshttpfault.FaultAbort_HttpStatus{ HttpStatus: uint32(a.HttpStatus), } default: log.Warnf("Non-HTTP type abort faults are not yet supported") out.Abort = nil } } if out.Delay == nil && out.Abort == nil { return nil } return &out } func portLevelSettingsConsistentHash(dst *networking.Destination, pls []*networking.TrafficPolicy_PortTrafficPolicy) *networking.LoadBalancerSettings_ConsistentHashLB { if dst.Port != nil { portNumber := dst.GetPort().GetNumber() for _, setting := range pls { number := setting.GetPort().GetNumber() if number == portNumber { return setting.GetLoadBalancer().GetConsistentHash() } } } return nil } func consistentHashToHashPolicy(consistentHash *networking.LoadBalancerSettings_ConsistentHashLB) *route.RouteAction_HashPolicy { switch consistentHash.GetHashKey().(type) { case *networking.LoadBalancerSettings_ConsistentHashLB_HttpHeaderName: return &route.RouteAction_HashPolicy{ PolicySpecifier: &route.RouteAction_HashPolicy_Header_{ Header: &route.RouteAction_HashPolicy_Header{ HeaderName: consistentHash.GetHttpHeaderName(), }, }, } case *networking.LoadBalancerSettings_ConsistentHashLB_HttpCookie: cookie := consistentHash.GetHttpCookie() var ttl *duration.Duration if cookie.GetTtl() != nil { ttl = gogo.DurationToProtoDuration(cookie.GetTtl()) } return &route.RouteAction_HashPolicy{ PolicySpecifier: &route.RouteAction_HashPolicy_Cookie_{ Cookie: &route.RouteAction_HashPolicy_Cookie{ Name: cookie.GetName(), Ttl: ttl, Path: cookie.GetPath(), }, }, } case *networking.LoadBalancerSettings_ConsistentHashLB_UseSourceIp: return &route.RouteAction_HashPolicy{ PolicySpecifier: &route.RouteAction_HashPolicy_ConnectionProperties_{ ConnectionProperties: &route.RouteAction_HashPolicy_ConnectionProperties{ SourceIp: consistentHash.GetUseSourceIp(), }, }, } case *networking.LoadBalancerSettings_ConsistentHashLB_HttpQueryParameterName: return &route.RouteAction_HashPolicy{ PolicySpecifier: &route.RouteAction_HashPolicy_QueryParameter_{ QueryParameter: &route.RouteAction_HashPolicy_QueryParameter{ Name: consistentHash.GetHttpQueryParameterName(), }, }, } } return nil } func getHashPolicyByService(node *model.Proxy, push *model.PushContext, svc *model.Service, port *model.Port) *route.RouteAction_HashPolicy { if push == nil { return nil } 
destinationRule := push.DestinationRule(node, svc) if destinationRule == nil { return nil } rule := destinationRule.Spec.(*networking.DestinationRule) consistentHash := rule.GetTrafficPolicy().GetLoadBalancer().GetConsistentHash() portLevelSettings := rule.GetTrafficPolicy().GetPortLevelSettings() for _, setting := range portLevelSettings { number := setting.GetPort().GetNumber() if int(number) == port.Port { consistentHash = setting.GetLoadBalancer().GetConsistentHash() break } } return consistentHashToHashPolicy(consistentHash) } func getHashPolicy(push *model.PushContext, node *model.Proxy, dst *networking.HTTPRouteDestination, configNamespace string) *route.RouteAction_HashPolicy { if push == nil { return nil } destination := dst.GetDestination() destinationRule := push.DestinationRule(node, &model.Service{ Hostname: host.Name(destination.Host), Attributes: model.ServiceAttributes{Namespace: configNamespace}, }) if destinationRule == nil { return nil } rule := destinationRule.Spec.(*networking.DestinationRule) consistentHash := rule.GetTrafficPolicy().GetLoadBalancer().GetConsistentHash() portLevelSettings := rule.GetTrafficPolicy().GetPortLevelSettings() plsHash := portLevelSettingsConsistentHash(destination, portLevelSettings) var subsetHash, subsetPLSHash *networking.LoadBalancerSettings_ConsistentHashLB for _, subset := range rule.GetSubsets() { if subset.GetName() == destination.GetSubset() { subsetPortLevelSettings := subset.GetTrafficPolicy().GetPortLevelSettings() subsetHash = subset.GetTrafficPolicy().GetLoadBalancer().GetConsistentHash() subsetPLSHash = portLevelSettingsConsistentHash(destination, subsetPortLevelSettings) break } } switch { case subsetPLSHash != nil: consistentHash = subsetPLSHash case subsetHash != nil: consistentHash = subsetHash case plsHash != nil: consistentHash = plsHash } return consistentHashToHashPolicy(consistentHash) } // isCatchAll returns true if HTTPMatchRequest is a catchall match otherwise // false. Note - this may not be exactly "catch all" as we don't know the full // class of possible inputs As such, this is used only for optimization. func isCatchAllMatch(m *networking.HTTPMatchRequest) bool { catchall := false if m.Uri != nil { switch m := m.Uri.MatchType.(type) { case *networking.StringMatch_Prefix: catchall = m.Prefix == "/" case *networking.StringMatch_Regex: catchall = m.Regex == "*" } } // A Match is catch all if and only if it has no match set // and URI has a prefix / or regex *. return catchall && len(m.Headers) == 0 && len(m.QueryParams) == 0 && len(m.SourceLabels) == 0 && len(m.WithoutHeaders) == 0 && len(m.Gateways) == 0 && m.Method == nil && m.Scheme == nil && m.Port == 0 && m.Authority == nil && m.SourceNamespace == "" } // CombineVHostRoutes semi concatenates Vhost's routes into a single route set. // Moves the catch all routes alone to the end, while retaining // the relative order of other routes in the concatenated route. // Assumes that the virtual services that generated first and second are ordered by // time. func CombineVHostRoutes(routeSets ...[]*route.Route) []*route.Route { l := 0 for _, rs := range routeSets { l += len(rs) } allroutes := make([]*route.Route, 0, l) catchAllRoutes := make([]*route.Route, 0) for _, routes := range routeSets { for _, r := range routes { if isCatchAllRoute(r) { catchAllRoutes = append(catchAllRoutes, r) } else { allroutes = append(allroutes, r) } } } return append(allroutes, catchAllRoutes...) } // isCatchAllRoute returns true if an Envoy route is a catchall route otherwise false. 
func isCatchAllRoute(r *route.Route) bool { catchall := false switch ir := r.Match.PathSpecifier.(type) { case *route.RouteMatch_Prefix: catchall = ir.Prefix == "/" case *route.RouteMatch_SafeRegex: catchall = ir.SafeRegex.GetRegex() == "*" } // A Match is catch all if and only if it has no header/query param match // and URI has a prefix / or regex *. return catchall && len(r.Match.Headers) == 0 && len(r.Match.QueryParameters) == 0 } func traceOperation(host string, port int) string { // Format : "%s:%d/*" return host + ":" + strconv.Itoa(port) + "/*" }
pilot/pkg/networking/core/v1alpha3/route/route.go
0
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.002635426353663206, 0.00023102863633539528, 0.00016396002320107073, 0.0001697121188044548, 0.00028461049078032374 ]
{ "id": 0, "code_window": [ "\tExternalNamespace namespace.Instance\n", "\n", "\t// Ingressgateway instance\n", "\tIngress ingress.Instance\n", "\t// Eastwest gateway instance\n", "\tEastWest ingress.Instance\n", "\n", "\t// Standard echo app to be used by tests\n", "\tPodA echo.Instances\n", "\t// Standard echo app to be used by tests\n", "\tPodB echo.Instances\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "tests/integration/pilot/common/apps.go", "type": "replace", "edit_start_line_idx": 39 }
# Environment variables used to configure istio startup # Comma separated list of CIDRs used for services. If set, iptables will be run to allow istio # sidecar to intercept outbound calls to configured addresses. If not set, outbound istio sidecar # will not be used via iptables. # ISTIO_SERVICE_CIDR= # Name of the service exposed by the machine. # ISTIO_SERVICE=myservice # The mode used to redirect inbound connections to Envoy. This setting # has no effect on outbound traffic: iptables REDIRECT is always used for # outbound connections. # If "REDIRECT", use iptables REDIRECT to NAT and redirect to Envoy. # The "REDIRECT" mode loses source addresses during redirection. # If "TPROXY", use iptables TPROXY to redirect to Envoy. # The "TPROXY" mode preserves both the source and destination IP # addresses and ports, so that they can be used for advanced filtering # and manipulation. # The "TPROXY" mode also configures the sidecar to run with the # CAP_NET_ADMIN capability, which is required to use TPROXY. # If not set, defaults to "REDIRECT". # ISTIO_INBOUND_INTERCEPTION_MODE=REDIRECT # When the interception mode is "TPROXY", the iptables skb mark that is set on # every inbound packet to be redirected to Envoy. # If not set, defaults to "1337". # ISTIO_INBOUND_TPROXY_MARK=1337 # When the interception mode is "TPROXY", the number of the routing table that # is configured and used to route inbound connections to the loopback interface # in order to be redirected to Envoy. # If not set, defaults to "133". # ISTIO_INBOUND_TPROXY_ROUTE_TABLE=133 # Comma separated list of local ports that will use Istio sidecar for inbound services. # If set, iptables rules will be configured to intercept inbound traffic and redirect to sidecar. # If not set, no rules will be enabled # ISTIO_INBOUND_PORTS= # List of ports to exclude from inbound interception, if ISTIO_INBOUND_PORTS is set to * # Port 22 is automatically excluded # ISTIO_INBOUND_EXCLUDE_PORTS= # Namespace of the cluster. # ISTIO_NAMESPACE=default # Specify the IP address used in endpoints. If not set, 'hostname --ip-address' will be used. # Needed if the host has multiple IP. # ISTIO_SVC_IP= # If istio-pilot is configured with mTLS authentication (--controlPlaneAuthPolicy MUTUAL_TLS ) you must # also configure the mesh expansion machines: # ISTIO_PILOT_PORT=15005 # ISTIO_CP_AUTH=MUTUAL_TLS # Fine tunning - useful if installing/building binaries instead of using the .deb file, or running # multiple instances. # Port used by Envoy. Defaults to 15001, used in the autogenerated config # ENVOY_PORT=15001 # User running Envoy. For testing you can use a regular user ID - however running iptables requires # root or netadmin capability. The debian file creates user istio. # ENVOY_USER=istio-proxy # Uncomment to enable debugging # ISTIO_AGENT_FLAGS="--proxyLogLevel debug" # Directory for stdout redirection. The redirection is required because envoy attempts to open # /dev/stdout - must be a real file. Will be used for access logs. Additional config for logsaver # needs to be made, envoy reopens the file on SIGUSR1 # ISTIO_LOG_DIR=/var/log/istio # Installation directory for istio binaries, customize in case you're using a binary. # This is likely to change - current path matches the docker layout in 0.1 # ISTIO_BIN_BASE=/usr/local/bin # Location of istio configs. # ISTIO_CFG=/var/lib/istio # Ignore Istio iptables custom rules # Enable this flag if you would like to manage iptables yourself. 
Default to false (true/false) # ISTIO_CUSTOM_IP_TABLES=false # Location of provisioning certificates. VM provisioning tools must generate a certificate with # the expected SAN. Istio-agent will use it to connect to istiod and get fresh certificates. # PROV_CERT=/var/run/secrets/istio # Location to save the certificates from the CA. Setting this to the same location with PROV_CERT # allows rotation of the secrets. Users may also use longer-lived PROV_CERT, rotated under the control # of the provisioning tool. # Istiod may return a certificate with additional information and shorter lived, to be used for # workload communication. In order to use the certificate with applications not supporting SDS, set this # environment variable. If the value is different from PROV_CERTS the workload certs will be saved, but # the provisioning cert will remain under control of the VM provisioning tools. # OUTPUT_CERTS=/var/run/secrets/istio # OUTPUT_CERTS=/etc/certs # Address of the CA. The CA must implement the Istio protocol, accepting the provisioning certificate # and returning workload certificates. Istiod is implementing the protocol, and is the default value # if CA_ADDR is not set. # CA_ADDR # set CA_ADDR if your istiod.istio-system.svc is on port other than 15012 # CA_ADDR=istiod.istio-system.svc:32018
tools/packaging/common/sidecar.env
0
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.00017857667990028858, 0.00016868919192347676, 0.0001630414481041953, 0.0001682505535427481, 0.000003863680831273086 ]
{ "id": 0, "code_window": [ "\tExternalNamespace namespace.Instance\n", "\n", "\t// Ingressgateway instance\n", "\tIngress ingress.Instance\n", "\t// Eastwest gateway instance\n", "\tEastWest ingress.Instance\n", "\n", "\t// Standard echo app to be used by tests\n", "\tPodA echo.Instances\n", "\t// Standard echo app to be used by tests\n", "\tPodB echo.Instances\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "tests/integration/pilot/common/apps.go", "type": "replace", "edit_start_line_idx": 39 }
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fixtures

import (
	"testing"

	"github.com/onsi/gomega"

	"istio.io/istio/galley/pkg/config/testing/data"
	"istio.io/istio/pkg/config/event"
)

func TestAccumulator(t *testing.T) {
	g := gomega.NewWithT(t)

	a := &Accumulator{}
	a.Handle(data.Event1Col1AddItem1)

	expected := []event.Event{data.Event1Col1AddItem1}
	g.Expect(a.Events()).To(gomega.Equal(expected))

	a.Handle(data.Event2Col1AddItem2)

	expected = []event.Event{data.Event1Col1AddItem1, data.Event2Col1AddItem2}
	g.Expect(a.Events()).To(gomega.Equal(expected))
}

func TestAccumulator_Clear(t *testing.T) {
	g := gomega.NewWithT(t)

	a := &Accumulator{}
	a.Handle(data.Event1Col1AddItem1)
	a.Handle(data.Event2Col1AddItem2)
	a.Clear()

	g.Expect(a.Events()).To(gomega.Equal([]event.Event{}))
}

func TestAccumulator_String(t *testing.T) {
	a := &Accumulator{}
	a.Handle(data.Event1Col1AddItem1)
	a.Handle(data.Event2Col1AddItem2)

	// ensure that it does not crash
	_ = a.String()
}
galley/pkg/config/testing/fixtures/accumulator_test.go
0
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.0001781951723387465, 0.00017287934315390885, 0.00016923040675465018, 0.00017211982049047947, 0.0000032599791666143574 ]
{ "id": 1, "code_window": [ "\tif err != nil {\n", "\t\treturn err\n", "\t}\n", "\n", "\tapps.Ingress = i.IngressFor(t.Clusters().Default())\n", "\tapps.EastWest = i.CustomIngressFor(t.Clusters().Default(), \"istio-eastwestgateway\", \"eastwestgateway\")\n", "\n", "\t// Headless services don't work with targetPort, set to same port\n", "\theadlessPorts := make([]echo.Port, len(common.EchoPorts))\n", "\tfor i, p := range common.EchoPorts {\n", "\t\tp.ServicePort = p.InstancePort\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "tests/integration/pilot/common/apps.go", "type": "replace", "edit_start_line_idx": 119 }
// +build integ // Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package common import ( "strconv" "strings" "istio.io/istio/pkg/test/framework/components/echo" "istio.io/istio/pkg/test/framework/components/echo/common" "istio.io/istio/pkg/test/framework/components/echo/echoboot" "istio.io/istio/pkg/test/framework/components/istio" "istio.io/istio/pkg/test/framework/components/istio/ingress" "istio.io/istio/pkg/test/framework/components/namespace" "istio.io/istio/pkg/test/framework/resource" "istio.io/istio/pkg/test/util/tmpl" ) type EchoDeployments struct { // Namespace echo apps will be deployed Namespace namespace.Instance // Namespace where external echo app will be deployed ExternalNamespace namespace.Instance // Ingressgateway instance Ingress ingress.Instance // Eastwest gateway instance EastWest ingress.Instance // Standard echo app to be used by tests PodA echo.Instances // Standard echo app to be used by tests PodB echo.Instances // Standard echo app to be used by tests PodC echo.Instances // Standard echo app with TPROXY interception mode to be used by tests PodTproxy echo.Instances // Headless echo app to be used by tests Headless echo.Instances // StatefulSet echo app to be used by tests StatefulSet echo.Instances // Echo app to be used by tests, with no sidecar injected Naked echo.Instances // A virtual machine echo app (only deployed to one cluster) VM echo.Instances // Echo app to be used by tests, with no sidecar injected External echo.Instances All echo.Instances } const ( PodASvc = "a" PodBSvc = "b" PodCSvc = "c" PodTproxySvc = "tproxy" VMSvc = "vm" HeadlessSvc = "headless" StatefulSetSvc = "statefulset" NakedSvc = "naked" ExternalSvc = "external" externalHostname = "fake.external.com" ) func FindPortByName(name string) echo.Port { for _, p := range common.EchoPorts { if p.Name == name { return p } } return echo.Port{} } func serviceEntryPorts() []echo.Port { res := []echo.Port{} for _, p := range common.EchoPorts { if strings.HasPrefix(p.Name, "auto") { // The protocol needs to be set in common.EchoPorts to configure the echo deployment // But for service entry, we want to ensure we set it to "" which will use sniffing p.Protocol = "" } res = append(res, p) } return res } func SetupApps(t resource.Context, i istio.Instance, apps *EchoDeployments) error { var err error apps.Namespace, err = namespace.New(t, namespace.Config{ Prefix: "echo", Inject: true, }) if err != nil { return err } apps.ExternalNamespace, err = namespace.New(t, namespace.Config{ Prefix: "external", Inject: false, }) if err != nil { return err } apps.Ingress = i.IngressFor(t.Clusters().Default()) apps.EastWest = i.CustomIngressFor(t.Clusters().Default(), "istio-eastwestgateway", "eastwestgateway") // Headless services don't work with targetPort, set to same port headlessPorts := make([]echo.Port, len(common.EchoPorts)) for i, p := range common.EchoPorts { p.ServicePort = p.InstancePort headlessPorts[i] = p } builder := echoboot.NewBuilder(t). WithClusters(t.Clusters()...). 
WithConfig(echo.Config{ Service: PodASvc, Namespace: apps.Namespace, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{{}}, Locality: "region.zone.subzone", WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: PodBSvc, Namespace: apps.Namespace, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{{}}, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: PodCSvc, Namespace: apps.Namespace, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{{}}, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: HeadlessSvc, Headless: true, Namespace: apps.Namespace, Ports: headlessPorts, Subsets: []echo.SubsetConfig{{}}, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: StatefulSetSvc, Headless: true, StatefulSet: true, Namespace: apps.Namespace, Ports: headlessPorts, Subsets: []echo.SubsetConfig{{}}, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: NakedSvc, Namespace: apps.Namespace, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{ { Annotations: map[echo.Annotation]*echo.AnnotationValue{ echo.SidecarInject: { Value: strconv.FormatBool(false), }, }, }, }, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: ExternalSvc, Namespace: apps.ExternalNamespace, DefaultHostHeader: externalHostname, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{ { Annotations: map[echo.Annotation]*echo.AnnotationValue{ echo.SidecarInject: { Value: strconv.FormatBool(false), }, }, }, }, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: PodTproxySvc, Namespace: apps.Namespace, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{{ Annotations: echo.NewAnnotations().Set(echo.SidecarInterceptionMode, "TPROXY"), }}, WorkloadOnlyPorts: common.WorkloadPorts, }). 
WithConfig(echo.Config{ Service: VMSvc, Namespace: apps.Namespace, Ports: common.EchoPorts, DeployAsVM: true, AutoRegisterVM: true, Subsets: []echo.SubsetConfig{{}}, WorkloadOnlyPorts: common.WorkloadPorts, }) echos, err := builder.Build() if err != nil { return err } apps.All = echos apps.PodA = echos.Match(echo.Service(PodASvc)) apps.PodB = echos.Match(echo.Service(PodBSvc)) apps.PodC = echos.Match(echo.Service(PodCSvc)) apps.PodTproxy = echos.Match(echo.Service(PodTproxySvc)) apps.Headless = echos.Match(echo.Service(HeadlessSvc)) apps.StatefulSet = echos.Match(echo.Service(StatefulSetSvc)) apps.Naked = echos.Match(echo.Service(NakedSvc)) apps.External = echos.Match(echo.Service(ExternalSvc)) if !t.Settings().SkipVM { apps.VM = echos.Match(echo.Service(VMSvc)) } if err := t.Config().ApplyYAMLNoCleanup(apps.Namespace.Name(), ` apiVersion: networking.istio.io/v1alpha3 kind: Sidecar metadata: name: restrict-to-namespace spec: egress: - hosts: - "./*" - "istio-system/*" `); err != nil { return err } se, err := tmpl.Evaluate(`apiVersion: networking.istio.io/v1alpha3 kind: ServiceEntry metadata: name: external-service spec: hosts: - {{.Hostname}} location: MESH_EXTERNAL resolution: DNS endpoints: - address: external.{{.Namespace}}.svc.cluster.local ports: - name: http-tls-origination number: 8888 protocol: http targetPort: 443 - name: http2-tls-origination number: 8882 protocol: http2 targetPort: 443 {{- range $i, $p := .Ports }} - name: {{$p.Name}} number: {{$p.ServicePort}} protocol: "{{$p.Protocol}}" {{- end }} `, map[string]interface{}{"Namespace": apps.ExternalNamespace.Name(), "Hostname": externalHostname, "Ports": serviceEntryPorts()}) if err != nil { return err } if err := t.Config().ApplyYAML(apps.Namespace.Name(), se); err != nil { return err } return nil } func (d EchoDeployments) IsMulticluster() bool { return d.All.Clusters().IsMulticluster() }
tests/integration/pilot/common/apps.go
1
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.9990826845169067, 0.2490234076976776, 0.00016451030387543142, 0.00205548875965178, 0.410453736782074 ]
{ "id": 1, "code_window": [ "\tif err != nil {\n", "\t\treturn err\n", "\t}\n", "\n", "\tapps.Ingress = i.IngressFor(t.Clusters().Default())\n", "\tapps.EastWest = i.CustomIngressFor(t.Clusters().Default(), \"istio-eastwestgateway\", \"eastwestgateway\")\n", "\n", "\t// Headless services don't work with targetPort, set to same port\n", "\theadlessPorts := make([]echo.Port, len(common.EchoPorts))\n", "\tfor i, p := range common.EchoPorts {\n", "\t\tp.ServicePort = p.InstancePort\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "tests/integration/pilot/common/apps.go", "type": "replace", "edit_start_line_idx": 119 }
// Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Tool to get xDS configs from pilot. This tool simulate envoy sidecar gRPC call to get config, // so it will work even when sidecar haswhen sidecar hasn't connected (e.g in the case of pilot running on local machine)) // // Usage: // // First, you can either manually expose pilot gRPC port or rely on this tool to port-forward pilot by omitting -pilot_url flag: // // * By port-forward existing pilot: // ```bash // kubectl port-forward $(kubectl get pod -l app=istiod -o jsonpath='{.items[0].metadata.name}' -n istio-system) -n istio-system 15010 // ``` // * Or run local pilot using the same k8s config. // ```bash // pilot-discovery discovery --kubeconfig=${HOME}/.kube/config // ``` // // To get LDS or CDS, use -type lds or -type cds, and provide the pod id or app label. For example: // ```bash // go run pilot_cli.go --type lds --proxytag httpbin-5766dd474b-2hlnx # --res will be ignored // go run pilot_cli.go --type lds --proxytag httpbin // ``` // Note If more than one pod match with the app label, one will be picked arbitrarily. // // For EDS/RDS, provide comma-separated-list of corresponding clusters or routes name. For example: // ```bash // go run ./pilot/tools/debug/pilot_cli.go --type eds --proxytag httpbin \ // --res "inbound|http||sleep.default.svc.cluster.local,outbound|http||httpbin.default.svc.cluster.local" // ``` // // Script requires kube config in order to connect to k8s registry to get pod information (for LDS and CDS type). The default // value for kubeconfig path is .kube/config in home folder (works for Linux only). It can be changed via -kubeconfig flag. // ```bash // go run ./pilot/debug/pilot_cli.go --type lds --proxytag httpbin --kubeconfig path/to/kube/config // ``` package main import ( "context" "flag" "fmt" "io/ioutil" "math/rand" "net" "os" "os/exec" "path/filepath" "strings" "time" core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" "google.golang.org/grpc" v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "k8s.io/client-go/tools/clientcmd" v3 "istio.io/istio/pilot/pkg/xds/v3" "istio.io/istio/pkg/util/gogoprotomarshal" "istio.io/pkg/env" "istio.io/pkg/log" ) const ( LocalPortStart = 50000 LocalPortEnd = 60000 ) // PodInfo holds information to identify pod. 
type PodInfo struct { Name string Namespace string IP string ProxyType string } func getAllPods(kubeconfig string) (*v1.PodList, error) { cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig) if err != nil { return nil, err } clientset, err := kubernetes.NewForConfig(cfg) if err != nil { return nil, err } return clientset.CoreV1().Pods(meta_v1.NamespaceAll).List(context.TODO(), meta_v1.ListOptions{}) } func NewPodInfo(nameOrAppLabel string, kubeconfig string, proxyType string) *PodInfo { log.Infof("Using kube config at %s", kubeconfig) pods, err := getAllPods(kubeconfig) if err != nil { log.Errorf(err.Error()) return nil } for _, pod := range pods.Items { log.Infof("pod %q", pod.Name) if pod.Name == nameOrAppLabel { log.Infof("Found pod %s.%s~%s matching name %q", pod.Name, pod.Namespace, pod.Status.PodIP, nameOrAppLabel) return &PodInfo{ Name: pod.Name, Namespace: pod.Namespace, IP: pod.Status.PodIP, ProxyType: proxyType, } } if app, ok := pod.ObjectMeta.Labels["app"]; ok && app == nameOrAppLabel { log.Infof("Found pod %s.%s~%s matching app label %q", pod.Name, pod.Namespace, pod.Status.PodIP, nameOrAppLabel) return &PodInfo{ Name: pod.Name, Namespace: pod.Namespace, IP: pod.Status.PodIP, ProxyType: proxyType, } } if istio, ok := pod.ObjectMeta.Labels["istio"]; ok && istio == nameOrAppLabel { log.Infof("Found pod %s.%s~%s matching app label %q", pod.Name, pod.Namespace, pod.Status.PodIP, nameOrAppLabel) return &PodInfo{ Name: pod.Name, Namespace: pod.Namespace, IP: pod.Status.PodIP, } } } log.Warnf("Cannot find pod with name or app label matching %q in registry.", nameOrAppLabel) return nil } func (p PodInfo) makeNodeID() string { if p.ProxyType != "" { return fmt.Sprintf("%s~%s~%s.%s~%s.svc.cluster.local", p.ProxyType, p.IP, p.Name, p.Namespace, p.Namespace) } if strings.HasPrefix(p.Name, "istio-ingressgateway") || strings.HasPrefix(p.Name, "istio-egressgateway") { return fmt.Sprintf("router~%s~%s.%s~%s.svc.cluster.local", p.IP, p.Name, p.Namespace, p.Namespace) } if strings.HasPrefix(p.Name, "istio-ingress") { return fmt.Sprintf("ingress~%s~%s.%s~%s.svc.cluster.local", p.IP, p.Name, p.Namespace, p.Namespace) } return fmt.Sprintf("sidecar~%s~%s.%s~%s.svc.cluster.local", p.IP, p.Name, p.Namespace, p.Namespace) } func configTypeToTypeURL(configType string) string { switch configType { case "lds": return v3.ListenerType case "cds": return v3.ClusterType case "rds": return v3.RouteType case "eds": return v3.EndpointType default: panic(fmt.Sprintf("Unknown type %s", configType)) } } func (p PodInfo) makeRequest(configType string) *discovery.DiscoveryRequest { return &discovery.DiscoveryRequest{ Node: &core.Node{ Id: p.makeNodeID(), }, TypeUrl: configTypeToTypeURL(configType), } } func (p PodInfo) appendResources(req *discovery.DiscoveryRequest, resources []string) *discovery.DiscoveryRequest { req.ResourceNames = resources return req } func (p PodInfo) getXdsResponse(pilotURL string, req *discovery.DiscoveryRequest) (*discovery.DiscoveryResponse, error) { conn, err := grpc.Dial(pilotURL, grpc.WithInsecure()) if err != nil { panic(err.Error()) } defer func() { _ = conn.Close() }() adsClient := discovery.NewAggregatedDiscoveryServiceClient(conn) stream, err := adsClient.StreamAggregatedResources(context.Background()) if err != nil { panic(err.Error()) } err = stream.Send(req) if err != nil { panic(err.Error()) } res, err := stream.Recv() if err != nil { panic(err.Error()) } return res, err } var homeVar = env.RegisterStringVar("HOME", "", "") func resolveKubeConfigPath(kubeConfig string) 
string { path := strings.Replace(kubeConfig, "~", homeVar.Get(), 1) ret, err := filepath.Abs(path) if err != nil { panic(err.Error()) } return ret } // nolint: golint func portForwardPilot(kubeConfig, pilotURL string) (*os.Process, string, error) { if pilotURL != "" { // No need to port-forward, url is already provided. return nil, pilotURL, nil } log.Info("Pilot url is not provided, try to port-forward pilot pod.") podName := "" pods, err := getAllPods(kubeConfig) if err != nil { return nil, "", err } for _, pod := range pods.Items { if app, ok := pod.ObjectMeta.Labels["app"]; ok && app == "istiod" { podName = pod.Name } } if podName == "" { return nil, "", fmt.Errorf("cannot find istio-pilot pod") } r := rand.New(rand.NewSource(time.Now().UnixNano())) localPort := r.Intn(LocalPortEnd-LocalPortStart) + LocalPortStart cmd := fmt.Sprintf("kubectl port-forward %s -n istio-system %d:15010", podName, localPort) parts := strings.Split(cmd, " ") c := exec.Command(parts[0], parts[1:]...) err = c.Start() if err != nil { return nil, "", err } // Make sure istio-pilot is reachable. reachable := false url := fmt.Sprintf("localhost:%d", localPort) for i := 0; i < 10 && !reachable; i++ { conn, err := net.Dial("tcp", url) if err == nil { _ = conn.Close() reachable = true } time.Sleep(1 * time.Second) } if !reachable { return nil, "", fmt.Errorf("cannot reach local pilot url: %s", url) } return c.Process, fmt.Sprintf("localhost:%d", localPort), nil } func main() { kubeConfig := flag.String("kubeconfig", "~/.kube/config", "path to the kubeconfig file. Default is ~/.kube/config") pilotURL := flag.String("pilot", "", "pilot address. Will try port forward if not provided.") configType := flag.String("type", "lds", "lds, cds, rds or eds. Default lds.") proxyType := flag.String("proxytype", "", "sidecar, ingress, router.") proxyTag := flag.String("proxytag", "", "Pod name or app label or istio label to identify the proxy.") resources := flag.String("res", "", "Resource(s) to get config for. LDS/CDS should leave it empty.") outputFile := flag.String("out", "", "output file. Leave blank to go to stdout") flag.Parse() process, pilot, err := portForwardPilot(resolveKubeConfigPath(*kubeConfig), *pilotURL) if err != nil { log.Errorf("pilot port forward failed: %v", err) return } defer func() { if process != nil { err := process.Kill() if err != nil { log.Errorf("Failed to kill port-forward process, pid: %d", process.Pid) } } }() pod := NewPodInfo(*proxyTag, resolveKubeConfigPath(*kubeConfig), *proxyType) var resp *discovery.DiscoveryResponse switch *configType { case "lds", "cds": resp, err = pod.getXdsResponse(pilot, pod.makeRequest(*configType)) case "rds", "eds": resp, err = pod.getXdsResponse(pilot, pod.appendResources(pod.makeRequest(*configType), strings.Split(*resources, ","))) default: log.Errorf("Unknown config type: %q", *configType) os.Exit(1) } if err != nil { log.Errorf("Failed to get Xds response for %v. Error: %v", *resources, err) return } strResponse, _ := gogoprotomarshal.ToJSONWithIndent(resp, " ") if outputFile == nil || *outputFile == "" { fmt.Printf("%v\n", strResponse) } else if err := ioutil.WriteFile(*outputFile, []byte(strResponse), 0644); err != nil { log.Errorf("Cannot write output to file %q", *outputFile) } }
pilot/tools/debug/pilot_cli.go
0
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.001464565983042121, 0.0002826411509886384, 0.00015735266788396984, 0.00016898226749617606, 0.0003157682076562196 ]
{ "id": 1, "code_window": [ "\tif err != nil {\n", "\t\treturn err\n", "\t}\n", "\n", "\tapps.Ingress = i.IngressFor(t.Clusters().Default())\n", "\tapps.EastWest = i.CustomIngressFor(t.Clusters().Default(), \"istio-eastwestgateway\", \"eastwestgateway\")\n", "\n", "\t// Headless services don't work with targetPort, set to same port\n", "\theadlessPorts := make([]echo.Port, len(common.EchoPorts))\n", "\tfor i, p := range common.EchoPorts {\n", "\t\tp.ServicePort = p.InstancePort\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "tests/integration/pilot/common/apps.go", "type": "replace", "edit_start_line_idx": 119 }
config_path: "/etc/istio/proxy"
binary_path: "/usr/local/bin/envoy"
service_cluster: "istio-proxy"
drain_duration: {seconds: 2}
parent_shutdown_duration: {seconds: 3}
discovery_address: "istio-pilot:15010"
proxy_admin_port: 15000
control_plane_auth_policy: NONE
tracing: { zipkin: { address: "localhost:6000" }, tls_settings: { mode: SIMPLE, ca_certificates: "/etc/zipkin/ca.pem", sni: "zipkin-custom-sni"}}
# Sets all relevant options to values different than default
pkg/bootstrap/testdata/tracing_tls_custom_sni.proxycfg
0
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.00016687413153704256, 0.00016467302339151502, 0.00016247190069407225, 0.00016467302339151502, 0.000002201115421485156 ]
{ "id": 1, "code_window": [ "\tif err != nil {\n", "\t\treturn err\n", "\t}\n", "\n", "\tapps.Ingress = i.IngressFor(t.Clusters().Default())\n", "\tapps.EastWest = i.CustomIngressFor(t.Clusters().Default(), \"istio-eastwestgateway\", \"eastwestgateway\")\n", "\n", "\t// Headless services don't work with targetPort, set to same port\n", "\theadlessPorts := make([]echo.Port, len(common.EchoPorts))\n", "\tfor i, p := range common.EchoPorts {\n", "\t\tp.ServicePort = p.InstancePort\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "tests/integration/pilot/common/apps.go", "type": "replace", "edit_start_line_idx": 119 }
// Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kube import ( "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" // import GKE cluster authentication plugin _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "istio.io/istio/pkg/kube" ) // Interfaces interface allows access to the Kubernetes API Service methods. It is mainly used for // test/injection purposes. type Interfaces interface { DynamicInterface() (dynamic.Interface, error) APIExtensionsClientset() (clientset.Interface, error) KubeClient() (kubernetes.Interface, error) } type interfaces struct { cfg *rest.Config } var _ Interfaces = &interfaces{} // NewInterfacesFromConfigFile returns a new instance of Interfaces. func NewInterfacesFromConfigFile(kubeconfig string) (Interfaces, error) { config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) if err != nil { return nil, err } return NewInterfaces(config), nil } // NewInterfaces returns a new instance of Interfaces. func NewInterfaces(cfg *rest.Config) Interfaces { return &interfaces{ cfg: cfg, } } // DynamicInterface returns a new dynamic.Interface for the specified API Group/Version. func (k *interfaces) DynamicInterface() (dynamic.Interface, error) { return dynamic.NewForConfig(k.cfg) } // APIExtensionsClientset returns a new apiextensions clientset func (k *interfaces) APIExtensionsClientset() (clientset.Interface, error) { return clientset.NewForConfig(k.cfg) } // KubeClient returns a new kubernetes Interface client. func (k *interfaces) KubeClient() (kubernetes.Interface, error) { return kubernetes.NewForConfig(k.cfg) } type wrapper struct { kube.Client } func (w wrapper) DynamicInterface() (dynamic.Interface, error) { return w.Dynamic(), nil } func (w wrapper) APIExtensionsClientset() (clientset.Interface, error) { return w.Client.Ext(), nil } func (w wrapper) KubeClient() (kubernetes.Interface, error) { return w.Kube(), nil } var _ Interfaces = &wrapper{} func NewInterfacesFromClient(client kube.Client) Interfaces { return wrapper{client} }
galley/pkg/config/source/kube/interfaces.go
0
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.00017784515512175858, 0.0001671710779191926, 0.00016140569641720504, 0.00016443348431494087, 0.000005281124686007388 ]
{ "id": 2, "code_window": [ "\t\t\t\t\tal = nil\n", "\t\t\t\t}\n", "\t\t\t\tchilds = append(childs, TrafficCall{\n", "\t\t\t\t\tname: fmt.Sprintf(\"mode:%v,sni:%v,alpn:%v\", mode, sni, alpn),\n", "\t\t\t\t\tcall: apps.EastWest.CallWithRetryOrFail,\n", "\t\t\t\t\topts: echo.CallOptions{\n", "\t\t\t\t\t\tPort: &echo.Port{\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [ "\t\t\t\t\tcall: apps.Ingress.CallWithRetryOrFail,\n" ], "file_path": "tests/integration/pilot/common/routing.go", "type": "replace", "edit_start_line_idx": 679 }
// +build integ // Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package common import ( "strconv" "strings" "istio.io/istio/pkg/test/framework/components/echo" "istio.io/istio/pkg/test/framework/components/echo/common" "istio.io/istio/pkg/test/framework/components/echo/echoboot" "istio.io/istio/pkg/test/framework/components/istio" "istio.io/istio/pkg/test/framework/components/istio/ingress" "istio.io/istio/pkg/test/framework/components/namespace" "istio.io/istio/pkg/test/framework/resource" "istio.io/istio/pkg/test/util/tmpl" ) type EchoDeployments struct { // Namespace echo apps will be deployed Namespace namespace.Instance // Namespace where external echo app will be deployed ExternalNamespace namespace.Instance // Ingressgateway instance Ingress ingress.Instance // Eastwest gateway instance EastWest ingress.Instance // Standard echo app to be used by tests PodA echo.Instances // Standard echo app to be used by tests PodB echo.Instances // Standard echo app to be used by tests PodC echo.Instances // Standard echo app with TPROXY interception mode to be used by tests PodTproxy echo.Instances // Headless echo app to be used by tests Headless echo.Instances // StatefulSet echo app to be used by tests StatefulSet echo.Instances // Echo app to be used by tests, with no sidecar injected Naked echo.Instances // A virtual machine echo app (only deployed to one cluster) VM echo.Instances // Echo app to be used by tests, with no sidecar injected External echo.Instances All echo.Instances } const ( PodASvc = "a" PodBSvc = "b" PodCSvc = "c" PodTproxySvc = "tproxy" VMSvc = "vm" HeadlessSvc = "headless" StatefulSetSvc = "statefulset" NakedSvc = "naked" ExternalSvc = "external" externalHostname = "fake.external.com" ) func FindPortByName(name string) echo.Port { for _, p := range common.EchoPorts { if p.Name == name { return p } } return echo.Port{} } func serviceEntryPorts() []echo.Port { res := []echo.Port{} for _, p := range common.EchoPorts { if strings.HasPrefix(p.Name, "auto") { // The protocol needs to be set in common.EchoPorts to configure the echo deployment // But for service entry, we want to ensure we set it to "" which will use sniffing p.Protocol = "" } res = append(res, p) } return res } func SetupApps(t resource.Context, i istio.Instance, apps *EchoDeployments) error { var err error apps.Namespace, err = namespace.New(t, namespace.Config{ Prefix: "echo", Inject: true, }) if err != nil { return err } apps.ExternalNamespace, err = namespace.New(t, namespace.Config{ Prefix: "external", Inject: false, }) if err != nil { return err } apps.Ingress = i.IngressFor(t.Clusters().Default()) apps.EastWest = i.CustomIngressFor(t.Clusters().Default(), "istio-eastwestgateway", "eastwestgateway") // Headless services don't work with targetPort, set to same port headlessPorts := make([]echo.Port, len(common.EchoPorts)) for i, p := range common.EchoPorts { p.ServicePort = p.InstancePort headlessPorts[i] = p } builder := echoboot.NewBuilder(t). WithClusters(t.Clusters()...). 
WithConfig(echo.Config{ Service: PodASvc, Namespace: apps.Namespace, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{{}}, Locality: "region.zone.subzone", WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: PodBSvc, Namespace: apps.Namespace, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{{}}, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: PodCSvc, Namespace: apps.Namespace, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{{}}, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: HeadlessSvc, Headless: true, Namespace: apps.Namespace, Ports: headlessPorts, Subsets: []echo.SubsetConfig{{}}, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: StatefulSetSvc, Headless: true, StatefulSet: true, Namespace: apps.Namespace, Ports: headlessPorts, Subsets: []echo.SubsetConfig{{}}, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: NakedSvc, Namespace: apps.Namespace, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{ { Annotations: map[echo.Annotation]*echo.AnnotationValue{ echo.SidecarInject: { Value: strconv.FormatBool(false), }, }, }, }, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: ExternalSvc, Namespace: apps.ExternalNamespace, DefaultHostHeader: externalHostname, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{ { Annotations: map[echo.Annotation]*echo.AnnotationValue{ echo.SidecarInject: { Value: strconv.FormatBool(false), }, }, }, }, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: PodTproxySvc, Namespace: apps.Namespace, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{{ Annotations: echo.NewAnnotations().Set(echo.SidecarInterceptionMode, "TPROXY"), }}, WorkloadOnlyPorts: common.WorkloadPorts, }). 
WithConfig(echo.Config{ Service: VMSvc, Namespace: apps.Namespace, Ports: common.EchoPorts, DeployAsVM: true, AutoRegisterVM: true, Subsets: []echo.SubsetConfig{{}}, WorkloadOnlyPorts: common.WorkloadPorts, }) echos, err := builder.Build() if err != nil { return err } apps.All = echos apps.PodA = echos.Match(echo.Service(PodASvc)) apps.PodB = echos.Match(echo.Service(PodBSvc)) apps.PodC = echos.Match(echo.Service(PodCSvc)) apps.PodTproxy = echos.Match(echo.Service(PodTproxySvc)) apps.Headless = echos.Match(echo.Service(HeadlessSvc)) apps.StatefulSet = echos.Match(echo.Service(StatefulSetSvc)) apps.Naked = echos.Match(echo.Service(NakedSvc)) apps.External = echos.Match(echo.Service(ExternalSvc)) if !t.Settings().SkipVM { apps.VM = echos.Match(echo.Service(VMSvc)) } if err := t.Config().ApplyYAMLNoCleanup(apps.Namespace.Name(), ` apiVersion: networking.istio.io/v1alpha3 kind: Sidecar metadata: name: restrict-to-namespace spec: egress: - hosts: - "./*" - "istio-system/*" `); err != nil { return err } se, err := tmpl.Evaluate(`apiVersion: networking.istio.io/v1alpha3 kind: ServiceEntry metadata: name: external-service spec: hosts: - {{.Hostname}} location: MESH_EXTERNAL resolution: DNS endpoints: - address: external.{{.Namespace}}.svc.cluster.local ports: - name: http-tls-origination number: 8888 protocol: http targetPort: 443 - name: http2-tls-origination number: 8882 protocol: http2 targetPort: 443 {{- range $i, $p := .Ports }} - name: {{$p.Name}} number: {{$p.ServicePort}} protocol: "{{$p.Protocol}}" {{- end }} `, map[string]interface{}{"Namespace": apps.ExternalNamespace.Name(), "Hostname": externalHostname, "Ports": serviceEntryPorts()}) if err != nil { return err } if err := t.Config().ApplyYAML(apps.Namespace.Name(), se); err != nil { return err } return nil } func (d EchoDeployments) IsMulticluster() bool { return d.All.Clusters().IsMulticluster() }
tests/integration/pilot/common/apps.go
1
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.001158876926638186, 0.00027565762866288424, 0.0001616502704564482, 0.00017218907305505127, 0.00025253373314626515 ]
{ "id": 2, "code_window": [ "\t\t\t\t\tal = nil\n", "\t\t\t\t}\n", "\t\t\t\tchilds = append(childs, TrafficCall{\n", "\t\t\t\t\tname: fmt.Sprintf(\"mode:%v,sni:%v,alpn:%v\", mode, sni, alpn),\n", "\t\t\t\t\tcall: apps.EastWest.CallWithRetryOrFail,\n", "\t\t\t\t\topts: echo.CallOptions{\n", "\t\t\t\t\t\tPort: &echo.Port{\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [ "\t\t\t\t\tcall: apps.Ingress.CallWithRetryOrFail,\n" ], "file_path": "tests/integration/pilot/common/routing.go", "type": "replace", "edit_start_line_idx": 679 }
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package namespace

var (
	chck Static   = ""
	_    Instance = &chck
)

// Static is a namespace that may or may not exist. It is used for configuration purposes only
type Static string

func (s Static) Name() string {
	return string(s)
}

func (s Static) Prefix() string {
	return string(s)
}

func (s Static) SetLabel(key, value string) error {
	panic("implement me")
}

func (s Static) RemoveLabel(key string) error {
	panic("implement me")
}

func (s *Static) UnmarshalJSON(bytes []byte) error {
	*s = Static(bytes)
	return nil
}
pkg/test/framework/components/namespace/static.go
0
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.0001834545546444133, 0.00017552879580762237, 0.00016586836136411875, 0.0001751538657117635, 0.000005738524578191573 ]
{ "id": 2, "code_window": [ "\t\t\t\t\tal = nil\n", "\t\t\t\t}\n", "\t\t\t\tchilds = append(childs, TrafficCall{\n", "\t\t\t\t\tname: fmt.Sprintf(\"mode:%v,sni:%v,alpn:%v\", mode, sni, alpn),\n", "\t\t\t\t\tcall: apps.EastWest.CallWithRetryOrFail,\n", "\t\t\t\t\topts: echo.CallOptions{\n", "\t\t\t\t\t\tPort: &echo.Port{\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [ "\t\t\t\t\tcall: apps.Ingress.CallWithRetryOrFail,\n" ], "file_path": "tests/integration/pilot/common/routing.go", "type": "replace", "edit_start_line_idx": 679 }
The `istio-cni-repair` binary can be run as a standalone command line tool or as a daemon.
cni/cmd/istio-cni-repair/README.md
0
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.00016468798276036978, 0.00016468798276036978, 0.00016468798276036978, 0.00016468798276036978, 0 ]
{ "id": 2, "code_window": [ "\t\t\t\t\tal = nil\n", "\t\t\t\t}\n", "\t\t\t\tchilds = append(childs, TrafficCall{\n", "\t\t\t\t\tname: fmt.Sprintf(\"mode:%v,sni:%v,alpn:%v\", mode, sni, alpn),\n", "\t\t\t\t\tcall: apps.EastWest.CallWithRetryOrFail,\n", "\t\t\t\t\topts: echo.CallOptions{\n", "\t\t\t\t\t\tPort: &echo.Port{\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [ "\t\t\t\t\tcall: apps.Ingress.CallWithRetryOrFail,\n" ], "file_path": "tests/integration/pilot/common/routing.go", "type": "replace", "edit_start_line_idx": 679 }
apiVersion: release-notes/v2
kind: bug-fix
area: installation
issue:
  - 29364
releaseNotes:
  - |
    **Fixed** Newer control plane installations were removing permissions for `rbac.istio.io` from `istiod`, causing older control planes relying on that CRD group to hang on restart.
releasenotes/notes/29372.yaml
0
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.0001688772317720577, 0.0001688772317720577, 0.0001688772317720577, 0.0001688772317720577, 0 ]
{ "id": 3, "code_window": [ "\t\t\t\t\topts: echo.CallOptions{\n", "\t\t\t\t\t\tPort: &echo.Port{\n", "\t\t\t\t\t\t\tServicePort: 15443,\n", "\t\t\t\t\t\t\tProtocol: protocol.HTTPS,\n", "\t\t\t\t\t\t},\n", "\t\t\t\t\t\tServerName: sni,\n", "\t\t\t\t\t\tAlpn: al,\n", "\t\t\t\t\t\tValidator: echo.ExpectError(),\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\t\t\t\t\tServicePort: 443,\n" ], "file_path": "tests/integration/pilot/common/routing.go", "type": "replace", "edit_start_line_idx": 682 }
// +build integ // Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package common import ( "strconv" "strings" "istio.io/istio/pkg/test/framework/components/echo" "istio.io/istio/pkg/test/framework/components/echo/common" "istio.io/istio/pkg/test/framework/components/echo/echoboot" "istio.io/istio/pkg/test/framework/components/istio" "istio.io/istio/pkg/test/framework/components/istio/ingress" "istio.io/istio/pkg/test/framework/components/namespace" "istio.io/istio/pkg/test/framework/resource" "istio.io/istio/pkg/test/util/tmpl" ) type EchoDeployments struct { // Namespace echo apps will be deployed Namespace namespace.Instance // Namespace where external echo app will be deployed ExternalNamespace namespace.Instance // Ingressgateway instance Ingress ingress.Instance // Eastwest gateway instance EastWest ingress.Instance // Standard echo app to be used by tests PodA echo.Instances // Standard echo app to be used by tests PodB echo.Instances // Standard echo app to be used by tests PodC echo.Instances // Standard echo app with TPROXY interception mode to be used by tests PodTproxy echo.Instances // Headless echo app to be used by tests Headless echo.Instances // StatefulSet echo app to be used by tests StatefulSet echo.Instances // Echo app to be used by tests, with no sidecar injected Naked echo.Instances // A virtual machine echo app (only deployed to one cluster) VM echo.Instances // Echo app to be used by tests, with no sidecar injected External echo.Instances All echo.Instances } const ( PodASvc = "a" PodBSvc = "b" PodCSvc = "c" PodTproxySvc = "tproxy" VMSvc = "vm" HeadlessSvc = "headless" StatefulSetSvc = "statefulset" NakedSvc = "naked" ExternalSvc = "external" externalHostname = "fake.external.com" ) func FindPortByName(name string) echo.Port { for _, p := range common.EchoPorts { if p.Name == name { return p } } return echo.Port{} } func serviceEntryPorts() []echo.Port { res := []echo.Port{} for _, p := range common.EchoPorts { if strings.HasPrefix(p.Name, "auto") { // The protocol needs to be set in common.EchoPorts to configure the echo deployment // But for service entry, we want to ensure we set it to "" which will use sniffing p.Protocol = "" } res = append(res, p) } return res } func SetupApps(t resource.Context, i istio.Instance, apps *EchoDeployments) error { var err error apps.Namespace, err = namespace.New(t, namespace.Config{ Prefix: "echo", Inject: true, }) if err != nil { return err } apps.ExternalNamespace, err = namespace.New(t, namespace.Config{ Prefix: "external", Inject: false, }) if err != nil { return err } apps.Ingress = i.IngressFor(t.Clusters().Default()) apps.EastWest = i.CustomIngressFor(t.Clusters().Default(), "istio-eastwestgateway", "eastwestgateway") // Headless services don't work with targetPort, set to same port headlessPorts := make([]echo.Port, len(common.EchoPorts)) for i, p := range common.EchoPorts { p.ServicePort = p.InstancePort headlessPorts[i] = p } builder := echoboot.NewBuilder(t). WithClusters(t.Clusters()...). 
WithConfig(echo.Config{ Service: PodASvc, Namespace: apps.Namespace, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{{}}, Locality: "region.zone.subzone", WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: PodBSvc, Namespace: apps.Namespace, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{{}}, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: PodCSvc, Namespace: apps.Namespace, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{{}}, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: HeadlessSvc, Headless: true, Namespace: apps.Namespace, Ports: headlessPorts, Subsets: []echo.SubsetConfig{{}}, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: StatefulSetSvc, Headless: true, StatefulSet: true, Namespace: apps.Namespace, Ports: headlessPorts, Subsets: []echo.SubsetConfig{{}}, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: NakedSvc, Namespace: apps.Namespace, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{ { Annotations: map[echo.Annotation]*echo.AnnotationValue{ echo.SidecarInject: { Value: strconv.FormatBool(false), }, }, }, }, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: ExternalSvc, Namespace: apps.ExternalNamespace, DefaultHostHeader: externalHostname, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{ { Annotations: map[echo.Annotation]*echo.AnnotationValue{ echo.SidecarInject: { Value: strconv.FormatBool(false), }, }, }, }, WorkloadOnlyPorts: common.WorkloadPorts, }). WithConfig(echo.Config{ Service: PodTproxySvc, Namespace: apps.Namespace, Ports: common.EchoPorts, Subsets: []echo.SubsetConfig{{ Annotations: echo.NewAnnotations().Set(echo.SidecarInterceptionMode, "TPROXY"), }}, WorkloadOnlyPorts: common.WorkloadPorts, }). 
WithConfig(echo.Config{ Service: VMSvc, Namespace: apps.Namespace, Ports: common.EchoPorts, DeployAsVM: true, AutoRegisterVM: true, Subsets: []echo.SubsetConfig{{}}, WorkloadOnlyPorts: common.WorkloadPorts, }) echos, err := builder.Build() if err != nil { return err } apps.All = echos apps.PodA = echos.Match(echo.Service(PodASvc)) apps.PodB = echos.Match(echo.Service(PodBSvc)) apps.PodC = echos.Match(echo.Service(PodCSvc)) apps.PodTproxy = echos.Match(echo.Service(PodTproxySvc)) apps.Headless = echos.Match(echo.Service(HeadlessSvc)) apps.StatefulSet = echos.Match(echo.Service(StatefulSetSvc)) apps.Naked = echos.Match(echo.Service(NakedSvc)) apps.External = echos.Match(echo.Service(ExternalSvc)) if !t.Settings().SkipVM { apps.VM = echos.Match(echo.Service(VMSvc)) } if err := t.Config().ApplyYAMLNoCleanup(apps.Namespace.Name(), ` apiVersion: networking.istio.io/v1alpha3 kind: Sidecar metadata: name: restrict-to-namespace spec: egress: - hosts: - "./*" - "istio-system/*" `); err != nil { return err } se, err := tmpl.Evaluate(`apiVersion: networking.istio.io/v1alpha3 kind: ServiceEntry metadata: name: external-service spec: hosts: - {{.Hostname}} location: MESH_EXTERNAL resolution: DNS endpoints: - address: external.{{.Namespace}}.svc.cluster.local ports: - name: http-tls-origination number: 8888 protocol: http targetPort: 443 - name: http2-tls-origination number: 8882 protocol: http2 targetPort: 443 {{- range $i, $p := .Ports }} - name: {{$p.Name}} number: {{$p.ServicePort}} protocol: "{{$p.Protocol}}" {{- end }} `, map[string]interface{}{"Namespace": apps.ExternalNamespace.Name(), "Hostname": externalHostname, "Ports": serviceEntryPorts()}) if err != nil { return err } if err := t.Config().ApplyYAML(apps.Namespace.Name(), se); err != nil { return err } return nil } func (d EchoDeployments) IsMulticluster() bool { return d.All.Clusters().IsMulticluster() }
tests/integration/pilot/common/apps.go
1
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.0013389342930167913, 0.00023502492695115507, 0.00016236735973507166, 0.00016841331671457738, 0.00022031755361240357 ]
{ "id": 3, "code_window": [ "\t\t\t\t\topts: echo.CallOptions{\n", "\t\t\t\t\t\tPort: &echo.Port{\n", "\t\t\t\t\t\t\tServicePort: 15443,\n", "\t\t\t\t\t\t\tProtocol: protocol.HTTPS,\n", "\t\t\t\t\t\t},\n", "\t\t\t\t\t\tServerName: sni,\n", "\t\t\t\t\t\tAlpn: al,\n", "\t\t\t\t\t\tValidator: echo.ExpectError(),\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\t\t\t\t\tServicePort: 443,\n" ], "file_path": "tests/integration/pilot/common/routing.go", "type": "replace", "edit_start_line_idx": 682 }
ARG BASE_VERSION=latest

FROM gcr.io/istio-release/base:${BASE_VERSION}

COPY client /usr/local/bin/client
COPY server /usr/local/bin/server
COPY certs/cert.crt /cert.crt
COPY certs/cert.key /cert.key

ENTRYPOINT ["/usr/local/bin/server"]
pkg/test/echo/docker/Dockerfile.app
0
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.00017046512220986187, 0.00016655771469231695, 0.00016265030717477202, 0.00016655771469231695, 0.000003907407517544925 ]
{ "id": 3, "code_window": [ "\t\t\t\t\topts: echo.CallOptions{\n", "\t\t\t\t\t\tPort: &echo.Port{\n", "\t\t\t\t\t\t\tServicePort: 15443,\n", "\t\t\t\t\t\t\tProtocol: protocol.HTTPS,\n", "\t\t\t\t\t\t},\n", "\t\t\t\t\t\tServerName: sni,\n", "\t\t\t\t\t\tAlpn: al,\n", "\t\t\t\t\t\tValidator: echo.ExpectError(),\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\t\t\t\t\tServicePort: 443,\n" ], "file_path": "tests/integration/pilot/common/routing.go", "type": "replace", "edit_start_line_idx": 682 }
// +build integ // Copyright Istio Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stackdriver import ( "context" "path/filepath" "testing" "golang.org/x/sync/errgroup" "istio.io/istio/pkg/test/env" "istio.io/istio/pkg/test/framework" "istio.io/istio/pkg/test/framework/components/echo" "istio.io/istio/pkg/test/framework/components/stackdriver" "istio.io/istio/pkg/test/util/retry" "istio.io/istio/tests/integration/telemetry" ) const ( tcpServerConnectionCount = "tests/integration/telemetry/stackdriver/testdata/server_tcp_connection_count.json.tmpl" tcpClientConnectionCount = "tests/integration/telemetry/stackdriver/testdata/client_tcp_connection_count.json.tmpl" tcpServerLogEntry = "tests/integration/telemetry/stackdriver/testdata/tcp_server_access_log.json.tmpl" ) // TestTCPStackdriverMonitoring verifies that stackdriver TCP filter works. func TestTCPStackdriverMonitoring(t *testing.T) { framework.NewTest(t). Features("observability.telemetry.stackdriver"). Run(func(ctx framework.TestContext) { g, _ := errgroup.WithContext(context.Background()) for _, cltInstance := range clt { cltInstance := cltInstance g.Go(func() error { err := retry.UntilSuccess(func() error { _, err := cltInstance.Call(echo.CallOptions{ Target: srv[0], PortName: "tcp", Count: telemetry.RequestCountMultipler * len(srv), }) if err != nil { return err } t.Logf("Validating Telemetry for Cluster %v", cltInstance.Config().Cluster) clName := cltInstance.Config().Cluster.Name() trustDomain := telemetry.GetTrustDomain(cltInstance.Config().Cluster, ist.Settings().SystemNamespace) if err := validateMetrics(t, filepath.Join(env.IstioSrc, tcpServerConnectionCount), filepath.Join(env.IstioSrc, tcpClientConnectionCount), clName, trustDomain); err != nil { return err } if err := validateLogs(t, filepath.Join(env.IstioSrc, tcpServerLogEntry), clName, trustDomain, stackdriver.ServerAccessLog); err != nil { return err } return nil }, retry.Delay(framework.TelemetryRetryDelay), retry.Timeout(framework.TelemetryRetryTimeout)) if err != nil { return err } return nil }) } if err := g.Wait(); err != nil { t.Fatalf("test failed: %v", err) } }) }
tests/integration/telemetry/stackdriver/stackdriver_tcp_filter_test.go
0
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.00017765312804840505, 0.00017189113714266568, 0.0001633866340853274, 0.00017207943892572075, 0.000004380223344924161 ]
{ "id": 3, "code_window": [ "\t\t\t\t\topts: echo.CallOptions{\n", "\t\t\t\t\t\tPort: &echo.Port{\n", "\t\t\t\t\t\t\tServicePort: 15443,\n", "\t\t\t\t\t\t\tProtocol: protocol.HTTPS,\n", "\t\t\t\t\t\t},\n", "\t\t\t\t\t\tServerName: sni,\n", "\t\t\t\t\t\tAlpn: al,\n", "\t\t\t\t\t\tValidator: echo.ExpectError(),\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\t\t\t\t\tServicePort: 443,\n" ], "file_path": "tests/integration/pilot/common/routing.go", "type": "replace", "edit_start_line_idx": 682 }
// Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package model import ( "testing" "time" "github.com/davecgh/go-spew/spew" core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" auth "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" matcher "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" "github.com/google/go-cmp/cmp" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/durationpb" "istio.io/istio/pilot/pkg/model" "istio.io/istio/pkg/spiffe" ) func TestConstructSdsSecretConfig(t *testing.T) { testCases := []struct { name string secretName string expected *auth.SdsSecretConfig }{ { name: "ConstructSdsSecretConfig", secretName: "spiffe://cluster.local/ns/bar/sa/foo", expected: &auth.SdsSecretConfig{ Name: "spiffe://cluster.local/ns/bar/sa/foo", SdsConfig: &core.ConfigSource{ ResourceApiVersion: core.ApiVersion_V3, ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{ ApiConfigSource: &core.ApiConfigSource{ ApiType: core.ApiConfigSource_GRPC, SetNodeOnFirstMessageOnly: true, TransportApiVersion: core.ApiVersion_V3, GrpcServices: []*core.GrpcService{ { TargetSpecifier: &core.GrpcService_EnvoyGrpc_{ EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: SDSClusterName}, }, }, }, }, }, }, }, }, { name: "ConstructSdsSecretConfig without secretName", secretName: "", expected: nil, }, } for _, c := range testCases { t.Run(c.name, func(t *testing.T) { if got := ConstructSdsSecretConfig(c.secretName, &model.Proxy{}); !cmp.Equal(got, c.expected, protocmp.Transform()) { t.Errorf("ConstructSdsSecretConfig: got(%#v), want(%#v)\n", got, c.expected) } }) } } func TestConstructValidationContext(t *testing.T) { testCases := []struct { name string rootCAFilePath string subjectAltNames []string expected *auth.CommonTlsContext_ValidationContext }{ { name: "default CA", rootCAFilePath: "/root/ca", subjectAltNames: []string{"SystemCACertificates.keychain", "SystemRootCertificates.keychain"}, expected: &auth.CommonTlsContext_ValidationContext{ ValidationContext: &auth.CertificateValidationContext{ TrustedCa: &core.DataSource{ Specifier: &core.DataSource_Filename{ Filename: "/root/ca", }, }, MatchSubjectAltNames: []*matcher.StringMatcher{ { MatchPattern: &matcher.StringMatcher_Exact{ Exact: "SystemCACertificates.keychain", }, }, { MatchPattern: &matcher.StringMatcher_Exact{ Exact: "SystemRootCertificates.keychain", }, }, }, }, }, }, { name: "default CA without subjectAltNames", rootCAFilePath: "/root/ca", expected: &auth.CommonTlsContext_ValidationContext{ ValidationContext: &auth.CertificateValidationContext{ TrustedCa: &core.DataSource{ Specifier: &core.DataSource_Filename{ Filename: "/root/ca", }, }, }, }, }, } for _, c := range testCases { t.Run(c.name, func(t *testing.T) { if got := ConstructValidationContext(c.rootCAFilePath, c.subjectAltNames); !cmp.Equal(got, c.expected, protocmp.Transform()) { t.Errorf("ConstructValidationContext: got(%#v), want(%#v)\n", got, c.expected) } }) } } func 
TestApplyToCommonTLSContext(t *testing.T) { testCases := []struct { name string node *model.Proxy trustDomainAliases []string validateClient bool expected *auth.CommonTlsContext }{ { name: "MTLSStrict using SDS", node: &model.Proxy{ Metadata: &model.NodeMetadata{}, }, validateClient: true, expected: &auth.CommonTlsContext{ TlsCertificateSdsSecretConfigs: []*auth.SdsSecretConfig{ { Name: "default", SdsConfig: &core.ConfigSource{ InitialFetchTimeout: durationpb.New(time.Second * 0), ResourceApiVersion: core.ApiVersion_V3, ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{ ApiConfigSource: &core.ApiConfigSource{ ApiType: core.ApiConfigSource_GRPC, SetNodeOnFirstMessageOnly: true, TransportApiVersion: core.ApiVersion_V3, GrpcServices: []*core.GrpcService{ { TargetSpecifier: &core.GrpcService_EnvoyGrpc_{ EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: SDSClusterName}, }, }, }, }, }, }, }, }, ValidationContextType: &auth.CommonTlsContext_CombinedValidationContext{ CombinedValidationContext: &auth.CommonTlsContext_CombinedCertificateValidationContext{ DefaultValidationContext: &auth.CertificateValidationContext{}, ValidationContextSdsSecretConfig: &auth.SdsSecretConfig{ Name: "ROOTCA", SdsConfig: &core.ConfigSource{ InitialFetchTimeout: durationpb.New(time.Second * 0), ResourceApiVersion: core.ApiVersion_V3, ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{ ApiConfigSource: &core.ApiConfigSource{ ApiType: core.ApiConfigSource_GRPC, SetNodeOnFirstMessageOnly: true, TransportApiVersion: core.ApiVersion_V3, GrpcServices: []*core.GrpcService{ { TargetSpecifier: &core.GrpcService_EnvoyGrpc_{ EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: SDSClusterName}, }, }, }, }, }, }, }, }, }, }, }, { name: "MTLSStrict using SDS and SAN aliases", node: &model.Proxy{ Metadata: &model.NodeMetadata{}, }, validateClient: true, trustDomainAliases: []string{"alias-1.domain", "some-other-alias-1.domain", "alias-2.domain"}, expected: &auth.CommonTlsContext{ TlsCertificateSdsSecretConfigs: []*auth.SdsSecretConfig{ { Name: "default", SdsConfig: &core.ConfigSource{ InitialFetchTimeout: durationpb.New(time.Second * 0), ResourceApiVersion: core.ApiVersion_V3, ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{ ApiConfigSource: &core.ApiConfigSource{ ApiType: core.ApiConfigSource_GRPC, SetNodeOnFirstMessageOnly: true, TransportApiVersion: core.ApiVersion_V3, GrpcServices: []*core.GrpcService{ { TargetSpecifier: &core.GrpcService_EnvoyGrpc_{ EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: SDSClusterName}, }, }, }, }, }, }, }, }, ValidationContextType: &auth.CommonTlsContext_CombinedValidationContext{ CombinedValidationContext: &auth.CommonTlsContext_CombinedCertificateValidationContext{ DefaultValidationContext: &auth.CertificateValidationContext{MatchSubjectAltNames: []*matcher.StringMatcher{ {MatchPattern: &matcher.StringMatcher_Prefix{Prefix: spiffe.URIPrefix + "alias-1.domain" + "/"}}, {MatchPattern: &matcher.StringMatcher_Prefix{Prefix: spiffe.URIPrefix + "some-other-alias-1.domain" + "/"}}, {MatchPattern: &matcher.StringMatcher_Prefix{Prefix: spiffe.URIPrefix + "alias-2.domain" + "/"}}, }}, ValidationContextSdsSecretConfig: &auth.SdsSecretConfig{ Name: "ROOTCA", SdsConfig: &core.ConfigSource{ InitialFetchTimeout: durationpb.New(time.Second * 0), ResourceApiVersion: core.ApiVersion_V3, ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{ ApiConfigSource: &core.ApiConfigSource{ ApiType: core.ApiConfigSource_GRPC, SetNodeOnFirstMessageOnly: true, TransportApiVersion: 
core.ApiVersion_V3, GrpcServices: []*core.GrpcService{ { TargetSpecifier: &core.GrpcService_EnvoyGrpc_{ EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: SDSClusterName}, }, }, }, }, }, }, }, }, }, }, }, { name: "MTLS using SDS with custom certs in metadata", node: &model.Proxy{ Metadata: &model.NodeMetadata{ TLSServerCertChain: "serverCertChain", TLSServerKey: "serverKey", TLSServerRootCert: "servrRootCert", }, }, validateClient: true, expected: &auth.CommonTlsContext{ TlsCertificateSdsSecretConfigs: []*auth.SdsSecretConfig{ { Name: "file-cert:serverCertChain~serverKey", SdsConfig: &core.ConfigSource{ ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{ ApiConfigSource: &core.ApiConfigSource{ ApiType: core.ApiConfigSource_GRPC, SetNodeOnFirstMessageOnly: true, TransportApiVersion: core.ApiVersion_V3, GrpcServices: []*core.GrpcService{ { TargetSpecifier: &core.GrpcService_EnvoyGrpc_{ EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: SDSClusterName}, }, }, }, }, }, ResourceApiVersion: core.ApiVersion_V3, }, }, }, ValidationContextType: &auth.CommonTlsContext_CombinedValidationContext{ CombinedValidationContext: &auth.CommonTlsContext_CombinedCertificateValidationContext{ DefaultValidationContext: &auth.CertificateValidationContext{}, ValidationContextSdsSecretConfig: &auth.SdsSecretConfig{ Name: "file-root:servrRootCert", SdsConfig: &core.ConfigSource{ ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{ ApiConfigSource: &core.ApiConfigSource{ ApiType: core.ApiConfigSource_GRPC, SetNodeOnFirstMessageOnly: true, TransportApiVersion: core.ApiVersion_V3, GrpcServices: []*core.GrpcService{ { TargetSpecifier: &core.GrpcService_EnvoyGrpc_{ EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: SDSClusterName}, }, }, }, }, }, ResourceApiVersion: core.ApiVersion_V3, }, }, }, }, }, }, { name: "ISTIO_MUTUAL SDS without node meta", node: &model.Proxy{ Metadata: &model.NodeMetadata{}, }, validateClient: true, expected: &auth.CommonTlsContext{ TlsCertificateSdsSecretConfigs: []*auth.SdsSecretConfig{ { Name: "default", SdsConfig: &core.ConfigSource{ ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{ ApiConfigSource: &core.ApiConfigSource{ ApiType: core.ApiConfigSource_GRPC, SetNodeOnFirstMessageOnly: true, TransportApiVersion: core.ApiVersion_V3, GrpcServices: []*core.GrpcService{ { TargetSpecifier: &core.GrpcService_EnvoyGrpc_{ EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: SDSClusterName}, }, }, }, }, }, ResourceApiVersion: core.ApiVersion_V3, InitialFetchTimeout: durationpb.New(time.Second * 0), }, }, }, ValidationContextType: &auth.CommonTlsContext_CombinedValidationContext{ CombinedValidationContext: &auth.CommonTlsContext_CombinedCertificateValidationContext{ DefaultValidationContext: &auth.CertificateValidationContext{}, ValidationContextSdsSecretConfig: &auth.SdsSecretConfig{ Name: "ROOTCA", SdsConfig: &core.ConfigSource{ ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{ ApiConfigSource: &core.ApiConfigSource{ ApiType: core.ApiConfigSource_GRPC, SetNodeOnFirstMessageOnly: true, TransportApiVersion: core.ApiVersion_V3, GrpcServices: []*core.GrpcService{ { TargetSpecifier: &core.GrpcService_EnvoyGrpc_{ EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: SDSClusterName}, }, }, }, }, }, ResourceApiVersion: core.ApiVersion_V3, InitialFetchTimeout: durationpb.New(time.Second * 0), }, }, }, }, }, }, { name: "ISTIO_MUTUAL with custom cert paths from proxy node metadata", node: &model.Proxy{ Metadata: &model.NodeMetadata{ TLSServerCertChain: 
"/custom/path/to/cert-chain.pem", TLSServerKey: "/custom-key.pem", TLSServerRootCert: "/custom/path/to/root.pem", }, }, validateClient: true, expected: &auth.CommonTlsContext{ TlsCertificateSdsSecretConfigs: []*auth.SdsSecretConfig{ { Name: "file-cert:/custom/path/to/cert-chain.pem~/custom-key.pem", SdsConfig: &core.ConfigSource{ ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{ ApiConfigSource: &core.ApiConfigSource{ ApiType: core.ApiConfigSource_GRPC, SetNodeOnFirstMessageOnly: true, TransportApiVersion: core.ApiVersion_V3, GrpcServices: []*core.GrpcService{ { TargetSpecifier: &core.GrpcService_EnvoyGrpc_{ EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: SDSClusterName}, }, }, }, }, }, ResourceApiVersion: core.ApiVersion_V3, }, }, }, ValidationContextType: &auth.CommonTlsContext_CombinedValidationContext{ CombinedValidationContext: &auth.CommonTlsContext_CombinedCertificateValidationContext{ DefaultValidationContext: &auth.CertificateValidationContext{}, ValidationContextSdsSecretConfig: &auth.SdsSecretConfig{ Name: "file-root:/custom/path/to/root.pem", SdsConfig: &core.ConfigSource{ ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{ ApiConfigSource: &core.ApiConfigSource{ ApiType: core.ApiConfigSource_GRPC, SetNodeOnFirstMessageOnly: true, TransportApiVersion: core.ApiVersion_V3, GrpcServices: []*core.GrpcService{ { TargetSpecifier: &core.GrpcService_EnvoyGrpc_{ EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: SDSClusterName}, }, }, }, }, }, ResourceApiVersion: core.ApiVersion_V3, }, }, }, }, }, }, { name: "SIMPLE with custom cert paths from proxy node metadata without cacerts", node: &model.Proxy{ Metadata: &model.NodeMetadata{ TLSServerCertChain: "/custom/path/to/cert-chain.pem", TLSServerKey: "/custom-key.pem", }, }, validateClient: false, expected: &auth.CommonTlsContext{ TlsCertificateSdsSecretConfigs: []*auth.SdsSecretConfig{ { Name: "file-cert:/custom/path/to/cert-chain.pem~/custom-key.pem", SdsConfig: &core.ConfigSource{ ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{ ApiConfigSource: &core.ApiConfigSource{ ApiType: core.ApiConfigSource_GRPC, SetNodeOnFirstMessageOnly: true, TransportApiVersion: core.ApiVersion_V3, GrpcServices: []*core.GrpcService{ { TargetSpecifier: &core.GrpcService_EnvoyGrpc_{ EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: SDSClusterName}, }, }, }, }, }, ResourceApiVersion: core.ApiVersion_V3, }, }, }, }, }, } for _, test := range testCases { t.Run(test.name, func(t *testing.T) { tlsContext := &auth.CommonTlsContext{} ApplyToCommonTLSContext(tlsContext, test.node, []string{}, test.trustDomainAliases, test.validateClient) if !cmp.Equal(tlsContext, test.expected, protocmp.Transform()) { t.Errorf("got(%#v), want(%#v)\n", spew.Sdump(tlsContext), spew.Sdump(test.expected)) } }) } }
pilot/pkg/security/model/authentication_test.go
0
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.00020191505609545857, 0.00016974283789750189, 0.00016377172141801566, 0.00016812099784146994, 0.000006313029643933987 ]
{ "id": 4, "code_window": [ " name: cross-network-gateway-test\n", " namespace: istio-system\n", "spec:\n", " selector:\n", " istio: eastwestgateway\n", " servers:\n", " - port:\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [ " istio: ingressgateway\n" ], "file_path": "tests/integration/pilot/common/routing.go", "type": "replace", "edit_start_line_idx": 703 }
// +build integ // Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package common import ( "context" "fmt" "net/http" "reflect" "sort" "strings" "time" "istio.io/istio/pilot/pkg/model" "istio.io/istio/pkg/config/host" "istio.io/istio/pkg/config/protocol" "istio.io/istio/pkg/test" echoclient "istio.io/istio/pkg/test/echo/client" "istio.io/istio/pkg/test/echo/common/scheme" epb "istio.io/istio/pkg/test/echo/proto" "istio.io/istio/pkg/test/framework/components/echo" "istio.io/istio/pkg/test/framework/components/echo/common" "istio.io/istio/pkg/test/framework/components/echo/echotest" "istio.io/istio/pkg/test/framework/components/istio/ingress" "istio.io/istio/pkg/test/util/retry" "istio.io/istio/pkg/test/util/tmpl" ingressutil "istio.io/istio/tests/integration/security/sds_ingress/util" ) const httpVirtualServiceTmpl = ` apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: {{.VirtualServiceHost}} spec: gateways: - {{.Gateway}} hosts: - {{.VirtualServiceHost}} http: - route: - destination: host: {{.VirtualServiceHost}} port: number: {{.Port}} {{- if .MatchScheme }} match: - scheme: exact: {{.MatchScheme}} headers: request: add: istio-custom-header: user-defined-value {{- end }} --- ` func httpVirtualService(gateway, host string, port int) string { return tmpl.MustEvaluate(httpVirtualServiceTmpl, struct { Gateway string VirtualServiceHost string Port int MatchScheme string }{gateway, host, port, ""}) } const gatewayTmpl = ` apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: name: gateway spec: selector: istio: ingressgateway servers: - port: number: {{.GatewayPort}} name: {{.GatewayPortName}} protocol: {{.GatewayProtocol}} {{- if .Credential }} tls: mode: SIMPLE credentialName: {{.Credential}} {{- end }} hosts: - "{{.GatewayHost}}" --- ` func httpGateway(host string) string { return tmpl.MustEvaluate(gatewayTmpl, struct { GatewayHost string GatewayPort int GatewayPortName string GatewayProtocol string Credential string }{ host, 80, "http", "HTTP", "", }) } func virtualServiceCases(skipVM bool) []TrafficTestCase { noTProxy := echotest.FilterMatch(func(instance echo.Instance) bool { return !instance.Config().IsTProxy() }) var cases []TrafficTestCase cases = append(cases, TrafficTestCase{ name: "added header", config: ` apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: default spec: hosts: - {{ .dstSvc }} http: - route: - destination: host: {{ .dstSvc }} headers: request: add: istio-custom-header: user-defined-value`, opts: echo.CallOptions{ PortName: "http", Count: 1, Validator: echo.And( echo.ExpectOK(), echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { return ExpectString(response.RawResponse["Istio-Custom-Header"], "user-defined-value", "request header") }) })), }, workloadAgnostic: true, }, TrafficTestCase{ name: "set header", config: ` apiVersion: networking.istio.io/v1alpha3 kind: VirtualService 
metadata: name: default spec: hosts: - {{ (index .dst 0).Config.Service }} http: - route: - destination: host: {{ (index .dst 0).Config.Service }} headers: request: set: x-custom: some-value`, opts: echo.CallOptions{ PortName: "http", Count: 1, Validator: echo.And( echo.ExpectOK(), echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { return ExpectString(response.RawResponse["X-Custom"], "some-value", "added request header") }) })), }, workloadAgnostic: true, }, TrafficTestCase{ name: "set authority header", config: ` apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: default spec: hosts: - {{ (index .dst 0).Config.Service }} http: - route: - destination: host: {{ (index .dst 0).Config.Service }} headers: request: set: :authority: my-custom-authority`, opts: echo.CallOptions{ PortName: "http", Count: 1, Validator: echo.And( echo.ExpectOK(), echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { return ExpectString(response.RawResponse["Host"], "my-custom-authority", "added authority header") }) })), }, workloadAgnostic: true, }, TrafficTestCase{ name: "redirect", config: ` apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: default spec: hosts: - {{ .dstSvc }} http: - match: - uri: exact: /foo redirect: uri: /new/path - match: - uri: exact: /new/path route: - destination: host: {{ .dstSvc }}`, opts: echo.CallOptions{ PortName: "http", Path: "/foo?key=value", FollowRedirects: true, Count: 1, Validator: echo.And( echo.ExpectOK(), echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { return ExpectString(response.URL, "/new/path?key=value", "URL") }) })), }, workloadAgnostic: true, }, TrafficTestCase{ name: "rewrite uri", config: ` apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: default spec: hosts: - {{ .dstSvc }} http: - match: - uri: exact: /foo rewrite: uri: /new/path route: - destination: host: {{ .dstSvc }}`, opts: echo.CallOptions{ PortName: "http", Path: "/foo?key=value#hash", Count: 1, Validator: echo.And( echo.ExpectOK(), echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { return ExpectString(response.URL, "/new/path?key=value", "URL") }) })), }, workloadAgnostic: true, }, TrafficTestCase{ name: "rewrite authority", config: ` apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: default spec: hosts: - {{ .dstSvc }} http: - match: - uri: exact: /foo rewrite: authority: new-authority route: - destination: host: {{ .dstSvc }}`, opts: echo.CallOptions{ PortName: "http", Path: "/foo", Count: 1, Validator: echo.And( echo.ExpectOK(), echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { return ExpectString(response.Host, "new-authority", "authority") }) })), }, workloadAgnostic: true, }, TrafficTestCase{ name: "cors", // TODO https://github.com/istio/istio/issues/31532 targetFilters: []echotest.Filter{noTProxy, echotest.Not(echotest.VirtualMachines)}, config: ` apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: default spec: hosts: - {{ .dstSvc }} 
http: - corsPolicy: allowOrigins: - exact: cors.com allowMethods: - POST - GET allowCredentials: false allowHeaders: - X-Foo-Bar - X-Foo-Baz maxAge: "24h" route: - destination: host: {{ .dstSvc }} `, children: []TrafficCall{ { name: "preflight", opts: func() echo.CallOptions { header := http.Header{} header.Add("Origin", "cors.com") header.Add("Access-Control-Request-Method", "DELETE") return echo.CallOptions{ PortName: "http", Method: "OPTIONS", Headers: header, Count: 1, Validator: echo.And( echo.ExpectOK(), echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { if err := ExpectString(response.RawResponse["Access-Control-Allow-Origin"], "cors.com", "preflight CORS origin"); err != nil { return err } if err := ExpectString(response.RawResponse["Access-Control-Allow-Methods"], "POST,GET", "preflight CORS method"); err != nil { return err } if err := ExpectString(response.RawResponse["Access-Control-Allow-Headers"], "X-Foo-Bar,X-Foo-Baz", "preflight CORS headers"); err != nil { return err } if err := ExpectString(response.RawResponse["Access-Control-Max-Age"], "86400", "preflight CORS max age"); err != nil { return err } return nil }) })), } }(), }, { name: "get", opts: func() echo.CallOptions { header := http.Header{} header.Add("Origin", "cors.com") return echo.CallOptions{ PortName: "http", Headers: header, Count: 1, Validator: echo.And( echo.ExpectOK(), echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return ExpectString(response[0].RawResponse["Access-Control-Allow-Origin"], "cors.com", "GET CORS origin") })), } }(), }, { // GET without matching origin name: "get no origin match", opts: echo.CallOptions{ PortName: "http", Count: 1, Validator: echo.And( echo.ExpectOK(), echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return ExpectString(response[0].RawResponse["Access-Control-Allow-Origin"], "", "mismatched CORS origin") })), }, }, }, workloadAgnostic: true, }, ) // reduce the total # of subtests that don't give valuable coverage or just don't work noNaked := echotest.FilterMatch(echo.Not(echo.IsNaked())) noHeadless := echotest.FilterMatch(echo.Not(echo.IsHeadless())) noExternal := echotest.FilterMatch(echo.Not(echo.IsExternal())) for i, tc := range cases { tc.sourceFilters = append(tc.sourceFilters, noNaked, noHeadless) tc.targetFilters = append(tc.targetFilters, noNaked, noHeadless) cases[i] = tc } splits := [][]int{ {50, 25, 25}, {80, 10, 10}, } if skipVM { splits = [][]int{ {50, 50}, {80, 20}, } } for _, split := range splits { split := split cases = append(cases, TrafficTestCase{ name: fmt.Sprintf("shifting-%d", split[0]), toN: len(split), sourceFilters: []echotest.Filter{noHeadless, noNaked}, targetFilters: []echotest.Filter{noHeadless, noExternal}, templateVars: func(_ echo.Callers, _ echo.Instances) map[string]interface{} { return map[string]interface{}{ "split": split, } }, config: ` {{ $split := .split }} apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: default spec: hosts: - {{ ( index .dstSvcs 0) }} http: - route: {{- range $idx, $svc := .dstSvcs }} - destination: host: {{ $svc }} weight: {{ ( index $split $idx ) }} {{- end }} `, validateForN: func(src echo.Caller, dests echo.Services) echo.Validator { return echo.And( echo.ExpectOK(), echo.ValidatorFunc(func(responses echoclient.ParsedResponses, err error) error { errorThreshold := 10 if len(split) != len(dests) { // shouldn't happen 
return fmt.Errorf("split configured for %d destinations, but framework gives %d", len(split), len(dests)) } splitPerHost := map[string]int{} for i, pct := range split { splitPerHost[dests.Services()[i]] = pct } for host, exp := range splitPerHost { hostResponses := responses.Match(func(r *echoclient.ParsedResponse) bool { return strings.HasPrefix(r.Hostname, host) }) if !AlmostEquals(len(hostResponses), exp, errorThreshold) { return fmt.Errorf("expected %v calls to %q, got %v", exp, host, len(hostResponses)) } // echotest should have filtered the deployment to only contain reachable clusters hostDests := dests.Instances().Match(echo.Service(host)) targetClusters := hostDests.Clusters() // don't check headless since lb is unpredictable headlessTarget := hostDests.ContainsMatch(echo.IsHeadless()) if !headlessTarget && len(targetClusters.ByNetwork()[src.(echo.Instance).Config().Cluster.NetworkName()]) > 1 { // Conditionally check reached clusters to work around connection load balancing issues // See https://github.com/istio/istio/issues/32208 for details // We want to skip this for requests from the cross-network pod if err := hostResponses.CheckReachedClusters(targetClusters); err != nil { return fmt.Errorf("did not reach all clusters for %s: %v", host, err) } } } return nil })) }, opts: echo.CallOptions{ PortName: "http", Count: 100, }, workloadAgnostic: true, }) } return cases } func HostHeader(header string) http.Header { h := http.Header{} h["Host"] = []string{header} return h } // tlsOriginationCases contains tests TLS origination from DestinationRule func tlsOriginationCases(apps *EchoDeployments) []TrafficTestCase { tc := TrafficTestCase{ name: "", config: fmt.Sprintf(` apiVersion: networking.istio.io/v1alpha3 kind: DestinationRule metadata: name: external spec: host: %s trafficPolicy: tls: mode: SIMPLE `, apps.External[0].Config().DefaultHostHeader), children: []TrafficCall{}, } expects := []struct { port int alpn string }{ {8888, "http/1.1"}, {8882, "h2"}, } for _, c := range apps.PodA { for _, e := range expects { c := c e := e tc.children = append(tc.children, TrafficCall{ name: fmt.Sprintf("%s: %s", c.Config().Cluster.StableName(), e.alpn), opts: echo.CallOptions{ Port: &echo.Port{ServicePort: e.port, Protocol: protocol.HTTP}, Count: 1, Address: apps.External[0].Address(), Headers: HostHeader(apps.External[0].Config().DefaultHostHeader), Scheme: scheme.HTTP, Validator: echo.And(echo.ExpectOK(), echo.ExpectKey("Alpn", e.alpn)), }, call: c.CallWithRetryOrFail, }) } } return []TrafficTestCase{tc} } // useClientProtocolCases contains tests use_client_protocol from DestinationRule func useClientProtocolCases(apps *EchoDeployments) []TrafficTestCase { var cases []TrafficTestCase client := apps.PodA destination := apps.PodC[0] cases = append(cases, TrafficTestCase{ name: "use client protocol with h2", config: useClientProtocolDestinationRule("use-client-protocol-h2", destination.Config().Service), call: client[0].CallWithRetryOrFail, opts: echo.CallOptions{ Target: destination, PortName: "http", Count: 1, HTTP2: true, Validator: echo.And( echo.ExpectOK(), echo.ExpectKey("Proto", "HTTP/2.0"), ), }, }, TrafficTestCase{ name: "use client protocol with h1", config: useClientProtocolDestinationRule("use-client-protocol-h1", destination.Config().Service), call: client[0].CallWithRetryOrFail, opts: echo.CallOptions{ PortName: "http", Count: 1, Target: destination, HTTP2: false, Validator: echo.And( echo.ExpectOK(), echo.ExpectKey("Proto", "HTTP/1.1"), ), }, }, ) return cases } // 
trafficLoopCases contains tests to ensure traffic does not loop through the sidecar func trafficLoopCases(apps *EchoDeployments) []TrafficTestCase { cases := []TrafficTestCase{} for _, c := range apps.PodA { for _, d := range apps.PodB { for _, port := range []string{"15001", "15006"} { c, d, port := c, d, port cases = append(cases, TrafficTestCase{ name: port, call: func(t test.Failer, options echo.CallOptions, retryOptions ...retry.Option) echoclient.ParsedResponses { dwl := d.WorkloadsOrFail(t)[0] cwl := c.WorkloadsOrFail(t)[0] resp, err := cwl.ForwardEcho(context.Background(), &epb.ForwardEchoRequest{ Url: fmt.Sprintf("http://%s:%s", dwl.Address(), port), Count: 1, }) // Ideally we would actually check to make sure we do not blow up the pod, // but I couldn't find a way to reliably detect this. if err == nil { t.Fatalf("expected request to fail, but it didn't: %v", resp) } return nil }, }) } } } return cases } // autoPassthroughCases tests that we cannot hit unexpected destinations when using AUTO_PASSTHROUGH func autoPassthroughCases(apps *EchoDeployments) []TrafficTestCase { cases := []TrafficTestCase{} // We test the cross product of all Istio ALPNs (or no ALPN), all mTLS modes, and various backends alpns := []string{"istio", "istio-peer-exchange", "istio-http/1.0", "istio-http/1.1", "istio-h2", ""} modes := []string{"STRICT", "PERMISSIVE", "DISABLE"} mtlsHost := host.Name(apps.PodA[0].Config().FQDN()) nakedHost := host.Name(apps.Naked[0].Config().FQDN()) httpsPort := FindPortByName("https").ServicePort httpsAutoPort := FindPortByName("auto-https").ServicePort snis := []string{ model.BuildSubsetKey(model.TrafficDirectionOutbound, "", mtlsHost, httpsPort), model.BuildDNSSrvSubsetKey(model.TrafficDirectionOutbound, "", mtlsHost, httpsPort), model.BuildSubsetKey(model.TrafficDirectionOutbound, "", nakedHost, httpsPort), model.BuildDNSSrvSubsetKey(model.TrafficDirectionOutbound, "", nakedHost, httpsPort), model.BuildSubsetKey(model.TrafficDirectionOutbound, "", mtlsHost, httpsAutoPort), model.BuildDNSSrvSubsetKey(model.TrafficDirectionOutbound, "", mtlsHost, httpsAutoPort), model.BuildSubsetKey(model.TrafficDirectionOutbound, "", nakedHost, httpsAutoPort), model.BuildDNSSrvSubsetKey(model.TrafficDirectionOutbound, "", nakedHost, httpsAutoPort), } for _, mode := range modes { childs := []TrafficCall{} for _, sni := range snis { for _, alpn := range alpns { alpn, sni, mode := alpn, sni, mode al := &epb.Alpn{Value: []string{alpn}} if alpn == "" { al = nil } childs = append(childs, TrafficCall{ name: fmt.Sprintf("mode:%v,sni:%v,alpn:%v", mode, sni, alpn), call: apps.EastWest.CallWithRetryOrFail, opts: echo.CallOptions{ Port: &echo.Port{ ServicePort: 15443, Protocol: protocol.HTTPS, }, ServerName: sni, Alpn: al, Validator: echo.ExpectError(), }, }, ) } } cases = append(cases, TrafficTestCase{ config: globalPeerAuthentication(mode) + ` --- apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: name: cross-network-gateway-test namespace: istio-system spec: selector: istio: eastwestgateway servers: - port: number: 15443 name: tls protocol: TLS tls: mode: AUTO_PASSTHROUGH hosts: - "*.local" `, children: childs, }) } return cases } func gatewayCases() []TrafficTestCase { templateParams := func(protocol protocol.Instance, src echo.Callers, dests echo.Instances) map[string]interface{} { host, dest, portN, cred := "*", dests[0], 80, "" if protocol.IsTLS() { host, portN, cred = dest.Config().FQDN(), 443, "cred" } return map[string]interface{}{ "IngressNamespace": 
src[0].(ingress.Instance).Namespace(), "GatewayHost": host, "GatewayPort": portN, "GatewayPortName": strings.ToLower(string(protocol)), "GatewayProtocol": string(protocol), "Gateway": "gateway", "VirtualServiceHost": dest.Config().FQDN(), "Port": dest.Config().PortByName("http").ServicePort, "Credential": cred, } } // clears the Target to avoid echo internals trying to match the protocol with the port on echo.Config noTarget := func(_ echo.Caller, _ echo.Instances, opts *echo.CallOptions) { opts.Target = nil } // allows setting the target indirectly via the host header fqdnHostHeader := func(src echo.Caller, dsts echo.Instances, opts *echo.CallOptions) { if opts.Headers == nil { opts.Headers = map[string][]string{} } opts.Headers["Host"] = []string{dsts[0].Config().FQDN()} noTarget(src, dsts, opts) } // SingleRegualrPod is already applied leaving one regular pod, to only regular pods should leave a single workload. singleTarget := []echotest.Filter{echotest.FilterMatch(echotest.RegularPod)} // the following cases don't actually target workloads, we use the singleTarget filter to avoid duplicate cases cases := []TrafficTestCase{ { name: "404", targetFilters: singleTarget, workloadAgnostic: true, viaIngress: true, config: httpGateway("*"), opts: echo.CallOptions{ Count: 1, Port: &echo.Port{ Protocol: protocol.HTTP, }, Headers: map[string][]string{ "Host": {"foo.bar"}, }, Validator: echo.ExpectCode("404"), }, setupOpts: noTarget, }, { name: "https redirect", targetFilters: singleTarget, workloadAgnostic: true, viaIngress: true, config: `apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: name: gateway spec: selector: istio: ingressgateway servers: - port: number: 80 name: http protocol: HTTP hosts: - "*" tls: httpsRedirect: true --- `, opts: echo.CallOptions{ Count: 1, Port: &echo.Port{ Protocol: protocol.HTTP, }, Validator: echo.ExpectCode("301"), }, setupOpts: fqdnHostHeader, }, { // See https://github.com/istio/istio/issues/27315 name: "https with x-forwarded-proto", targetFilters: singleTarget, workloadAgnostic: true, viaIngress: true, config: `apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: name: gateway spec: selector: istio: ingressgateway servers: - port: number: 80 name: http protocol: HTTP hosts: - "*" tls: httpsRedirect: true --- apiVersion: networking.istio.io/v1alpha3 kind: EnvoyFilter metadata: name: ingressgateway-redirect-config namespace: istio-system spec: configPatches: - applyTo: NETWORK_FILTER match: context: GATEWAY listener: filterChain: filter: name: envoy.filters.network.http_connection_manager patch: operation: MERGE value: typed_config: '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager xff_num_trusted_hops: 1 normalize_path: true workloadSelector: labels: istio: ingressgateway --- ` + httpVirtualServiceTmpl, opts: echo.CallOptions{ Count: 1, Port: &echo.Port{ Protocol: protocol.HTTP, }, Headers: map[string][]string{ // In real world, this may be set by a downstream LB that terminates the TLS "X-Forwarded-Proto": {"https"}, }, Validator: echo.ExpectOK(), }, setupOpts: fqdnHostHeader, templateVars: func(_ echo.Callers, dests echo.Instances) map[string]interface{} { dest := dests[0] return map[string]interface{}{ "Gateway": "gateway", "VirtualServiceHost": dest.Config().FQDN(), "Port": dest.Config().PortByName("http").ServicePort, } }, }, } for _, proto := range []protocol.Instance{protocol.HTTP, protocol.HTTPS} { proto, secret := proto, "" if proto.IsTLS() { secret = 
ingressutil.IngressKubeSecretYAML("cred", "{{.IngressNamespace}}", ingressutil.TLS, ingressutil.IngressCredentialA) } cases = append( cases, TrafficTestCase{ name: string(proto), config: gatewayTmpl + httpVirtualServiceTmpl + secret, templateVars: func(src echo.Callers, dests echo.Instances) map[string]interface{} { return templateParams(proto, src, dests) }, setupOpts: fqdnHostHeader, opts: echo.CallOptions{ Count: 1, Port: &echo.Port{ Protocol: proto, }, }, viaIngress: true, workloadAgnostic: true, }, TrafficTestCase{ name: fmt.Sprintf("%s scheme match", proto), config: gatewayTmpl + httpVirtualServiceTmpl + secret, templateVars: func(src echo.Callers, dests echo.Instances) map[string]interface{} { params := templateParams(proto, src, dests) params["MatchScheme"] = strings.ToLower(string(proto)) return params }, setupOpts: fqdnHostHeader, opts: echo.CallOptions{ Count: 1, Port: &echo.Port{ Protocol: proto, }, Validator: echo.And( echo.ExpectOK(), echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { // We check a header is added to ensure our VS actually applied return ExpectString(response.RawResponse["Istio-Custom-Header"], "user-defined-value", "request header") }) })), }, // to keep tests fast, we only run the basic protocol test per-workload and scheme match once (per cluster) targetFilters: singleTarget, viaIngress: true, workloadAgnostic: true, }, ) } return cases } func XFFGatewayCase(apps *EchoDeployments) []TrafficTestCase { cases := []TrafficTestCase{} destinationSets := []echo.Instances{ apps.PodA, } for _, d := range destinationSets { d := d if len(d) == 0 { continue } fqdn := d[0].Config().FQDN() cases = append(cases, TrafficTestCase{ name: d[0].Config().Service, config: httpGateway("*") + httpVirtualService("gateway", fqdn, d[0].Config().PortByName("http").ServicePort), skip: false, call: apps.Ingress.CallWithRetryOrFail, opts: echo.CallOptions{ Count: 1, Port: &echo.Port{ Protocol: protocol.HTTP, }, Headers: map[string][]string{ "X-Forwarded-For": {"56.5.6.7, 72.9.5.6, 98.1.2.3"}, "Host": {fqdn}, }, Validator: echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { externalAddress, ok := response.RawResponse["X-Envoy-External-Address"] if !ok { return fmt.Errorf("missing X-Envoy-External-Address Header") } if err := ExpectString(externalAddress, "72.9.5.6", "envoy-external-address header"); err != nil { return err } xffHeader, ok := response.RawResponse["X-Forwarded-For"] if !ok { return fmt.Errorf("missing X-Forwarded-For Header") } xffIPs := strings.Split(xffHeader, ",") if len(xffIPs) != 4 { return fmt.Errorf("did not receive expected 4 hosts in X-Forwarded-For header") } return ExpectString(strings.TrimSpace(xffIPs[1]), "72.9.5.6", "ip in xff header") }) }), }, }) } return cases } // serviceCases tests overlapping Services. There are a few cases. // Consider we have our base service B, with service port P and target port T // 1) Another service, B', with P -> T. In this case, both the listener and the cluster will conflict. // Because everything is workload oriented, this is not a problem unless they try to make them different // protocols (this is explicitly called out as "not supported") or control inbound connectionPool settings // (which is moving to Sidecar soon) // 2) Another service, B', with P -> T'. 
In this case, the listener will be distinct, since its based on the target. // The cluster, however, will be shared, which is broken, because we should be forwarding to T when we call B, and T' when we call B'. // 3) Another service, B', with P' -> T. In this case, the listener is shared. This is fine, with the exception of different protocols // The cluster is distinct. // 4) Another service, B', with P' -> T'. There is no conflicts here at all. func serviceCases(apps *EchoDeployments) []TrafficTestCase { cases := []TrafficTestCase{} for _, c := range apps.PodA { c := c // Case 1 // Identical to port "http" or service B, just behind another service name svc := fmt.Sprintf(`apiVersion: v1 kind: Service metadata: name: b-alt-1 labels: app: b spec: ports: - name: http port: %d targetPort: %d selector: app: b`, FindPortByName("http").ServicePort, FindPortByName("http").InstancePort) cases = append(cases, TrafficTestCase{ name: fmt.Sprintf("case 1 both match in cluster %v", c.Config().Cluster.StableName()), config: svc, call: c.CallWithRetryOrFail, opts: echo.CallOptions{ Count: 1, Address: "b-alt-1", Port: &echo.Port{ServicePort: FindPortByName("http").ServicePort, Protocol: protocol.HTTP}, Timeout: time.Millisecond * 100, Validator: echo.ExpectOK(), }, }) // Case 2 // We match the service port, but forward to a different port // Here we make the new target tcp so the test would fail if it went to the http port svc = fmt.Sprintf(`apiVersion: v1 kind: Service metadata: name: b-alt-2 labels: app: b spec: ports: - name: tcp port: %d targetPort: %d selector: app: b`, FindPortByName("http").ServicePort, common.WorkloadPorts[0].Port) cases = append(cases, TrafficTestCase{ name: fmt.Sprintf("case 2 service port match in cluster %v", c.Config().Cluster.StableName()), config: svc, call: c.CallWithRetryOrFail, opts: echo.CallOptions{ Count: 1, Address: "b-alt-2", Port: &echo.Port{ServicePort: FindPortByName("http").ServicePort, Protocol: protocol.TCP}, Scheme: scheme.TCP, Timeout: time.Millisecond * 100, Validator: echo.ExpectOK(), }, }) // Case 3 // We match the target port, but front with a different service port svc = fmt.Sprintf(`apiVersion: v1 kind: Service metadata: name: b-alt-3 labels: app: b spec: ports: - name: http port: 12345 targetPort: %d selector: app: b`, FindPortByName("http").InstancePort) cases = append(cases, TrafficTestCase{ name: fmt.Sprintf("case 3 target port match in cluster %v", c.Config().Cluster.StableName()), config: svc, call: c.CallWithRetryOrFail, opts: echo.CallOptions{ Count: 1, Address: "b-alt-3", Port: &echo.Port{ServicePort: 12345, Protocol: protocol.HTTP}, Timeout: time.Millisecond * 100, Validator: echo.ExpectOK(), }, }) // Case 4 // Completely new set of ports svc = fmt.Sprintf(`apiVersion: v1 kind: Service metadata: name: b-alt-4 labels: app: b spec: ports: - name: http port: 12346 targetPort: %d selector: app: b`, common.WorkloadPorts[1].Port) cases = append(cases, TrafficTestCase{ name: fmt.Sprintf("case 4 no match in cluster %v", c.Config().Cluster.StableName()), config: svc, call: c.CallWithRetryOrFail, opts: echo.CallOptions{ Count: 1, Address: "b-alt-4", Port: &echo.Port{ServicePort: 12346, Protocol: protocol.HTTP}, Timeout: time.Millisecond * 100, Validator: echo.ExpectOK(), }, }) } return cases } func flatten(clients ...[]echo.Instance) []echo.Instance { instances := []echo.Instance{} for _, c := range clients { instances = append(instances, c...) 
} return instances } // selfCallsCases checks that pods can call themselves func selfCallsCases() []TrafficTestCase { sourceFilters := []echotest.Filter{ echotest.Not(echotest.ExternalServices), echotest.Not(echotest.FilterMatch(echo.IsNaked())), echotest.Not(echotest.FilterMatch(echo.IsHeadless())), } comboFilters := []echotest.CombinationFilter{func(from echo.Instance, to echo.Instances) echo.Instances { return to.Match(echo.FQDN(from.Config().FQDN())) }} return []TrafficTestCase{ // Calls to the Service will go through envoy outbound and inbound, so we get envoy headers added { name: "to service", workloadAgnostic: true, sourceFilters: sourceFilters, comboFilters: comboFilters, opts: echo.CallOptions{ Count: 1, PortName: "http", Validator: echo.And(echo.ExpectOK(), echo.ExpectKey("X-Envoy-Attempt-Count", "1")), }, }, // Localhost calls will go directly to localhost, bypassing Envoy. No envoy headers added. { name: "to localhost", workloadAgnostic: true, sourceFilters: sourceFilters, comboFilters: comboFilters, setupOpts: func(_ echo.Caller, _ echo.Instances, opts *echo.CallOptions) { // the framework will try to set this when enumerating test cases opts.Target = nil }, opts: echo.CallOptions{ Count: 1, Address: "localhost", Port: &echo.Port{ServicePort: 8080}, Scheme: scheme.HTTP, Validator: echo.And(echo.ExpectOK(), echo.ExpectKey("X-Envoy-Attempt-Count", "")), }, }, // PodIP calls will go directly to podIP, bypassing Envoy. No envoy headers added. { name: "to podIP", workloadAgnostic: true, sourceFilters: sourceFilters, comboFilters: comboFilters, setupOpts: func(srcCaller echo.Caller, _ echo.Instances, opts *echo.CallOptions) { src := srcCaller.(echo.Instance) workloads, _ := src.Workloads() opts.Address = workloads[0].Address() // the framework will try to set this when enumerating test cases opts.Target = nil }, opts: echo.CallOptions{ Count: 1, Scheme: scheme.HTTP, Port: &echo.Port{ServicePort: 8080}, Validator: echo.And(echo.ExpectOK(), echo.ExpectKey("X-Envoy-Attempt-Count", "")), }, }, } } // Todo merge with security TestReachability code func protocolSniffingCases() []TrafficTestCase { cases := []TrafficTestCase{} type protocolCase struct { // The port we call port string // The actual type of traffic we send to the port scheme scheme.Instance } protocols := []protocolCase{ {"http", scheme.HTTP}, {"auto-http", scheme.HTTP}, {"tcp", scheme.TCP}, {"auto-tcp", scheme.TCP}, {"grpc", scheme.GRPC}, {"auto-grpc", scheme.GRPC}, } // so we can validate all clusters are hit for _, call := range protocols { call := call cases = append(cases, TrafficTestCase{ // TODO(https://github.com/istio/istio/issues/26798) enable sniffing tcp skip: call.scheme == scheme.TCP, name: call.port, opts: echo.CallOptions{ Count: 1, PortName: call.port, Scheme: call.scheme, Timeout: time.Second * 5, }, validate: func(src echo.Caller, dst echo.Instances) echo.Validator { if call.scheme == scheme.TCP { // no host header for TCP return echo.ExpectOK() } return echo.And( echo.ExpectOK(), echo.ExpectHost(dst[0].Config().HostHeader())) }, workloadAgnostic: true, }) } return cases } // Todo merge with security TestReachability code func instanceIPTests(apps *EchoDeployments) []TrafficTestCase { cases := []TrafficTestCase{} ipCases := []struct { name string endpoint string disableSidecar bool port string code int }{ // instance IP bind { name: "instance IP without sidecar", disableSidecar: true, port: "http-instance", code: 200, }, { name: "instance IP with wildcard sidecar", endpoint: "0.0.0.0", port: 
"http-instance", code: 200, }, { name: "instance IP with localhost sidecar", endpoint: "127.0.0.1", port: "http-instance", code: 503, }, { name: "instance IP with empty sidecar", endpoint: "", port: "http-instance", code: 200, }, // Localhost bind { name: "localhost IP without sidecar", disableSidecar: true, port: "http-localhost", code: 503, }, { name: "localhost IP with wildcard sidecar", endpoint: "0.0.0.0", port: "http-localhost", code: 503, }, { name: "localhost IP with localhost sidecar", endpoint: "127.0.0.1", port: "http-localhost", code: 200, }, { name: "localhost IP with empty sidecar", endpoint: "", port: "http-localhost", code: 503, }, // Wildcard bind { name: "wildcard IP without sidecar", disableSidecar: true, port: "http", code: 200, }, { name: "wildcard IP with wildcard sidecar", endpoint: "0.0.0.0", port: "http", code: 200, }, { name: "wildcard IP with localhost sidecar", endpoint: "127.0.0.1", port: "http", code: 200, }, { name: "wildcard IP with empty sidecar", endpoint: "", port: "http", code: 200, }, } for _, ipCase := range ipCases { for _, client := range apps.PodA { ipCase := ipCase client := client destination := apps.PodB[0] var config string if !ipCase.disableSidecar { config = fmt.Sprintf(` apiVersion: networking.istio.io/v1alpha3 kind: Sidecar metadata: name: sidecar spec: workloadSelector: labels: app: b egress: - hosts: - "./*" ingress: - port: number: %d protocol: HTTP defaultEndpoint: %s:%d `, FindPortByName(ipCase.port).InstancePort, ipCase.endpoint, FindPortByName(ipCase.port).InstancePort) } cases = append(cases, TrafficTestCase{ name: ipCase.name, call: client.CallWithRetryOrFail, config: config, opts: echo.CallOptions{ Count: 1, Target: destination, PortName: ipCase.port, Scheme: scheme.HTTP, Timeout: time.Second * 5, Validator: echo.ExpectCode(fmt.Sprint(ipCase.code)), }, }) } } return cases } type vmCase struct { name string from echo.Instance to echo.Instances host string } func DNSTestCases(apps *EchoDeployments, cniEnabled bool) []TrafficTestCase { makeSE := func(ips ...string) string { return tmpl.MustEvaluate(` apiVersion: networking.istio.io/v1alpha3 kind: ServiceEntry metadata: name: dns spec: hosts: - "fake.service.local" addresses: {{ range $ip := .IPs }} - "{{$ip}}" {{ end }} resolution: STATIC endpoints: [] ports: - number: 80 name: http protocol: HTTP `, map[string]interface{}{"IPs": ips}) } tcases := []TrafficTestCase{} ipv4 := "1.2.3.4" ipv6 := "1234:1234:1234::1234:1234:1234" dummyLocalhostServer := "127.0.0.1" cases := []struct { name string // TODO(https://github.com/istio/istio/issues/30282) support multiple vips ips string protocol string server string skipCNI bool expected []string }{ { name: "tcp ipv4", ips: ipv4, expected: []string{ipv4}, protocol: "tcp", }, { name: "udp ipv4", ips: ipv4, expected: []string{ipv4}, protocol: "udp", }, { name: "tcp ipv6", ips: ipv6, expected: []string{ipv6}, protocol: "tcp", }, { name: "udp ipv6", ips: ipv6, expected: []string{ipv6}, protocol: "udp", }, { // We should only capture traffic to servers in /etc/resolv.conf nameservers // This checks we do not capture traffic to other servers. // This is important for cases like app -> istio dns server -> dnsmasq -> upstream // If we captured all DNS traffic, we would loop dnsmasq traffic back to our server. 
name: "tcp localhost server", ips: ipv4, expected: []string{}, protocol: "tcp", skipCNI: true, server: dummyLocalhostServer, }, { name: "udp localhost server", ips: ipv4, expected: []string{}, protocol: "udp", skipCNI: true, server: dummyLocalhostServer, }, } for _, client := range flatten(apps.VM, apps.PodA, apps.PodTproxy) { for _, tt := range cases { if tt.skipCNI && cniEnabled { continue } tt, client := tt, client address := "fake.service.local?" if tt.protocol != "" { address += "&protocol=" + tt.protocol } if tt.server != "" { address += "&server=" + tt.server } tcases = append(tcases, TrafficTestCase{ name: fmt.Sprintf("%s/%s", client.Config().Service, tt.name), config: makeSE(tt.ips), call: client.CallWithRetryOrFail, opts: echo.CallOptions{ Scheme: scheme.DNS, Count: 1, Address: address, Validator: echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { ips := []string{} for _, v := range response.RawResponse { ips = append(ips, v) } sort.Strings(ips) if !reflect.DeepEqual(ips, tt.expected) { return fmt.Errorf("unexpected dns response: wanted %v, got %v", tt.expected, ips) } return nil }) }), }, }) } } svcCases := []struct { name string protocol string server string }{ { name: "tcp", protocol: "tcp", }, { name: "udp", protocol: "udp", }, } for _, client := range flatten(apps.VM, apps.PodA, apps.PodTproxy) { for _, tt := range svcCases { tt, client := tt, client aInCluster := apps.PodA.Match(echo.InCluster(client.Config().Cluster)) if len(aInCluster) == 0 { // The cluster doesn't contain A, but connects to a cluster containing A aInCluster = apps.PodA.Match(echo.InCluster(client.Config().Cluster.Primary())) } address := aInCluster[0].Config().FQDN() + "?" if tt.protocol != "" { address += "&protocol=" + tt.protocol } if tt.server != "" { address += "&server=" + tt.server } expected := aInCluster[0].Address() tcases = append(tcases, TrafficTestCase{ name: fmt.Sprintf("svc/%s/%s", client.Config().Service, tt.name), call: client.CallWithRetryOrFail, opts: echo.CallOptions{ Count: 1, Scheme: scheme.DNS, Address: address, Validator: echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { ips := []string{} for _, v := range response.RawResponse { ips = append(ips, v) } sort.Strings(ips) exp := []string{expected} if !reflect.DeepEqual(ips, exp) { return fmt.Errorf("unexpected dns response: wanted %v, got %v", exp, ips) } return nil }) }), }, }) } } return tcases } func VMTestCases(vms echo.Instances, apps *EchoDeployments) []TrafficTestCase { var testCases []vmCase for _, vm := range vms { testCases = append(testCases, vmCase{ name: "dns: VM to k8s cluster IP service name.namespace host", from: vm, to: apps.PodA, host: PodASvc + "." 
+ apps.Namespace.Name(), }, vmCase{ name: "dns: VM to k8s cluster IP service fqdn host", from: vm, to: apps.PodA, host: apps.PodA[0].Config().FQDN(), }, vmCase{ name: "dns: VM to k8s cluster IP service short name host", from: vm, to: apps.PodA, host: PodASvc, }, vmCase{ name: "dns: VM to k8s headless service", from: vm, to: apps.Headless.Match(echo.InCluster(vm.Config().Cluster.Primary())), host: apps.Headless[0].Config().FQDN(), }, vmCase{ name: "dns: VM to k8s statefulset service", from: vm, to: apps.StatefulSet.Match(echo.InCluster(vm.Config().Cluster.Primary())), host: apps.StatefulSet[0].Config().FQDN(), }, // TODO(https://github.com/istio/istio/issues/32552) re-enable //vmCase{ // name: "dns: VM to k8s statefulset instance.service", // from: vm, // to: apps.StatefulSet.Match(echo.InCluster(vm.Config().Cluster.Primary())), // host: fmt.Sprintf("%s-v1-0.%s", StatefulSetSvc, StatefulSetSvc), //}, //vmCase{ // name: "dns: VM to k8s statefulset instance.service.namespace", // from: vm, // to: apps.StatefulSet.Match(echo.InCluster(vm.Config().Cluster.Primary())), // host: fmt.Sprintf("%s-v1-0.%s.%s", StatefulSetSvc, StatefulSetSvc, apps.Namespace.Name()), //}, //vmCase{ // name: "dns: VM to k8s statefulset instance.service.namespace.svc", // from: vm, // to: apps.StatefulSet.Match(echo.InCluster(vm.Config().Cluster.Primary())), // host: fmt.Sprintf("%s-v1-0.%s.%s.svc", StatefulSetSvc, StatefulSetSvc, apps.Namespace.Name()), //}, //vmCase{ // name: "dns: VM to k8s statefulset instance FQDN", // from: vm, // to: apps.StatefulSet.Match(echo.InCluster(vm.Config().Cluster.Primary())), // host: fmt.Sprintf("%s-v1-0.%s", StatefulSetSvc, apps.StatefulSet[0].Config().FQDN()), //}, ) } for _, podA := range apps.PodA { testCases = append(testCases, vmCase{ name: "k8s to vm", from: podA, to: vms, }) } cases := make([]TrafficTestCase, 0) for _, c := range testCases { c := c validators := []echo.Validator{echo.ExpectOK()} if !c.to.ContainsMatch(echo.IsHeadless()) { // headless load-balancing can be inconsistent validators = append(validators, echo.ExpectReachedClusters(c.to.Clusters())) } cases = append(cases, TrafficTestCase{ name: fmt.Sprintf("%s from %s", c.name, c.from.Config().Cluster.StableName()), call: c.from.CallWithRetryOrFail, opts: echo.CallOptions{ // assume that all echos in `to` only differ in which cluster they're deployed in Target: c.to[0], PortName: "http", Address: c.host, Count: callsPerCluster * len(c.to), Validator: echo.And(validators...), }, }) } return cases } func destinationRule(app, mode string) string { return fmt.Sprintf(`apiVersion: networking.istio.io/v1beta1 kind: DestinationRule metadata: name: %s spec: host: %s trafficPolicy: tls: mode: %s --- `, app, app, mode) } func useClientProtocolDestinationRule(name, app string) string { return fmt.Sprintf(`apiVersion: networking.istio.io/v1beta1 kind: DestinationRule metadata: name: %s spec: host: %s trafficPolicy: tls: mode: DISABLE connectionPool: http: useClientProtocol: true --- `, name, app) } func peerAuthentication(app, mode string) string { return fmt.Sprintf(`apiVersion: security.istio.io/v1beta1 kind: PeerAuthentication metadata: name: %s spec: selector: matchLabels: app: %s mtls: mode: %s --- `, app, app, mode) } func globalPeerAuthentication(mode string) string { return fmt.Sprintf(`apiVersion: security.istio.io/v1beta1 kind: PeerAuthentication metadata: name: default spec: mtls: mode: %s --- `, mode) } func serverFirstTestCases(apps *EchoDeployments) []TrafficTestCase { cases := make([]TrafficTestCase, 0) clients 
:= apps.PodA destination := apps.PodC[0] configs := []struct { port string dest string auth string validator echo.Validator }{ // TODO: All these cases *should* succeed (except the TLS mismatch cases) - but don't due to issues in our implementation // For auto port, outbound request will be delayed by the protocol sniffer, regardless of configuration {"auto-tcp-server", "DISABLE", "DISABLE", echo.ExpectError()}, {"auto-tcp-server", "DISABLE", "PERMISSIVE", echo.ExpectError()}, {"auto-tcp-server", "DISABLE", "STRICT", echo.ExpectError()}, {"auto-tcp-server", "ISTIO_MUTUAL", "DISABLE", echo.ExpectError()}, {"auto-tcp-server", "ISTIO_MUTUAL", "PERMISSIVE", echo.ExpectError()}, {"auto-tcp-server", "ISTIO_MUTUAL", "STRICT", echo.ExpectError()}, // These is broken because we will still enable inbound sniffing for the port. Since there is no tls, // there is no server-first "upgrading" to client-first {"tcp-server", "DISABLE", "DISABLE", echo.ExpectOK()}, {"tcp-server", "DISABLE", "PERMISSIVE", echo.ExpectError()}, // Expected to fail, incompatible configuration {"tcp-server", "DISABLE", "STRICT", echo.ExpectError()}, {"tcp-server", "ISTIO_MUTUAL", "DISABLE", echo.ExpectError()}, // In these cases, we expect success // There is no sniffer on either side {"tcp-server", "DISABLE", "DISABLE", echo.ExpectOK()}, // On outbound, we have no sniffer involved // On inbound, the request is TLS, so its not server first {"tcp-server", "ISTIO_MUTUAL", "PERMISSIVE", echo.ExpectOK()}, {"tcp-server", "ISTIO_MUTUAL", "STRICT", echo.ExpectOK()}, } for _, client := range clients { for _, c := range configs { client, c := client, c cases = append(cases, TrafficTestCase{ name: fmt.Sprintf("%v:%v/%v", c.port, c.dest, c.auth), skip: apps.IsMulticluster(), // TODO stabilize tcp connection breaks config: destinationRule(destination.Config().Service, c.dest) + peerAuthentication(destination.Config().Service, c.auth), call: client.CallWithRetryOrFail, opts: echo.CallOptions{ Target: destination, PortName: c.port, Scheme: scheme.TCP, // Inbound timeout is 1s. We want to test this does not hit the listener filter timeout Timeout: time.Millisecond * 100, Count: 1, Validator: c.validator, }, }) } } return cases }
tests/integration/pilot/common/routing.go
1
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.030579369515180588, 0.0005524296429939568, 0.00016424839850515127, 0.0001711202785372734, 0.0024453329388052225 ]
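The routing test cases in the record above repeatedly combine echo.ExpectOK() with an inline echo.ValidatorFunc that inspects RawResponse headers. As a minimal sketch of that composition pattern only, the helper below reuses the package's own ExpectString helper and the echo/echoclient APIs exactly as they appear in the tests; the header name and expected value are caller-supplied placeholders, not values taken from the record.

package common

import (
	echoclient "istio.io/istio/pkg/test/echo/client"
	"istio.io/istio/pkg/test/framework/components/echo"
)

// expectHeader builds a validator that passes only when every parsed response
// carries the given header with the given value. It mirrors the inline
// ValidatorFunc closures used by the traffic test cases above; headerName and
// want are illustrative placeholders chosen by the caller.
func expectHeader(headerName, want string) echo.Validator {
	return echo.And(
		echo.ExpectOK(),
		echo.ValidatorFunc(func(responses echoclient.ParsedResponses, _ error) error {
			return responses.Check(func(_ int, r *echoclient.ParsedResponse) error {
				// RawResponse holds the echoed request/response metadata keyed by header name.
				return ExpectString(r.RawResponse[headerName], want, "response header "+headerName)
			})
		}),
	)
}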
{ "id": 4, "code_window": [ " name: cross-network-gateway-test\n", " namespace: istio-system\n", "spec:\n", " selector:\n", " istio: eastwestgateway\n", " servers:\n", " - port:\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [ " istio: ingressgateway\n" ], "file_path": "tests/integration/pilot/common/routing.go", "type": "replace", "edit_start_line_idx": 703 }
apiVersion: networking.istio.io/v1alpha3
kind: ServiceEntry
metadata:
  name: invalid-service-entry
spec:
  ports:
  - number: 80
    name: http
    protocol: HTTP
  discovery: DNS
  endpoints:
    # Rather than relying on an external host that might become unreachable (causing test failures)
    # we can mock the external endpoint using service t which has no sidecar.
    - address: t.istio-system.svc.cluster.local # TODO: this is brittle
      ports:
        http: 8080 # TODO test https
galley/testdatasets/validation/dataset/networking-v1alpha3-ServiceEntry-invalid.yaml
0
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.0011401252122595906, 0.001112204510718584, 0.0010842836927622557, 0.001112204510718584, 0.00002792075974866748 ]
{ "id": 4, "code_window": [ " name: cross-network-gateway-test\n", " namespace: istio-system\n", "spec:\n", " selector:\n", " istio: eastwestgateway\n", " servers:\n", " - port:\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [ " istio: ingressgateway\n" ], "file_path": "tests/integration/pilot/common/routing.go", "type": "replace", "edit_start_line_idx": 703 }
#!/bin/bash
#
# Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

while true; do curl -s -o /dev/null "http://$GATEWAY_URL/hello"; done
samples/helloworld/loadgen.sh
0
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.0001730946241877973, 0.00017300200124736875, 0.0001729093783069402, 0.00017300200124736875, 9.262294042855501e-8 ]
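The loadgen.sh record above drives traffic with a bare curl loop. Purely as a sketch of the same idea in Go (for contexts where a small load-generator binary is preferable), the program below reads the script's GATEWAY_URL environment variable and polls /hello; the 10ms pacing is an assumption, since the shell script simply loops as fast as curl allows.

package main

import (
	"fmt"
	"net/http"
	"os"
	"time"
)

// main issues a steady stream of GET requests against the /hello endpoint of
// the gateway, mirroring what samples/helloworld/loadgen.sh does with curl.
func main() {
	url := fmt.Sprintf("http://%s/hello", os.Getenv("GATEWAY_URL"))
	for {
		resp, err := http.Get(url)
		if err == nil {
			// Closing the response body avoids leaking connections; errors are
			// ignored just as the script ignores curl failures.
			resp.Body.Close()
		}
		time.Sleep(10 * time.Millisecond)
	}
}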
{ "id": 4, "code_window": [ " name: cross-network-gateway-test\n", " namespace: istio-system\n", "spec:\n", " selector:\n", " istio: eastwestgateway\n", " servers:\n", " - port:\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [ " istio: ingressgateway\n" ], "file_path": "tests/integration/pilot/common/routing.go", "type": "replace", "edit_start_line_idx": 703 }
// Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package mesh import ( "encoding/json" "fmt" "io/ioutil" "os" "path/filepath" "reflect" "strings" "testing" . "github.com/onsi/gomega" v1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" klabels "k8s.io/apimachinery/pkg/labels" "istio.io/istio/operator/pkg/compare" "istio.io/istio/operator/pkg/helm" "istio.io/istio/operator/pkg/manifest" "istio.io/istio/operator/pkg/name" "istio.io/istio/operator/pkg/object" "istio.io/istio/operator/pkg/util" "istio.io/istio/operator/pkg/util/clog" "istio.io/istio/operator/pkg/util/httpserver" "istio.io/istio/operator/pkg/util/tgz" tutil "istio.io/istio/pilot/test/util" "istio.io/istio/pkg/test" "istio.io/istio/pkg/test/env" "istio.io/pkg/version" ) const ( istioTestVersion = "istio-1.7.0" testTGZFilename = istioTestVersion + "-linux.tar.gz" ) // chartSourceType defines where charts used in the test come from. type chartSourceType string var ( operatorRootDir = filepath.Join(env.IstioSrc, "operator") // testDataDir contains the directory for manifest-generate test data testDataDir = filepath.Join(operatorRootDir, "cmd/mesh/testdata/manifest-generate") // Snapshot charts are in testdata/manifest-generate/data-snapshot snapshotCharts = func() chartSourceType { d, err := ioutil.TempDir("", "data-snapshot-*") if err != nil { panic(fmt.Errorf("failed to make temp dir: %v", err)) } f, err := os.Open("testdata/manifest-generate/data-snapshot.tar.gz") if err != nil { panic(fmt.Errorf("failed to read data snapshot: %v", err)) } if err := tgz.Extract(f, d); err != nil { panic(fmt.Errorf("failed to extract data snapshot: %v", err)) } return chartSourceType(filepath.Join(d, "manifests")) }() // Compiled in charts come from assets.gen.go compiledInCharts chartSourceType = "COMPILED" _ = compiledInCharts // Live charts come from manifests/ liveCharts = chartSourceType(filepath.Join(env.IstioSrc, helm.OperatorSubdirFilePath)) ) type testGroup []struct { desc string // Small changes to the input profile produce large changes to the golden output // files. This makes it difficult to spot meaningful changes in pull requests. // By default we hide these changes to make developers life's a bit easier. However, // it is still useful to sometimes override this behavior and show the full diff. // When this flag is true, use an alternative file suffix that is not hidden by // default github in pull requests. 
showOutputFileInPullRequest bool flags string noInput bool outputDir string diffSelect string diffIgnore string chartSource chartSourceType } func TestMain(m *testing.M) { code := m.Run() // Cleanup uncompress snapshot charts os.RemoveAll(string(snapshotCharts)) os.Exit(code) } func TestManifestGenerateComponentHubTag(t *testing.T) { g := NewWithT(t) objs, err := runManifestCommands("component_hub_tag", "", liveCharts) if err != nil { t.Fatal(err) } tests := []struct { deploymentName string containerName string want string }{ { deploymentName: "istio-ingressgateway", containerName: "istio-proxy", want: "istio-spec.hub/proxyv2:istio-spec.tag", }, { deploymentName: "istiod", containerName: "discovery", want: "component.pilot.hub/pilot:2", }, } for _, tt := range tests { for _, os := range objs { containerName := tt.deploymentName if tt.containerName != "" { containerName = tt.containerName } container := mustGetContainer(g, os, tt.deploymentName, containerName) g.Expect(container).Should(HavePathValueEqual(PathValue{"image", tt.want})) } } } func TestManifestGenerateGateways(t *testing.T) { g := NewWithT(t) flags := "-s components.ingressGateways.[0].k8s.resources.requests.memory=999Mi " + "-s components.ingressGateways.[name:user-ingressgateway].k8s.resources.requests.cpu=555m" objss, err := runManifestCommands("gateways", flags, liveCharts) if err != nil { t.Fatal(err) } for _, objs := range objss { g.Expect(objs.kind(name.HPAStr).size()).Should(Equal(3)) g.Expect(objs.kind(name.PDBStr).size()).Should(Equal(3)) g.Expect(objs.kind(name.ServiceStr).labels("istio=ingressgateway").size()).Should(Equal(3)) g.Expect(objs.kind(name.RoleStr).nameMatches(".*gateway.*").size()).Should(Equal(3)) g.Expect(objs.kind(name.RoleBindingStr).nameMatches(".*gateway.*").size()).Should(Equal(3)) g.Expect(objs.kind(name.SAStr).nameMatches(".*gateway.*").size()).Should(Equal(3)) dobj := mustGetDeployment(g, objs, "istio-ingressgateway") d := dobj.Unstructured() c := dobj.Container("istio-proxy") g.Expect(d).Should(HavePathValueContain(PathValue{"metadata.labels", toMap("aaa:aaa-val,bbb:bbb-val")})) g.Expect(c).Should(HavePathValueEqual(PathValue{"resources.requests.cpu", "111m"})) g.Expect(c).Should(HavePathValueEqual(PathValue{"resources.requests.memory", "999Mi"})) dobj = mustGetDeployment(g, objs, "user-ingressgateway") d = dobj.Unstructured() c = dobj.Container("istio-proxy") g.Expect(d).Should(HavePathValueContain(PathValue{"metadata.labels", toMap("ccc:ccc-val,ddd:ddd-val")})) g.Expect(c).Should(HavePathValueEqual(PathValue{"resources.requests.cpu", "555m"})) g.Expect(c).Should(HavePathValueEqual(PathValue{"resources.requests.memory", "888Mi"})) dobj = mustGetDeployment(g, objs, "ilb-gateway") d = dobj.Unstructured() c = dobj.Container("istio-proxy") s := mustGetService(g, objs, "ilb-gateway").Unstructured() g.Expect(d).Should(HavePathValueContain(PathValue{"metadata.labels", toMap("app:istio-ingressgateway,istio:ingressgateway,release: istio")})) g.Expect(c).Should(HavePathValueEqual(PathValue{"resources.requests.cpu", "333m"})) g.Expect(c).Should(HavePathValueEqual(PathValue{"env.[name:PILOT_CERT_PROVIDER].value", "foobar"})) g.Expect(s).Should(HavePathValueContain(PathValue{"metadata.annotations", toMap("cloud.google.com/load-balancer-type: internal")})) g.Expect(s).Should(HavePathValueContain(PathValue{"spec.ports.[0]", portVal("grpc-pilot-mtls", 15011, -1)})) g.Expect(s).Should(HavePathValueContain(PathValue{"spec.ports.[1]", portVal("tcp-citadel-grpc-tls", 8060, 8060)})) 
g.Expect(s).Should(HavePathValueContain(PathValue{"spec.ports.[2]", portVal("tcp-dns", 5353, -1)})) for _, o := range objs.kind(name.HPAStr).objSlice { ou := o.Unstructured() g.Expect(ou).Should(HavePathValueEqual(PathValue{"spec.minReplicas", int64(1)})) g.Expect(ou).Should(HavePathValueEqual(PathValue{"spec.maxReplicas", int64(5)})) } checkRoleBindingsReferenceRoles(g, objs) } } func TestManifestGenerateIstiodRemote(t *testing.T) { g := NewWithT(t) objss, err := runManifestCommands("istiod_remote", "", liveCharts) if err != nil { t.Fatal(err) } for _, objs := range objss { // check core CRDs exists g.Expect(objs.kind(name.CRDStr).nameEquals("destinationrules.networking.istio.io")).Should(Not(BeNil())) g.Expect(objs.kind(name.CRDStr).nameEquals("gateways.networking.istio.io")).Should(Not(BeNil())) g.Expect(objs.kind(name.CRDStr).nameEquals("sidecars.networking.istio.io")).Should(Not(BeNil())) g.Expect(objs.kind(name.CRDStr).nameEquals("virtualservices.networking.istio.io")).Should(Not(BeNil())) g.Expect(objs.kind(name.CRDStr).nameEquals("adapters.config.istio.io")).Should(BeNil()) g.Expect(objs.kind(name.CRDStr).nameEquals("authorizationpolicies.security.istio.io")).Should(Not(BeNil())) g.Expect(objs.kind(name.ClusterRoleStr).nameEquals("istiod-istio-system")).Should(Not(BeNil())) g.Expect(objs.kind(name.ClusterRoleStr).nameEquals("istio-reader-istio-system")).Should(Not(BeNil())) g.Expect(objs.kind(name.ClusterRoleBindingStr).nameEquals("istiod-istio-system")).Should(Not(BeNil())) g.Expect(objs.kind(name.ClusterRoleBindingStr).nameEquals("istio-reader-istio-system")).Should(Not(BeNil())) g.Expect(objs.kind(name.CMStr).nameEquals("istio-sidecar-injector")).Should(Not(BeNil())) g.Expect(objs.kind(name.ServiceStr).nameEquals("istiod")).Should(Not(BeNil())) g.Expect(objs.kind(name.SAStr).nameEquals("istio-reader-service-account")).Should(Not(BeNil())) g.Expect(objs.kind(name.SAStr).nameEquals("istiod-service-account")).Should(Not(BeNil())) mwc := mustGetMutatingWebhookConfiguration(g, objs, "istio-sidecar-injector").Unstructured() g.Expect(mwc).Should(HavePathValueEqual(PathValue{"webhooks.[0].clientConfig.url", "https://xxx:15017/inject"})) vwc := mustGetValidatingWebhookConfiguration(g, objs, "istiod-istio-system").Unstructured() g.Expect(vwc).Should(HavePathValueEqual(PathValue{"webhooks.[0].clientConfig.url", "https://xxx:15017/validate"})) ep := mustGetEndpoint(g, objs, "istiod").Unstructured() g.Expect(ep).Should(HavePathValueEqual(PathValue{"subsets.[0].addresses.[0]", endpointSubsetAddressVal("", "169.10.112.88", "")})) g.Expect(ep).Should(HavePathValueContain(PathValue{"subsets.[0].ports.[0]", portVal("tcp-istiod", 15012, -1)})) checkClusterRoleBindingsReferenceRoles(g, objs) } } func TestManifestGenerateAllOff(t *testing.T) { g := NewWithT(t) m, _, err := generateManifest("all_off", "", liveCharts) if err != nil { t.Fatal(err) } objs, err := parseObjectSetFromManifest(m) if err != nil { t.Fatal(err) } g.Expect(objs.size()).Should(Equal(0)) } func TestManifestGenerateFlagsMinimalProfile(t *testing.T) { g := NewWithT(t) // Change profile from empty to minimal using flag. m, _, err := generateManifest("empty", "-s profile=minimal", liveCharts) if err != nil { t.Fatal(err) } objs, err := parseObjectSetFromManifest(m) if err != nil { t.Fatal(err) } // minimal profile always has istiod, empty does not. 
mustGetDeployment(g, objs, "istiod") } func TestManifestGenerateFlagsSetHubTag(t *testing.T) { g := NewWithT(t) m, _, err := generateManifest("minimal", "-s hub=foo -s tag=bar", liveCharts) if err != nil { t.Fatal(err) } objs, err := parseObjectSetFromManifest(m) if err != nil { t.Fatal(err) } dobj := mustGetDeployment(g, objs, "istiod") c := dobj.Container("discovery") g.Expect(c).Should(HavePathValueEqual(PathValue{"image", "foo/pilot:bar"})) } func TestManifestGenerateFlagsSetValues(t *testing.T) { g := NewWithT(t) m, _, err := generateManifest("default", "-s values.global.proxy.image=myproxy -s values.global.proxy.includeIPRanges=172.30.0.0/16,172.21.0.0/16", liveCharts) if err != nil { t.Fatal(err) } objs, err := parseObjectSetFromManifest(m) if err != nil { t.Fatal(err) } dobj := mustGetDeployment(g, objs, "istio-ingressgateway") c := dobj.Container("istio-proxy") g.Expect(c).Should(HavePathValueEqual(PathValue{"image", "gcr.io/istio-testing/myproxy:latest"})) cm := objs.kind("ConfigMap").nameEquals("istio-sidecar-injector").Unstructured() // TODO: change values to some nicer format rather than text block. g.Expect(cm).Should(HavePathValueMatchRegex(PathValue{"data.values", `.*"includeIPRanges"\: "172\.30\.0\.0/16,172\.21\.0\.0/16".*`})) } func TestManifestGenerateFlags(t *testing.T) { flagOutputDir := createTempDirOrFail(t, "flag-output") flagOutputValuesDir := createTempDirOrFail(t, "flag-output-values") runTestGroup(t, testGroup{ { desc: "all_on", diffIgnore: "ConfigMap:*:istio", showOutputFileInPullRequest: true, }, { desc: "flag_values_enable_egressgateway", diffSelect: "Service:*:istio-egressgateway", flags: "--set values.gateways.istio-egressgateway.enabled=true", noInput: true, }, { desc: "flag_output", flags: "-o " + flagOutputDir, diffSelect: "Deployment:*:istiod", outputDir: flagOutputDir, }, { desc: "flag_output_set_values", diffSelect: "Deployment:*:istio-ingressgateway", flags: "-s values.global.proxy.image=mynewproxy -o " + flagOutputValuesDir, outputDir: flagOutputValuesDir, noInput: true, }, { desc: "flag_force", diffSelect: "no:resources:selected", flags: "--force", }, }) removeDirOrFail(t, flagOutputDir) removeDirOrFail(t, flagOutputValuesDir) } func TestManifestGeneratePilot(t *testing.T) { runTestGroup(t, testGroup{ { desc: "pilot_default", diffIgnore: "CustomResourceDefinition:*:*,ConfigMap:*:istio", }, { desc: "pilot_k8s_settings", diffSelect: "Deployment:*:istiod,HorizontalPodAutoscaler:*:istiod", }, { desc: "pilot_override_values", diffSelect: "Deployment:*:istiod,HorizontalPodAutoscaler:*:istiod", }, { desc: "pilot_override_kubernetes", diffSelect: "Deployment:*:istiod, Service:*:istiod,MutatingWebhookConfiguration:*:istio-sidecar-injector,ClusterRoleBinding::istio-reader-istio-system", }, // TODO https://github.com/istio/istio/issues/22347 this is broken for overriding things to default value // This can be seen from REGISTRY_ONLY not applying { desc: "pilot_merge_meshconfig", diffSelect: "ConfigMap:*:istio$", }, }) } func TestManifestGenerateGateway(t *testing.T) { runTestGroup(t, testGroup{ { desc: "ingressgateway_k8s_settings", diffSelect: "Deployment:*:istio-ingressgateway, Service:*:istio-ingressgateway", }, }) } // TestManifestGenerateHelmValues tests whether enabling components through the values passthrough interface works as // expected i.e. without requiring enablement also in IstioOperator API. 
func TestManifestGenerateHelmValues(t *testing.T) { runTestGroup(t, testGroup{ { desc: "helm_values_enablement", diffSelect: "Deployment:*:istio-egressgateway, Service:*:istio-egressgateway", }, }) } func TestManifestGenerateOrdered(t *testing.T) { // Since this is testing the special case of stable YAML output order, it // does not use the established test group pattern inPath := filepath.Join(testDataDir, "input/all_on.yaml") got1, err := runManifestGenerate([]string{inPath}, "", snapshotCharts) if err != nil { t.Fatal(err) } got2, err := runManifestGenerate([]string{inPath}, "", snapshotCharts) if err != nil { t.Fatal(err) } if got1 != got2 { fmt.Printf("%s", util.YAMLDiff(got1, got2)) t.Errorf("stable_manifest: Manifest generation is not producing stable text output.") } } func TestManifestGenerateFlagAliases(t *testing.T) { inPath := filepath.Join(testDataDir, "input/all_on.yaml") gotSet, err := runManifestGenerate([]string{inPath}, "--set revision=foo", snapshotCharts) if err != nil { t.Fatal(err) } gotAlias, err := runManifestGenerate([]string{inPath}, "--revision=foo", snapshotCharts) if err != nil { t.Fatal(err) } if gotAlias != gotSet { t.Errorf("Flag aliases not producing same output: with --set: \n\n%s\n\nWith alias:\n\n%s\nDiff:\n\n%s\n", gotSet, gotAlias, util.YAMLDiff(gotSet, gotAlias)) } } func TestMultiICPSFiles(t *testing.T) { inPathBase := filepath.Join(testDataDir, "input/all_off.yaml") inPathOverride := filepath.Join(testDataDir, "input/helm_values_enablement.yaml") got, err := runManifestGenerate([]string{inPathBase, inPathOverride}, "", snapshotCharts) if err != nil { t.Fatal(err) } outPath := filepath.Join(testDataDir, "output/helm_values_enablement"+goldenFileSuffixHideChangesInReview) want, err := readFile(outPath) if err != nil { t.Fatal(err) } diffSelect := "Deployment:*:istio-egressgateway, Service:*:istio-egressgateway" got, err = compare.FilterManifest(got, diffSelect, "") if err != nil { t.Errorf("error selecting from output manifest: %v", err) } diff := compare.YAMLCmp(got, want) if diff != "" { t.Errorf("`manifest generate` diff = %s", diff) } } func TestBareSpec(t *testing.T) { inPathBase := filepath.Join(testDataDir, "input/bare_spec.yaml") _, err := runManifestGenerate([]string{inPathBase}, "", liveCharts) if err != nil { t.Fatal(err) } } func TestBareValues(t *testing.T) { inPathBase := filepath.Join(testDataDir, "input/bare_values.yaml") // As long as the generate doesn't panic, we pass it. bare_values.yaml doesn't // overlay well because JSON doesn't handle null values, and our charts // don't expect values to be blown away. _, _ = runManifestGenerate([]string{inPathBase}, "", liveCharts) } func TestBogusControlPlaneSec(t *testing.T) { inPathBase := filepath.Join(testDataDir, "input/bogus_cps.yaml") _, err := runManifestGenerate([]string{inPathBase}, "", liveCharts) if err != nil { t.Fatal(err) } } func TestInstallPackagePath(t *testing.T) { serverDir, err := ioutil.TempDir(os.TempDir(), "istio-test-server-*") if err != nil { t.Fatal(err) } defer os.RemoveAll(serverDir) if err := tgz.Create(string(liveCharts), filepath.Join(serverDir, testTGZFilename)); err != nil { t.Fatal(err) } srv := httpserver.NewServer(serverDir) runTestGroup(t, testGroup{ { // Use some arbitrary small test input (pilot only) since we are testing the local filesystem code here, not // manifest generation. 
desc: "install_package_path", diffSelect: "Deployment:*:istiod", flags: "--set installPackagePath=" + string(liveCharts), }, { // Specify both charts and profile from local filesystem. desc: "install_package_path", diffSelect: "Deployment:*:istiod", flags: fmt.Sprintf("--set installPackagePath=%s --set profile=%s/profiles/default.yaml", string(liveCharts), string(liveCharts)), }, { // --force is needed for version mismatch. desc: "install_package_path", diffSelect: "Deployment:*:istiod", flags: "--force --set installPackagePath=" + srv.URL() + "/" + testTGZFilename, }, }) } // TestTrailingWhitespace ensures there are no trailing spaces in the manifests // This is important because `kubectl edit` and other commands will get escaped if they are present // making it hard to read/edit func TestTrailingWhitespace(t *testing.T) { got, err := runManifestGenerate([]string{}, "--set values.gateways.istio-egressgateway.enabled=true", liveCharts) if err != nil { t.Fatal(err) } lines := strings.Split(got, "\n") for i, l := range lines { if strings.HasSuffix(l, " ") { t.Errorf("Line %v has a trailing space: [%v]. Context: %v", i, l, strings.Join(lines[i-5:i+5], "\n")) } } } func validateReferentialIntegrity(t *testing.T, objs object.K8sObjects, cname string, deploymentSelector map[string]string) { t.Run(cname, func(t *testing.T) { deployment := mustFindObject(t, objs, cname, name.DeploymentStr) service := mustFindObject(t, objs, cname, name.ServiceStr) pdb := mustFindObject(t, objs, cname, name.PDBStr) hpa := mustFindObject(t, objs, cname, name.HPAStr) podLabels := mustGetLabels(t, deployment, "spec.template.metadata.labels") // Check all selectors align mustSelect(t, mustGetLabels(t, pdb, "spec.selector.matchLabels"), podLabels) mustSelect(t, mustGetLabels(t, service, "spec.selector"), podLabels) mustSelect(t, mustGetLabels(t, deployment, "spec.selector.matchLabels"), podLabels) if hpaName := mustGetPath(t, hpa, "spec.scaleTargetRef.name"); cname != hpaName { t.Fatalf("HPA does not match deployment: %v != %v", cname, hpaName) } serviceAccountName := mustGetPath(t, deployment, "spec.template.spec.serviceAccountName").(string) mustFindObject(t, objs, serviceAccountName, name.SAStr) // Check we aren't changing immutable fields. This only matters for in place upgrade (non revision) // This one is not a selector, it must be an exact match if sel := mustGetLabels(t, deployment, "spec.selector.matchLabels"); !reflect.DeepEqual(deploymentSelector, sel) { t.Fatalf("Depployment selectors are immutable, but changed since 1.5. 
Was %v, now is %v", deploymentSelector, sel) } }) } // This test enforces that objects that reference other objects do so properly, such as Service selecting deployment func TestConfigSelectors(t *testing.T) { got, err := runManifestGenerate([]string{}, "--set values.gateways.istio-egressgateway.enabled=true", liveCharts) if err != nil { t.Fatal(err) } objs, err := object.ParseK8sObjectsFromYAMLManifest(got) if err != nil { t.Fatal(err) } gotRev, e := runManifestGenerate([]string{}, "--set revision=canary", liveCharts) if e != nil { t.Fatal(e) } objsRev, err := object.ParseK8sObjectsFromYAMLManifest(gotRev) if err != nil { t.Fatal(err) } istiod15Selector := map[string]string{ "istio": "pilot", } istiodCanary16Selector := map[string]string{ "app": "istiod", "istio.io/rev": "canary", } ingress15Selector := map[string]string{ "app": "istio-ingressgateway", "istio": "ingressgateway", } egress15Selector := map[string]string{ "app": "istio-egressgateway", "istio": "egressgateway", } // Validate references within the same deployment validateReferentialIntegrity(t, objs, "istiod", istiod15Selector) validateReferentialIntegrity(t, objs, "istio-ingressgateway", ingress15Selector) validateReferentialIntegrity(t, objs, "istio-egressgateway", egress15Selector) validateReferentialIntegrity(t, objsRev, "istiod-canary", istiodCanary16Selector) t.Run("cross revision", func(t *testing.T) { // Istiod revisions have complicated cross revision implications. We should assert these are correct // First we fetch all the objects for our default install cname := "istiod" deployment := mustFindObject(t, objs, cname, name.DeploymentStr) service := mustFindObject(t, objs, cname, name.ServiceStr) pdb := mustFindObject(t, objs, cname, name.PDBStr) podLabels := mustGetLabels(t, deployment, "spec.template.metadata.labels") // Next we fetch all the objects for a revision install nameRev := "istiod-canary" deploymentRev := mustFindObject(t, objsRev, nameRev, name.DeploymentStr) hpaRev := mustFindObject(t, objsRev, nameRev, name.HPAStr) serviceRev := mustFindObject(t, objsRev, nameRev, name.ServiceStr) pdbRev := mustFindObject(t, objsRev, nameRev, name.PDBStr) podLabelsRev := mustGetLabels(t, deploymentRev, "spec.template.metadata.labels") // Make sure default and revisions do not cross mustNotSelect(t, mustGetLabels(t, serviceRev, "spec.selector"), podLabels) mustNotSelect(t, mustGetLabels(t, service, "spec.selector"), podLabelsRev) mustNotSelect(t, mustGetLabels(t, pdbRev, "spec.selector.matchLabels"), podLabels) mustNotSelect(t, mustGetLabels(t, pdb, "spec.selector.matchLabels"), podLabelsRev) // Make sure the scaleTargetRef points to the correct Deployment if hpaName := mustGetPath(t, hpaRev, "spec.scaleTargetRef.name"); nameRev != hpaName { t.Fatalf("HPA does not match deployment: %v != %v", nameRev, hpaName) } // Check selection of previous versions . This only matters for in place upgrade (non revision) podLabels15 := map[string]string{ "app": "istiod", "istio": "pilot", } mustSelect(t, mustGetLabels(t, service, "spec.selector"), podLabels15) mustNotSelect(t, mustGetLabels(t, serviceRev, "spec.selector"), podLabels15) mustSelect(t, mustGetLabels(t, pdb, "spec.selector.matchLabels"), podLabels15) mustNotSelect(t, mustGetLabels(t, pdbRev, "spec.selector.matchLabels"), podLabels15) }) } // TestLDFlags checks whether building mesh command with // -ldflags "-X istio.io/pkg/version.buildHub=myhub -X istio.io/pkg/version.buildVersion=mytag" // results in these values showing up in a generated manifest. 
func TestLDFlags(t *testing.T) { tmpHub, tmpTag := version.DockerInfo.Hub, version.DockerInfo.Tag defer func() { version.DockerInfo.Hub, version.DockerInfo.Tag = tmpHub, tmpTag }() version.DockerInfo.Hub = "testHub" version.DockerInfo.Tag = "testTag" l := clog.NewConsoleLogger(os.Stdout, os.Stderr, installerScope) _, iop, err := manifest.GenerateConfig(nil, []string{"installPackagePath=" + string(liveCharts)}, true, nil, l) if err != nil { t.Fatal(err) } if iop.Spec.Hub != version.DockerInfo.Hub || iop.Spec.Tag != version.DockerInfo.Tag { t.Fatalf("DockerInfoHub, DockerInfoTag got: %s,%s, want: %s, %s", iop.Spec.Hub, iop.Spec.Tag, version.DockerInfo.Hub, version.DockerInfo.Tag) } } func runTestGroup(t *testing.T, tests testGroup) { for _, tt := range tests { tt := tt t.Run(tt.desc, func(t *testing.T) { t.Parallel() inPath := filepath.Join(testDataDir, "input", tt.desc+".yaml") outputSuffix := goldenFileSuffixHideChangesInReview if tt.showOutputFileInPullRequest { outputSuffix = goldenFileSuffixShowChangesInReview } outPath := filepath.Join(testDataDir, "output", tt.desc+outputSuffix) var filenames []string if !tt.noInput { filenames = []string{inPath} } csource := snapshotCharts if tt.chartSource != "" { csource = tt.chartSource } got, err := runManifestGenerate(filenames, tt.flags, csource) if err != nil { t.Fatal(err) } if tt.outputDir != "" { got, err = util.ReadFilesWithFilter(tt.outputDir, func(fileName string) bool { return strings.HasSuffix(fileName, ".yaml") }) if err != nil { t.Fatal(err) } } diffSelect := "*:*:*" if tt.diffSelect != "" { diffSelect = tt.diffSelect got, err = compare.FilterManifest(got, diffSelect, "") if err != nil { t.Errorf("error selecting from output manifest: %v", err) } } tutil.RefreshGoldenFile([]byte(got), outPath, t) want, err := readFile(outPath) if err != nil { t.Fatal(err) } for _, v := range []bool{true, false} { diff, err := compare.ManifestDiffWithRenameSelectIgnore(got, want, "", diffSelect, tt.diffIgnore, v) if err != nil { t.Fatal(err) } if diff != "" { t.Errorf("%s: got:\n%s\nwant:\n%s\n(-got, +want)\n%s\n", tt.desc, "", "", diff) } } }) } } // nolint: unparam func generateManifest(inFile, flags string, chartSource chartSourceType) (string, object.K8sObjects, error) { inPath := filepath.Join(testDataDir, "input", inFile+".yaml") manifest, err := runManifestGenerate([]string{inPath}, flags, chartSource) if err != nil { return "", nil, fmt.Errorf("error %s: %s", err, manifest) } objs, err := object.ParseK8sObjectsFromYAMLManifest(manifest) return manifest, objs, err } // runManifestGenerate runs the manifest generate command. If filenames is set, passes the given filenames as -f flag, // flags is passed to the command verbatim. If you set both flags and path, make sure to not use -f in flags. 
func runManifestGenerate(filenames []string, flags string, chartSource chartSourceType) (string, error) { return runManifestCommand("generate", filenames, flags, chartSource) } func mustGetWebhook(t test.Failer, obj object.K8sObject) []v1.MutatingWebhook { t.Helper() path := mustGetPath(t, obj, "webhooks") by, err := json.Marshal(path) if err != nil { t.Fatal(err) } var mwh []v1.MutatingWebhook if err := json.Unmarshal(by, &mwh); err != nil { t.Fatal(err) } return mwh } func getWebhooks(t *testing.T, setFlags string, webhookName string) []v1.MutatingWebhook { t.Helper() got, err := runManifestGenerate([]string{}, setFlags, liveCharts) if err != nil { t.Fatal(err) } objs, err := object.ParseK8sObjectsFromYAMLManifest(got) if err != nil { t.Fatal(err) } return mustGetWebhook(t, mustFindObject(t, objs, webhookName, name.MutatingWebhookConfigurationStr)) } func getWebhooksFromYaml(t *testing.T, yml string) []v1.MutatingWebhook { t.Helper() objs, err := object.ParseK8sObjectsFromYAMLManifest(yml) if err != nil { t.Fatal(err) } if len(objs) != 1 { t.Fatal("expected one webhook") } return mustGetWebhook(t, *objs[0]) } type LabelSet struct { namespace, pod klabels.Set } func mergeWebhooks(whs ...[]v1.MutatingWebhook) []v1.MutatingWebhook { res := []v1.MutatingWebhook{} for _, wh := range whs { res = append(res, wh...) } return res } const ( // istioctl manifest generate --set values.sidecarInjectorWebhook.useLegacySelectors=true legacyDefaultInjector = ` apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: name: istio-sidecar-injector webhooks: - name: sidecar-injector.istio.io clientConfig: service: name: istiod namespace: istio-system path: "/inject" caBundle: "" sideEffects: None rules: - operations: [ "CREATE" ] apiGroups: [""] apiVersions: ["v1"] resources: ["pods"] failurePolicy: Fail admissionReviewVersions: ["v1beta1", "v1"] namespaceSelector: matchLabels: istio-injection: enabled objectSelector: matchExpressions: - key: "sidecar.istio.io/inject" operator: NotIn values: - "false" ` // istioctl manifest generate --set values.sidecarInjectorWebhook.useLegacySelectors=true --set revision=canary legacyRevisionInjector = ` apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: name: istio-sidecar-injector-canary webhooks: - name: sidecar-injector.istio.io clientConfig: service: name: istiod-canary namespace: istio-system path: "/inject" caBundle: "" sideEffects: None rules: - operations: [ "CREATE" ] apiGroups: [""] apiVersions: ["v1"] resources: ["pods"] failurePolicy: Fail admissionReviewVersions: ["v1beta1", "v1"] namespaceSelector: matchExpressions: - key: istio-injection operator: DoesNotExist - key: istio.io/rev operator: In values: - canary objectSelector: matchExpressions: - key: "sidecar.istio.io/inject" operator: NotIn values: - "false" ` ) // This test checks the mutating webhook selectors behavior, especially with interaction with revisions func TestWebhookSelector(t *testing.T) { // Setup various labels to be tested empty := klabels.Set{} revLabel := klabels.Set{"istio.io/rev": "canary"} legacyAndRevLabel := klabels.Set{"istio-injection": "enabled", "istio.io/rev": "canary"} legacyDisabledAndRevLabel := klabels.Set{"istio-injection": "disabled", "istio.io/rev": "canary"} legacyLabel := klabels.Set{"istio-injection": "enabled"} legacyLabelDisabled := klabels.Set{"istio-injection": "disabled"} objEnabled := klabels.Set{"sidecar.istio.io/inject": "true"} objDisable := klabels.Set{"sidecar.istio.io/inject": "false"} 
objEnabledAndRev := klabels.Set{"sidecar.istio.io/inject": "true", "istio.io/rev": "canary"} objDisableAndRev := klabels.Set{"sidecar.istio.io/inject": "false", "istio.io/rev": "canary"} defaultWebhook := getWebhooks(t, "", "istio-sidecar-injector") revWebhook := getWebhooks(t, "--set revision=canary", "istio-sidecar-injector-canary") autoWebhook := getWebhooks(t, "--set values.sidecarInjectorWebhook.enableNamespacesByDefault=true", "istio-sidecar-injector") legacyWebhook := getWebhooksFromYaml(t, legacyDefaultInjector) legacyRevWebhook := getWebhooksFromYaml(t, legacyRevisionInjector) // predicate is used to filter out "obvious" test cases, to avoid enumerating all cases // nolint: unparam predicate := func(ls LabelSet) (string, bool) { if ls.namespace.Get("istio-injection") == "disabled" { return "", true } if ls.pod.Get("sidecar.istio.io/inject") == "false" { return "", true } return "", false } // We test the cross product namespace and pod labels: // 1. revision label (istio.io/rev) // 2. inject label true (istio-injection on namespace, sidecar.istio.io/inject on pod) // 3. inject label false // 4. inject label true and revision label // 5. inject label false and revision label // 6. no label // However, we filter out all the disable cases, leaving us with a reasonable number of cases testLabels := []LabelSet{} for _, namespaceLabel := range []klabels.Set{empty, revLabel, legacyLabel, legacyLabelDisabled, legacyAndRevLabel, legacyDisabledAndRevLabel} { for _, podLabel := range []klabels.Set{empty, revLabel, objEnabled, objDisable, objEnabledAndRev, objDisableAndRev} { testLabels = append(testLabels, LabelSet{namespaceLabel, podLabel}) } } type assertion struct { namespaceLabel klabels.Set objectLabel klabels.Set match string } baseAssertions := []assertion{ {empty, empty, ""}, {empty, revLabel, "istiod-canary"}, {empty, objEnabled, "istiod"}, {empty, objEnabledAndRev, "istiod-canary"}, {revLabel, empty, "istiod-canary"}, {revLabel, revLabel, "istiod-canary"}, {revLabel, objEnabled, "istiod-canary"}, {revLabel, objEnabledAndRev, "istiod-canary"}, {legacyLabel, empty, "istiod"}, {legacyLabel, objEnabled, "istiod"}, {legacyAndRevLabel, empty, "istiod"}, {legacyAndRevLabel, objEnabled, "istiod"}, // The behavior of these is a bit odd; they are explicitly selecting a revision but getting // the default Unfortunately, the legacy webhook selectors would select these, cause // duplicate injection, so we defer to the namespace label. {legacyLabel, revLabel, "istiod"}, {legacyAndRevLabel, revLabel, "istiod"}, {legacyLabel, objEnabledAndRev, "istiod"}, {legacyAndRevLabel, objEnabledAndRev, "istiod"}, } cases := []struct { name string webhooks []v1.MutatingWebhook checks []assertion }{ { name: "base", webhooks: mergeWebhooks(defaultWebhook, revWebhook), checks: baseAssertions, }, { // This is exactly the same as above, but empty/empty matches name: "auto injection", webhooks: mergeWebhooks(autoWebhook, revWebhook), checks: append([]assertion{{empty, empty, "istiod"}}, baseAssertions...), }, { // Upgrade from a legacy webhook to a new revision based // Note: we don't need non revision legacy -> non revision, since it will overwrite the webhook name: "revision upgrade", webhooks: mergeWebhooks(legacyWebhook, revWebhook), checks: append([]assertion{ {empty, objEnabled, ""}, // Legacy one requires namespace label }, baseAssertions...), }, { // Use new default webhook, while we still have a legacy revision one around. 
name: "inplace upgrade", webhooks: mergeWebhooks(defaultWebhook, legacyRevWebhook), checks: append([]assertion{ {empty, revLabel, ""}, // Legacy one requires namespace label {empty, objEnabledAndRev, ""}, // Legacy one requires namespace label }, baseAssertions...), }, } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { whs := tt.webhooks for _, s := range testLabels { t.Run(fmt.Sprintf("ns:%v pod:%v", s.namespace, s.pod), func(t *testing.T) { found := "" match := 0 for i, wh := range whs { sn := wh.ClientConfig.Service.Name matches := selectorMatches(t, wh.NamespaceSelector, s.namespace) && selectorMatches(t, wh.ObjectSelector, s.pod) if matches && found != "" { // There must be exactly one match, or we will double inject. t.Fatalf("matched multiple webhooks. Had %v, matched %v", found, sn) } if matches { found = sn match = i } } // If our predicate can tell us the expected match, use that if want, ok := predicate(s); ok { if want != found { t.Fatalf("expected webhook to go to service %q, found %q", want, found) } return } // Otherwise, look through our assertions for a matching one, and check that for _, w := range tt.checks { if w.namespaceLabel.String() == s.namespace.String() && w.objectLabel.String() == s.pod.String() { if found != w.match { if found != "" { t.Fatalf("expected webhook to go to service %q, found %q (from match %d)\nNamespace selector: %v\nObject selector: %v)", w.match, found, match, whs[match].NamespaceSelector.MatchExpressions, whs[match].ObjectSelector.MatchExpressions) } else { t.Fatalf("expected webhook to go to service %q, found %q", w.match, found) } } return } } // If none match, a test case is missing for the label set. t.Fatalf("no assertion for namespace=%v pod=%v", s.namespace, s.pod) }) } }) } } func selectorMatches(t *testing.T, selector *metav1.LabelSelector, labels klabels.Set) bool { t.Helper() // From webhook spec: "Default to the empty LabelSelector, which matches everything." if selector == nil { return true } s, err := metav1.LabelSelectorAsSelector(selector) if err != nil { t.Fatal(err) } return s.Matches(labels) } func TestSidecarTemplate(t *testing.T) { runTestGroup(t, testGroup{ { desc: "sidecar_template", diffSelect: "ConfigMap:*:istio-sidecar-injector", }, }) }
operator/cmd/mesh/manifest-generate_test.go
0
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.005652351304888725, 0.0003052596584893763, 0.00016645838331896812, 0.00017423342796973884, 0.0005789262941107154 ]
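TestWebhookSelector in the record above ultimately reduces to evaluating Kubernetes label selectors against namespace and pod label sets via its selectorMatches helper. The standalone sketch below shows that same evaluation outside the test harness, using the same metav1.LabelSelectorAsSelector and klabels.Set APIs the test imports; the selector requirements and label sets are illustrative examples in the spirit of the revision-aware webhook, not values copied from a generated manifest.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	klabels "k8s.io/apimachinery/pkg/labels"
)

func main() {
	// A selector resembling the revision-aware webhook namespaceSelector:
	// match namespaces labeled istio.io/rev=canary that do not carry istio-injection.
	selector := &metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{Key: "istio-injection", Operator: metav1.LabelSelectorOpDoesNotExist},
			{Key: "istio.io/rev", Operator: metav1.LabelSelectorOpIn, Values: []string{"canary"}},
		},
	}
	s, err := metav1.LabelSelectorAsSelector(selector)
	if err != nil {
		panic(err)
	}
	// A few of the klabels.Set values the test enumerates in its cross product.
	for _, ns := range []klabels.Set{
		{},
		{"istio.io/rev": "canary"},
		{"istio-injection": "enabled", "istio.io/rev": "canary"},
	} {
		fmt.Printf("namespace labels %v -> matches: %v\n", ns, s.Matches(ns))
	}
}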
{ "id": 5, "code_window": [ " servers:\n", " - port:\n", " number: 15443\n", " name: tls\n", " protocol: TLS\n", " tls:\n", " mode: AUTO_PASSTHROUGH\n", " hosts:\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " number: 443\n" ], "file_path": "tests/integration/pilot/common/routing.go", "type": "replace", "edit_start_line_idx": 706 }
// +build integ // Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package common import ( "context" "fmt" "net/http" "reflect" "sort" "strings" "time" "istio.io/istio/pilot/pkg/model" "istio.io/istio/pkg/config/host" "istio.io/istio/pkg/config/protocol" "istio.io/istio/pkg/test" echoclient "istio.io/istio/pkg/test/echo/client" "istio.io/istio/pkg/test/echo/common/scheme" epb "istio.io/istio/pkg/test/echo/proto" "istio.io/istio/pkg/test/framework/components/echo" "istio.io/istio/pkg/test/framework/components/echo/common" "istio.io/istio/pkg/test/framework/components/echo/echotest" "istio.io/istio/pkg/test/framework/components/istio/ingress" "istio.io/istio/pkg/test/util/retry" "istio.io/istio/pkg/test/util/tmpl" ingressutil "istio.io/istio/tests/integration/security/sds_ingress/util" ) const httpVirtualServiceTmpl = ` apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: {{.VirtualServiceHost}} spec: gateways: - {{.Gateway}} hosts: - {{.VirtualServiceHost}} http: - route: - destination: host: {{.VirtualServiceHost}} port: number: {{.Port}} {{- if .MatchScheme }} match: - scheme: exact: {{.MatchScheme}} headers: request: add: istio-custom-header: user-defined-value {{- end }} --- ` func httpVirtualService(gateway, host string, port int) string { return tmpl.MustEvaluate(httpVirtualServiceTmpl, struct { Gateway string VirtualServiceHost string Port int MatchScheme string }{gateway, host, port, ""}) } const gatewayTmpl = ` apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: name: gateway spec: selector: istio: ingressgateway servers: - port: number: {{.GatewayPort}} name: {{.GatewayPortName}} protocol: {{.GatewayProtocol}} {{- if .Credential }} tls: mode: SIMPLE credentialName: {{.Credential}} {{- end }} hosts: - "{{.GatewayHost}}" --- ` func httpGateway(host string) string { return tmpl.MustEvaluate(gatewayTmpl, struct { GatewayHost string GatewayPort int GatewayPortName string GatewayProtocol string Credential string }{ host, 80, "http", "HTTP", "", }) } func virtualServiceCases(skipVM bool) []TrafficTestCase { noTProxy := echotest.FilterMatch(func(instance echo.Instance) bool { return !instance.Config().IsTProxy() }) var cases []TrafficTestCase cases = append(cases, TrafficTestCase{ name: "added header", config: ` apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: default spec: hosts: - {{ .dstSvc }} http: - route: - destination: host: {{ .dstSvc }} headers: request: add: istio-custom-header: user-defined-value`, opts: echo.CallOptions{ PortName: "http", Count: 1, Validator: echo.And( echo.ExpectOK(), echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { return ExpectString(response.RawResponse["Istio-Custom-Header"], "user-defined-value", "request header") }) })), }, workloadAgnostic: true, }, TrafficTestCase{ name: "set header", config: ` apiVersion: networking.istio.io/v1alpha3 kind: VirtualService 
metadata: name: default spec: hosts: - {{ (index .dst 0).Config.Service }} http: - route: - destination: host: {{ (index .dst 0).Config.Service }} headers: request: set: x-custom: some-value`, opts: echo.CallOptions{ PortName: "http", Count: 1, Validator: echo.And( echo.ExpectOK(), echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { return ExpectString(response.RawResponse["X-Custom"], "some-value", "added request header") }) })), }, workloadAgnostic: true, }, TrafficTestCase{ name: "set authority header", config: ` apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: default spec: hosts: - {{ (index .dst 0).Config.Service }} http: - route: - destination: host: {{ (index .dst 0).Config.Service }} headers: request: set: :authority: my-custom-authority`, opts: echo.CallOptions{ PortName: "http", Count: 1, Validator: echo.And( echo.ExpectOK(), echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { return ExpectString(response.RawResponse["Host"], "my-custom-authority", "added authority header") }) })), }, workloadAgnostic: true, }, TrafficTestCase{ name: "redirect", config: ` apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: default spec: hosts: - {{ .dstSvc }} http: - match: - uri: exact: /foo redirect: uri: /new/path - match: - uri: exact: /new/path route: - destination: host: {{ .dstSvc }}`, opts: echo.CallOptions{ PortName: "http", Path: "/foo?key=value", FollowRedirects: true, Count: 1, Validator: echo.And( echo.ExpectOK(), echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { return ExpectString(response.URL, "/new/path?key=value", "URL") }) })), }, workloadAgnostic: true, }, TrafficTestCase{ name: "rewrite uri", config: ` apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: default spec: hosts: - {{ .dstSvc }} http: - match: - uri: exact: /foo rewrite: uri: /new/path route: - destination: host: {{ .dstSvc }}`, opts: echo.CallOptions{ PortName: "http", Path: "/foo?key=value#hash", Count: 1, Validator: echo.And( echo.ExpectOK(), echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { return ExpectString(response.URL, "/new/path?key=value", "URL") }) })), }, workloadAgnostic: true, }, TrafficTestCase{ name: "rewrite authority", config: ` apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: default spec: hosts: - {{ .dstSvc }} http: - match: - uri: exact: /foo rewrite: authority: new-authority route: - destination: host: {{ .dstSvc }}`, opts: echo.CallOptions{ PortName: "http", Path: "/foo", Count: 1, Validator: echo.And( echo.ExpectOK(), echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { return ExpectString(response.Host, "new-authority", "authority") }) })), }, workloadAgnostic: true, }, TrafficTestCase{ name: "cors", // TODO https://github.com/istio/istio/issues/31532 targetFilters: []echotest.Filter{noTProxy, echotest.Not(echotest.VirtualMachines)}, config: ` apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: default spec: hosts: - {{ .dstSvc }} 
http: - corsPolicy: allowOrigins: - exact: cors.com allowMethods: - POST - GET allowCredentials: false allowHeaders: - X-Foo-Bar - X-Foo-Baz maxAge: "24h" route: - destination: host: {{ .dstSvc }} `, children: []TrafficCall{ { name: "preflight", opts: func() echo.CallOptions { header := http.Header{} header.Add("Origin", "cors.com") header.Add("Access-Control-Request-Method", "DELETE") return echo.CallOptions{ PortName: "http", Method: "OPTIONS", Headers: header, Count: 1, Validator: echo.And( echo.ExpectOK(), echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { if err := ExpectString(response.RawResponse["Access-Control-Allow-Origin"], "cors.com", "preflight CORS origin"); err != nil { return err } if err := ExpectString(response.RawResponse["Access-Control-Allow-Methods"], "POST,GET", "preflight CORS method"); err != nil { return err } if err := ExpectString(response.RawResponse["Access-Control-Allow-Headers"], "X-Foo-Bar,X-Foo-Baz", "preflight CORS headers"); err != nil { return err } if err := ExpectString(response.RawResponse["Access-Control-Max-Age"], "86400", "preflight CORS max age"); err != nil { return err } return nil }) })), } }(), }, { name: "get", opts: func() echo.CallOptions { header := http.Header{} header.Add("Origin", "cors.com") return echo.CallOptions{ PortName: "http", Headers: header, Count: 1, Validator: echo.And( echo.ExpectOK(), echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return ExpectString(response[0].RawResponse["Access-Control-Allow-Origin"], "cors.com", "GET CORS origin") })), } }(), }, { // GET without matching origin name: "get no origin match", opts: echo.CallOptions{ PortName: "http", Count: 1, Validator: echo.And( echo.ExpectOK(), echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return ExpectString(response[0].RawResponse["Access-Control-Allow-Origin"], "", "mismatched CORS origin") })), }, }, }, workloadAgnostic: true, }, ) // reduce the total # of subtests that don't give valuable coverage or just don't work noNaked := echotest.FilterMatch(echo.Not(echo.IsNaked())) noHeadless := echotest.FilterMatch(echo.Not(echo.IsHeadless())) noExternal := echotest.FilterMatch(echo.Not(echo.IsExternal())) for i, tc := range cases { tc.sourceFilters = append(tc.sourceFilters, noNaked, noHeadless) tc.targetFilters = append(tc.targetFilters, noNaked, noHeadless) cases[i] = tc } splits := [][]int{ {50, 25, 25}, {80, 10, 10}, } if skipVM { splits = [][]int{ {50, 50}, {80, 20}, } } for _, split := range splits { split := split cases = append(cases, TrafficTestCase{ name: fmt.Sprintf("shifting-%d", split[0]), toN: len(split), sourceFilters: []echotest.Filter{noHeadless, noNaked}, targetFilters: []echotest.Filter{noHeadless, noExternal}, templateVars: func(_ echo.Callers, _ echo.Instances) map[string]interface{} { return map[string]interface{}{ "split": split, } }, config: ` {{ $split := .split }} apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: default spec: hosts: - {{ ( index .dstSvcs 0) }} http: - route: {{- range $idx, $svc := .dstSvcs }} - destination: host: {{ $svc }} weight: {{ ( index $split $idx ) }} {{- end }} `, validateForN: func(src echo.Caller, dests echo.Services) echo.Validator { return echo.And( echo.ExpectOK(), echo.ValidatorFunc(func(responses echoclient.ParsedResponses, err error) error { errorThreshold := 10 if len(split) != len(dests) { // shouldn't happen 
return fmt.Errorf("split configured for %d destinations, but framework gives %d", len(split), len(dests)) } splitPerHost := map[string]int{} for i, pct := range split { splitPerHost[dests.Services()[i]] = pct } for host, exp := range splitPerHost { hostResponses := responses.Match(func(r *echoclient.ParsedResponse) bool { return strings.HasPrefix(r.Hostname, host) }) if !AlmostEquals(len(hostResponses), exp, errorThreshold) { return fmt.Errorf("expected %v calls to %q, got %v", exp, host, len(hostResponses)) } // echotest should have filtered the deployment to only contain reachable clusters hostDests := dests.Instances().Match(echo.Service(host)) targetClusters := hostDests.Clusters() // don't check headless since lb is unpredictable headlessTarget := hostDests.ContainsMatch(echo.IsHeadless()) if !headlessTarget && len(targetClusters.ByNetwork()[src.(echo.Instance).Config().Cluster.NetworkName()]) > 1 { // Conditionally check reached clusters to work around connection load balancing issues // See https://github.com/istio/istio/issues/32208 for details // We want to skip this for requests from the cross-network pod if err := hostResponses.CheckReachedClusters(targetClusters); err != nil { return fmt.Errorf("did not reach all clusters for %s: %v", host, err) } } } return nil })) }, opts: echo.CallOptions{ PortName: "http", Count: 100, }, workloadAgnostic: true, }) } return cases } func HostHeader(header string) http.Header { h := http.Header{} h["Host"] = []string{header} return h } // tlsOriginationCases contains tests TLS origination from DestinationRule func tlsOriginationCases(apps *EchoDeployments) []TrafficTestCase { tc := TrafficTestCase{ name: "", config: fmt.Sprintf(` apiVersion: networking.istio.io/v1alpha3 kind: DestinationRule metadata: name: external spec: host: %s trafficPolicy: tls: mode: SIMPLE `, apps.External[0].Config().DefaultHostHeader), children: []TrafficCall{}, } expects := []struct { port int alpn string }{ {8888, "http/1.1"}, {8882, "h2"}, } for _, c := range apps.PodA { for _, e := range expects { c := c e := e tc.children = append(tc.children, TrafficCall{ name: fmt.Sprintf("%s: %s", c.Config().Cluster.StableName(), e.alpn), opts: echo.CallOptions{ Port: &echo.Port{ServicePort: e.port, Protocol: protocol.HTTP}, Count: 1, Address: apps.External[0].Address(), Headers: HostHeader(apps.External[0].Config().DefaultHostHeader), Scheme: scheme.HTTP, Validator: echo.And(echo.ExpectOK(), echo.ExpectKey("Alpn", e.alpn)), }, call: c.CallWithRetryOrFail, }) } } return []TrafficTestCase{tc} } // useClientProtocolCases contains tests use_client_protocol from DestinationRule func useClientProtocolCases(apps *EchoDeployments) []TrafficTestCase { var cases []TrafficTestCase client := apps.PodA destination := apps.PodC[0] cases = append(cases, TrafficTestCase{ name: "use client protocol with h2", config: useClientProtocolDestinationRule("use-client-protocol-h2", destination.Config().Service), call: client[0].CallWithRetryOrFail, opts: echo.CallOptions{ Target: destination, PortName: "http", Count: 1, HTTP2: true, Validator: echo.And( echo.ExpectOK(), echo.ExpectKey("Proto", "HTTP/2.0"), ), }, }, TrafficTestCase{ name: "use client protocol with h1", config: useClientProtocolDestinationRule("use-client-protocol-h1", destination.Config().Service), call: client[0].CallWithRetryOrFail, opts: echo.CallOptions{ PortName: "http", Count: 1, Target: destination, HTTP2: false, Validator: echo.And( echo.ExpectOK(), echo.ExpectKey("Proto", "HTTP/1.1"), ), }, }, ) return cases } // 
trafficLoopCases contains tests to ensure traffic does not loop through the sidecar func trafficLoopCases(apps *EchoDeployments) []TrafficTestCase { cases := []TrafficTestCase{} for _, c := range apps.PodA { for _, d := range apps.PodB { for _, port := range []string{"15001", "15006"} { c, d, port := c, d, port cases = append(cases, TrafficTestCase{ name: port, call: func(t test.Failer, options echo.CallOptions, retryOptions ...retry.Option) echoclient.ParsedResponses { dwl := d.WorkloadsOrFail(t)[0] cwl := c.WorkloadsOrFail(t)[0] resp, err := cwl.ForwardEcho(context.Background(), &epb.ForwardEchoRequest{ Url: fmt.Sprintf("http://%s:%s", dwl.Address(), port), Count: 1, }) // Ideally we would actually check to make sure we do not blow up the pod, // but I couldn't find a way to reliably detect this. if err == nil { t.Fatalf("expected request to fail, but it didn't: %v", resp) } return nil }, }) } } } return cases } // autoPassthroughCases tests that we cannot hit unexpected destinations when using AUTO_PASSTHROUGH func autoPassthroughCases(apps *EchoDeployments) []TrafficTestCase { cases := []TrafficTestCase{} // We test the cross product of all Istio ALPNs (or no ALPN), all mTLS modes, and various backends alpns := []string{"istio", "istio-peer-exchange", "istio-http/1.0", "istio-http/1.1", "istio-h2", ""} modes := []string{"STRICT", "PERMISSIVE", "DISABLE"} mtlsHost := host.Name(apps.PodA[0].Config().FQDN()) nakedHost := host.Name(apps.Naked[0].Config().FQDN()) httpsPort := FindPortByName("https").ServicePort httpsAutoPort := FindPortByName("auto-https").ServicePort snis := []string{ model.BuildSubsetKey(model.TrafficDirectionOutbound, "", mtlsHost, httpsPort), model.BuildDNSSrvSubsetKey(model.TrafficDirectionOutbound, "", mtlsHost, httpsPort), model.BuildSubsetKey(model.TrafficDirectionOutbound, "", nakedHost, httpsPort), model.BuildDNSSrvSubsetKey(model.TrafficDirectionOutbound, "", nakedHost, httpsPort), model.BuildSubsetKey(model.TrafficDirectionOutbound, "", mtlsHost, httpsAutoPort), model.BuildDNSSrvSubsetKey(model.TrafficDirectionOutbound, "", mtlsHost, httpsAutoPort), model.BuildSubsetKey(model.TrafficDirectionOutbound, "", nakedHost, httpsAutoPort), model.BuildDNSSrvSubsetKey(model.TrafficDirectionOutbound, "", nakedHost, httpsAutoPort), } for _, mode := range modes { childs := []TrafficCall{} for _, sni := range snis { for _, alpn := range alpns { alpn, sni, mode := alpn, sni, mode al := &epb.Alpn{Value: []string{alpn}} if alpn == "" { al = nil } childs = append(childs, TrafficCall{ name: fmt.Sprintf("mode:%v,sni:%v,alpn:%v", mode, sni, alpn), call: apps.EastWest.CallWithRetryOrFail, opts: echo.CallOptions{ Port: &echo.Port{ ServicePort: 15443, Protocol: protocol.HTTPS, }, ServerName: sni, Alpn: al, Validator: echo.ExpectError(), }, }, ) } } cases = append(cases, TrafficTestCase{ config: globalPeerAuthentication(mode) + ` --- apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: name: cross-network-gateway-test namespace: istio-system spec: selector: istio: eastwestgateway servers: - port: number: 15443 name: tls protocol: TLS tls: mode: AUTO_PASSTHROUGH hosts: - "*.local" `, children: childs, }) } return cases } func gatewayCases() []TrafficTestCase { templateParams := func(protocol protocol.Instance, src echo.Callers, dests echo.Instances) map[string]interface{} { host, dest, portN, cred := "*", dests[0], 80, "" if protocol.IsTLS() { host, portN, cred = dest.Config().FQDN(), 443, "cred" } return map[string]interface{}{ "IngressNamespace": 
src[0].(ingress.Instance).Namespace(), "GatewayHost": host, "GatewayPort": portN, "GatewayPortName": strings.ToLower(string(protocol)), "GatewayProtocol": string(protocol), "Gateway": "gateway", "VirtualServiceHost": dest.Config().FQDN(), "Port": dest.Config().PortByName("http").ServicePort, "Credential": cred, } } // clears the Target to avoid echo internals trying to match the protocol with the port on echo.Config noTarget := func(_ echo.Caller, _ echo.Instances, opts *echo.CallOptions) { opts.Target = nil } // allows setting the target indirectly via the host header fqdnHostHeader := func(src echo.Caller, dsts echo.Instances, opts *echo.CallOptions) { if opts.Headers == nil { opts.Headers = map[string][]string{} } opts.Headers["Host"] = []string{dsts[0].Config().FQDN()} noTarget(src, dsts, opts) } // SingleRegualrPod is already applied leaving one regular pod, to only regular pods should leave a single workload. singleTarget := []echotest.Filter{echotest.FilterMatch(echotest.RegularPod)} // the following cases don't actually target workloads, we use the singleTarget filter to avoid duplicate cases cases := []TrafficTestCase{ { name: "404", targetFilters: singleTarget, workloadAgnostic: true, viaIngress: true, config: httpGateway("*"), opts: echo.CallOptions{ Count: 1, Port: &echo.Port{ Protocol: protocol.HTTP, }, Headers: map[string][]string{ "Host": {"foo.bar"}, }, Validator: echo.ExpectCode("404"), }, setupOpts: noTarget, }, { name: "https redirect", targetFilters: singleTarget, workloadAgnostic: true, viaIngress: true, config: `apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: name: gateway spec: selector: istio: ingressgateway servers: - port: number: 80 name: http protocol: HTTP hosts: - "*" tls: httpsRedirect: true --- `, opts: echo.CallOptions{ Count: 1, Port: &echo.Port{ Protocol: protocol.HTTP, }, Validator: echo.ExpectCode("301"), }, setupOpts: fqdnHostHeader, }, { // See https://github.com/istio/istio/issues/27315 name: "https with x-forwarded-proto", targetFilters: singleTarget, workloadAgnostic: true, viaIngress: true, config: `apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: name: gateway spec: selector: istio: ingressgateway servers: - port: number: 80 name: http protocol: HTTP hosts: - "*" tls: httpsRedirect: true --- apiVersion: networking.istio.io/v1alpha3 kind: EnvoyFilter metadata: name: ingressgateway-redirect-config namespace: istio-system spec: configPatches: - applyTo: NETWORK_FILTER match: context: GATEWAY listener: filterChain: filter: name: envoy.filters.network.http_connection_manager patch: operation: MERGE value: typed_config: '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager xff_num_trusted_hops: 1 normalize_path: true workloadSelector: labels: istio: ingressgateway --- ` + httpVirtualServiceTmpl, opts: echo.CallOptions{ Count: 1, Port: &echo.Port{ Protocol: protocol.HTTP, }, Headers: map[string][]string{ // In real world, this may be set by a downstream LB that terminates the TLS "X-Forwarded-Proto": {"https"}, }, Validator: echo.ExpectOK(), }, setupOpts: fqdnHostHeader, templateVars: func(_ echo.Callers, dests echo.Instances) map[string]interface{} { dest := dests[0] return map[string]interface{}{ "Gateway": "gateway", "VirtualServiceHost": dest.Config().FQDN(), "Port": dest.Config().PortByName("http").ServicePort, } }, }, } for _, proto := range []protocol.Instance{protocol.HTTP, protocol.HTTPS} { proto, secret := proto, "" if proto.IsTLS() { secret = 
ingressutil.IngressKubeSecretYAML("cred", "{{.IngressNamespace}}", ingressutil.TLS, ingressutil.IngressCredentialA) } cases = append( cases, TrafficTestCase{ name: string(proto), config: gatewayTmpl + httpVirtualServiceTmpl + secret, templateVars: func(src echo.Callers, dests echo.Instances) map[string]interface{} { return templateParams(proto, src, dests) }, setupOpts: fqdnHostHeader, opts: echo.CallOptions{ Count: 1, Port: &echo.Port{ Protocol: proto, }, }, viaIngress: true, workloadAgnostic: true, }, TrafficTestCase{ name: fmt.Sprintf("%s scheme match", proto), config: gatewayTmpl + httpVirtualServiceTmpl + secret, templateVars: func(src echo.Callers, dests echo.Instances) map[string]interface{} { params := templateParams(proto, src, dests) params["MatchScheme"] = strings.ToLower(string(proto)) return params }, setupOpts: fqdnHostHeader, opts: echo.CallOptions{ Count: 1, Port: &echo.Port{ Protocol: proto, }, Validator: echo.And( echo.ExpectOK(), echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { // We check a header is added to ensure our VS actually applied return ExpectString(response.RawResponse["Istio-Custom-Header"], "user-defined-value", "request header") }) })), }, // to keep tests fast, we only run the basic protocol test per-workload and scheme match once (per cluster) targetFilters: singleTarget, viaIngress: true, workloadAgnostic: true, }, ) } return cases } func XFFGatewayCase(apps *EchoDeployments) []TrafficTestCase { cases := []TrafficTestCase{} destinationSets := []echo.Instances{ apps.PodA, } for _, d := range destinationSets { d := d if len(d) == 0 { continue } fqdn := d[0].Config().FQDN() cases = append(cases, TrafficTestCase{ name: d[0].Config().Service, config: httpGateway("*") + httpVirtualService("gateway", fqdn, d[0].Config().PortByName("http").ServicePort), skip: false, call: apps.Ingress.CallWithRetryOrFail, opts: echo.CallOptions{ Count: 1, Port: &echo.Port{ Protocol: protocol.HTTP, }, Headers: map[string][]string{ "X-Forwarded-For": {"56.5.6.7, 72.9.5.6, 98.1.2.3"}, "Host": {fqdn}, }, Validator: echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { externalAddress, ok := response.RawResponse["X-Envoy-External-Address"] if !ok { return fmt.Errorf("missing X-Envoy-External-Address Header") } if err := ExpectString(externalAddress, "72.9.5.6", "envoy-external-address header"); err != nil { return err } xffHeader, ok := response.RawResponse["X-Forwarded-For"] if !ok { return fmt.Errorf("missing X-Forwarded-For Header") } xffIPs := strings.Split(xffHeader, ",") if len(xffIPs) != 4 { return fmt.Errorf("did not receive expected 4 hosts in X-Forwarded-For header") } return ExpectString(strings.TrimSpace(xffIPs[1]), "72.9.5.6", "ip in xff header") }) }), }, }) } return cases } // serviceCases tests overlapping Services. There are a few cases. // Consider we have our base service B, with service port P and target port T // 1) Another service, B', with P -> T. In this case, both the listener and the cluster will conflict. // Because everything is workload oriented, this is not a problem unless they try to make them different // protocols (this is explicitly called out as "not supported") or control inbound connectionPool settings // (which is moving to Sidecar soon) // 2) Another service, B', with P -> T'. 
In this case, the listener will be distinct, since its based on the target. // The cluster, however, will be shared, which is broken, because we should be forwarding to T when we call B, and T' when we call B'. // 3) Another service, B', with P' -> T. In this case, the listener is shared. This is fine, with the exception of different protocols // The cluster is distinct. // 4) Another service, B', with P' -> T'. There is no conflicts here at all. func serviceCases(apps *EchoDeployments) []TrafficTestCase { cases := []TrafficTestCase{} for _, c := range apps.PodA { c := c // Case 1 // Identical to port "http" or service B, just behind another service name svc := fmt.Sprintf(`apiVersion: v1 kind: Service metadata: name: b-alt-1 labels: app: b spec: ports: - name: http port: %d targetPort: %d selector: app: b`, FindPortByName("http").ServicePort, FindPortByName("http").InstancePort) cases = append(cases, TrafficTestCase{ name: fmt.Sprintf("case 1 both match in cluster %v", c.Config().Cluster.StableName()), config: svc, call: c.CallWithRetryOrFail, opts: echo.CallOptions{ Count: 1, Address: "b-alt-1", Port: &echo.Port{ServicePort: FindPortByName("http").ServicePort, Protocol: protocol.HTTP}, Timeout: time.Millisecond * 100, Validator: echo.ExpectOK(), }, }) // Case 2 // We match the service port, but forward to a different port // Here we make the new target tcp so the test would fail if it went to the http port svc = fmt.Sprintf(`apiVersion: v1 kind: Service metadata: name: b-alt-2 labels: app: b spec: ports: - name: tcp port: %d targetPort: %d selector: app: b`, FindPortByName("http").ServicePort, common.WorkloadPorts[0].Port) cases = append(cases, TrafficTestCase{ name: fmt.Sprintf("case 2 service port match in cluster %v", c.Config().Cluster.StableName()), config: svc, call: c.CallWithRetryOrFail, opts: echo.CallOptions{ Count: 1, Address: "b-alt-2", Port: &echo.Port{ServicePort: FindPortByName("http").ServicePort, Protocol: protocol.TCP}, Scheme: scheme.TCP, Timeout: time.Millisecond * 100, Validator: echo.ExpectOK(), }, }) // Case 3 // We match the target port, but front with a different service port svc = fmt.Sprintf(`apiVersion: v1 kind: Service metadata: name: b-alt-3 labels: app: b spec: ports: - name: http port: 12345 targetPort: %d selector: app: b`, FindPortByName("http").InstancePort) cases = append(cases, TrafficTestCase{ name: fmt.Sprintf("case 3 target port match in cluster %v", c.Config().Cluster.StableName()), config: svc, call: c.CallWithRetryOrFail, opts: echo.CallOptions{ Count: 1, Address: "b-alt-3", Port: &echo.Port{ServicePort: 12345, Protocol: protocol.HTTP}, Timeout: time.Millisecond * 100, Validator: echo.ExpectOK(), }, }) // Case 4 // Completely new set of ports svc = fmt.Sprintf(`apiVersion: v1 kind: Service metadata: name: b-alt-4 labels: app: b spec: ports: - name: http port: 12346 targetPort: %d selector: app: b`, common.WorkloadPorts[1].Port) cases = append(cases, TrafficTestCase{ name: fmt.Sprintf("case 4 no match in cluster %v", c.Config().Cluster.StableName()), config: svc, call: c.CallWithRetryOrFail, opts: echo.CallOptions{ Count: 1, Address: "b-alt-4", Port: &echo.Port{ServicePort: 12346, Protocol: protocol.HTTP}, Timeout: time.Millisecond * 100, Validator: echo.ExpectOK(), }, }) } return cases } func flatten(clients ...[]echo.Instance) []echo.Instance { instances := []echo.Instance{} for _, c := range clients { instances = append(instances, c...) 
} return instances } // selfCallsCases checks that pods can call themselves func selfCallsCases() []TrafficTestCase { sourceFilters := []echotest.Filter{ echotest.Not(echotest.ExternalServices), echotest.Not(echotest.FilterMatch(echo.IsNaked())), echotest.Not(echotest.FilterMatch(echo.IsHeadless())), } comboFilters := []echotest.CombinationFilter{func(from echo.Instance, to echo.Instances) echo.Instances { return to.Match(echo.FQDN(from.Config().FQDN())) }} return []TrafficTestCase{ // Calls to the Service will go through envoy outbound and inbound, so we get envoy headers added { name: "to service", workloadAgnostic: true, sourceFilters: sourceFilters, comboFilters: comboFilters, opts: echo.CallOptions{ Count: 1, PortName: "http", Validator: echo.And(echo.ExpectOK(), echo.ExpectKey("X-Envoy-Attempt-Count", "1")), }, }, // Localhost calls will go directly to localhost, bypassing Envoy. No envoy headers added. { name: "to localhost", workloadAgnostic: true, sourceFilters: sourceFilters, comboFilters: comboFilters, setupOpts: func(_ echo.Caller, _ echo.Instances, opts *echo.CallOptions) { // the framework will try to set this when enumerating test cases opts.Target = nil }, opts: echo.CallOptions{ Count: 1, Address: "localhost", Port: &echo.Port{ServicePort: 8080}, Scheme: scheme.HTTP, Validator: echo.And(echo.ExpectOK(), echo.ExpectKey("X-Envoy-Attempt-Count", "")), }, }, // PodIP calls will go directly to podIP, bypassing Envoy. No envoy headers added. { name: "to podIP", workloadAgnostic: true, sourceFilters: sourceFilters, comboFilters: comboFilters, setupOpts: func(srcCaller echo.Caller, _ echo.Instances, opts *echo.CallOptions) { src := srcCaller.(echo.Instance) workloads, _ := src.Workloads() opts.Address = workloads[0].Address() // the framework will try to set this when enumerating test cases opts.Target = nil }, opts: echo.CallOptions{ Count: 1, Scheme: scheme.HTTP, Port: &echo.Port{ServicePort: 8080}, Validator: echo.And(echo.ExpectOK(), echo.ExpectKey("X-Envoy-Attempt-Count", "")), }, }, } } // Todo merge with security TestReachability code func protocolSniffingCases() []TrafficTestCase { cases := []TrafficTestCase{} type protocolCase struct { // The port we call port string // The actual type of traffic we send to the port scheme scheme.Instance } protocols := []protocolCase{ {"http", scheme.HTTP}, {"auto-http", scheme.HTTP}, {"tcp", scheme.TCP}, {"auto-tcp", scheme.TCP}, {"grpc", scheme.GRPC}, {"auto-grpc", scheme.GRPC}, } // so we can validate all clusters are hit for _, call := range protocols { call := call cases = append(cases, TrafficTestCase{ // TODO(https://github.com/istio/istio/issues/26798) enable sniffing tcp skip: call.scheme == scheme.TCP, name: call.port, opts: echo.CallOptions{ Count: 1, PortName: call.port, Scheme: call.scheme, Timeout: time.Second * 5, }, validate: func(src echo.Caller, dst echo.Instances) echo.Validator { if call.scheme == scheme.TCP { // no host header for TCP return echo.ExpectOK() } return echo.And( echo.ExpectOK(), echo.ExpectHost(dst[0].Config().HostHeader())) }, workloadAgnostic: true, }) } return cases } // Todo merge with security TestReachability code func instanceIPTests(apps *EchoDeployments) []TrafficTestCase { cases := []TrafficTestCase{} ipCases := []struct { name string endpoint string disableSidecar bool port string code int }{ // instance IP bind { name: "instance IP without sidecar", disableSidecar: true, port: "http-instance", code: 200, }, { name: "instance IP with wildcard sidecar", endpoint: "0.0.0.0", port: 
"http-instance", code: 200, }, { name: "instance IP with localhost sidecar", endpoint: "127.0.0.1", port: "http-instance", code: 503, }, { name: "instance IP with empty sidecar", endpoint: "", port: "http-instance", code: 200, }, // Localhost bind { name: "localhost IP without sidecar", disableSidecar: true, port: "http-localhost", code: 503, }, { name: "localhost IP with wildcard sidecar", endpoint: "0.0.0.0", port: "http-localhost", code: 503, }, { name: "localhost IP with localhost sidecar", endpoint: "127.0.0.1", port: "http-localhost", code: 200, }, { name: "localhost IP with empty sidecar", endpoint: "", port: "http-localhost", code: 503, }, // Wildcard bind { name: "wildcard IP without sidecar", disableSidecar: true, port: "http", code: 200, }, { name: "wildcard IP with wildcard sidecar", endpoint: "0.0.0.0", port: "http", code: 200, }, { name: "wildcard IP with localhost sidecar", endpoint: "127.0.0.1", port: "http", code: 200, }, { name: "wildcard IP with empty sidecar", endpoint: "", port: "http", code: 200, }, } for _, ipCase := range ipCases { for _, client := range apps.PodA { ipCase := ipCase client := client destination := apps.PodB[0] var config string if !ipCase.disableSidecar { config = fmt.Sprintf(` apiVersion: networking.istio.io/v1alpha3 kind: Sidecar metadata: name: sidecar spec: workloadSelector: labels: app: b egress: - hosts: - "./*" ingress: - port: number: %d protocol: HTTP defaultEndpoint: %s:%d `, FindPortByName(ipCase.port).InstancePort, ipCase.endpoint, FindPortByName(ipCase.port).InstancePort) } cases = append(cases, TrafficTestCase{ name: ipCase.name, call: client.CallWithRetryOrFail, config: config, opts: echo.CallOptions{ Count: 1, Target: destination, PortName: ipCase.port, Scheme: scheme.HTTP, Timeout: time.Second * 5, Validator: echo.ExpectCode(fmt.Sprint(ipCase.code)), }, }) } } return cases } type vmCase struct { name string from echo.Instance to echo.Instances host string } func DNSTestCases(apps *EchoDeployments, cniEnabled bool) []TrafficTestCase { makeSE := func(ips ...string) string { return tmpl.MustEvaluate(` apiVersion: networking.istio.io/v1alpha3 kind: ServiceEntry metadata: name: dns spec: hosts: - "fake.service.local" addresses: {{ range $ip := .IPs }} - "{{$ip}}" {{ end }} resolution: STATIC endpoints: [] ports: - number: 80 name: http protocol: HTTP `, map[string]interface{}{"IPs": ips}) } tcases := []TrafficTestCase{} ipv4 := "1.2.3.4" ipv6 := "1234:1234:1234::1234:1234:1234" dummyLocalhostServer := "127.0.0.1" cases := []struct { name string // TODO(https://github.com/istio/istio/issues/30282) support multiple vips ips string protocol string server string skipCNI bool expected []string }{ { name: "tcp ipv4", ips: ipv4, expected: []string{ipv4}, protocol: "tcp", }, { name: "udp ipv4", ips: ipv4, expected: []string{ipv4}, protocol: "udp", }, { name: "tcp ipv6", ips: ipv6, expected: []string{ipv6}, protocol: "tcp", }, { name: "udp ipv6", ips: ipv6, expected: []string{ipv6}, protocol: "udp", }, { // We should only capture traffic to servers in /etc/resolv.conf nameservers // This checks we do not capture traffic to other servers. // This is important for cases like app -> istio dns server -> dnsmasq -> upstream // If we captured all DNS traffic, we would loop dnsmasq traffic back to our server. 
name: "tcp localhost server", ips: ipv4, expected: []string{}, protocol: "tcp", skipCNI: true, server: dummyLocalhostServer, }, { name: "udp localhost server", ips: ipv4, expected: []string{}, protocol: "udp", skipCNI: true, server: dummyLocalhostServer, }, } for _, client := range flatten(apps.VM, apps.PodA, apps.PodTproxy) { for _, tt := range cases { if tt.skipCNI && cniEnabled { continue } tt, client := tt, client address := "fake.service.local?" if tt.protocol != "" { address += "&protocol=" + tt.protocol } if tt.server != "" { address += "&server=" + tt.server } tcases = append(tcases, TrafficTestCase{ name: fmt.Sprintf("%s/%s", client.Config().Service, tt.name), config: makeSE(tt.ips), call: client.CallWithRetryOrFail, opts: echo.CallOptions{ Scheme: scheme.DNS, Count: 1, Address: address, Validator: echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { ips := []string{} for _, v := range response.RawResponse { ips = append(ips, v) } sort.Strings(ips) if !reflect.DeepEqual(ips, tt.expected) { return fmt.Errorf("unexpected dns response: wanted %v, got %v", tt.expected, ips) } return nil }) }), }, }) } } svcCases := []struct { name string protocol string server string }{ { name: "tcp", protocol: "tcp", }, { name: "udp", protocol: "udp", }, } for _, client := range flatten(apps.VM, apps.PodA, apps.PodTproxy) { for _, tt := range svcCases { tt, client := tt, client aInCluster := apps.PodA.Match(echo.InCluster(client.Config().Cluster)) if len(aInCluster) == 0 { // The cluster doesn't contain A, but connects to a cluster containing A aInCluster = apps.PodA.Match(echo.InCluster(client.Config().Cluster.Primary())) } address := aInCluster[0].Config().FQDN() + "?" if tt.protocol != "" { address += "&protocol=" + tt.protocol } if tt.server != "" { address += "&server=" + tt.server } expected := aInCluster[0].Address() tcases = append(tcases, TrafficTestCase{ name: fmt.Sprintf("svc/%s/%s", client.Config().Service, tt.name), call: client.CallWithRetryOrFail, opts: echo.CallOptions{ Count: 1, Scheme: scheme.DNS, Address: address, Validator: echo.ValidatorFunc( func(response echoclient.ParsedResponses, _ error) error { return response.Check(func(_ int, response *echoclient.ParsedResponse) error { ips := []string{} for _, v := range response.RawResponse { ips = append(ips, v) } sort.Strings(ips) exp := []string{expected} if !reflect.DeepEqual(ips, exp) { return fmt.Errorf("unexpected dns response: wanted %v, got %v", exp, ips) } return nil }) }), }, }) } } return tcases } func VMTestCases(vms echo.Instances, apps *EchoDeployments) []TrafficTestCase { var testCases []vmCase for _, vm := range vms { testCases = append(testCases, vmCase{ name: "dns: VM to k8s cluster IP service name.namespace host", from: vm, to: apps.PodA, host: PodASvc + "." 
+ apps.Namespace.Name(), }, vmCase{ name: "dns: VM to k8s cluster IP service fqdn host", from: vm, to: apps.PodA, host: apps.PodA[0].Config().FQDN(), }, vmCase{ name: "dns: VM to k8s cluster IP service short name host", from: vm, to: apps.PodA, host: PodASvc, }, vmCase{ name: "dns: VM to k8s headless service", from: vm, to: apps.Headless.Match(echo.InCluster(vm.Config().Cluster.Primary())), host: apps.Headless[0].Config().FQDN(), }, vmCase{ name: "dns: VM to k8s statefulset service", from: vm, to: apps.StatefulSet.Match(echo.InCluster(vm.Config().Cluster.Primary())), host: apps.StatefulSet[0].Config().FQDN(), }, // TODO(https://github.com/istio/istio/issues/32552) re-enable //vmCase{ // name: "dns: VM to k8s statefulset instance.service", // from: vm, // to: apps.StatefulSet.Match(echo.InCluster(vm.Config().Cluster.Primary())), // host: fmt.Sprintf("%s-v1-0.%s", StatefulSetSvc, StatefulSetSvc), //}, //vmCase{ // name: "dns: VM to k8s statefulset instance.service.namespace", // from: vm, // to: apps.StatefulSet.Match(echo.InCluster(vm.Config().Cluster.Primary())), // host: fmt.Sprintf("%s-v1-0.%s.%s", StatefulSetSvc, StatefulSetSvc, apps.Namespace.Name()), //}, //vmCase{ // name: "dns: VM to k8s statefulset instance.service.namespace.svc", // from: vm, // to: apps.StatefulSet.Match(echo.InCluster(vm.Config().Cluster.Primary())), // host: fmt.Sprintf("%s-v1-0.%s.%s.svc", StatefulSetSvc, StatefulSetSvc, apps.Namespace.Name()), //}, //vmCase{ // name: "dns: VM to k8s statefulset instance FQDN", // from: vm, // to: apps.StatefulSet.Match(echo.InCluster(vm.Config().Cluster.Primary())), // host: fmt.Sprintf("%s-v1-0.%s", StatefulSetSvc, apps.StatefulSet[0].Config().FQDN()), //}, ) } for _, podA := range apps.PodA { testCases = append(testCases, vmCase{ name: "k8s to vm", from: podA, to: vms, }) } cases := make([]TrafficTestCase, 0) for _, c := range testCases { c := c validators := []echo.Validator{echo.ExpectOK()} if !c.to.ContainsMatch(echo.IsHeadless()) { // headless load-balancing can be inconsistent validators = append(validators, echo.ExpectReachedClusters(c.to.Clusters())) } cases = append(cases, TrafficTestCase{ name: fmt.Sprintf("%s from %s", c.name, c.from.Config().Cluster.StableName()), call: c.from.CallWithRetryOrFail, opts: echo.CallOptions{ // assume that all echos in `to` only differ in which cluster they're deployed in Target: c.to[0], PortName: "http", Address: c.host, Count: callsPerCluster * len(c.to), Validator: echo.And(validators...), }, }) } return cases } func destinationRule(app, mode string) string { return fmt.Sprintf(`apiVersion: networking.istio.io/v1beta1 kind: DestinationRule metadata: name: %s spec: host: %s trafficPolicy: tls: mode: %s --- `, app, app, mode) } func useClientProtocolDestinationRule(name, app string) string { return fmt.Sprintf(`apiVersion: networking.istio.io/v1beta1 kind: DestinationRule metadata: name: %s spec: host: %s trafficPolicy: tls: mode: DISABLE connectionPool: http: useClientProtocol: true --- `, name, app) } func peerAuthentication(app, mode string) string { return fmt.Sprintf(`apiVersion: security.istio.io/v1beta1 kind: PeerAuthentication metadata: name: %s spec: selector: matchLabels: app: %s mtls: mode: %s --- `, app, app, mode) } func globalPeerAuthentication(mode string) string { return fmt.Sprintf(`apiVersion: security.istio.io/v1beta1 kind: PeerAuthentication metadata: name: default spec: mtls: mode: %s --- `, mode) } func serverFirstTestCases(apps *EchoDeployments) []TrafficTestCase { cases := make([]TrafficTestCase, 0) clients 
:= apps.PodA destination := apps.PodC[0] configs := []struct { port string dest string auth string validator echo.Validator }{ // TODO: All these cases *should* succeed (except the TLS mismatch cases) - but don't due to issues in our implementation // For auto port, outbound request will be delayed by the protocol sniffer, regardless of configuration {"auto-tcp-server", "DISABLE", "DISABLE", echo.ExpectError()}, {"auto-tcp-server", "DISABLE", "PERMISSIVE", echo.ExpectError()}, {"auto-tcp-server", "DISABLE", "STRICT", echo.ExpectError()}, {"auto-tcp-server", "ISTIO_MUTUAL", "DISABLE", echo.ExpectError()}, {"auto-tcp-server", "ISTIO_MUTUAL", "PERMISSIVE", echo.ExpectError()}, {"auto-tcp-server", "ISTIO_MUTUAL", "STRICT", echo.ExpectError()}, // These is broken because we will still enable inbound sniffing for the port. Since there is no tls, // there is no server-first "upgrading" to client-first {"tcp-server", "DISABLE", "DISABLE", echo.ExpectOK()}, {"tcp-server", "DISABLE", "PERMISSIVE", echo.ExpectError()}, // Expected to fail, incompatible configuration {"tcp-server", "DISABLE", "STRICT", echo.ExpectError()}, {"tcp-server", "ISTIO_MUTUAL", "DISABLE", echo.ExpectError()}, // In these cases, we expect success // There is no sniffer on either side {"tcp-server", "DISABLE", "DISABLE", echo.ExpectOK()}, // On outbound, we have no sniffer involved // On inbound, the request is TLS, so its not server first {"tcp-server", "ISTIO_MUTUAL", "PERMISSIVE", echo.ExpectOK()}, {"tcp-server", "ISTIO_MUTUAL", "STRICT", echo.ExpectOK()}, } for _, client := range clients { for _, c := range configs { client, c := client, c cases = append(cases, TrafficTestCase{ name: fmt.Sprintf("%v:%v/%v", c.port, c.dest, c.auth), skip: apps.IsMulticluster(), // TODO stabilize tcp connection breaks config: destinationRule(destination.Config().Service, c.dest) + peerAuthentication(destination.Config().Service, c.auth), call: client.CallWithRetryOrFail, opts: echo.CallOptions{ Target: destination, PortName: c.port, Scheme: scheme.TCP, // Inbound timeout is 1s. We want to test this does not hit the listener filter timeout Timeout: time.Millisecond * 100, Count: 1, Validator: c.validator, }, }) } } return cases }
tests/integration/pilot/common/routing.go
1
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.07420551776885986, 0.001148221199400723, 0.00016177208453882486, 0.00017140059208031744, 0.005811321083456278 ]
{ "id": 5, "code_window": [ " servers:\n", " - port:\n", " number: 15443\n", " name: tls\n", " protocol: TLS\n", " tls:\n", " mode: AUTO_PASSTHROUGH\n", " hosts:\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " number: 443\n" ], "file_path": "tests/integration/pilot/common/routing.go", "type": "replace", "edit_start_line_idx": 706 }
{ "node": { "id": "{{ .nodeID }}", "cluster": "{{ .cluster }}", "locality": { {{ if .region }} "region": "{{ .region }}", {{ end }} {{ if .zone }} "zone": "{{ .zone }}", {{ end }} {{ if .sub_zone }} "sub_zone": "{{ .sub_zone }}", {{ end }} }, "metadata": {{ .meta_json_str }} }, "dynamic_resources": { "lds_config": { "ads": {} }, "cds_config": { "ads": {} }, "ads_config": { "api_type": "GRPC", "grpc_services": [ { "google_grpc": { "target_uri": "{{ .discovery_address }}", "stat_prefix": "googlegrpcxds", "channel_credentials": { "ssl_credentials": {} }, "call_credentials": { {{ if .sts }} "sts_service": { "token_exchange_service_uri": "http://localhost:{{ .sts_port }}/token", "subject_token_path": "/var/run/secrets/tokens/istio-token", "subject_token_type": "urn:ietf:params:oauth:token-type:jwt", "scope": "https://www.googleapis.com/auth/cloud-platform", } {{ else }} "google_compute_engine": {} {{ end }} }, "channel_args": { "args": { "grpc.http2.max_pings_without_data": { "int_value": 0 }, "grpc.keepalive_time_ms": { "int_value": 10000 }, "grpc.keepalive_timeout_ms": { "int_value": 20000 } } } }, "initial_metadata": [ {{ if .sts }} {{ if .gcp_project_id }} { "key": "x-goog-user-project", "value": "{{ .gcp_project_id }}" } {{ end }} {{ end }} ] } ] } }, "cluster_manager": { "load_stats_config": { "api_type": "GRPC", "grpc_services": [ { "google_grpc": { "target_uri": "{{ .discovery_address }}", "stat_prefix": "googlegrpcxds", "channel_credentials": { "ssl_credentials": {} }, "call_credentials": { {{ if .sts }} "sts_service": { "token_exchange_service_uri": "http://localhost:{{ .sts_port }}/token", "subject_token_path": "/var/run/secrets/tokens/istio-token", "subject_token_type": "urn:ietf:params:oauth:token-type:jwt", "scope": "https://www.googleapis.com/auth/cloud-platform", } {{ else }} "google_compute_engine": {} {{ end }} }, "channel_args": { "args": { "grpc.http2.max_pings_without_data": { "int_value": 0 }, "grpc.keepalive_time_ms": { "int_value": 10000 }, "grpc.keepalive_timeout_ms": { "int_value": 20000 } } } }, "initial_metadata": [ {{ if .sts }} {{ if .gcp_project_id }} { "key": "x-goog-user-project", "value": "{{ .gcp_project_id }}" } {{ end }} {{ end }} ] } ] } }, "admin": { "access_log_path": "/dev/null", "address": { "socket_address": { "address": "127.0.0.1", "port_value": {{ .config.ProxyAdminPort }} } } } {{ if .stackdriver }} , "tracing": { "http": { "name": "envoy.tracers.opencensus", "config": { "stackdriver_exporter_enabled": true, "stackdriver_project_id": "{{ .stackdriverProjectID }}", {{ if .sts_port }} "stackdriver_grpc_service": { "google_grpc": { "target_uri": "cloudtrace.googleapis.com", "stat_prefix": "oc_stackdriver_tracer", "channel_credentials": { "ssl_credentials": {} }, "call_credentials": { "sts_service": { "token_exchange_service_uri": "http://localhost:{{ .sts_port }}/token", "subject_token_path": "/var/run/secrets/tokens/istio-token", "subject_token_type": "urn:ietf:params:oauth:token-type:jwt", "scope": "https://www.googleapis.com/auth/cloud-platform", } } }, "initial_metadata": [ {{ if .gcp_project_id }} { "key": "x-goog-user-project", "value": "{{ .gcp_project_id }}" } {{ end }} ] }, {{ end }} "stdout_exporter_enabled": {{ .stackdriverDebug }}, "incoming_trace_context": ["CLOUD_TRACE_CONTEXT", "TRACE_CONTEXT", "GRPC_TRACE_BIN", "B3"], "outgoing_trace_context": ["CLOUD_TRACE_CONTEXT", "TRACE_CONTEXT", "GRPC_TRACE_BIN", "B3"], "trace_config":{ "constant_sampler":{ "decision": "ALWAYS_PARENT" }, "max_number_of_annotations": {{ .stackdriverMaxAnnotations 
}}, "max_number_of_attributes": {{ .stackdriverMaxAttributes }}, "max_number_of_message_events": {{ .stackdriverMaxEvents }}, "max_number_of_links": 200, } } }} {{ end }} , "layered_runtime": { "layers": [ { "name": "rtds_layer", "rtds_layer": { "name": "traffic_director_runtime", "rtds_config": { "ads": {} } } } ] } }
tools/packaging/common/gcp_envoy_bootstrap.json
0
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.00017222421593032777, 0.00016903545474633574, 0.00016531525761820376, 0.0001686615578364581, 0.0000019950480236730073 ]
{ "id": 5, "code_window": [ " servers:\n", " - port:\n", " number: 15443\n", " name: tls\n", " protocol: TLS\n", " tls:\n", " mode: AUTO_PASSTHROUGH\n", " hosts:\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " number: 443\n" ], "file_path": "tests/integration/pilot/common/routing.go", "type": "replace", "edit_start_line_idx": 706 }
// Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package controller import ( "fmt" "sort" "sync" "time" "github.com/hashicorp/go-multierror" "github.com/yl2chen/cidranger" "go.uber.org/atomic" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" klabels "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/kubernetes" listerv1 "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "istio.io/api/label" "istio.io/istio/pilot/pkg/features" "istio.io/istio/pilot/pkg/model" "istio.io/istio/pilot/pkg/serviceregistry" "istio.io/istio/pilot/pkg/serviceregistry/kube" "istio.io/istio/pilot/pkg/serviceregistry/kube/controller/filter" "istio.io/istio/pilot/pkg/util/informermetric" "istio.io/istio/pkg/config/host" "istio.io/istio/pkg/config/labels" "istio.io/istio/pkg/config/mesh" "istio.io/istio/pkg/config/protocol" kubelib "istio.io/istio/pkg/kube" "istio.io/istio/pkg/queue" istiolog "istio.io/pkg/log" "istio.io/pkg/monitoring" ) const ( // NodeRegionLabel is the well-known label for kubernetes node region in beta NodeRegionLabel = v1.LabelFailureDomainBetaRegion // NodeZoneLabel is the well-known label for kubernetes node zone in beta NodeZoneLabel = v1.LabelFailureDomainBetaZone // NodeRegionLabelGA is the well-known label for kubernetes node region in ga NodeRegionLabelGA = v1.LabelTopologyRegion // NodeZoneLabelGA is the well-known label for kubernetes node zone in ga NodeZoneLabelGA = v1.LabelTopologyZone // IstioGatewayPortLabel overrides the default 15443 value to use for a multi-network gateway's port // TODO move gatewayPort to api repo IstioGatewayPortLabel = "networking.istio.io/gatewayPort" // DefaultNetworkGatewayPort is the port used by default for cross-network traffic if not otherwise specified // by meshNetworks or "networking.istio.io/gatewayPort" DefaultNetworkGatewayPort = 15443 ) var log = istiolog.RegisterScope("kube", "kubernetes service registry controller", 0) var ( typeTag = monitoring.MustCreateLabel("type") eventTag = monitoring.MustCreateLabel("event") k8sEvents = monitoring.NewSum( "pilot_k8s_reg_events", "Events from k8s registry.", monitoring.WithLabels(typeTag, eventTag), ) // nolint: gocritic // This is deprecated in favor of `pilot_k8s_endpoints_pending_pod`, which is a gauge indicating the number of // currently missing pods. 
This helps distinguish transient errors from permanent ones endpointsWithNoPods = monitoring.NewSum( "pilot_k8s_endpoints_with_no_pods", "Endpoints that does not have any corresponding pods.") endpointsPendingPodUpdate = monitoring.NewGauge( "pilot_k8s_endpoints_pending_pod", "Number of endpoints that do not currently have any corresponding pods.", ) ) func init() { monitoring.MustRegister(k8sEvents) monitoring.MustRegister(endpointsWithNoPods) monitoring.MustRegister(endpointsPendingPodUpdate) } func incrementEvent(kind, event string) { k8sEvents.With(typeTag.Value(kind), eventTag.Value(event)).Increment() } // Options stores the configurable attributes of a Controller. type Options struct { SystemNamespace string ResyncPeriod time.Duration DomainSuffix string // ClusterID identifies the remote cluster in a multicluster env. ClusterID string // Metrics for capturing node-based metrics. Metrics model.Metrics // XDSUpdater will push changes to the xDS server. XDSUpdater model.XDSUpdater // NetworksWatcher observes changes to the mesh networks config. NetworksWatcher mesh.NetworksWatcher // MeshWatcher observes changes to the mesh config MeshWatcher mesh.Watcher // EndpointMode decides what source to use to get endpoint information EndpointMode EndpointMode // Maximum QPS when communicating with kubernetes API KubernetesAPIQPS float32 // Maximum burst for throttle when communicating with the kubernetes API KubernetesAPIBurst int // Duration to wait for cache syncs SyncInterval time.Duration // SyncTimeout, if set, causes HasSynced to be returned when marked true. SyncTimeout *atomic.Bool // If meshConfig.DiscoverySelectors are specified, the DiscoveryNamespacesFilter tracks the namespaces this controller watches. DiscoveryNamespacesFilter filter.DiscoveryNamespacesFilter } func (o Options) GetSyncInterval() time.Duration { if o.SyncInterval != 0 { return o.SyncInterval } return time.Millisecond * 100 } // EndpointMode decides what source to use to get endpoint information type EndpointMode int const ( // EndpointsOnly type will use only Kubernetes Endpoints EndpointsOnly EndpointMode = iota // EndpointSliceOnly type will use only Kubernetes EndpointSlices EndpointSliceOnly // TODO: add other modes. Likely want a mode with Endpoints+EndpointSlices that are not controlled by // Kubernetes Controller (e.g. made by user and not duplicated with Endpoints), or a mode with both that // does deduping. Simply doing both won't work for now, since not all Kubernetes components support EndpointSlice. ) var EndpointModeNames = map[EndpointMode]string{ EndpointsOnly: "EndpointsOnly", EndpointSliceOnly: "EndpointSliceOnly", } func (m EndpointMode) String() string { return EndpointModeNames[m] } var _ serviceregistry.Instance = &Controller{} // kubernetesNode represents a kubernetes node that is reachable externally type kubernetesNode struct { address string labels labels.Instance } // controllerInterface is a simplified interface for the Controller used for testing. 
type controllerInterface interface { getPodLocality(pod *v1.Pod) string cidrRanger() cidranger.Ranger defaultNetwork() string Cluster() string } var _ controllerInterface = &Controller{} // Controller is a collection of synchronized resource watchers // Caches are thread-safe type Controller struct { client kubernetes.Interface queue queue.Instance nsInformer cache.SharedIndexInformer nsLister listerv1.NamespaceLister serviceInformer filter.FilteredSharedIndexInformer serviceLister listerv1.ServiceLister endpoints kubeEndpointsController // Used to watch node accessible from remote cluster. // In multi-cluster(shared control plane multi-networks) scenario, ingress gateway service can be of nodePort type. // With this, we can populate mesh's gateway address with the node ips. nodeInformer cache.SharedIndexInformer nodeLister listerv1.NodeLister pods *PodCache metrics model.Metrics networksWatcher mesh.NetworksWatcher xdsUpdater model.XDSUpdater domainSuffix string clusterID string serviceHandlers []func(*model.Service, model.Event) workloadHandlers []func(*model.WorkloadInstance, model.Event) // This is only used for test stop chan struct{} sync.RWMutex // servicesMap stores hostname ==> service, it is used to reduce convertService calls. servicesMap map[host.Name]*model.Service // nodeSelectorsForServices stores hostname => label selectors that can be used to // refine the set of node port IPs for a service. nodeSelectorsForServices map[host.Name]labels.Instance // map of node name and its address+labels - this is the only thing we need from nodes // for vm to k8s or cross cluster. When node port services select specific nodes by labels, // we run through the label selectors here to pick only ones that we need. // Only nodes with ExternalIP addresses are included in this map ! nodeInfoMap map[string]kubernetesNode // externalNameSvcInstanceMap stores hostname ==> instance, is used to store instances for ExternalName k8s services externalNameSvcInstanceMap map[host.Name][]*model.ServiceInstance // workload instances from workload entries - map of ip -> workload instance workloadInstancesByIP map[string]*model.WorkloadInstance // Stores a map of workload instance name/namespace to address workloadInstancesIPsByName map[string]string // CIDR ranger based on path-compressed prefix trie ranger cidranger.Ranger // Network name for to be used when the meshNetworks for registry nor network label on pod is specified network string // Network name for the registry as specified by the MeshNetworks configmap networkForRegistry string // tracks which services on which ports should act as a gateway for networkForRegistry registryServiceNameGateways map[host.Name]uint32 // gateways for each network, indexed by the service that runs them so we clean them up later networkGateways map[host.Name]map[string][]*model.Gateway // informerInit is set to true once the controller is running successfully. This ensures we do not // return HasSynced=true before we are running informerInit *atomic.Bool // initialSync is set to true after performing an initial in-order processing of all objects. initialSync *atomic.Bool // syncTimeout signals that the registry should mark itself synced even if informers haven't been processed yet. // The timeout may be controlled by a different component than the kube controller. 
syncTimeout *atomic.Bool // Duration to wait for cache syncs syncInterval time.Duration // If meshConfig.DiscoverySelectors are specified, the DiscoveryNamespacesFilter tracks the namespaces this controller watches. discoveryNamespacesFilter filter.DiscoveryNamespacesFilter systemNamespace string } // NewController creates a new Kubernetes controller // Created by bootstrap and multicluster (see secretcontroller). func NewController(kubeClient kubelib.Client, options Options) *Controller { // The queue requires a time duration for a retry delay after a handler error c := &Controller{ domainSuffix: options.DomainSuffix, client: kubeClient.Kube(), queue: queue.NewQueue(1 * time.Second), clusterID: options.ClusterID, xdsUpdater: options.XDSUpdater, servicesMap: make(map[host.Name]*model.Service), nodeSelectorsForServices: make(map[host.Name]labels.Instance), nodeInfoMap: make(map[string]kubernetesNode), externalNameSvcInstanceMap: make(map[host.Name][]*model.ServiceInstance), workloadInstancesByIP: make(map[string]*model.WorkloadInstance), workloadInstancesIPsByName: make(map[string]string), registryServiceNameGateways: make(map[host.Name]uint32), networkGateways: make(map[host.Name]map[string][]*model.Gateway), networksWatcher: options.NetworksWatcher, metrics: options.Metrics, syncInterval: options.GetSyncInterval(), informerInit: atomic.NewBool(false), initialSync: atomic.NewBool(false), syncTimeout: options.SyncTimeout, discoveryNamespacesFilter: options.DiscoveryNamespacesFilter, systemNamespace: options.SystemNamespace, } c.nsInformer = kubeClient.KubeInformer().Core().V1().Namespaces().Informer() c.nsLister = kubeClient.KubeInformer().Core().V1().Namespaces().Lister() if options.SystemNamespace != "" { nsInformer := filter.NewFilteredSharedIndexInformer(func(obj interface{}) bool { ns, ok := obj.(*v1.Namespace) if !ok { log.Warnf("Namespace watch getting wrong type in event: %T", obj) return false } return ns.Name == c.systemNamespace }, c.nsInformer) c.registerHandlers(nsInformer, "Namespaces", c.onSystemNamespaceEvent, nil) } if c.discoveryNamespacesFilter == nil { c.discoveryNamespacesFilter = filter.NewDiscoveryNamespacesFilter(c.nsLister, options.MeshWatcher.Mesh().DiscoverySelectors) } c.initDiscoveryHandlers(kubeClient, options.EndpointMode, options.MeshWatcher, c.discoveryNamespacesFilter) c.serviceInformer = filter.NewFilteredSharedIndexInformer(c.discoveryNamespacesFilter.Filter, kubeClient.KubeInformer().Core().V1().Services().Informer()) c.serviceLister = listerv1.NewServiceLister(c.serviceInformer.GetIndexer()) c.registerHandlers(c.serviceInformer, "Services", c.onServiceEvent, nil) switch options.EndpointMode { case EndpointsOnly: endpointsInformer := filter.NewFilteredSharedIndexInformer( c.discoveryNamespacesFilter.Filter, kubeClient.KubeInformer().Core().V1().Endpoints().Informer(), ) c.endpoints = newEndpointsController(c, endpointsInformer) case EndpointSliceOnly: endpointSliceInformer := filter.NewFilteredSharedIndexInformer( c.discoveryNamespacesFilter.Filter, kubeClient.KubeInformer().Discovery().V1beta1().EndpointSlices().Informer(), ) c.endpoints = newEndpointSliceController(c, endpointSliceInformer) } // This is for getting the node IPs of a selected set of nodes c.nodeInformer = kubeClient.KubeInformer().Core().V1().Nodes().Informer() c.nodeLister = kubeClient.KubeInformer().Core().V1().Nodes().Lister() c.registerHandlers(c.nodeInformer, "Nodes", c.onNodeEvent, nil) podInformer := filter.NewFilteredSharedIndexInformer(c.discoveryNamespacesFilter.Filter, 
kubeClient.KubeInformer().Core().V1().Pods().Informer()) c.pods = newPodCache(c, podInformer, func(key string) { item, exists, err := c.endpoints.getInformer().GetIndexer().GetByKey(key) if err != nil { log.Debugf("Endpoint %v lookup failed with error %v, skipping stale endpoint", key, err) return } if !exists { log.Debugf("Endpoint %v not found, skipping stale endpoint", key) return } c.queue.Push(func() error { return c.endpoints.onEvent(item, model.EventUpdate) }) }) c.registerHandlers(c.pods.informer, "Pods", c.pods.onEvent, nil) return c } func (c *Controller) Provider() serviceregistry.ProviderID { return serviceregistry.Kubernetes } func (c *Controller) Cluster() string { return c.clusterID } func (c *Controller) cidrRanger() cidranger.Ranger { c.RLock() defer c.RUnlock() return c.ranger } func (c *Controller) defaultNetwork() string { c.RLock() defer c.RUnlock() if c.networkForRegistry != "" { return c.networkForRegistry } return c.network } func (c *Controller) Cleanup() error { // TODO(landow) do we need to cleanup other things besides endpoint shards? svcs, err := c.serviceLister.List(klabels.NewSelector()) if err != nil { return fmt.Errorf("error listing services for deletion: %v", err) } for _, s := range svcs { name := kube.ServiceHostname(s.Name, s.Namespace, c.domainSuffix) c.xdsUpdater.SvcUpdate(c.clusterID, string(name), s.Namespace, model.EventDelete) // TODO(landow) do we need to notify service handlers? } return nil } func (c *Controller) onServiceEvent(curr interface{}, event model.Event) error { svc, err := convertToService(curr) if err != nil { log.Errorf(err) return nil } log.Debugf("Handle event %s for service %s in namespace %s", event, svc.Name, svc.Namespace) svcConv := kube.ConvertService(*svc, c.domainSuffix, c.clusterID) switch event { case model.EventDelete: c.Lock() delete(c.servicesMap, svcConv.Hostname) delete(c.nodeSelectorsForServices, svcConv.Hostname) delete(c.externalNameSvcInstanceMap, svcConv.Hostname) delete(c.networkGateways, svcConv.Hostname) c.Unlock() default: needsFullPush := false // First, process nodePort gateway service, whose externalIPs specified // and loadbalancer gateway service if svcConv.Attributes.ClusterExternalAddresses != nil { needsFullPush = c.extractGatewaysFromService(svcConv) } else if isNodePortGatewayService(svc) { // We need to know which services are using node selectors because during node events, // we have to update all the node port services accordingly. nodeSelector := getNodeSelectorsForService(svc) c.Lock() // only add when it is nodePort gateway service c.nodeSelectorsForServices[svcConv.Hostname] = nodeSelector c.Unlock() needsFullPush = c.updateServiceNodePortAddresses(svcConv) } // instance conversion is only required when service is added/updated. instances := kube.ExternalNameServiceInstances(svc, svcConv) c.Lock() c.servicesMap[svcConv.Hostname] = svcConv if len(instances) > 0 { c.externalNameSvcInstanceMap[svcConv.Hostname] = instances } c.Unlock() if needsFullPush { // networks are different, we need to update all eds endpoints c.xdsUpdater.ConfigUpdate(&model.PushRequest{Full: true, Reason: []model.TriggerReason{model.NetworksTrigger}}) } } // We also need to update when the Service changes. For Kubernetes, a service change will result in Endpoint updates, // but workload entries will also need to be updated. 
if event == model.EventAdd || event == model.EventUpdate { // Build IstioEndpoints endpoints := c.endpoints.buildIstioEndpointsWithService(svc.Name, svc.Namespace, svcConv.Hostname) if features.EnableK8SServiceSelectWorkloadEntries { fep := c.collectWorkloadInstanceEndpoints(svcConv) endpoints = append(endpoints, fep...) } if len(endpoints) > 0 { c.xdsUpdater.EDSCacheUpdate(c.clusterID, string(svcConv.Hostname), svc.Namespace, endpoints) } } c.xdsUpdater.SvcUpdate(c.clusterID, string(svcConv.Hostname), svc.Namespace, event) // Notify service handlers. for _, f := range c.serviceHandlers { f(svcConv, event) } return nil } func (c *Controller) onNodeEvent(obj interface{}, event model.Event) error { node, ok := obj.(*v1.Node) if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { log.Errorf("couldn't get object from tombstone %+v", obj) return nil } node, ok = tombstone.Obj.(*v1.Node) if !ok { log.Errorf("tombstone contained object that is not a node %#v", obj) return nil } } var updatedNeeded bool if event == model.EventDelete { updatedNeeded = true c.Lock() delete(c.nodeInfoMap, node.Name) c.Unlock() } else { k8sNode := kubernetesNode{labels: node.Labels} for _, address := range node.Status.Addresses { if address.Type == v1.NodeExternalIP && address.Address != "" { k8sNode.address = address.Address break } } if k8sNode.address == "" { return nil } c.Lock() // check if the node exists as this add event could be due to controller resync // if the stored object changes, then fire an update event. Otherwise, ignore this event. currentNode, exists := c.nodeInfoMap[node.Name] if !exists || !nodeEquals(currentNode, k8sNode) { c.nodeInfoMap[node.Name] = k8sNode updatedNeeded = true } c.Unlock() } // update all related services if updatedNeeded && c.updateServiceNodePortAddresses() { c.xdsUpdater.ConfigUpdate(&model.PushRequest{ Full: true, }) } return nil } // FilterOutFunc func for filtering out objects during update callback type FilterOutFunc func(old, cur interface{}) bool func (c *Controller) registerHandlers( informer filter.FilteredSharedIndexInformer, otype string, handler func(interface{}, model.Event) error, filter FilterOutFunc, ) { if filter == nil { filter = func(old, cur interface{}) bool { oldObj := old.(metav1.Object) newObj := cur.(metav1.Object) // TODO: this is only for test, add resource version for test if oldObj.GetResourceVersion() == "" || newObj.GetResourceVersion() == "" { return false } return oldObj.GetResourceVersion() == newObj.GetResourceVersion() } } wrappedHandler := func(obj interface{}, event model.Event) error { obj = tryGetLatestObject(informer, obj) return handler(obj, event) } if informer, ok := informer.(cache.SharedInformer); ok { _ = informer.SetWatchErrorHandler(informermetric.ErrorHandlerForCluster(c.clusterID)) } informer.AddEventHandler( cache.ResourceEventHandlerFuncs{ // TODO: filtering functions to skip over un-referenced resources (perf) AddFunc: func(obj interface{}) { incrementEvent(otype, "add") c.queue.Push(func() error { return wrappedHandler(obj, model.EventAdd) }) }, UpdateFunc: func(old, cur interface{}) { if !filter(old, cur) { incrementEvent(otype, "update") c.queue.Push(func() error { return wrappedHandler(cur, model.EventUpdate) }) } else { incrementEvent(otype, "updatesame") } }, DeleteFunc: func(obj interface{}) { incrementEvent(otype, "delete") c.queue.Push(func() error { return handler(obj, model.EventDelete) }) }, }) } // tryGetLatestObject attempts to fetch the latest version of the object from the cache. 
// Changes may have occurred between queuing and processing. func tryGetLatestObject(informer filter.FilteredSharedIndexInformer, obj interface{}) interface{} { key, err := cache.MetaNamespaceKeyFunc(obj) if err != nil { log.Warnf("failed creating key for informer object: %v", err) return obj } latest, exists, err := informer.GetIndexer().GetByKey(key) if !exists || err != nil { log.Warnf("couldn't find %q in informer index", key) return obj } return latest } // HasSynced returns true after the initial state synchronization func (c *Controller) HasSynced() bool { return (c.syncTimeout != nil && c.syncTimeout.Load()) || c.initialSync.Load() } func (c *Controller) informersSynced() bool { if !c.informerInit.Load() { // registration/Run of informers hasn't occurred yet return false } if (c.nsInformer != nil && !c.nsInformer.HasSynced()) || !c.serviceInformer.HasSynced() || !c.endpoints.HasSynced() || !c.pods.informer.HasSynced() || !c.nodeInformer.HasSynced() { return false } return true } // SyncAll syncs all the objects node->service->pod->endpoint in order // TODO: sync same kind of objects in parallel // This can cause great performance cost in multi clusters scenario. // Maybe just sync the cache and trigger one push at last. func (c *Controller) SyncAll() error { var err *multierror.Error if c.nsLister != nil { sysNs, _ := c.nsLister.Get(c.systemNamespace) if sysNs != nil { err = multierror.Append(err, c.onSystemNamespaceEvent(sysNs, model.EventAdd)) } } nodes := c.nodeInformer.GetIndexer().List() log.Debugf("initializing %d nodes", len(nodes)) for _, s := range nodes { err = multierror.Append(err, c.onNodeEvent(s, model.EventAdd)) } services := c.serviceInformer.GetIndexer().List() log.Debugf("initializing %d services", len(services)) for _, s := range services { err = multierror.Append(err, c.onServiceEvent(s, model.EventAdd)) } err = multierror.Append(err, c.syncPods()) err = multierror.Append(err, c.syncEndpoints()) return multierror.Flatten(err.ErrorOrNil()) } func (c *Controller) syncPods() error { var err *multierror.Error pods := c.pods.informer.GetIndexer().List() log.Debugf("initializing %d pods", len(pods)) for _, s := range pods { err = multierror.Append(err, c.pods.onEvent(s, model.EventAdd)) } return err.ErrorOrNil() } func (c *Controller) syncEndpoints() error { var err *multierror.Error endpoints := c.endpoints.getInformer().GetIndexer().List() log.Debugf("initializing %d endpoints", len(endpoints)) for _, s := range endpoints { err = multierror.Append(err, c.endpoints.onEvent(s, model.EventAdd)) } return err.ErrorOrNil() } // Run all controllers until a signal is received func (c *Controller) Run(stop <-chan struct{}) { if c.networksWatcher != nil { c.networksWatcher.AddNetworksHandler(c.reloadNetworkLookup) c.reloadMeshNetworks() c.reloadNetworkGateways() } c.informerInit.Store(true) kubelib.WaitForCacheSyncInterval(stop, c.syncInterval, c.informersSynced) // after informer caches sync the first time, process resources in order if err := c.SyncAll(); err != nil { log.Errorf("one or more errors force-syncing resources: %v", err) } c.initialSync.Store(true) // after the in-order sync we can start processing the queue c.queue.Run(stop) log.Infof("Controller terminated") } // Stop the controller. 
Only for tests, to simplify the code (defer c.Stop()) func (c *Controller) Stop() { if c.stop != nil { close(c.stop) } } // Services implements a service catalog operation func (c *Controller) Services() ([]*model.Service, error) { c.RLock() out := make([]*model.Service, 0, len(c.servicesMap)) for _, svc := range c.servicesMap { out = append(out, svc) } c.RUnlock() sort.Slice(out, func(i, j int) bool { return out[i].Hostname < out[j].Hostname }) return out, nil } // GetService implements a service catalog operation by hostname specified. func (c *Controller) GetService(hostname host.Name) (*model.Service, error) { c.RLock() svc := c.servicesMap[hostname] c.RUnlock() return svc, nil } // getPodLocality retrieves the locality for a pod. func (c *Controller) getPodLocality(pod *v1.Pod) string { // if pod has `istio-locality` label, skip below ops if len(pod.Labels[model.LocalityLabel]) > 0 { return model.GetLocalityLabelOrDefault(pod.Labels[model.LocalityLabel], "") } // NodeName is set by the scheduler after the pod is created // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#late-initialization raw, err := c.nodeLister.Get(pod.Spec.NodeName) if err != nil { if pod.Spec.NodeName != "" { log.Warnf("unable to get node %q for pod %q/%q: %v", pod.Spec.NodeName, pod.Namespace, pod.Name, err) } return "" } nodeMeta, err := meta.Accessor(raw) if err != nil { log.Warnf("unable to get node meta: %v", nodeMeta) return "" } region := getLabelValue(nodeMeta, NodeRegionLabel, NodeRegionLabelGA) zone := getLabelValue(nodeMeta, NodeZoneLabel, NodeZoneLabelGA) subzone := getLabelValue(nodeMeta, label.TopologySubzone.Name, "") if region == "" && zone == "" && subzone == "" { return "" } return region + "/" + zone + "/" + subzone // Format: "%s/%s/%s" } // InstancesByPort implements a service catalog operation func (c *Controller) InstancesByPort(svc *model.Service, reqSvcPort int, labelsList labels.Collection) []*model.ServiceInstance { // First get k8s standard service instances and the workload entry instances outInstances := c.endpoints.InstancesByPort(c, svc, reqSvcPort, labelsList) outInstances = append(outInstances, c.serviceInstancesFromWorkloadInstances(svc, reqSvcPort)...) // return when instances found or an error occurs if len(outInstances) > 0 { return outInstances } // Fall back to external name service since we did not find any instances of normal services c.RLock() externalNameInstances := c.externalNameSvcInstanceMap[svc.Hostname] c.RUnlock() if externalNameInstances != nil { inScopeInstances := make([]*model.ServiceInstance, 0) for _, i := range externalNameInstances { if i.Service.Attributes.Namespace == svc.Attributes.Namespace && i.ServicePort.Port == reqSvcPort { inScopeInstances = append(inScopeInstances, i) } } return inScopeInstances } return nil } func (c *Controller) serviceInstancesFromWorkloadInstances(svc *model.Service, reqSvcPort int) []*model.ServiceInstance { // Run through all the workload instances, select ones that match the service labels // only if this is a kubernetes internal service and of ClientSideLB (eds) type // as InstancesByPort is called by the aggregate controller. 
We dont want to include // workload instances for any other registry var workloadInstancesExist bool c.RLock() workloadInstancesExist = len(c.workloadInstancesByIP) > 0 c.RUnlock() // Only select internal Kubernetes services with selectors if !workloadInstancesExist || svc.Attributes.ServiceRegistry != string(serviceregistry.Kubernetes) || svc.MeshExternal || svc.Resolution != model.ClientSideLB || svc.Attributes.LabelSelectors == nil { return nil } selector := labels.Instance(svc.Attributes.LabelSelectors) // Get the service port name and target port so that we can construct the service instance k8sService, err := c.serviceLister.Services(svc.Attributes.Namespace).Get(svc.Attributes.Name) // We did not find the k8s service. We cannot get the targetPort if err != nil { log.Infof("serviceInstancesFromWorkloadInstances(%s.%s) failed to get k8s service => error %v", svc.Attributes.Name, svc.Attributes.Namespace, err) return nil } var servicePort *model.Port for _, p := range svc.Ports { if p.Port == reqSvcPort { servicePort = p break } } if servicePort == nil { return nil } // Now get the target Port for this service port targetPort, targetPortName := findServiceTargetPort(servicePort, k8sService) if targetPort == 0 { targetPort = reqSvcPort } out := make([]*model.ServiceInstance, 0) c.RLock() for _, wi := range c.workloadInstancesByIP { if wi.Namespace != svc.Attributes.Namespace { continue } if selector.SubsetOf(wi.Endpoint.Labels) { // create an instance with endpoint whose service port name matches istioEndpoint := *wi.Endpoint if targetPortName != "" { // This is a named port, find the corresponding port in the port map matchedPort := wi.PortMap[targetPortName] if matchedPort == 0 { // No match found, skip this endpoint continue } istioEndpoint.EndpointPort = matchedPort } else { istioEndpoint.EndpointPort = uint32(targetPort) } istioEndpoint.ServicePortName = servicePort.Name out = append(out, &model.ServiceInstance{ Service: svc, ServicePort: servicePort, Endpoint: &istioEndpoint, }) } } c.RUnlock() return out } // convenience function to collect all workload entry endpoints in updateEDS calls. func (c *Controller) collectWorkloadInstanceEndpoints(svc *model.Service) []*model.IstioEndpoint { var workloadInstancesExist bool c.RLock() workloadInstancesExist = len(c.workloadInstancesByIP) > 0 c.RUnlock() if !workloadInstancesExist || svc.Resolution != model.ClientSideLB || len(svc.Ports) == 0 { return nil } endpoints := make([]*model.IstioEndpoint, 0) for _, port := range svc.Ports { for _, instance := range c.serviceInstancesFromWorkloadInstances(svc, port.Port) { endpoints = append(endpoints, instance.Endpoint) } } return endpoints } // GetProxyServiceInstances returns service instances co-located with a given proxy // TODO: this code does not return k8s service instances when the proxy's IP is a workload entry // To tackle this, we need a ip2instance map like what we have in service entry. func (c *Controller) GetProxyServiceInstances(proxy *model.Proxy) []*model.ServiceInstance { if len(proxy.IPAddresses) > 0 { proxyIP := proxy.IPAddresses[0] c.RLock() workload, f := c.workloadInstancesByIP[proxyIP] c.RUnlock() if f { return c.hydrateWorkloadInstance(workload) } pod := c.pods.getPodByProxy(proxy) if pod != nil && !proxy.IsVM() { // we don't want to use this block for our test "VM" which is actually a Pod. if !c.isControllerForProxy(proxy) { log.Errorf("proxy is in cluster %v, but controller is for cluster %v", proxy.Metadata.ClusterID, c.clusterID) return nil } // 1. 
find proxy service by label selector, if not any, there may exist headless service without selector // failover to 2 if services, err := getPodServices(c.serviceLister, pod); err == nil && len(services) > 0 { out := make([]*model.ServiceInstance, 0) for _, svc := range services { out = append(out, c.getProxyServiceInstancesByPod(pod, svc, proxy)...) } return out } // 2. Headless service without selector return c.endpoints.GetProxyServiceInstances(c, proxy) } // 3. The pod is not present when this is called // due to eventual consistency issues. However, we have a lot of information about the pod from the proxy // metadata already. Because of this, we can still get most of the information we need. // If we cannot accurately construct ServiceInstances from just the metadata, this will return an error and we can // attempt to read the real pod. out, err := c.getProxyServiceInstancesFromMetadata(proxy) if err != nil { log.Warnf("getProxyServiceInstancesFromMetadata for %v failed: %v", proxy.ID, err) } return out } // TODO: This could not happen, remove? if c.metrics != nil { c.metrics.AddMetric(model.ProxyStatusNoService, proxy.ID, proxy.ID, "") } else { log.Infof("Missing metrics env, empty list of services for pod %s", proxy.ID) } return nil } func (c *Controller) hydrateWorkloadInstance(si *model.WorkloadInstance) []*model.ServiceInstance { out := []*model.ServiceInstance{} // find the workload entry's service by label selector // rather than scanning through our internal map of model.services, get the services via the k8s apis dummyPod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{Namespace: si.Namespace, Labels: si.Endpoint.Labels}, } // find the services that map to this workload entry, fire off eds updates if the service is of type client-side lb if k8sServices, err := getPodServices(c.serviceLister, dummyPod); err == nil && len(k8sServices) > 0 { for _, k8sSvc := range k8sServices { var service *model.Service c.RLock() service = c.servicesMap[kube.ServiceHostname(k8sSvc.Name, k8sSvc.Namespace, c.domainSuffix)] c.RUnlock() // Note that this cannot be an external service because k8s external services do not have label selectors. if service == nil || service.Resolution != model.ClientSideLB { // may be a headless service continue } for _, port := range service.Ports { if port.Protocol == protocol.UDP { continue } // Similar code as UpdateServiceShards in eds.go instances := c.InstancesByPort(service, port.Port, labels.Collection{}) out = append(out, instances...) } } } return out } // WorkloadInstanceHandler defines the handler for service instances generated by other registries func (c *Controller) WorkloadInstanceHandler(si *model.WorkloadInstance, event model.Event) { // ignore malformed workload entries. And ignore any workload entry that does not have a label // as there is no way for us to select them if si.Namespace == "" || len(si.Endpoint.Labels) == 0 { return } // this is from a workload entry. Store it in separate map so that // the InstancesByPort can use these as well as the k8s pods. c.Lock() switch event { case model.EventDelete: delete(c.workloadInstancesByIP, si.Endpoint.Address) default: // add or update // Check to see if the workload entry changed. 
If it did, clear the old entry k := si.Namespace + "/" + si.Name existing := c.workloadInstancesIPsByName[k] if existing != si.Endpoint.Address { delete(c.workloadInstancesByIP, existing) } c.workloadInstancesByIP[si.Endpoint.Address] = si c.workloadInstancesIPsByName[k] = si.Endpoint.Address } c.Unlock() // find the workload entry's service by label selector // rather than scanning through our internal map of model.services, get the services via the k8s apis dummyPod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{Namespace: si.Namespace, Labels: si.Endpoint.Labels}, } // find the services that map to this workload entry, fire off eds updates if the service is of type client-side lb if k8sServices, err := getPodServices(c.serviceLister, dummyPod); err == nil && len(k8sServices) > 0 { for _, k8sSvc := range k8sServices { var service *model.Service c.RLock() service = c.servicesMap[kube.ServiceHostname(k8sSvc.Name, k8sSvc.Namespace, c.domainSuffix)] c.RUnlock() // Note that this cannot be an external service because k8s external services do not have label selectors. if service == nil || service.Resolution != model.ClientSideLB { // may be a headless service continue } // Get the updated list of endpoints that includes k8s pods and the workload entries for this service // and then notify the EDS server that endpoints for this service have changed. // We need one endpoint object for each service port endpoints := make([]*model.IstioEndpoint, 0) for _, port := range service.Ports { if port.Protocol == protocol.UDP { continue } // Similar code as UpdateServiceShards in eds.go instances := c.InstancesByPort(service, port.Port, labels.Collection{}) for _, inst := range instances { endpoints = append(endpoints, inst.Endpoint) } } // fire off eds update c.xdsUpdater.EDSUpdate(c.clusterID, string(service.Hostname), service.Attributes.Namespace, endpoints) } } } func (c *Controller) onSystemNamespaceEvent(obj interface{}, ev model.Event) error { if ev == model.EventDelete { return nil } ns, ok := obj.(*v1.Namespace) if !ok { log.Warnf("Namespace watch getting wrong type in event: %T", obj) return nil } if ns == nil { return nil } nw := ns.Labels[label.TopologyNetwork.Name] c.Lock() oldDefaultNetwork := c.network c.network = nw c.Unlock() // network changed, not using mesh networks, and controller has been initialized if oldDefaultNetwork != c.network && c.network == c.defaultNetwork() { // refresh pods/endpoints/services c.onNetworkChanged() } return nil } // isControllerForProxy should be used for proxies assumed to be in the kube cluster for this controller. Workload Entries // may not necessarily pass this check, but we still want to allow kube services to select workload instances. func (c *Controller) isControllerForProxy(proxy *model.Proxy) bool { return proxy.Metadata.ClusterID == "" || proxy.Metadata.ClusterID == c.clusterID } // getProxyServiceInstancesFromMetadata retrieves ServiceInstances using proxy Metadata rather than // from the Pod. This allows retrieving Instances immediately, regardless of delays in Kubernetes. 
// If the proxy doesn't have enough metadata, an error is returned func (c *Controller) getProxyServiceInstancesFromMetadata(proxy *model.Proxy) ([]*model.ServiceInstance, error) { if len(proxy.Metadata.Labels) == 0 { return nil, nil } if !c.isControllerForProxy(proxy) { return nil, fmt.Errorf("proxy is in cluster %v, but controller is for cluster %v", proxy.Metadata.ClusterID, c.clusterID) } // Create a pod with just the information needed to find the associated Services dummyPod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: proxy.ConfigNamespace, Labels: proxy.Metadata.Labels, }, } // Find the Service associated with the pod. services, err := getPodServices(c.serviceLister, dummyPod) if err != nil { return nil, fmt.Errorf("error getting instances for %s: %v", proxy.ID, err) } if len(services) == 0 { return nil, fmt.Errorf("no instances found for %s: %v", proxy.ID, err) } out := make([]*model.ServiceInstance, 0) for _, svc := range services { hostname := kube.ServiceHostname(svc.Name, svc.Namespace, c.domainSuffix) c.RLock() modelService, f := c.servicesMap[hostname] c.RUnlock() if !f { return nil, fmt.Errorf("failed to find model service for %v", hostname) } tps := make(map[model.Port]*model.Port) for _, port := range svc.Spec.Ports { svcPort, f := modelService.Ports.Get(port.Name) if !f { return nil, fmt.Errorf("failed to get svc port for %v", port.Name) } var portNum int if len(proxy.Metadata.PodPorts) > 0 { portNum, err = findPortFromMetadata(port, proxy.Metadata.PodPorts) if err != nil { return nil, fmt.Errorf("failed to find target port for %v: %v", proxy.ID, err) } } else { // most likely a VM - we assume the WorkloadEntry won't remap any ports portNum = port.TargetPort.IntValue() } // Dedupe the target ports here - Service might have configured multiple ports to the same target port, // we will have to create only one ingress listener per port and protocol so that we do not endup // complaining about listener conflicts. targetPort := model.Port{ Port: portNum, Protocol: svcPort.Protocol, } if _, exists := tps[targetPort]; !exists { tps[targetPort] = svcPort } } epBuilder := NewEndpointBuilderFromMetadata(c, proxy) for tp, svcPort := range tps { // consider multiple IP scenarios for _, ip := range proxy.IPAddresses { // Construct the ServiceInstance out = append(out, &model.ServiceInstance{ Service: modelService, ServicePort: svcPort, Endpoint: epBuilder.buildIstioEndpoint(ip, int32(tp.Port), svcPort.Name), }) } } } return out, nil } func (c *Controller) getProxyServiceInstancesByPod(pod *v1.Pod, service *v1.Service, proxy *model.Proxy) []*model.ServiceInstance { out := make([]*model.ServiceInstance, 0) hostname := kube.ServiceHostname(service.Name, service.Namespace, c.domainSuffix) c.RLock() svc := c.servicesMap[hostname] c.RUnlock() if svc == nil { return out } tps := make(map[model.Port]*model.Port) for _, port := range service.Spec.Ports { svcPort, exists := svc.Ports.Get(port.Name) if !exists { continue } // find target port portNum, err := FindPort(pod, &port) if err != nil { log.Warnf("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err) continue } // Dedupe the target ports here - Service might have configured multiple ports to the same target port, // we will have to create only one ingress listener per port and protocol so that we do not endup // complaining about listener conflicts. 
targetPort := model.Port{ Port: portNum, Protocol: svcPort.Protocol, } if _, exists = tps[targetPort]; !exists { tps[targetPort] = svcPort } } builder := NewEndpointBuilder(c, pod) for tp, svcPort := range tps { // consider multiple IP scenarios for _, ip := range proxy.IPAddresses { istioEndpoint := builder.buildIstioEndpoint(ip, int32(tp.Port), svcPort.Name) out = append(out, &model.ServiceInstance{ Service: svc, ServicePort: svcPort, Endpoint: istioEndpoint, }) } } return out } func (c *Controller) GetProxyWorkloadLabels(proxy *model.Proxy) labels.Collection { pod := c.pods.getPodByProxy(proxy) if pod != nil { return labels.Collection{pod.Labels} } return nil } // GetIstioServiceAccounts returns the Istio service accounts running a service // hostname. Each service account is encoded according to the SPIFFE VSID spec. // For example, a service account named "bar" in namespace "foo" is encoded as // "spiffe://cluster.local/ns/foo/sa/bar". func (c *Controller) GetIstioServiceAccounts(svc *model.Service, ports []int) []string { return model.GetServiceAccounts(svc, ports, c) } // AppendServiceHandler implements a service catalog operation func (c *Controller) AppendServiceHandler(f func(*model.Service, model.Event)) { c.serviceHandlers = append(c.serviceHandlers, f) } // AppendWorkloadHandler implements a service catalog operation func (c *Controller) AppendWorkloadHandler(f func(*model.WorkloadInstance, model.Event)) { c.workloadHandlers = append(c.workloadHandlers, f) }
pilot/pkg/serviceregistry/kube/controller/controller.go
0
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.008101117797195911, 0.00033660410554148257, 0.00016059158951975405, 0.0001695117389317602, 0.0008607752388343215 ]
{ "id": 5, "code_window": [ " servers:\n", " - port:\n", " number: 15443\n", " name: tls\n", " protocol: TLS\n", " tls:\n", " mode: AUTO_PASSTHROUGH\n", " hosts:\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " number: 443\n" ], "file_path": "tests/integration/pilot/common/routing.go", "type": "replace", "edit_start_line_idx": 706 }
// Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package mesh import ( "io/ioutil" "path/filepath" "regexp" "testing" "github.com/kylelemons/godebug/diff" "istio.io/istio/operator/pkg/util" ) func TestProfileDump(t *testing.T) { testDataDir = filepath.Join(operatorRootDir, "cmd/mesh/testdata/profile-dump") tests := []struct { desc string configPath string }{ { desc: "all_off", }, { desc: "config_path", configPath: "components", }, } installPackagePathRegex := regexp.MustCompile(" installPackagePath: .*") for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { inPath := filepath.Join(testDataDir, "input", tt.desc+".yaml") outPath := filepath.Join(testDataDir, "output", tt.desc+".yaml") got, err := runProfileDump(inPath, tt.configPath, snapshotCharts, "") if err != nil { t.Fatal(err) } // installPackagePath may change, we will remove it for consistent output got = installPackagePathRegex.ReplaceAllString(got, "") if refreshGoldenFiles() { t.Logf("Refreshing golden file for %s", outPath) if err := ioutil.WriteFile(outPath, []byte(got), 0644); err != nil { t.Error(err) } } want, err := readFile(outPath) if err != nil { t.Fatal(err) } if !util.IsYAMLEqual(got, want) { t.Errorf("profile-dump command(%s): got:\n%s\n\nwant:\n%s\nDiff:\n%s\n", tt.desc, got, want, util.YAMLDiff(got, want)) } }) } } func runProfileDump(profilePath, configPath string, chartSource chartSourceType, outfmt string) (string, error) { cmd := "profile dump -f " + profilePath if configPath != "" { cmd += " --config-path " + configPath } if len(chartSource) > 0 { cmd += " --manifests=" + string(chartSource) } if outfmt != "" { cmd += " --output=" + outfmt } return runCommand(cmd) } func TestProfileDumpFlags(t *testing.T) { testDataDir = filepath.Join(operatorRootDir, "cmd/mesh/testdata/profile-dump") tests := []struct { desc string configPath string }{ { desc: "all_off", }, { desc: "config_path", configPath: "components", }, } installPackagePathRegex := regexp.MustCompile("(?m)^installPackagePath=\".*\"\n") for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { inPath := filepath.Join(testDataDir, "input", tt.desc+".yaml") outPath := filepath.Join(testDataDir, "output", tt.desc+".txt") got, err := runProfileDump(inPath, tt.configPath, snapshotCharts, "flags") if err != nil { t.Fatal(err) } // installPackagePath may change, we will remove it for consistent output got = installPackagePathRegex.ReplaceAllString(got, "") if refreshGoldenFiles() { t.Logf("Refreshing golden file for %s", outPath) if err := ioutil.WriteFile(outPath, []byte(got), 0644); err != nil { t.Error(err) } } want, err := readFile(outPath) if err != nil { t.Fatal(err) } if got != want { t.Errorf("profile-dump command(%s): got:\n%s\n\nwant:\n%s\nDiff:\n%s\n", tt.desc, got, want, diff.Diff(got, want)) } }) } }
operator/cmd/mesh/profile-dump_test.go
0
https://github.com/istio/istio/commit/33d59c1dfd4678bea158d6940ce03f9dae537961
[ 0.00017570385534781963, 0.00017093312635552138, 0.0001669233024585992, 0.00017108028987422585, 0.0000027947801299887942 ]
{ "id": 0, "code_window": [ "| ----- | ---- | ----- | ----------- | -------------- |\n", "| node_id | [int32](#cockroach.server.serverpb.DecommissionPreCheckResponse-int32) | | | [reserved](#support-status) |\n", "| decommission_readiness | [DecommissionPreCheckResponse.NodeReadiness](#cockroach.server.serverpb.DecommissionPreCheckResponse-cockroach.server.serverpb.DecommissionPreCheckResponse.NodeReadiness) | | The node's decommission readiness status. | [reserved](#support-status) |\n", "| liveness_status | [cockroach.kv.kvserver.liveness.livenesspb.NodeLivenessStatus](#cockroach.server.serverpb.DecommissionPreCheckResponse-cockroach.kv.kvserver.liveness.livenesspb.NodeLivenessStatus) | | The liveness status of the given node. | [reserved](#support-status) |\n", "| replica_count | [int64](#cockroach.server.serverpb.DecommissionPreCheckResponse-int64) | | The number of total replicas on the node, computed by scanning range descriptors. | [reserved](#support-status) |\n", "| checked_ranges | [DecommissionPreCheckResponse.RangeCheckResult](#cockroach.server.serverpb.DecommissionPreCheckResponse-cockroach.server.serverpb.DecommissionPreCheckResponse.RangeCheckResult) | repeated | The details and recorded traces from preprocessing each range with a replica on the checked nodes that resulted in error, up to the maximum specified in the request. | [reserved](#support-status) |\n", "\n", "\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "docs/generated/http/full.md", "type": "replace", "edit_start_line_idx": 1000 }
// Copyright 2016 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. syntax = "proto3"; package cockroach.server.serverpb; option go_package = "github.com/cockroachdb/cockroach/pkg/server/serverpb"; import "config/zonepb/zone.proto"; import "util/tracing/tracingpb/tracing.proto"; import "jobs/jobspb/jobs.proto"; import "server/serverpb/status.proto"; import "storage/enginepb/mvcc.proto"; import "kv/kvserver/liveness/livenesspb/liveness.proto"; import "kv/kvserver/loqrecovery/loqrecoverypb/recovery.proto"; import "kv/kvserver/kvserverpb/range_log.proto"; import "kv/kvpb/api.proto"; import "roachpb/metadata.proto"; import "roachpb/data.proto"; import "ts/catalog/chart_catalog.proto"; import "util/metric/metric.proto"; import "util/tracing/tracingpb/recorded_span.proto"; import "gogoproto/gogo.proto"; import "google/api/annotations.proto"; import "google/protobuf/timestamp.proto"; // ZoneConfigurationLevel indicates, for objects with a Zone Configuration, // the object level at which the configuration is defined. This is needed // because objects without a specifically indicated Zone Configuration will // inherit the configuration of their "parent". enum ZoneConfigurationLevel { UNKNOWN = 0; // CLUSTER indicates that this object uses the cluster default Zone Configuration. CLUSTER = 1; // DATABASE indicates that this object uses a database-level Zone Configuration. DATABASE = 2; // TABLE indicates that this object uses a table-level Zone Configuration. TABLE = 3; } // DatabasesRequest requests a list of databases. message DatabasesRequest { } // DatabasesResponse contains a list of databases. message DatabasesResponse { repeated string databases = 1; } // DatabaseDetailsRequest requests detailed information about the specified // database message DatabaseDetailsRequest { // database is the name of the database we are querying. string database = 1; // Setting this flag includes a computationally-expensive stats field // in the response. bool include_stats = 2; } // DatabaseDetailsResponse contains grant information, table names, // zone configuration, and size statistics for a database. message DatabaseDetailsResponse { message Grant { // user is the user that this grant applies to. string user = 1; // privileges are the abilities this grant gives to the user. repeated string privileges = 2; } message Stats { // A table which exists in the database, but for which we could not load stats // during this request. message MissingTable { // The name of the table for which we could not load stats. string name = 1; // The error message that resulted when the request for this table failed. string error_message = 2; } // A list of tables that exist in the database, but for which stats could // not be loaded due to failures during this request. repeated MissingTable missing_tables = 1; // The number of ranges, as determined from a query of range meta keys, // across all tables. int64 range_count = 2; // An approximation of the disk space (in bytes) used for all replicas // of all tables across the cluster. uint64 approximate_disk_bytes = 3; // node_ids is the ordered list of node ids on which data is stored. 
repeated int32 node_ids = 4 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; int32 num_index_recommendations = 5; } // grants are the results of SHOW GRANTS for this database. repeated Grant grants = 1 [(gogoproto.nullable) = false]; // table_names contains the names of all tables in this database. Note that // all responses will be schema-qualified (schema.table) and that every schema // or table that contains a "sql unsafe character" such as uppercase letters // or dots will be surrounded with double quotes, such as "naughty schema".table. repeated string table_names = 2; // descriptor_id is an identifier used to uniquely identify this database. int64 descriptor_id = 3 [(gogoproto.customname) = "DescriptorID"]; // The zone configuration in effect for this database. cockroach.config.zonepb.ZoneConfig zone_config = 4 [(gogoproto.nullable) = false]; // The level at which this object's zone configuration is set. ZoneConfigurationLevel zone_config_level = 5; // Size information about the database, present only when explicitly requested. Stats stats = 6; } // TableDetailsRequest is a request for detailed information about a table. message TableDetailsRequest { // database is the name of the database that contains the table we're // interested in. string database = 1; // table is the name of the table that we're querying. Table may be // schema-qualified (schema.table) and each name component that contains // sql unsafe characters such as . or uppercase letters must be surrounded // in double quotes like "naughty schema".table. string table = 2; } // TableDetailsResponse contains grants, column names, and indexes for // a table. message TableDetailsResponse { // Grant is an entry from SHOW GRANTS. message Grant { // user is the user that this grant applies to. string user = 1; // privileges are the abilities this grant gives to the user. repeated string privileges = 2; } message Column { // name is the name of the column. string name = 1; // type is the SQL type (INT, STRING, etc.) of this column. string type = 2; // nullable is whether this column can contain NULL. bool nullable = 3; // default_value is the default value of this column. string default_value = 4; // generation_expression is the generator expression if the column is computed. string generation_expression = 5; // hidden is whether this column is hidden. bool hidden = 6; } message Index { // name is the name of this index. string name = 1; // unique is whether this a unique index (i.e. CREATE UNIQUE INDEX). bool unique = 2; // seq is an internal variable that's passed along. int64 seq = 3; // column is the column that this index indexes. string column = 4; // direction is either "ASC" (ascending) or "DESC" (descending). string direction = 5; // storing is an internal variable that's passed along. bool storing = 6; // implicit is an internal variable that's passed along. bool implicit = 7; } repeated Grant grants = 1 [(gogoproto.nullable) = false]; repeated Column columns = 2 [(gogoproto.nullable) = false]; repeated Index indexes = 3 [(gogoproto.nullable) = false]; // range_count is the size of the table in ranges. This provides a rough // estimate of the storage requirements for the table. // TODO(mrtracy): The TableStats method also returns a range_count field which // is more accurate than this one; TableDetails calculates this number using // a potentially faster method that is subject to cache staleness. 
We should // consider removing or renaming this field to reflect that difference. See // GitHub issue #5435 for more information. int64 range_count = 4; // create_table_statement is the output of "SHOW CREATE" for this table; // it is a SQL statement that would re-create the table's current schema if // executed. string create_table_statement = 5; // The zone configuration in effect for this table. cockroach.config.zonepb.ZoneConfig zone_config = 6 [(gogoproto.nullable) = false]; // The level at which this object's zone configuration is set. ZoneConfigurationLevel zone_config_level = 7; // descriptor_id is an identifier used to uniquely identify this table. int64 descriptor_id = 8 [(gogoproto.customname) = "DescriptorID"]; // configure_zone_statement is the output of "SHOW ZONE CONFIGURATION FOR TABLE" // for this table. It is a SQL statement that would re-configure the table's current // zone if executed. string configure_zone_statement = 9; // stats_last_created_at is the time at which statistics were last created. google.protobuf.Timestamp stats_last_created_at = 10 [(gogoproto.stdtime) = true]; // has_index_recommendations notifies if the there are index recommendations // on this table. bool has_index_recommendations = 11; // data_total_bytes is the size in bytes of live and non-live data on the table. int64 data_total_bytes = 12; // data_live_bytes is the size in bytes of live (non MVCC) data on the table. int64 data_live_bytes = 13; // data_live_percentage is the percentage of live (non MVCC) data on the table. float data_live_percentage = 14; } // TableStatsRequest is a request for detailed, computationally expensive // information about a table. message TableStatsRequest { // database is the name of the database that contains the table we're // interested in. string database = 1; // table is the name of the table that we're querying. Table may be // schema-qualified (schema.table) and each name component that contains // sql unsafe characters such as . or uppercase letters must be surrounded // in double quotes like "naughty schema".table. string table = 2; } // TableStatsResponse contains detailed, computationally expensive information // about a table. message TableStatsResponse { // range_count is the number of ranges, as determined from a query of range // meta keys. int64 range_count = 1; // replica_count is the number of replicas of any range of this table, as // found by querying nodes which are known to have replicas. When compared // with range_count, this can be used to estimate the current replication // factor of the table. int64 replica_count = 2; // node_count is the number of nodes which contain data for this table, // according to a query of range meta keys. int64 node_count = 3; // stats is the summation of MVCCStats for all replicas of this table // across the cluster. cockroach.storage.enginepb.MVCCStats stats = 4 [(gogoproto.nullable) = false]; // approximate_disk_bytes is an approximation of the disk space (in bytes) // used for all replicas of this table across the cluster. uint64 approximate_disk_bytes = 6; // MissingNode represents information on a node which should contain data // for this table, but could not be contacted during this request. message MissingNode { // The ID of the missing node. string node_id = 1 [(gogoproto.customname) = "NodeID"]; // The error message that resulted when the query sent to this node failed. 
string error_message = 2; } // A list of nodes which should contain data for this table (according to // cluster metadata), but could not be contacted during this request. repeated MissingNode missing_nodes = 5 [(gogoproto.nullable) = false]; // node_ids is the ordered list of node ids on which the table data is stored. repeated int32 node_ids = 7 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; } // NonTableStatsRequest requests statistics on cluster data ranges that do not // belong to SQL tables. message NonTableStatsRequest { } // NonTableStatsResponse returns statistics on various cluster data ranges // that do not belong to SQL tables. The statistics for each range are returned // as a TableStatsResponse. message NonTableStatsResponse { // Information on time series ranges. TableStatsResponse time_series_stats = 1; // Information for remaining (non-table, non-time-series) ranges. TableStatsResponse internal_use_stats = 2; } // UsersRequest requests a list of users. message UsersRequest { } // UsersResponse returns a list of users. message UsersResponse { // User is a CockroachDB user. message User { string username = 1; } // usernames is a list of users for the CockroachDB cluster. repeated User users = 1 [(gogoproto.nullable) = false]; } // EventsRequest is a request for event log entries, optionally filtered // by the specified event type. message EventsRequest { string type = 1; reserved 2; // limit is the total number of results that are retrieved by the query. If // this is omitted or set to 0, the default maximum number of results are // returned. When set to > 0, at most only that number of results are // returned. When set to < 0, an unlimited number of results are returned. int32 limit = 3; // unredacted_events indicates that the values in the events should // not be redacted. The default is to redact, so that older versions // of `cockroach zip` do not see un-redacted values by default. // For good security, this field is only obeyed by the server after // checking that the client of the RPC is an admin user. bool unredacted_events = 4; } // EventsResponse contains a set of event log entries. This is always limited // to the latest N entries (N is enforced in the associated endpoint). message EventsResponse { message Event { // timestamp is the time at which the event occurred. google.protobuf.Timestamp timestamp = 1 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; // event_type is the type of the event (e.g. "create_table", "drop_table". string event_type = 2; reserved 3; // reporting_id is the reporting ID for this event. int64 reporting_id = 4 [(gogoproto.customname) = "ReportingID"]; // info has more detailed information for the event. The contents vary // depending on the event. string info = 5; // unique_id is a unique identifier for this event. bytes unique_id = 6 [(gogoproto.customname) = "UniqueID"]; } repeated Event events = 1 [(gogoproto.nullable) = false]; } // SetUIDataRequest stores the given key/value pairs in the system.ui table. message SetUIDataRequest { // key_values is a map of keys to bytes values. Each key will be stored // with its corresponding value as a separate row in system.ui. map<string, bytes> key_values = 1; } // SetUIDataResponse is currently an empty response. message SetUIDataResponse { } // GETUIDataRequest requests the values for the given keys from the system.ui // table. 
message GetUIDataRequest { repeated string keys = 1; } // GetUIDataResponse contains the requested values and the times at which // the values were last updated. message GetUIDataResponse { message Value { // value is the value of the requested key. bytes value = 1; // last_updated is the time at which the value was last updated. google.protobuf.Timestamp last_updated = 2 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; } // key_values maps keys to their retrieved values. If this doesn't contain a // a requested key, that key was not found. map<string, Value> key_values = 1 [(gogoproto.nullable) = false]; } // ClusterRequest requests metadata for the cluster. message ClusterRequest { } // ClusterResponse contains metadata for the cluster. message ClusterResponse { // The unique ID used to identify this cluster. string cluster_id = 1 [(gogoproto.customname) = "ClusterID"]; // True if diagnostics reporting is enabled for the cluster. bool reporting_enabled = 2; // True if enterprise features are enabled for the cluster. bool enterprise_enabled = 3; } // DrainRequest instructs the receiving node to drain. message DrainRequest { reserved 1; reserved 2; // When true, terminates the process after the server has started draining. // Setting both shutdown and do_drain to false causes // the request to only operate as a probe. // Setting do_drain to false and shutdown to true causes // the server to shut down immediately without // first draining. bool shutdown = 3; // When true, perform the drain phase. See the comment above on // shutdown for an explanation of the interaction between the two. // do_drain is also implied by a non-nil deprecated_probe_indicator. bool do_drain = 4; // node_id is a string so that "local" can be used to specify that no // forwarding is necessary. // For compatibility with v21.2 nodes, an empty node_id is // interpreted as "local". This behavior might be removed // in subsequent versions. string node_id = 5; // When true, more detailed information is logged during the range lease drain phase. bool verbose = 6; } // DrainResponse is the response to a successful DrainRequest. message DrainResponse { // is_draining is set to true iff the server is currently draining. // This is set to true in response to a request where skip_drain // is false; but it can also be set to true in response // to a probe request (!shutdown && skip_drain) if another // drain request has been issued prior or asynchronously. bool is_draining = 2; // drain_remaining_indicator measures, at the time of starting to // process the corresponding drain request, how many actions to // fully drain the node were deemed to be necessary. Some, but not // all, of these actions may already have been carried out by the // time this indicator is received by the client. The client should // issue requests until this indicator first reaches zero, which // indicates that the node is fully drained. // // The API contract is the following: // // - upon a first Drain call with do_drain set, the remaining // indicator will have some value >=0. If >0, it indicates that // drain is pushing state away from the node. (What this state // precisely means is left unspecified for this field. See below // for details.) // // - upon a subsequent Drain call with do_drain set, the remaining // indicator should have reduced in value. 
The drain process does best // effort at shedding state away from the node; hopefully, all the // state is shed away upon the first call and the progress // indicator can be zero as early as the second call. However, // if there was a lot of state to shed, it is possible for // timeout to be encountered upon the first call. In that case, the // second call will do some more work and return a non-zero value // as well. // // - eventually, in an iterated sequence of DrainRequests with // do_drain set, the remaining indicator should reduce to zero. At // that point the client can conclude that no state is left to // shed, and it should be safe to shut down the node with a // DrainRequest with shutdown = true. // // Note that this field is left unpopulated (and thus remains at // zero) for pre-20.1 nodes. A client can recognize this by // observing is_draining to be false after a request with do_drain = // true: the is_draining field is also left unpopulated by pre-20.1 // nodes. uint64 drain_remaining_indicator = 3; // drain_remaining_description is an informal (= not // machine-parsable) string that explains the progress of the drain // process to human eyes. This is intended for use mainly for // troubleshooting. // // The field is only populated if do_drain is true in the // request. string drain_remaining_description = 4; reserved 1; } // DecommissionPreCheckRequest requests that preliminary checks be run to // ensure that the specified node(s) can be decommissioned successfully. message DecommissionPreCheckRequest { repeated int32 node_ids = 1 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // The maximum number of ranges for which to report errors. int32 num_replica_report = 2; // If true, all ranges on the checked nodes must only need replacement or // removal for decommissioning. bool strict_readiness = 3; // If true, collect traces for each range checked. // Requires num_replica_report > 0. bool collect_traces = 4; } // DecommissionPreCheckResponse returns the number of replicas that encountered // errors when running preliminary decommissioning checks, as well as the // associated error messages and traces, for each node. message DecommissionPreCheckResponse { enum NodeReadiness { option (gogoproto.goproto_enum_stringer) = false; UNKNOWN = 0; READY = 1; ALREADY_DECOMMISSIONED = 2; ALLOCATION_ERRORS = 3; } // The result of checking a range's readiness for the decommission. message RangeCheckResult { int32 range_id = 1 [ (gogoproto.customname) = "RangeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.RangeID"]; // The action determined by the allocator that is needed for the range. string action = 2; // All trace events collected while checking the range. repeated TraceEvent events = 3; // The error message from the allocator's processing, if any. string error = 4; } // The result of checking a single node's readiness for decommission. message NodeCheckResult { int32 node_id = 1 [ (gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // The node's decommission readiness status. NodeReadiness decommission_readiness = 2; // The liveness status of the given node. kv.kvserver.liveness.livenesspb.NodeLivenessStatus liveness_status = 3; // The number of total replicas on the node, computed by scanning range // descriptors. 
int64 replica_count = 4; // The details and recorded traces from preprocessing each range with a // replica on the checked nodes that resulted in error, up to the maximum // specified in the request. repeated RangeCheckResult checked_ranges = 5 [(gogoproto.nullable) = false]; } // Status of the preliminary decommission checks across nodes. repeated NodeCheckResult checked_nodes = 1 [(gogoproto.nullable) = false]; } // DecommissionStatusRequest requests the decommissioning status for the // specified or, if none are specified, all nodes. message DecommissionStatusRequest { repeated int32 node_ids = 1 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // The number of decommissioning replicas to be reported. int32 num_replica_report = 2; } // DecommissionRequest requests the server to set the membership status on // all nodes specified by NodeIDs to the value of TargetMembership. // // If no NodeIDs are given, it targets the recipient node. message DecommissionRequest { repeated int32 node_ids = 1 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; kv.kvserver.liveness.livenesspb.MembershipStatus target_membership = 2; // The number of decommissioning replicas to be reported. int32 num_replica_report = 3; } // DecommissionStatusResponse lists decommissioning statuses for a number of NodeIDs. message DecommissionStatusResponse { message Replica { int32 replica_id = 1 [ (gogoproto.customname) = "ReplicaID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.ReplicaID"]; int32 range_id = 2 [ (gogoproto.customname) = "RangeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.RangeID"]; } message Status { int32 node_id = 1 [ (gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; bool is_live = 2; // The number of replicas on the node, computed by scanning meta2 ranges. int64 replica_count = 3; // The membership status of the given node. kv.kvserver.liveness.livenesspb.MembershipStatus membership = 4; bool draining = 5; // Decommissioning replicas on the given node to be reported. // How many replicas are reported is determined by what was specified in the // request. repeated Replica reported_replicas = 6; } // Status of all affected nodes. repeated Status status = 2 [(gogoproto.nullable) = false]; } // SettingsRequest inquires what are the current settings in the cluster. message SettingsRequest { // The array of setting names to retrieve. // An empty keys array means "all". repeated string keys = 1; // Indicate whether to see unredacted setting values. // This is opt-in so that a previous version `cockroach zip` // does not start reporting values when this becomes active. // For good security, the server only obeys this after it checks // that the logger-in user has admin privilege. bool unredacted_values = 2; } // SettingsResponse is the response to SettingsRequest. message SettingsResponse { message Value { string value = 1; string type = 2; string description = 3; bool public = 4; google.protobuf.Timestamp last_updated = 5 [(gogoproto.nullable) = true, (gogoproto.stdtime) = true]; } map<string, Value> key_values = 1 [(gogoproto.nullable) = false]; } // HealthRequest requests a liveness or readiness check. // // A liveness check is triggered via ready set to false. 
In this mode, // an empty response is returned immediately, that is, the caller merely // learns that the process is running. // // A readiness check (ready == true) is suitable for determining whether // user traffic should be directed at a given node, for example by a load // balancer. In this mode, a successful response is returned only if the // node: // // - is not in the process of shutting down or booting up (including // waiting for cluster bootstrap); // - is regarded as healthy by the cluster via the recent broadcast of // a liveness beacon. Absent either of these conditions, an error // code will result. // // API: PUBLIC message HealthRequest { // ready specifies whether the client wants to know whether the // target node is ready to receive traffic. If a node is unready, an // error will be returned. // API: PUBLIC bool ready = 1; } // HealthResponse is the response to HealthRequest. It currently does not // contain any information. // API: PUBLIC message HealthResponse { } // LivenessRequest requests liveness data for all nodes on the cluster. message LivenessRequest { } // LivenessResponse contains the liveness status of each node on the cluster. message LivenessResponse { repeated kv.kvserver.liveness.livenesspb.Liveness livenesses = 1 [(gogoproto.nullable) = false]; map<int32, kv.kvserver.liveness.livenesspb.NodeLivenessStatus> statuses = 2 [ (gogoproto.nullable) = false, (gogoproto.castkey) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID" ]; } // JobsRequest requests system job information of the given status and type. message JobsRequest { int32 limit = 1; string status = 2; cockroach.sql.jobs.jobspb.Type type = 3; } // JobsResponse contains the job record for each matching job. message JobsResponse { repeated JobResponse jobs = 1 [(gogoproto.nullable) = false]; google.protobuf.Timestamp earliest_retained_time = 2 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; } // JobRequest requests system job information for the given job_id. message JobRequest { int64 job_id = 1; } // JobResponse contains the job record for a job. message JobResponse { int64 id = 1 [(gogoproto.customname) = "ID"]; string type = 2; string description = 3; string statement = 16; string username = 4; repeated uint32 descriptor_ids = 5 [ (gogoproto.customname) = "DescriptorIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID" ]; string status = 6; google.protobuf.Timestamp created = 7 [(gogoproto.stdtime) = true]; google.protobuf.Timestamp started = 8 [(gogoproto.stdtime) = true]; google.protobuf.Timestamp finished = 9 [(gogoproto.stdtime) = true]; google.protobuf.Timestamp modified = 10 [(gogoproto.stdtime) = true]; float fraction_completed = 11; string error = 12; // highwater_timestamp is the highwater timestamp returned as normal // timestamp. This is appropriate for display to humans. google.protobuf.Timestamp highwater_timestamp = 13 [(gogoproto.stdtime) = true]; // highwater_decimal is the highwater timestamp in the proprietary decimal // form used by logical timestamps internally. This is appropriate to pass // to a "AS OF SYSTEM TIME" SQL statement. string highwater_decimal = 14; string running_status = 15; google.protobuf.Timestamp last_run = 17 [(gogoproto.stdtime) = true]; google.protobuf.Timestamp next_run = 18 [(gogoproto.stdtime) = true]; int64 num_runs = 19; // ExecutionFailure corresponds to a failure to execute the job with the // attempt starting at start and ending at end. 
message ExecutionFailure { // Status is the status of the job during the execution. string status = 1; // Start is the time at which the execution started. google.protobuf.Timestamp start = 2 [(gogoproto.stdtime) = true]; // End is the time at which the error occurred. google.protobuf.Timestamp end = 3 [(gogoproto.stdtime) = true]; // Error is the error which occurred. string error = 4; } // ExecutionFailures is a log of execution failures of the job. It is not // guaranteed to contain all execution failures and some execution failures // may not contain an error or end. repeated ExecutionFailure execution_failures = 20; // coordinator_id identifies the node coordinating the job. This value will // only be present for jobs that are currently running or recently ran. int64 coordinator_id = 21 [(gogoproto.customname) = "CoordinatorID"]; } // LocationsRequest requests system locality location information. message LocationsRequest { } // JobsResponse contains the job record for each matching job. message LocationsResponse { message Location { string locality_key = 1; string locality_value = 2; double latitude = 3; double longitude = 4; } repeated Location locations = 1 [(gogoproto.nullable) = false]; } // RangeLogRequest request the history of a range from the range log. message RangeLogRequest { // TODO(tamird): use [(gogoproto.customname) = "RangeID"] below. Need to // figure out how to teach grpc-gateway about custom names. // If RangeID is 0, returns range log history without filtering by range. int64 range_id = 1; // limit is the total number of results that are retrieved by the query. If // this is omitted or set to 0, the default maximum number of results are // returned. When set to > 0, at most only that number of results are // returned. When set to < 0, an unlimited number of results are returned. int32 limit = 2; } // RangeLogResponse contains a list of entries from the range log table. message RangeLogResponse { // To avoid porting the pretty printing of keys and descriptors to // javascript, they will be precomputed on the serverside. message PrettyInfo { string updated_desc = 1; string new_desc = 2; string added_replica = 3; string removed_replica = 4; string reason = 5; string details = 6; } message Event { cockroach.kv.kvserver.storagepb.RangeLogEvent event = 1 [(gogoproto.nullable) = false]; PrettyInfo pretty_info = 2 [(gogoproto.nullable) = false]; } reserved 1; // Previously used. repeated Event events = 2 [(gogoproto.nullable) = false]; } // QueryPlanRequest requests the query plans for a SQL string. message QueryPlanRequest { // query is the SQL query string. string query = 1; } // QueryPlanResponse contains the query plans for a SQL string (currently only // the distsql physical query plan). message QueryPlanResponse { string distsql_physical_query_plan = 1 [(gogoproto.customname) = "DistSQLPhysicalQueryPlan"]; } message DataDistributionRequest { } message DataDistributionResponse { message ZoneConfig { // target is the object the zone config applies to, e.g. "DATABASE db" or // "PARTITION north_america OF TABLE users". string target = 1; config.zonepb.ZoneConfig config = 2 [(gogoproto.nullable) = false]; reserved 3; // config_sql is the SQL representation of config. 
string config_sql = 4 [(gogoproto.customname) = "ConfigSQL"]; } message TableInfo { map<int32, int64> replica_count_by_node_id = 1 [(gogoproto.castkey) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; int64 zone_config_id = 2; google.protobuf.Timestamp dropped_at = 3 [(gogoproto.stdtime) = true]; } message DatabaseInfo { // By table name. map<string, TableInfo> table_info = 1 [(gogoproto.nullable) = false]; } // By database name. map<string, DatabaseInfo> database_info = 1 [(gogoproto.nullable) = false]; reserved 2; // By zone name. map<string, ZoneConfig> zone_configs = 3 [(gogoproto.nullable) = false]; } // MetricMetadataRequest requests metadata for all metrics. message MetricMetadataRequest { } // MetricMetadataResponse contains the metadata for all metrics. message MetricMetadataResponse { map<string, cockroach.util.metric.Metadata> metadata = 1 [(gogoproto.nullable) = false]; } message EnqueueRangeRequest { // The node on which the queue should process the range. If node_id is 0, // the request will be forwarded to all other nodes. int32 node_id = 1 [(gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // The name of the replica queue to run the range through. Matched against // each queue's name field. See the implementation of baseQueue for details. string queue = 2; // The ID of the range to run through the queue. int32 range_id = 3 [(gogoproto.customname) = "RangeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.RangeID"]; // If set, run the queue's process method without first checking whether the // replica should be processed by calling shouldQueue. bool skip_should_queue = 4; } message EnqueueRangeResponse { message Details { int32 node_id = 1 [(gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // All trace events collected while processing the range in the queue. repeated TraceEvent events = 2; // The error message from the queue's processing, if any. string error = 3; } repeated Details details = 1; } // ChartCatalogRequest requests returns a catalog of Admin UI charts. message ChartCatalogRequest { } // ChartCatalogResponse returns a catalog of Admin UI charts useful for debugging. message ChartCatalogResponse { repeated cockroach.ts.catalog.ChartSection catalog = 1 [(gogoproto.nullable) = false]; } // CARequest requests the CA cert anchoring this service. message CARequest { } // CAResponse contains a PEM encoded copy of the CA cert for this service. message CAResponse { bytes ca_cert = 1; } // CertBundleRequest requests the bundle of initialization CAs for a new node. // It provides authentication in the form of a joinToken containing a // sharedSecret. message CertBundleRequest { string token_id = 1 [(gogoproto.customname) = "TokenID"]; bytes shared_secret = 2; } // CertBundleResponse contains a copy of all CAs needed to initialize TLS for // a new node. message CertBundleResponse { bytes bundle = 1; } message RecoveryCollectReplicaInfoRequest {} // RecoveryCollectReplicaRestartNodeStream is sent by collector node to client // if it experiences a transient failure collecting data from one of the nodes. // This message instructs client to drop any data that it collected locally // for specified node as streaming for this node would be restarted. // This mechanism is needed to avoid restarting the whole collection procedure // in large cluster if one of the nodes fails transiently. 
message RecoveryCollectReplicaRestartNodeStream { int32 node_id = 1 [ (gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; } message RecoveryCollectReplicaInfoResponse { oneof info { roachpb.RangeDescriptor range_descriptor = 1; cockroach.kv.kvserver.loqrecovery.loqrecoverypb.ReplicaInfo replica_info = 2; RecoveryCollectReplicaRestartNodeStream node_stream_restarted = 3; cockroach.kv.kvserver.loqrecovery.loqrecoverypb.ClusterMetadata metadata = 4; } } message RecoveryCollectLocalReplicaInfoRequest { } message RecoveryCollectLocalReplicaInfoResponse { cockroach.kv.kvserver.loqrecovery.loqrecoverypb.ReplicaInfo replica_info = 1; } message RecoveryStagePlanRequest { // Plan is replica update plan to stage for application on next restart. Plan // could be empty in that case existing plan is removed if present. cockroach.kv.kvserver.loqrecovery.loqrecoverypb.ReplicaUpdatePlan plan = 1; // If all nodes is true, then receiver should act as a coordinator and perform // a fan-out to stage plan on all nodes of the cluster. bool all_nodes = 2; // ForcePlan tells receiver to ignore any plan already staged on the node if it // is present and replace it with new plan (including empty one). bool force_plan = 3; // ForceLocalInternalVersion tells server to update internal component of plan // version to the one of active cluster version. This option needs to be set // if target cluster is stuck in recovery where only part of nodes were // successfully migrated. bool force_local_internal_version = 4; } message RecoveryStagePlanResponse { // Errors contain error messages happened during plan staging. repeated string errors = 1; } message RecoveryNodeStatusRequest { } message RecoveryNodeStatusResponse { cockroach.kv.kvserver.loqrecovery.loqrecoverypb.NodeRecoveryStatus status = 1 [ (gogoproto.nullable) = false]; } message RecoveryVerifyRequest { // PlanID is ID of the plan to verify. bytes plan_id = 1 [ (gogoproto.customname) = "PendingPlanID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/uuid.UUID"]; // DecommissionedNodeIDs is a set of nodes that should be marked as decommissioned in // the cluster when loss of quorum recovery successfully applies. repeated int32 decommissioned_node_ids = 2 [(gogoproto.customname) = "DecommissionedNodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // MaxReportedRanges is the maximum number of failed ranges to report. // If more unhealthy ranges are found, error will be returned alongside range // to indicate that ranges were cut short. int32 max_reported_ranges = 3; } message RecoveryVerifyResponse { message UnavailableRanges { // Ranges contains descriptors of ranges that failed health check. // If there are too many ranges to report, error would contain relevant // message. repeated cockroach.kv.kvserver.loqrecovery.loqrecoverypb.RangeRecoveryStatus ranges = 1 [ (gogoproto.nullable) = false]; // Error contains an optional error if ranges validation can't complete. string error = 2; } // Statuses contain a list of recovery statuses of nodes updated during recovery. It // also contains nodes that were expected to be live (not decommissioned by recovery) // but failed to return status response. repeated cockroach.kv.kvserver.loqrecovery.loqrecoverypb.NodeRecoveryStatus statuses = 1 [ (gogoproto.nullable) = false]; // UnavailableRanges contains information about ranges that failed health check. 
UnavailableRanges unavailable_ranges = 2 [(gogoproto.nullable) = false]; // DecommissionedNodeStatuses contains a map of requested IDs with their // corresponding liveness statuses. map<int32, kv.kvserver.liveness.livenesspb.MembershipStatus> decommissioned_node_statuses = 3 [ (gogoproto.castkey) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; } // Admin is the gRPC API for the admin UI. Through grpc-gateway, we offer // REST-style HTTP endpoints that locally proxy to the gRPC endpoints. service Admin { rpc RequestCA(CARequest) returns (CAResponse) { option (google.api.http) = { get : "/_join/v1/ca" }; } rpc RequestCertBundle(CertBundleRequest) returns (CertBundleResponse) { option (google.api.http) = { get : "/_join/v1/requestbundle" }; } // URL: /_admin/v1/users rpc Users(UsersRequest) returns (UsersResponse) { option (google.api.http) = { get: "/_admin/v1/users" }; } // URL: /_admin/v1/databases rpc Databases(DatabasesRequest) returns (DatabasesResponse) { option (google.api.http) = { get: "/_admin/v1/databases" }; } // Example URL: /_admin/v1/databases/system rpc DatabaseDetails(DatabaseDetailsRequest) returns (DatabaseDetailsResponse) { option (google.api.http) = { get: "/_admin/v1/databases/{database}" }; } // Example URL: /_admin/v1/databases/system/tables/ui rpc TableDetails(TableDetailsRequest) returns (TableDetailsResponse) { option (google.api.http) = { get: "/_admin/v1/databases/{database}/tables/{table}" }; } // Example URL: /_admin/v1/databases/system/tables/ui/stats rpc TableStats(TableStatsRequest) returns (TableStatsResponse) { option (google.api.http) = { get: "/_admin/v1/databases/{database}/tables/{table}/stats" }; } // Example URL: /_admin/v1/nontablestats rpc NonTableStats(NonTableStatsRequest) returns (NonTableStatsResponse) { option (google.api.http) = { get: "/_admin/v1/nontablestats" }; } // Example URLs: // Example URLs: // - /_admin/v1/events // - /_admin/v1/events?limit=100 // - /_admin/v1/events?type=create_table // - /_admin/v1/events?type=create_table&limit=100 rpc Events(EventsRequest) returns (EventsResponse) { option (google.api.http) = { get: "/_admin/v1/events" }; } // This requires a POST. Because of the libraries we're using, the POST body // must be in the following format: // // {"key_values": // { "key1": "base64_encoded_value1"}, // ... // { "keyN": "base64_encoded_valueN"}, // } // // Note that all keys are quoted strings and that all values are base64- // encoded. // // Together, SetUIData and GetUIData provide access to a "cookie jar" for the // admin UI. The structure of the underlying data is meant to be opaque to the // server. rpc SetUIData(SetUIDataRequest) returns (SetUIDataResponse) { option (google.api.http) = { post: "/_admin/v1/uidata" body: "*" }; } // Example URLs: // - /_admin/v1/uidata?keys=MYKEY // - /_admin/v1/uidata?keys=MYKEY1&keys=MYKEY2 // // Yes, it's a little odd that the query parameter is named "keys" instead of // "key". I would've preferred that the URL parameter be named "key". However, // it's clearer for the protobuf field to be named "keys," which makes the URL // parameter "keys" as well. rpc GetUIData(GetUIDataRequest) returns (GetUIDataResponse) { option (google.api.http) = { get: "/_admin/v1/uidata" }; } // Cluster returns metadata for the cluster. rpc Cluster(ClusterRequest) returns (ClusterResponse) { option (google.api.http) = { get: "/_admin/v1/cluster" }; } // Settings returns the cluster-wide settings for the cluster. 
rpc Settings(SettingsRequest) returns (SettingsResponse) { option (google.api.http) = { get: "/_admin/v1/settings" }; } // Health returns liveness for the node target of the request. // API: PUBLIC rpc Health(HealthRequest) returns (HealthResponse) { option (google.api.http) = { get: "/_admin/v1/health" additional_bindings {get : "/health"} }; } // Liveness returns the liveness state of all nodes on the cluster. rpc Liveness(LivenessRequest) returns (LivenessResponse) { option (google.api.http) = { get: "/_admin/v1/liveness" }; } // Jobs returns the job records for all jobs of the given status and type. rpc Jobs(JobsRequest) returns (JobsResponse) { option (google.api.http) = { get: "/_admin/v1/jobs" }; } // Job returns the job record for the job of the given job_id. rpc Job(JobRequest) returns (JobResponse) { option (google.api.http) = { get: "/_admin/v1/jobs/{job_id}" }; } // Locations returns the locality location records. rpc Locations(LocationsRequest) returns (LocationsResponse) { option (google.api.http) = { get: "/_admin/v1/locations" }; } // QueryPlan returns the query plans for a SQL string. rpc QueryPlan(QueryPlanRequest) returns (QueryPlanResponse) { option (google.api.http) = { get: "/_admin/v1/queryplan" }; } // Drain puts the node into the specified drain mode(s) and optionally // instructs the process to terminate. // We do not expose this via HTTP unless we have a way to authenticate // + authorize streaming RPC connections. See #42567. rpc Drain(DrainRequest) returns (stream DrainResponse) { } // DecommissionPreCheck requests that the server execute preliminary checks // to evaluate the possibility of successfully decommissioning a given node. rpc DecommissionPreCheck(DecommissionPreCheckRequest) returns (DecommissionPreCheckResponse) { } // Decommission puts the node(s) into the specified decommissioning state. // If this ever becomes exposed via HTTP, ensure that it performs // authorization. See #42567. rpc Decommission(DecommissionRequest) returns (DecommissionStatusResponse) { } // DecommissionStatus retrieves the decommissioning status of the specified nodes. // If this ever becomes exposed via HTTP, ensure that it performs // authorization. See #42567. rpc DecommissionStatus(DecommissionStatusRequest) returns (DecommissionStatusResponse) { } // URL: /_admin/v1/rangelog // URL: /_admin/v1/rangelog?limit=100 // URL: /_admin/v1/rangelog/1 // URL: /_admin/v1/rangelog/1?limit=100 rpc RangeLog(RangeLogRequest) returns (RangeLogResponse) { option (google.api.http) = { get: "/_admin/v1/rangelog" additional_bindings { get: "/_admin/v1/rangelog/{range_id}" } }; } rpc DataDistribution(DataDistributionRequest) returns (DataDistributionResponse) { option (google.api.http) = { get: "/_admin/v1/data_distribution" }; } // URL: /_admin/v1/metricmetadata rpc AllMetricMetadata(MetricMetadataRequest) returns (MetricMetadataResponse) { option (google.api.http) = { get: "/_admin/v1/metricmetadata" }; } // URL: /_admin/v1/chartcatalog rpc ChartCatalog(ChartCatalogRequest) returns (ChartCatalogResponse) { option (google.api.http) = { get: "/_admin/v1/chartcatalog" }; } // EnqueueRange runs the specified range through the specified queue on the // range's leaseholder store, returning the detailed trace and error // information from doing so. Parameters must be provided in the body of the // POST request. 
// For example: // // { // "queue": "raftlog", // "rangeId": 10 // } rpc EnqueueRange(EnqueueRangeRequest) returns (EnqueueRangeResponse) { option (google.api.http) = { post: "/_admin/v1/enqueue_range" body : "*" }; } // SendKVBatch proxies the given BatchRequest into KV, returning the // response. It is used by the CLI `debug send-kv-batch` command. rpc SendKVBatch(roachpb.BatchRequest) returns (roachpb.BatchResponse) { } // ListTracingSnapshots retrieves the list of snapshots of the Active Spans // Registry that the node currently has in memory. A new snapshot can be // captured with TakeTracingSnapshots. rpc ListTracingSnapshots(ListTracingSnapshotsRequest) returns (ListTracingSnapshotsResponse) { option (google.api.http) = { get: "/_admin/v1/trace_snapshots" }; } // TakeTracingSnapshot captures a new snapshot of the Active Spans Registry. // The new snapshot is returned, and also made available through // ListTracingSnapshots. rpc TakeTracingSnapshot(TakeTracingSnapshotRequest) returns (TakeTracingSnapshotResponse) { option (google.api.http) = { post: "/_admin/v1/trace_snapshots" }; } // GetTracingSnapshot returns a snapshot of the tracing spans in the active // spans registry previously generated through TakeTracingSnapshots. rpc GetTracingSnapshot(GetTracingSnapshotRequest) returns (GetTracingSnapshotResponse) { option (google.api.http) = { get: "/_admin/v1/trace_snapshots/{snapshot_id}" }; } // GetTrace returns the trace with a specified ID. Depending on the request, // the trace is returned either from a snapshot that was previously taken, or // directly from the active spans registry. rpc GetTrace(GetTraceRequest) returns (GetTraceResponse) { option (google.api.http) = { post: "/_admin/v1/traces" body: "*" }; } // SetTraceRecordingType sets the recording mode of all or some of the spans // in a trace. rpc SetTraceRecordingType(SetTraceRecordingTypeRequest) returns (SetTraceRecordingTypeResponse) { option (google.api.http) = { post: "/_admin/v1/settracerecordingtype" body: "*" }; } // RecoveryCollectReplicaInfo retrieves information about: // 1. range descriptors contained in cluster meta ranges if meta ranges // are readable; // 2. replica information from all live nodes that have connection to // the target node. rpc RecoveryCollectReplicaInfo(RecoveryCollectReplicaInfoRequest) returns (stream RecoveryCollectReplicaInfoResponse) {} // RecoveryCollectLocalReplicaInfo retrieve information about all local // replicas in all stores on the node. rpc RecoveryCollectLocalReplicaInfo(RecoveryCollectLocalReplicaInfoRequest) returns (stream RecoveryCollectLocalReplicaInfoResponse) {} // RecoveryStagePlan stages recovery plan on target or all nodes in cluster // depending on request content and marks nodes deleted in the plan as // decommissioned in each node's local node tombstone storage. rpc RecoveryStagePlan(RecoveryStagePlanRequest) returns (RecoveryStagePlanResponse) {} // RecoveryNodeStatus retrieves loss of quorum recovery status of a single // node. rpc RecoveryNodeStatus(RecoveryNodeStatusRequest) returns (RecoveryNodeStatusResponse) {} // RecoveryVerify verifies that recovery plan is applied on all necessary // nodes, ranges are available and nodes removed in plan are marked as // decommissioned. rpc RecoveryVerify(RecoveryVerifyRequest) returns (RecoveryVerifyResponse) {} // ListTenants returns a list of active tenants in the cluster. 
rpc ListTenants(ListTenantsRequest) returns (ListTenantsResponse) { option (google.api.http) = { get: "/_admin/v1/tenants" }; } } message ListTenantsRequest{} message ListTenantsResponse { repeated Tenant tenants = 1; } message Tenant { roachpb.TenantID tenant_id = 1; string tenant_name = 2; string sql_addr = 3; string rpc_addr = 4; } message ListTracingSnapshotsRequest {} message ListTracingSnapshotsResponse { repeated SnapshotInfo snapshots = 1; } message SnapshotInfo { // SnapshotID identifies a specific snapshot which can be requested via a // GetTracingSnapshotRequest. Negative IDs are used for "automatic" snapshots. int64 snapshot_id = 1 [(gogoproto.customname) = "SnapshotID"]; google.protobuf.Timestamp captured_at = 2 [(gogoproto.stdtime) = true]; } message TakeTracingSnapshotRequest {} message TakeTracingSnapshotResponse { SnapshotInfo snapshot = 1; } message GetTracingSnapshotRequest { // SnapshotId indicates which snapshot is requested. ID may be negative when // requesting an "automatic" snapshot; see ListTracingSnapshotsResponse. int64 snapshot_id = 1; } message GetTracingSnapshotResponse { TracingSnapshot snapshot = 1; } // GetTrace represents the request of the GetTrace RPC. message GetTraceRequest { // If a snapshot is specified, the trace information is returned from that // snapshot. If a snapshot is not specified, information about currently // opened spans is returned from the active spans registry. int64 snapshot_id = 1 [(gogoproto.customname) = "SnapshotID"]; uint64 trace_id = 2 [(gogoproto.nullable)=false, (gogoproto.customname) = "TraceID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.TraceID"]; util.tracing.tracingpb.RecordingMode recording_type = 3; } // GetTrace represents the response to the GetTrace RPC. message GetTraceResponse { // snapshot_id identifies the snapshot that the trace was retrieved from. If // 0, the trace was not retrieved from a registry, but directly from the // active spans registry. int64 snapshot_id = 1 [(gogoproto.customname) = "SnapshotID"]; uint64 trace_id = 2 [(gogoproto.nullable)=false, (gogoproto.customname) = "TraceID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.TraceID"]; // still_exists is set if any spans from this trace are currently present in // the active spans registry. // // If snapshot_id is 0, still_exists is always set. bool still_exists = 3; // serialized_recording represents the serialization of trace recording. We // return the recording already serialized as formatted string for easy // consumption in the browser. string serialized_recording = 4; } // TracingSnapshot represents a snapshot of the active spans registry, including // all the spans that were open at the time when the snapshot was taken. message TracingSnapshot { int64 snapshot_id = 1 [(gogoproto.customname) = "SnapshotID"]; google.protobuf.Timestamp captured_at = 2 [(gogoproto.stdtime) = true]; repeated TracingSpan spans = 3; // Ideally we'd use int64 to match the goroutine_id type // but unfortunately, the way that grpc-gateway parses // these objects into Javascript results in odd encodings // of Long JS types that are difficult to interact with // as map keys. Thus, we settle for string. map<string, string> stacks = 4; } message NamedOperationMetadata { string name = 1; util.tracing.tracingpb.OperationMetadata metadata = 2 [(gogoproto.nullable) = false]; } // TracingSpan represents a span, in a form slightly processed for the use of // the tracing UI. 
message TracingSpan { string operation = 1; uint64 trace_id = 2 [(gogoproto.customname) = "TraceID"]; uint64 span_id = 3 [(gogoproto.customname) = "SpanID"]; uint64 parent_span_id = 4 [(gogoproto.customname) = "ParentSpanID"]; google.protobuf.Timestamp start = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; uint64 goroutine_id = 6 [(gogoproto.customname) = "GoroutineID"]; repeated SpanTag processed_tags = 7; // current is set if the span is still alive (i.e. still present in the active // spans registry). bool current = 8; // current_recording_mode represents the span's current recording mode. This is // not set if current == false. util.tracing.tracingpb.RecordingMode current_recording_mode = 9; repeated NamedOperationMetadata children_metadata = 10; } // SpanTag represents a tag on a tracing span, in a form processed for the use // of the tracing UI. message SpanTag { string key = 1; string val = 2; string caption = 3; string link = 4; bool hidden = 5; bool highlight = 6; bool inherit = 7; bool inherited = 8; bool propagate_up = 9; bool copied_from_child = 10; repeated ChildSpanTag children = 11; // May be empty. } message ChildSpanTag { string key = 1; string val = 2; } // SetTraceRecordingTypeRequest is the request for SetTraceRecordingType, which // sets the recording mode of all or some of the spans in a trace. message SetTraceRecordingTypeRequest { // TraceID identifies the trace to toggle the recording of. It must always be // specified. uint64 trace_id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "TraceID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.TraceID"]; // SpanID, if not zero, controls which spans in the trace get their recording // mode set. If zero, all spans in the trace are updated. If not zero, only // the respective span and its descendants get updated. uint64 span_id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "SpanID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.SpanID"]; util.tracing.tracingpb.RecordingMode recording_mode = 3; } // SetTraceRecordingTypeRequest is the response for SetTraceRecordingType. message SetTraceRecordingTypeResponse{} // FeatureFlags within this struct are used within back-end/front-end code to show/hide features. message FeatureFlags { // isObservabiliyService is true when the server is an instance of the Observability Service bool is_observability_service = 1; // CanViewKVMetricDashboards is true when the logged in user is able to view KV-level metric dashboards. bool can_view_kv_metric_dashboards = 2; // DisableKVLevelAdvancedDebug is true when the UI should remove options to certain KV-level // debug operations. This is helpful in application tenant contexsts, where these requests // can only return errors since the tenant cannot perform the operations. bool disable_kv_level_advanced_debug = 3; }
pkg/server/serverpb/admin.proto
1
https://github.com/cockroachdb/cockroach/commit/977f72e4cc24f5d14424c278d6d6921016c3fc62
[ 0.052202749997377396, 0.002342925174161792, 0.0001590865576872602, 0.00017696891154628247, 0.00638567516580224 ]
{ "id": 0, "code_window": [ "| ----- | ---- | ----- | ----------- | -------------- |\n", "| node_id | [int32](#cockroach.server.serverpb.DecommissionPreCheckResponse-int32) | | | [reserved](#support-status) |\n", "| decommission_readiness | [DecommissionPreCheckResponse.NodeReadiness](#cockroach.server.serverpb.DecommissionPreCheckResponse-cockroach.server.serverpb.DecommissionPreCheckResponse.NodeReadiness) | | The node's decommission readiness status. | [reserved](#support-status) |\n", "| liveness_status | [cockroach.kv.kvserver.liveness.livenesspb.NodeLivenessStatus](#cockroach.server.serverpb.DecommissionPreCheckResponse-cockroach.kv.kvserver.liveness.livenesspb.NodeLivenessStatus) | | The liveness status of the given node. | [reserved](#support-status) |\n", "| replica_count | [int64](#cockroach.server.serverpb.DecommissionPreCheckResponse-int64) | | The number of total replicas on the node, computed by scanning range descriptors. | [reserved](#support-status) |\n", "| checked_ranges | [DecommissionPreCheckResponse.RangeCheckResult](#cockroach.server.serverpb.DecommissionPreCheckResponse-cockroach.server.serverpb.DecommissionPreCheckResponse.RangeCheckResult) | repeated | The details and recorded traces from preprocessing each range with a replica on the checked nodes that resulted in error, up to the maximum specified in the request. | [reserved](#support-status) |\n", "\n", "\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "docs/generated/http/full.md", "type": "replace", "edit_start_line_idx": 1000 }
<svg width="20" height="15" viewBox="0 0 20 15" fill="none" xmlns="http://www.w3.org/2000/svg"> <path fill-rule="evenodd" clip-rule="evenodd" d="M3.12831 3.31205C4.82208 1.75218 7.24647 0.25 10 0.25C12.7535 0.25 15.1779 1.75218 16.8717 3.31205C17.7269 4.09966 18.4233 4.92647 18.9113 5.63868C19.1549 5.99418 19.3538 6.33169 19.4951 6.63041C19.6226 6.89988 19.75 7.23247 19.75 7.54545C19.75 7.85844 19.6226 8.19103 19.4951 8.4605C19.3538 8.75922 19.1549 9.09673 18.9113 9.45222C18.4233 10.1644 17.7269 10.9912 16.8717 11.7789C15.1779 13.3387 12.7535 14.8409 10 14.8409C7.24647 14.8409 4.82208 13.3387 3.12831 11.7789C2.27309 10.9912 1.57671 10.1644 1.0887 9.45222C0.845114 9.09673 0.646176 8.75922 0.504894 8.4605C0.377444 8.19103 0.25 7.85844 0.25 7.54545C0.25 7.23247 0.377444 6.89988 0.504894 6.63041C0.646176 6.33169 0.845114 5.99418 1.0887 5.63868C1.57671 4.92647 2.27309 4.09966 3.12831 3.31205ZM1.75371 7.54545C1.76152 7.5735 1.78575 7.66032 1.86088 7.81917C1.95942 8.02752 2.11418 8.2951 2.32609 8.60436C2.74907 9.22167 3.37088 9.96306 4.14446 10.6755C5.70828 12.1157 7.78389 13.3409 10 13.3409C12.2161 13.3409 14.2917 12.1157 15.8555 10.6755C16.6291 9.96306 17.2509 9.22167 17.6739 8.60436C17.8858 8.2951 18.0406 8.02752 18.1391 7.81917C18.2143 7.66032 18.2385 7.5735 18.2463 7.54545C18.2385 7.5174 18.2143 7.43058 18.1391 7.27174C18.0406 7.06339 17.8858 6.79581 17.6739 6.48655C17.2509 5.86923 16.6291 5.12785 15.8555 4.41543C14.2917 2.97523 12.2161 1.75 10 1.75C7.78389 1.75 5.70828 2.97523 4.14446 4.41543C3.37088 5.12785 2.74907 5.86923 2.32609 6.48655C2.11418 6.79581 1.95942 7.06339 1.86088 7.27174C1.78575 7.43058 1.76152 7.5174 1.75371 7.54545Z" fill="#7E89A9"/> <path fill-rule="evenodd" clip-rule="evenodd" d="M9.99996 5.84094C9.05844 5.84094 8.29541 6.60397 8.29541 7.54549C8.29541 8.487 9.05844 9.25003 9.99996 9.25003C10.9415 9.25003 11.7045 8.487 11.7045 7.54549C11.7045 6.60397 10.9415 5.84094 9.99996 5.84094ZM6.79541 7.54549C6.79541 5.77555 8.23001 4.34094 9.99996 4.34094C11.7699 4.34094 13.2045 5.77555 13.2045 7.54549C13.2045 9.31543 11.7699 10.75 9.99996 10.75C8.23001 10.75 6.79541 9.31543 6.79541 7.54549Z" fill="#7E89A9"/> </svg>
pkg/ui/workspaces/cluster-ui/src/assets/eye.svg
0
https://github.com/cockroachdb/cockroach/commit/977f72e4cc24f5d14424c278d6d6921016c3fc62
[ 0.0001653806830290705, 0.0001653806830290705, 0.0001653806830290705, 0.0001653806830290705, 0 ]
{ "id": 0, "code_window": [ "| ----- | ---- | ----- | ----------- | -------------- |\n", "| node_id | [int32](#cockroach.server.serverpb.DecommissionPreCheckResponse-int32) | | | [reserved](#support-status) |\n", "| decommission_readiness | [DecommissionPreCheckResponse.NodeReadiness](#cockroach.server.serverpb.DecommissionPreCheckResponse-cockroach.server.serverpb.DecommissionPreCheckResponse.NodeReadiness) | | The node's decommission readiness status. | [reserved](#support-status) |\n", "| liveness_status | [cockroach.kv.kvserver.liveness.livenesspb.NodeLivenessStatus](#cockroach.server.serverpb.DecommissionPreCheckResponse-cockroach.kv.kvserver.liveness.livenesspb.NodeLivenessStatus) | | The liveness status of the given node. | [reserved](#support-status) |\n", "| replica_count | [int64](#cockroach.server.serverpb.DecommissionPreCheckResponse-int64) | | The number of total replicas on the node, computed by scanning range descriptors. | [reserved](#support-status) |\n", "| checked_ranges | [DecommissionPreCheckResponse.RangeCheckResult](#cockroach.server.serverpb.DecommissionPreCheckResponse-cockroach.server.serverpb.DecommissionPreCheckResponse.RangeCheckResult) | repeated | The details and recorded traces from preprocessing each range with a replica on the checked nodes that resulted in error, up to the maximum specified in the request. | [reserved](#support-status) |\n", "\n", "\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "docs/generated/http/full.md", "type": "replace", "edit_start_line_idx": 1000 }
### Bug fixes - Feature A release note 1 some text for note 1 [#1][#1] [2fcb5a5bd][2fcb5a5bd] - Feature A release note 2 some text for note 2 [#1][#1] [2fcb5a5bd][2fcb5a5bd] - Feature B release note 1 [#1][#1] [8e82a68d7][8e82a68d7] - Feature C [#1][#1] [170e4d8b1][170e4d8b1] - Feature D [#1][#1] [662a10125][662a10125] - Feature E [#1][#1] [76bb9f090][76bb9f090] ### Doc updates Docs team: Please add these manually. ### Contributors This release includes 2 merged PRs by 3 authors. We would like to thank the following contributors from the CockroachDB community: - bar (first-time contributor) - foo (first-time contributor) - test4 ### PRs merged by contributors - bar, foo, test4: - 2018-04-22 [#1 ][#1 ] [32204525a][32204525a] (+ 0 - 0 ~ 0/ 0) PR title (5 commits) - test4: - 2018-04-22 [#100 ][#100 ] [1525c88bd][1525c88bd] (+ 0 - 0 ~ 0/ 0) PR title alternate format [#1]: https://github.com/cockroachdb/cockroach/pull/1 [#100]: https://github.com/cockroachdb/cockroach/pull/100 [1525c88bd]: https://github.com/cockroachdb/cockroach/commit/1525c88bd [170e4d8b1]: https://github.com/cockroachdb/cockroach/commit/170e4d8b1 [2fcb5a5bd]: https://github.com/cockroachdb/cockroach/commit/2fcb5a5bd [32204525a]: https://github.com/cockroachdb/cockroach/commit/32204525a [662a10125]: https://github.com/cockroachdb/cockroach/commit/662a10125 [76bb9f090]: https://github.com/cockroachdb/cockroach/commit/76bb9f090 [8e82a68d7]: https://github.com/cockroachdb/cockroach/commit/8e82a68d7
scripts/release-notes/test4.notes.ref.txt
0
https://github.com/cockroachdb/cockroach/commit/977f72e4cc24f5d14424c278d6d6921016c3fc62
[ 0.00023212509404402226, 0.00018424069276079535, 0.00016803732432890683, 0.00017122816643677652, 0.000024441198547719978 ]
{ "id": 0, "code_window": [ "| ----- | ---- | ----- | ----------- | -------------- |\n", "| node_id | [int32](#cockroach.server.serverpb.DecommissionPreCheckResponse-int32) | | | [reserved](#support-status) |\n", "| decommission_readiness | [DecommissionPreCheckResponse.NodeReadiness](#cockroach.server.serverpb.DecommissionPreCheckResponse-cockroach.server.serverpb.DecommissionPreCheckResponse.NodeReadiness) | | The node's decommission readiness status. | [reserved](#support-status) |\n", "| liveness_status | [cockroach.kv.kvserver.liveness.livenesspb.NodeLivenessStatus](#cockroach.server.serverpb.DecommissionPreCheckResponse-cockroach.kv.kvserver.liveness.livenesspb.NodeLivenessStatus) | | The liveness status of the given node. | [reserved](#support-status) |\n", "| replica_count | [int64](#cockroach.server.serverpb.DecommissionPreCheckResponse-int64) | | The number of total replicas on the node, computed by scanning range descriptors. | [reserved](#support-status) |\n", "| checked_ranges | [DecommissionPreCheckResponse.RangeCheckResult](#cockroach.server.serverpb.DecommissionPreCheckResponse-cockroach.server.serverpb.DecommissionPreCheckResponse.RangeCheckResult) | repeated | The details and recorded traces from preprocessing each range with a replica on the checked nodes that resulted in error, up to the maximum specified in the request. | [reserved](#support-status) |\n", "\n", "\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "docs/generated/http/full.md", "type": "replace", "edit_start_line_idx": 1000 }
CC += -std=c99 CPPFLAGS += -I/usr/include/postgresql LDLIBS += -lpq -lpqtypes
pkg/acceptance/testdata/c/Makefile
0
https://github.com/cockroachdb/cockroach/commit/977f72e4cc24f5d14424c278d6d6921016c3fc62
[ 0.00016431529365945607, 0.00016431529365945607, 0.00016431529365945607, 0.00016431529365945607, 0 ]
{ "id": 1, "code_window": [ "\t\t\tresultsByNodeID[nID] = serverpb.DecommissionPreCheckResponse_NodeCheckResult{\n", "\t\t\t\tNodeID: nID,\n", "\t\t\t\tDecommissionReadiness: serverpb.DecommissionPreCheckResponse_UNKNOWN,\n", "\t\t\t\tLivenessStatus: livenessStatus,\n", "\t\t\t}\n", "\t\t} else if livenessStatus == livenesspb.NodeLivenessStatus_DECOMMISSIONED {\n", "\t\t\tresultsByNodeID[nID] = serverpb.DecommissionPreCheckResponse_NodeCheckResult{\n", "\t\t\t\tNodeID: nID,\n", "\t\t\t\tDecommissionReadiness: serverpb.DecommissionPreCheckResponse_ALREADY_DECOMMISSIONED,\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/server/admin.go", "type": "replace", "edit_start_line_idx": 1000 }
// Copyright 2016 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. syntax = "proto3"; package cockroach.server.serverpb; option go_package = "github.com/cockroachdb/cockroach/pkg/server/serverpb"; import "config/zonepb/zone.proto"; import "util/tracing/tracingpb/tracing.proto"; import "jobs/jobspb/jobs.proto"; import "server/serverpb/status.proto"; import "storage/enginepb/mvcc.proto"; import "kv/kvserver/liveness/livenesspb/liveness.proto"; import "kv/kvserver/loqrecovery/loqrecoverypb/recovery.proto"; import "kv/kvserver/kvserverpb/range_log.proto"; import "kv/kvpb/api.proto"; import "roachpb/metadata.proto"; import "roachpb/data.proto"; import "ts/catalog/chart_catalog.proto"; import "util/metric/metric.proto"; import "util/tracing/tracingpb/recorded_span.proto"; import "gogoproto/gogo.proto"; import "google/api/annotations.proto"; import "google/protobuf/timestamp.proto"; // ZoneConfigurationLevel indicates, for objects with a Zone Configuration, // the object level at which the configuration is defined. This is needed // because objects without a specifically indicated Zone Configuration will // inherit the configuration of their "parent". enum ZoneConfigurationLevel { UNKNOWN = 0; // CLUSTER indicates that this object uses the cluster default Zone Configuration. CLUSTER = 1; // DATABASE indicates that this object uses a database-level Zone Configuration. DATABASE = 2; // TABLE indicates that this object uses a table-level Zone Configuration. TABLE = 3; } // DatabasesRequest requests a list of databases. message DatabasesRequest { } // DatabasesResponse contains a list of databases. message DatabasesResponse { repeated string databases = 1; } // DatabaseDetailsRequest requests detailed information about the specified // database message DatabaseDetailsRequest { // database is the name of the database we are querying. string database = 1; // Setting this flag includes a computationally-expensive stats field // in the response. bool include_stats = 2; } // DatabaseDetailsResponse contains grant information, table names, // zone configuration, and size statistics for a database. message DatabaseDetailsResponse { message Grant { // user is the user that this grant applies to. string user = 1; // privileges are the abilities this grant gives to the user. repeated string privileges = 2; } message Stats { // A table which exists in the database, but for which we could not load stats // during this request. message MissingTable { // The name of the table for which we could not load stats. string name = 1; // The error message that resulted when the request for this table failed. string error_message = 2; } // A list of tables that exist in the database, but for which stats could // not be loaded due to failures during this request. repeated MissingTable missing_tables = 1; // The number of ranges, as determined from a query of range meta keys, // across all tables. int64 range_count = 2; // An approximation of the disk space (in bytes) used for all replicas // of all tables across the cluster. uint64 approximate_disk_bytes = 3; // node_ids is the ordered list of node ids on which data is stored. 
repeated int32 node_ids = 4 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; int32 num_index_recommendations = 5; } // grants are the results of SHOW GRANTS for this database. repeated Grant grants = 1 [(gogoproto.nullable) = false]; // table_names contains the names of all tables in this database. Note that // all responses will be schema-qualified (schema.table) and that every schema // or table that contains a "sql unsafe character" such as uppercase letters // or dots will be surrounded with double quotes, such as "naughty schema".table. repeated string table_names = 2; // descriptor_id is an identifier used to uniquely identify this database. int64 descriptor_id = 3 [(gogoproto.customname) = "DescriptorID"]; // The zone configuration in effect for this database. cockroach.config.zonepb.ZoneConfig zone_config = 4 [(gogoproto.nullable) = false]; // The level at which this object's zone configuration is set. ZoneConfigurationLevel zone_config_level = 5; // Size information about the database, present only when explicitly requested. Stats stats = 6; } // TableDetailsRequest is a request for detailed information about a table. message TableDetailsRequest { // database is the name of the database that contains the table we're // interested in. string database = 1; // table is the name of the table that we're querying. Table may be // schema-qualified (schema.table) and each name component that contains // sql unsafe characters such as . or uppercase letters must be surrounded // in double quotes like "naughty schema".table. string table = 2; } // TableDetailsResponse contains grants, column names, and indexes for // a table. message TableDetailsResponse { // Grant is an entry from SHOW GRANTS. message Grant { // user is the user that this grant applies to. string user = 1; // privileges are the abilities this grant gives to the user. repeated string privileges = 2; } message Column { // name is the name of the column. string name = 1; // type is the SQL type (INT, STRING, etc.) of this column. string type = 2; // nullable is whether this column can contain NULL. bool nullable = 3; // default_value is the default value of this column. string default_value = 4; // generation_expression is the generator expression if the column is computed. string generation_expression = 5; // hidden is whether this column is hidden. bool hidden = 6; } message Index { // name is the name of this index. string name = 1; // unique is whether this a unique index (i.e. CREATE UNIQUE INDEX). bool unique = 2; // seq is an internal variable that's passed along. int64 seq = 3; // column is the column that this index indexes. string column = 4; // direction is either "ASC" (ascending) or "DESC" (descending). string direction = 5; // storing is an internal variable that's passed along. bool storing = 6; // implicit is an internal variable that's passed along. bool implicit = 7; } repeated Grant grants = 1 [(gogoproto.nullable) = false]; repeated Column columns = 2 [(gogoproto.nullable) = false]; repeated Index indexes = 3 [(gogoproto.nullable) = false]; // range_count is the size of the table in ranges. This provides a rough // estimate of the storage requirements for the table. // TODO(mrtracy): The TableStats method also returns a range_count field which // is more accurate than this one; TableDetails calculates this number using // a potentially faster method that is subject to cache staleness. 
We should // consider removing or renaming this field to reflect that difference. See // GitHub issue #5435 for more information. int64 range_count = 4; // create_table_statement is the output of "SHOW CREATE" for this table; // it is a SQL statement that would re-create the table's current schema if // executed. string create_table_statement = 5; // The zone configuration in effect for this table. cockroach.config.zonepb.ZoneConfig zone_config = 6 [(gogoproto.nullable) = false]; // The level at which this object's zone configuration is set. ZoneConfigurationLevel zone_config_level = 7; // descriptor_id is an identifier used to uniquely identify this table. int64 descriptor_id = 8 [(gogoproto.customname) = "DescriptorID"]; // configure_zone_statement is the output of "SHOW ZONE CONFIGURATION FOR TABLE" // for this table. It is a SQL statement that would re-configure the table's current // zone if executed. string configure_zone_statement = 9; // stats_last_created_at is the time at which statistics were last created. google.protobuf.Timestamp stats_last_created_at = 10 [(gogoproto.stdtime) = true]; // has_index_recommendations notifies if the there are index recommendations // on this table. bool has_index_recommendations = 11; // data_total_bytes is the size in bytes of live and non-live data on the table. int64 data_total_bytes = 12; // data_live_bytes is the size in bytes of live (non MVCC) data on the table. int64 data_live_bytes = 13; // data_live_percentage is the percentage of live (non MVCC) data on the table. float data_live_percentage = 14; } // TableStatsRequest is a request for detailed, computationally expensive // information about a table. message TableStatsRequest { // database is the name of the database that contains the table we're // interested in. string database = 1; // table is the name of the table that we're querying. Table may be // schema-qualified (schema.table) and each name component that contains // sql unsafe characters such as . or uppercase letters must be surrounded // in double quotes like "naughty schema".table. string table = 2; } // TableStatsResponse contains detailed, computationally expensive information // about a table. message TableStatsResponse { // range_count is the number of ranges, as determined from a query of range // meta keys. int64 range_count = 1; // replica_count is the number of replicas of any range of this table, as // found by querying nodes which are known to have replicas. When compared // with range_count, this can be used to estimate the current replication // factor of the table. int64 replica_count = 2; // node_count is the number of nodes which contain data for this table, // according to a query of range meta keys. int64 node_count = 3; // stats is the summation of MVCCStats for all replicas of this table // across the cluster. cockroach.storage.enginepb.MVCCStats stats = 4 [(gogoproto.nullable) = false]; // approximate_disk_bytes is an approximation of the disk space (in bytes) // used for all replicas of this table across the cluster. uint64 approximate_disk_bytes = 6; // MissingNode represents information on a node which should contain data // for this table, but could not be contacted during this request. message MissingNode { // The ID of the missing node. string node_id = 1 [(gogoproto.customname) = "NodeID"]; // The error message that resulted when the query sent to this node failed. 
string error_message = 2; } // A list of nodes which should contain data for this table (according to // cluster metadata), but could not be contacted during this request. repeated MissingNode missing_nodes = 5 [(gogoproto.nullable) = false]; // node_ids is the ordered list of node ids on which the table data is stored. repeated int32 node_ids = 7 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; } // NonTableStatsRequest requests statistics on cluster data ranges that do not // belong to SQL tables. message NonTableStatsRequest { } // NonTableStatsResponse returns statistics on various cluster data ranges // that do not belong to SQL tables. The statistics for each range are returned // as a TableStatsResponse. message NonTableStatsResponse { // Information on time series ranges. TableStatsResponse time_series_stats = 1; // Information for remaining (non-table, non-time-series) ranges. TableStatsResponse internal_use_stats = 2; } // UsersRequest requests a list of users. message UsersRequest { } // UsersResponse returns a list of users. message UsersResponse { // User is a CockroachDB user. message User { string username = 1; } // usernames is a list of users for the CockroachDB cluster. repeated User users = 1 [(gogoproto.nullable) = false]; } // EventsRequest is a request for event log entries, optionally filtered // by the specified event type. message EventsRequest { string type = 1; reserved 2; // limit is the total number of results that are retrieved by the query. If // this is omitted or set to 0, the default maximum number of results are // returned. When set to > 0, at most only that number of results are // returned. When set to < 0, an unlimited number of results are returned. int32 limit = 3; // unredacted_events indicates that the values in the events should // not be redacted. The default is to redact, so that older versions // of `cockroach zip` do not see un-redacted values by default. // For good security, this field is only obeyed by the server after // checking that the client of the RPC is an admin user. bool unredacted_events = 4; } // EventsResponse contains a set of event log entries. This is always limited // to the latest N entries (N is enforced in the associated endpoint). message EventsResponse { message Event { // timestamp is the time at which the event occurred. google.protobuf.Timestamp timestamp = 1 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; // event_type is the type of the event (e.g. "create_table", "drop_table". string event_type = 2; reserved 3; // reporting_id is the reporting ID for this event. int64 reporting_id = 4 [(gogoproto.customname) = "ReportingID"]; // info has more detailed information for the event. The contents vary // depending on the event. string info = 5; // unique_id is a unique identifier for this event. bytes unique_id = 6 [(gogoproto.customname) = "UniqueID"]; } repeated Event events = 1 [(gogoproto.nullable) = false]; } // SetUIDataRequest stores the given key/value pairs in the system.ui table. message SetUIDataRequest { // key_values is a map of keys to bytes values. Each key will be stored // with its corresponding value as a separate row in system.ui. map<string, bytes> key_values = 1; } // SetUIDataResponse is currently an empty response. message SetUIDataResponse { } // GETUIDataRequest requests the values for the given keys from the system.ui // table. 
message GetUIDataRequest { repeated string keys = 1; } // GetUIDataResponse contains the requested values and the times at which // the values were last updated. message GetUIDataResponse { message Value { // value is the value of the requested key. bytes value = 1; // last_updated is the time at which the value was last updated. google.protobuf.Timestamp last_updated = 2 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; } // key_values maps keys to their retrieved values. If this doesn't contain a // a requested key, that key was not found. map<string, Value> key_values = 1 [(gogoproto.nullable) = false]; } // ClusterRequest requests metadata for the cluster. message ClusterRequest { } // ClusterResponse contains metadata for the cluster. message ClusterResponse { // The unique ID used to identify this cluster. string cluster_id = 1 [(gogoproto.customname) = "ClusterID"]; // True if diagnostics reporting is enabled for the cluster. bool reporting_enabled = 2; // True if enterprise features are enabled for the cluster. bool enterprise_enabled = 3; } // DrainRequest instructs the receiving node to drain. message DrainRequest { reserved 1; reserved 2; // When true, terminates the process after the server has started draining. // Setting both shutdown and do_drain to false causes // the request to only operate as a probe. // Setting do_drain to false and shutdown to true causes // the server to shut down immediately without // first draining. bool shutdown = 3; // When true, perform the drain phase. See the comment above on // shutdown for an explanation of the interaction between the two. // do_drain is also implied by a non-nil deprecated_probe_indicator. bool do_drain = 4; // node_id is a string so that "local" can be used to specify that no // forwarding is necessary. // For compatibility with v21.2 nodes, an empty node_id is // interpreted as "local". This behavior might be removed // in subsequent versions. string node_id = 5; // When true, more detailed information is logged during the range lease drain phase. bool verbose = 6; } // DrainResponse is the response to a successful DrainRequest. message DrainResponse { // is_draining is set to true iff the server is currently draining. // This is set to true in response to a request where skip_drain // is false; but it can also be set to true in response // to a probe request (!shutdown && skip_drain) if another // drain request has been issued prior or asynchronously. bool is_draining = 2; // drain_remaining_indicator measures, at the time of starting to // process the corresponding drain request, how many actions to // fully drain the node were deemed to be necessary. Some, but not // all, of these actions may already have been carried out by the // time this indicator is received by the client. The client should // issue requests until this indicator first reaches zero, which // indicates that the node is fully drained. // // The API contract is the following: // // - upon a first Drain call with do_drain set, the remaining // indicator will have some value >=0. If >0, it indicates that // drain is pushing state away from the node. (What this state // precisely means is left unspecified for this field. See below // for details.) // // - upon a subsequent Drain call with do_drain set, the remaining // indicator should have reduced in value. 
The drain process does best // effort at shedding state away from the node; hopefully, all the // state is shed away upon the first call and the progress // indicator can be zero as early as the second call. However, // if there was a lot of state to shed, it is possible for // timeout to be encountered upon the first call. In that case, the // second call will do some more work and return a non-zero value // as well. // // - eventually, in an iterated sequence of DrainRequests with // do_drain set, the remaining indicator should reduce to zero. At // that point the client can conclude that no state is left to // shed, and it should be safe to shut down the node with a // DrainRequest with shutdown = true. // // Note that this field is left unpopulated (and thus remains at // zero) for pre-20.1 nodes. A client can recognize this by // observing is_draining to be false after a request with do_drain = // true: the is_draining field is also left unpopulated by pre-20.1 // nodes. uint64 drain_remaining_indicator = 3; // drain_remaining_description is an informal (= not // machine-parsable) string that explains the progress of the drain // process to human eyes. This is intended for use mainly for // troubleshooting. // // The field is only populated if do_drain is true in the // request. string drain_remaining_description = 4; reserved 1; } // DecommissionPreCheckRequest requests that preliminary checks be run to // ensure that the specified node(s) can be decommissioned successfully. message DecommissionPreCheckRequest { repeated int32 node_ids = 1 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // The maximum number of ranges for which to report errors. int32 num_replica_report = 2; // If true, all ranges on the checked nodes must only need replacement or // removal for decommissioning. bool strict_readiness = 3; // If true, collect traces for each range checked. // Requires num_replica_report > 0. bool collect_traces = 4; } // DecommissionPreCheckResponse returns the number of replicas that encountered // errors when running preliminary decommissioning checks, as well as the // associated error messages and traces, for each node. message DecommissionPreCheckResponse { enum NodeReadiness { option (gogoproto.goproto_enum_stringer) = false; UNKNOWN = 0; READY = 1; ALREADY_DECOMMISSIONED = 2; ALLOCATION_ERRORS = 3; } // The result of checking a range's readiness for the decommission. message RangeCheckResult { int32 range_id = 1 [ (gogoproto.customname) = "RangeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.RangeID"]; // The action determined by the allocator that is needed for the range. string action = 2; // All trace events collected while checking the range. repeated TraceEvent events = 3; // The error message from the allocator's processing, if any. string error = 4; } // The result of checking a single node's readiness for decommission. message NodeCheckResult { int32 node_id = 1 [ (gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // The node's decommission readiness status. NodeReadiness decommission_readiness = 2; // The liveness status of the given node. kv.kvserver.liveness.livenesspb.NodeLivenessStatus liveness_status = 3; // The number of total replicas on the node, computed by scanning range // descriptors. 
int64 replica_count = 4; // The details and recorded traces from preprocessing each range with a // replica on the checked nodes that resulted in error, up to the maximum // specified in the request. repeated RangeCheckResult checked_ranges = 5 [(gogoproto.nullable) = false]; } // Status of the preliminary decommission checks across nodes. repeated NodeCheckResult checked_nodes = 1 [(gogoproto.nullable) = false]; } // DecommissionStatusRequest requests the decommissioning status for the // specified or, if none are specified, all nodes. message DecommissionStatusRequest { repeated int32 node_ids = 1 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // The number of decommissioning replicas to be reported. int32 num_replica_report = 2; } // DecommissionRequest requests the server to set the membership status on // all nodes specified by NodeIDs to the value of TargetMembership. // // If no NodeIDs are given, it targets the recipient node. message DecommissionRequest { repeated int32 node_ids = 1 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; kv.kvserver.liveness.livenesspb.MembershipStatus target_membership = 2; // The number of decommissioning replicas to be reported. int32 num_replica_report = 3; } // DecommissionStatusResponse lists decommissioning statuses for a number of NodeIDs. message DecommissionStatusResponse { message Replica { int32 replica_id = 1 [ (gogoproto.customname) = "ReplicaID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.ReplicaID"]; int32 range_id = 2 [ (gogoproto.customname) = "RangeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.RangeID"]; } message Status { int32 node_id = 1 [ (gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; bool is_live = 2; // The number of replicas on the node, computed by scanning meta2 ranges. int64 replica_count = 3; // The membership status of the given node. kv.kvserver.liveness.livenesspb.MembershipStatus membership = 4; bool draining = 5; // Decommissioning replicas on the given node to be reported. // How many replicas are reported is determined by what was specified in the // request. repeated Replica reported_replicas = 6; } // Status of all affected nodes. repeated Status status = 2 [(gogoproto.nullable) = false]; } // SettingsRequest inquires what are the current settings in the cluster. message SettingsRequest { // The array of setting names to retrieve. // An empty keys array means "all". repeated string keys = 1; // Indicate whether to see unredacted setting values. // This is opt-in so that a previous version `cockroach zip` // does not start reporting values when this becomes active. // For good security, the server only obeys this after it checks // that the logger-in user has admin privilege. bool unredacted_values = 2; } // SettingsResponse is the response to SettingsRequest. message SettingsResponse { message Value { string value = 1; string type = 2; string description = 3; bool public = 4; google.protobuf.Timestamp last_updated = 5 [(gogoproto.nullable) = true, (gogoproto.stdtime) = true]; } map<string, Value> key_values = 1 [(gogoproto.nullable) = false]; } // HealthRequest requests a liveness or readiness check. // // A liveness check is triggered via ready set to false. 
In this mode, // an empty response is returned immediately, that is, the caller merely // learns that the process is running. // // A readiness check (ready == true) is suitable for determining whether // user traffic should be directed at a given node, for example by a load // balancer. In this mode, a successful response is returned only if the // node: // // - is not in the process of shutting down or booting up (including // waiting for cluster bootstrap); // - is regarded as healthy by the cluster via the recent broadcast of // a liveness beacon. Absent either of these conditions, an error // code will result. // // API: PUBLIC message HealthRequest { // ready specifies whether the client wants to know whether the // target node is ready to receive traffic. If a node is unready, an // error will be returned. // API: PUBLIC bool ready = 1; } // HealthResponse is the response to HealthRequest. It currently does not // contain any information. // API: PUBLIC message HealthResponse { } // LivenessRequest requests liveness data for all nodes on the cluster. message LivenessRequest { } // LivenessResponse contains the liveness status of each node on the cluster. message LivenessResponse { repeated kv.kvserver.liveness.livenesspb.Liveness livenesses = 1 [(gogoproto.nullable) = false]; map<int32, kv.kvserver.liveness.livenesspb.NodeLivenessStatus> statuses = 2 [ (gogoproto.nullable) = false, (gogoproto.castkey) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID" ]; } // JobsRequest requests system job information of the given status and type. message JobsRequest { int32 limit = 1; string status = 2; cockroach.sql.jobs.jobspb.Type type = 3; } // JobsResponse contains the job record for each matching job. message JobsResponse { repeated JobResponse jobs = 1 [(gogoproto.nullable) = false]; google.protobuf.Timestamp earliest_retained_time = 2 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; } // JobRequest requests system job information for the given job_id. message JobRequest { int64 job_id = 1; } // JobResponse contains the job record for a job. message JobResponse { int64 id = 1 [(gogoproto.customname) = "ID"]; string type = 2; string description = 3; string statement = 16; string username = 4; repeated uint32 descriptor_ids = 5 [ (gogoproto.customname) = "DescriptorIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID" ]; string status = 6; google.protobuf.Timestamp created = 7 [(gogoproto.stdtime) = true]; google.protobuf.Timestamp started = 8 [(gogoproto.stdtime) = true]; google.protobuf.Timestamp finished = 9 [(gogoproto.stdtime) = true]; google.protobuf.Timestamp modified = 10 [(gogoproto.stdtime) = true]; float fraction_completed = 11; string error = 12; // highwater_timestamp is the highwater timestamp returned as normal // timestamp. This is appropriate for display to humans. google.protobuf.Timestamp highwater_timestamp = 13 [(gogoproto.stdtime) = true]; // highwater_decimal is the highwater timestamp in the proprietary decimal // form used by logical timestamps internally. This is appropriate to pass // to a "AS OF SYSTEM TIME" SQL statement. string highwater_decimal = 14; string running_status = 15; google.protobuf.Timestamp last_run = 17 [(gogoproto.stdtime) = true]; google.protobuf.Timestamp next_run = 18 [(gogoproto.stdtime) = true]; int64 num_runs = 19; // ExecutionFailure corresponds to a failure to execute the job with the // attempt starting at start and ending at end. 
message ExecutionFailure { // Status is the status of the job during the execution. string status = 1; // Start is the time at which the execution started. google.protobuf.Timestamp start = 2 [(gogoproto.stdtime) = true]; // End is the time at which the error occurred. google.protobuf.Timestamp end = 3 [(gogoproto.stdtime) = true]; // Error is the error which occurred. string error = 4; } // ExecutionFailures is a log of execution failures of the job. It is not // guaranteed to contain all execution failures and some execution failures // may not contain an error or end. repeated ExecutionFailure execution_failures = 20; // coordinator_id identifies the node coordinating the job. This value will // only be present for jobs that are currently running or recently ran. int64 coordinator_id = 21 [(gogoproto.customname) = "CoordinatorID"]; } // LocationsRequest requests system locality location information. message LocationsRequest { } // JobsResponse contains the job record for each matching job. message LocationsResponse { message Location { string locality_key = 1; string locality_value = 2; double latitude = 3; double longitude = 4; } repeated Location locations = 1 [(gogoproto.nullable) = false]; } // RangeLogRequest request the history of a range from the range log. message RangeLogRequest { // TODO(tamird): use [(gogoproto.customname) = "RangeID"] below. Need to // figure out how to teach grpc-gateway about custom names. // If RangeID is 0, returns range log history without filtering by range. int64 range_id = 1; // limit is the total number of results that are retrieved by the query. If // this is omitted or set to 0, the default maximum number of results are // returned. When set to > 0, at most only that number of results are // returned. When set to < 0, an unlimited number of results are returned. int32 limit = 2; } // RangeLogResponse contains a list of entries from the range log table. message RangeLogResponse { // To avoid porting the pretty printing of keys and descriptors to // javascript, they will be precomputed on the serverside. message PrettyInfo { string updated_desc = 1; string new_desc = 2; string added_replica = 3; string removed_replica = 4; string reason = 5; string details = 6; } message Event { cockroach.kv.kvserver.storagepb.RangeLogEvent event = 1 [(gogoproto.nullable) = false]; PrettyInfo pretty_info = 2 [(gogoproto.nullable) = false]; } reserved 1; // Previously used. repeated Event events = 2 [(gogoproto.nullable) = false]; } // QueryPlanRequest requests the query plans for a SQL string. message QueryPlanRequest { // query is the SQL query string. string query = 1; } // QueryPlanResponse contains the query plans for a SQL string (currently only // the distsql physical query plan). message QueryPlanResponse { string distsql_physical_query_plan = 1 [(gogoproto.customname) = "DistSQLPhysicalQueryPlan"]; } message DataDistributionRequest { } message DataDistributionResponse { message ZoneConfig { // target is the object the zone config applies to, e.g. "DATABASE db" or // "PARTITION north_america OF TABLE users". string target = 1; config.zonepb.ZoneConfig config = 2 [(gogoproto.nullable) = false]; reserved 3; // config_sql is the SQL representation of config. 
string config_sql = 4 [(gogoproto.customname) = "ConfigSQL"]; } message TableInfo { map<int32, int64> replica_count_by_node_id = 1 [(gogoproto.castkey) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; int64 zone_config_id = 2; google.protobuf.Timestamp dropped_at = 3 [(gogoproto.stdtime) = true]; } message DatabaseInfo { // By table name. map<string, TableInfo> table_info = 1 [(gogoproto.nullable) = false]; } // By database name. map<string, DatabaseInfo> database_info = 1 [(gogoproto.nullable) = false]; reserved 2; // By zone name. map<string, ZoneConfig> zone_configs = 3 [(gogoproto.nullable) = false]; } // MetricMetadataRequest requests metadata for all metrics. message MetricMetadataRequest { } // MetricMetadataResponse contains the metadata for all metrics. message MetricMetadataResponse { map<string, cockroach.util.metric.Metadata> metadata = 1 [(gogoproto.nullable) = false]; } message EnqueueRangeRequest { // The node on which the queue should process the range. If node_id is 0, // the request will be forwarded to all other nodes. int32 node_id = 1 [(gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // The name of the replica queue to run the range through. Matched against // each queue's name field. See the implementation of baseQueue for details. string queue = 2; // The ID of the range to run through the queue. int32 range_id = 3 [(gogoproto.customname) = "RangeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.RangeID"]; // If set, run the queue's process method without first checking whether the // replica should be processed by calling shouldQueue. bool skip_should_queue = 4; } message EnqueueRangeResponse { message Details { int32 node_id = 1 [(gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // All trace events collected while processing the range in the queue. repeated TraceEvent events = 2; // The error message from the queue's processing, if any. string error = 3; } repeated Details details = 1; } // ChartCatalogRequest requests returns a catalog of Admin UI charts. message ChartCatalogRequest { } // ChartCatalogResponse returns a catalog of Admin UI charts useful for debugging. message ChartCatalogResponse { repeated cockroach.ts.catalog.ChartSection catalog = 1 [(gogoproto.nullable) = false]; } // CARequest requests the CA cert anchoring this service. message CARequest { } // CAResponse contains a PEM encoded copy of the CA cert for this service. message CAResponse { bytes ca_cert = 1; } // CertBundleRequest requests the bundle of initialization CAs for a new node. // It provides authentication in the form of a joinToken containing a // sharedSecret. message CertBundleRequest { string token_id = 1 [(gogoproto.customname) = "TokenID"]; bytes shared_secret = 2; } // CertBundleResponse contains a copy of all CAs needed to initialize TLS for // a new node. message CertBundleResponse { bytes bundle = 1; } message RecoveryCollectReplicaInfoRequest {} // RecoveryCollectReplicaRestartNodeStream is sent by collector node to client // if it experiences a transient failure collecting data from one of the nodes. // This message instructs client to drop any data that it collected locally // for specified node as streaming for this node would be restarted. // This mechanism is needed to avoid restarting the whole collection procedure // in large cluster if one of the nodes fails transiently. 
message RecoveryCollectReplicaRestartNodeStream { int32 node_id = 1 [ (gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; } message RecoveryCollectReplicaInfoResponse { oneof info { roachpb.RangeDescriptor range_descriptor = 1; cockroach.kv.kvserver.loqrecovery.loqrecoverypb.ReplicaInfo replica_info = 2; RecoveryCollectReplicaRestartNodeStream node_stream_restarted = 3; cockroach.kv.kvserver.loqrecovery.loqrecoverypb.ClusterMetadata metadata = 4; } } message RecoveryCollectLocalReplicaInfoRequest { } message RecoveryCollectLocalReplicaInfoResponse { cockroach.kv.kvserver.loqrecovery.loqrecoverypb.ReplicaInfo replica_info = 1; } message RecoveryStagePlanRequest { // Plan is replica update plan to stage for application on next restart. Plan // could be empty in that case existing plan is removed if present. cockroach.kv.kvserver.loqrecovery.loqrecoverypb.ReplicaUpdatePlan plan = 1; // If all nodes is true, then receiver should act as a coordinator and perform // a fan-out to stage plan on all nodes of the cluster. bool all_nodes = 2; // ForcePlan tells receiver to ignore any plan already staged on the node if it // is present and replace it with new plan (including empty one). bool force_plan = 3; // ForceLocalInternalVersion tells server to update internal component of plan // version to the one of active cluster version. This option needs to be set // if target cluster is stuck in recovery where only part of nodes were // successfully migrated. bool force_local_internal_version = 4; } message RecoveryStagePlanResponse { // Errors contain error messages happened during plan staging. repeated string errors = 1; } message RecoveryNodeStatusRequest { } message RecoveryNodeStatusResponse { cockroach.kv.kvserver.loqrecovery.loqrecoverypb.NodeRecoveryStatus status = 1 [ (gogoproto.nullable) = false]; } message RecoveryVerifyRequest { // PlanID is ID of the plan to verify. bytes plan_id = 1 [ (gogoproto.customname) = "PendingPlanID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/uuid.UUID"]; // DecommissionedNodeIDs is a set of nodes that should be marked as decommissioned in // the cluster when loss of quorum recovery successfully applies. repeated int32 decommissioned_node_ids = 2 [(gogoproto.customname) = "DecommissionedNodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // MaxReportedRanges is the maximum number of failed ranges to report. // If more unhealthy ranges are found, error will be returned alongside range // to indicate that ranges were cut short. int32 max_reported_ranges = 3; } message RecoveryVerifyResponse { message UnavailableRanges { // Ranges contains descriptors of ranges that failed health check. // If there are too many ranges to report, error would contain relevant // message. repeated cockroach.kv.kvserver.loqrecovery.loqrecoverypb.RangeRecoveryStatus ranges = 1 [ (gogoproto.nullable) = false]; // Error contains an optional error if ranges validation can't complete. string error = 2; } // Statuses contain a list of recovery statuses of nodes updated during recovery. It // also contains nodes that were expected to be live (not decommissioned by recovery) // but failed to return status response. repeated cockroach.kv.kvserver.loqrecovery.loqrecoverypb.NodeRecoveryStatus statuses = 1 [ (gogoproto.nullable) = false]; // UnavailableRanges contains information about ranges that failed health check. 
UnavailableRanges unavailable_ranges = 2 [(gogoproto.nullable) = false]; // DecommissionedNodeStatuses contains a map of requested IDs with their // corresponding liveness statuses. map<int32, kv.kvserver.liveness.livenesspb.MembershipStatus> decommissioned_node_statuses = 3 [ (gogoproto.castkey) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; } // Admin is the gRPC API for the admin UI. Through grpc-gateway, we offer // REST-style HTTP endpoints that locally proxy to the gRPC endpoints. service Admin { rpc RequestCA(CARequest) returns (CAResponse) { option (google.api.http) = { get : "/_join/v1/ca" }; } rpc RequestCertBundle(CertBundleRequest) returns (CertBundleResponse) { option (google.api.http) = { get : "/_join/v1/requestbundle" }; } // URL: /_admin/v1/users rpc Users(UsersRequest) returns (UsersResponse) { option (google.api.http) = { get: "/_admin/v1/users" }; } // URL: /_admin/v1/databases rpc Databases(DatabasesRequest) returns (DatabasesResponse) { option (google.api.http) = { get: "/_admin/v1/databases" }; } // Example URL: /_admin/v1/databases/system rpc DatabaseDetails(DatabaseDetailsRequest) returns (DatabaseDetailsResponse) { option (google.api.http) = { get: "/_admin/v1/databases/{database}" }; } // Example URL: /_admin/v1/databases/system/tables/ui rpc TableDetails(TableDetailsRequest) returns (TableDetailsResponse) { option (google.api.http) = { get: "/_admin/v1/databases/{database}/tables/{table}" }; } // Example URL: /_admin/v1/databases/system/tables/ui/stats rpc TableStats(TableStatsRequest) returns (TableStatsResponse) { option (google.api.http) = { get: "/_admin/v1/databases/{database}/tables/{table}/stats" }; } // Example URL: /_admin/v1/nontablestats rpc NonTableStats(NonTableStatsRequest) returns (NonTableStatsResponse) { option (google.api.http) = { get: "/_admin/v1/nontablestats" }; } // Example URLs: // Example URLs: // - /_admin/v1/events // - /_admin/v1/events?limit=100 // - /_admin/v1/events?type=create_table // - /_admin/v1/events?type=create_table&limit=100 rpc Events(EventsRequest) returns (EventsResponse) { option (google.api.http) = { get: "/_admin/v1/events" }; } // This requires a POST. Because of the libraries we're using, the POST body // must be in the following format: // // {"key_values": // { "key1": "base64_encoded_value1"}, // ... // { "keyN": "base64_encoded_valueN"}, // } // // Note that all keys are quoted strings and that all values are base64- // encoded. // // Together, SetUIData and GetUIData provide access to a "cookie jar" for the // admin UI. The structure of the underlying data is meant to be opaque to the // server. rpc SetUIData(SetUIDataRequest) returns (SetUIDataResponse) { option (google.api.http) = { post: "/_admin/v1/uidata" body: "*" }; } // Example URLs: // - /_admin/v1/uidata?keys=MYKEY // - /_admin/v1/uidata?keys=MYKEY1&keys=MYKEY2 // // Yes, it's a little odd that the query parameter is named "keys" instead of // "key". I would've preferred that the URL parameter be named "key". However, // it's clearer for the protobuf field to be named "keys," which makes the URL // parameter "keys" as well. rpc GetUIData(GetUIDataRequest) returns (GetUIDataResponse) { option (google.api.http) = { get: "/_admin/v1/uidata" }; } // Cluster returns metadata for the cluster. rpc Cluster(ClusterRequest) returns (ClusterResponse) { option (google.api.http) = { get: "/_admin/v1/cluster" }; } // Settings returns the cluster-wide settings for the cluster. 
rpc Settings(SettingsRequest) returns (SettingsResponse) { option (google.api.http) = { get: "/_admin/v1/settings" }; } // Health returns liveness for the node target of the request. // API: PUBLIC rpc Health(HealthRequest) returns (HealthResponse) { option (google.api.http) = { get: "/_admin/v1/health" additional_bindings {get : "/health"} }; } // Liveness returns the liveness state of all nodes on the cluster. rpc Liveness(LivenessRequest) returns (LivenessResponse) { option (google.api.http) = { get: "/_admin/v1/liveness" }; } // Jobs returns the job records for all jobs of the given status and type. rpc Jobs(JobsRequest) returns (JobsResponse) { option (google.api.http) = { get: "/_admin/v1/jobs" }; } // Job returns the job record for the job of the given job_id. rpc Job(JobRequest) returns (JobResponse) { option (google.api.http) = { get: "/_admin/v1/jobs/{job_id}" }; } // Locations returns the locality location records. rpc Locations(LocationsRequest) returns (LocationsResponse) { option (google.api.http) = { get: "/_admin/v1/locations" }; } // QueryPlan returns the query plans for a SQL string. rpc QueryPlan(QueryPlanRequest) returns (QueryPlanResponse) { option (google.api.http) = { get: "/_admin/v1/queryplan" }; } // Drain puts the node into the specified drain mode(s) and optionally // instructs the process to terminate. // We do not expose this via HTTP unless we have a way to authenticate // + authorize streaming RPC connections. See #42567. rpc Drain(DrainRequest) returns (stream DrainResponse) { } // DecommissionPreCheck requests that the server execute preliminary checks // to evaluate the possibility of successfully decommissioning a given node. rpc DecommissionPreCheck(DecommissionPreCheckRequest) returns (DecommissionPreCheckResponse) { } // Decommission puts the node(s) into the specified decommissioning state. // If this ever becomes exposed via HTTP, ensure that it performs // authorization. See #42567. rpc Decommission(DecommissionRequest) returns (DecommissionStatusResponse) { } // DecommissionStatus retrieves the decommissioning status of the specified nodes. // If this ever becomes exposed via HTTP, ensure that it performs // authorization. See #42567. rpc DecommissionStatus(DecommissionStatusRequest) returns (DecommissionStatusResponse) { } // URL: /_admin/v1/rangelog // URL: /_admin/v1/rangelog?limit=100 // URL: /_admin/v1/rangelog/1 // URL: /_admin/v1/rangelog/1?limit=100 rpc RangeLog(RangeLogRequest) returns (RangeLogResponse) { option (google.api.http) = { get: "/_admin/v1/rangelog" additional_bindings { get: "/_admin/v1/rangelog/{range_id}" } }; } rpc DataDistribution(DataDistributionRequest) returns (DataDistributionResponse) { option (google.api.http) = { get: "/_admin/v1/data_distribution" }; } // URL: /_admin/v1/metricmetadata rpc AllMetricMetadata(MetricMetadataRequest) returns (MetricMetadataResponse) { option (google.api.http) = { get: "/_admin/v1/metricmetadata" }; } // URL: /_admin/v1/chartcatalog rpc ChartCatalog(ChartCatalogRequest) returns (ChartCatalogResponse) { option (google.api.http) = { get: "/_admin/v1/chartcatalog" }; } // EnqueueRange runs the specified range through the specified queue on the // range's leaseholder store, returning the detailed trace and error // information from doing so. Parameters must be provided in the body of the // POST request. 
// For example: // // { // "queue": "raftlog", // "rangeId": 10 // } rpc EnqueueRange(EnqueueRangeRequest) returns (EnqueueRangeResponse) { option (google.api.http) = { post: "/_admin/v1/enqueue_range" body : "*" }; } // SendKVBatch proxies the given BatchRequest into KV, returning the // response. It is used by the CLI `debug send-kv-batch` command. rpc SendKVBatch(roachpb.BatchRequest) returns (roachpb.BatchResponse) { } // ListTracingSnapshots retrieves the list of snapshots of the Active Spans // Registry that the node currently has in memory. A new snapshot can be // captured with TakeTracingSnapshots. rpc ListTracingSnapshots(ListTracingSnapshotsRequest) returns (ListTracingSnapshotsResponse) { option (google.api.http) = { get: "/_admin/v1/trace_snapshots" }; } // TakeTracingSnapshot captures a new snapshot of the Active Spans Registry. // The new snapshot is returned, and also made available through // ListTracingSnapshots. rpc TakeTracingSnapshot(TakeTracingSnapshotRequest) returns (TakeTracingSnapshotResponse) { option (google.api.http) = { post: "/_admin/v1/trace_snapshots" }; } // GetTracingSnapshot returns a snapshot of the tracing spans in the active // spans registry previously generated through TakeTracingSnapshots. rpc GetTracingSnapshot(GetTracingSnapshotRequest) returns (GetTracingSnapshotResponse) { option (google.api.http) = { get: "/_admin/v1/trace_snapshots/{snapshot_id}" }; } // GetTrace returns the trace with a specified ID. Depending on the request, // the trace is returned either from a snapshot that was previously taken, or // directly from the active spans registry. rpc GetTrace(GetTraceRequest) returns (GetTraceResponse) { option (google.api.http) = { post: "/_admin/v1/traces" body: "*" }; } // SetTraceRecordingType sets the recording mode of all or some of the spans // in a trace. rpc SetTraceRecordingType(SetTraceRecordingTypeRequest) returns (SetTraceRecordingTypeResponse) { option (google.api.http) = { post: "/_admin/v1/settracerecordingtype" body: "*" }; } // RecoveryCollectReplicaInfo retrieves information about: // 1. range descriptors contained in cluster meta ranges if meta ranges // are readable; // 2. replica information from all live nodes that have connection to // the target node. rpc RecoveryCollectReplicaInfo(RecoveryCollectReplicaInfoRequest) returns (stream RecoveryCollectReplicaInfoResponse) {} // RecoveryCollectLocalReplicaInfo retrieve information about all local // replicas in all stores on the node. rpc RecoveryCollectLocalReplicaInfo(RecoveryCollectLocalReplicaInfoRequest) returns (stream RecoveryCollectLocalReplicaInfoResponse) {} // RecoveryStagePlan stages recovery plan on target or all nodes in cluster // depending on request content and marks nodes deleted in the plan as // decommissioned in each node's local node tombstone storage. rpc RecoveryStagePlan(RecoveryStagePlanRequest) returns (RecoveryStagePlanResponse) {} // RecoveryNodeStatus retrieves loss of quorum recovery status of a single // node. rpc RecoveryNodeStatus(RecoveryNodeStatusRequest) returns (RecoveryNodeStatusResponse) {} // RecoveryVerify verifies that recovery plan is applied on all necessary // nodes, ranges are available and nodes removed in plan are marked as // decommissioned. rpc RecoveryVerify(RecoveryVerifyRequest) returns (RecoveryVerifyResponse) {} // ListTenants returns a list of active tenants in the cluster. 
rpc ListTenants(ListTenantsRequest) returns (ListTenantsResponse) { option (google.api.http) = { get: "/_admin/v1/tenants" }; } } message ListTenantsRequest{} message ListTenantsResponse { repeated Tenant tenants = 1; } message Tenant { roachpb.TenantID tenant_id = 1; string tenant_name = 2; string sql_addr = 3; string rpc_addr = 4; } message ListTracingSnapshotsRequest {} message ListTracingSnapshotsResponse { repeated SnapshotInfo snapshots = 1; } message SnapshotInfo { // SnapshotID identifies a specific snapshot which can be requested via a // GetTracingSnapshotRequest. Negative IDs are used for "automatic" snapshots. int64 snapshot_id = 1 [(gogoproto.customname) = "SnapshotID"]; google.protobuf.Timestamp captured_at = 2 [(gogoproto.stdtime) = true]; } message TakeTracingSnapshotRequest {} message TakeTracingSnapshotResponse { SnapshotInfo snapshot = 1; } message GetTracingSnapshotRequest { // SnapshotId indicates which snapshot is requested. ID may be negative when // requesting an "automatic" snapshot; see ListTracingSnapshotsResponse. int64 snapshot_id = 1; } message GetTracingSnapshotResponse { TracingSnapshot snapshot = 1; } // GetTrace represents the request of the GetTrace RPC. message GetTraceRequest { // If a snapshot is specified, the trace information is returned from that // snapshot. If a snapshot is not specified, information about currently // opened spans is returned from the active spans registry. int64 snapshot_id = 1 [(gogoproto.customname) = "SnapshotID"]; uint64 trace_id = 2 [(gogoproto.nullable)=false, (gogoproto.customname) = "TraceID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.TraceID"]; util.tracing.tracingpb.RecordingMode recording_type = 3; } // GetTrace represents the response to the GetTrace RPC. message GetTraceResponse { // snapshot_id identifies the snapshot that the trace was retrieved from. If // 0, the trace was not retrieved from a registry, but directly from the // active spans registry. int64 snapshot_id = 1 [(gogoproto.customname) = "SnapshotID"]; uint64 trace_id = 2 [(gogoproto.nullable)=false, (gogoproto.customname) = "TraceID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.TraceID"]; // still_exists is set if any spans from this trace are currently present in // the active spans registry. // // If snapshot_id is 0, still_exists is always set. bool still_exists = 3; // serialized_recording represents the serialization of trace recording. We // return the recording already serialized as formatted string for easy // consumption in the browser. string serialized_recording = 4; } // TracingSnapshot represents a snapshot of the active spans registry, including // all the spans that were open at the time when the snapshot was taken. message TracingSnapshot { int64 snapshot_id = 1 [(gogoproto.customname) = "SnapshotID"]; google.protobuf.Timestamp captured_at = 2 [(gogoproto.stdtime) = true]; repeated TracingSpan spans = 3; // Ideally we'd use int64 to match the goroutine_id type // but unfortunately, the way that grpc-gateway parses // these objects into Javascript results in odd encodings // of Long JS types that are difficult to interact with // as map keys. Thus, we settle for string. map<string, string> stacks = 4; } message NamedOperationMetadata { string name = 1; util.tracing.tracingpb.OperationMetadata metadata = 2 [(gogoproto.nullable) = false]; } // TracingSpan represents a span, in a form slightly processed for the use of // the tracing UI. 
message TracingSpan { string operation = 1; uint64 trace_id = 2 [(gogoproto.customname) = "TraceID"]; uint64 span_id = 3 [(gogoproto.customname) = "SpanID"]; uint64 parent_span_id = 4 [(gogoproto.customname) = "ParentSpanID"]; google.protobuf.Timestamp start = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; uint64 goroutine_id = 6 [(gogoproto.customname) = "GoroutineID"]; repeated SpanTag processed_tags = 7; // current is set if the span is still alive (i.e. still present in the active // spans registry). bool current = 8; // current_recording_mode represents the span's current recording mode. This is // not set if current == false. util.tracing.tracingpb.RecordingMode current_recording_mode = 9; repeated NamedOperationMetadata children_metadata = 10; } // SpanTag represents a tag on a tracing span, in a form processed for the use // of the tracing UI. message SpanTag { string key = 1; string val = 2; string caption = 3; string link = 4; bool hidden = 5; bool highlight = 6; bool inherit = 7; bool inherited = 8; bool propagate_up = 9; bool copied_from_child = 10; repeated ChildSpanTag children = 11; // May be empty. } message ChildSpanTag { string key = 1; string val = 2; } // SetTraceRecordingTypeRequest is the request for SetTraceRecordingType, which // sets the recording mode of all or some of the spans in a trace. message SetTraceRecordingTypeRequest { // TraceID identifies the trace to toggle the recording of. It must always be // specified. uint64 trace_id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "TraceID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.TraceID"]; // SpanID, if not zero, controls which spans in the trace get their recording // mode set. If zero, all spans in the trace are updated. If not zero, only // the respective span and its descendants get updated. uint64 span_id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "SpanID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.SpanID"]; util.tracing.tracingpb.RecordingMode recording_mode = 3; } // SetTraceRecordingTypeRequest is the response for SetTraceRecordingType. message SetTraceRecordingTypeResponse{} // FeatureFlags within this struct are used within back-end/front-end code to show/hide features. message FeatureFlags { // isObservabiliyService is true when the server is an instance of the Observability Service bool is_observability_service = 1; // CanViewKVMetricDashboards is true when the logged in user is able to view KV-level metric dashboards. bool can_view_kv_metric_dashboards = 2; // DisableKVLevelAdvancedDebug is true when the UI should remove options to certain KV-level // debug operations. This is helpful in application tenant contexsts, where these requests // can only return errors since the tenant cannot perform the operations. bool disable_kv_level_advanced_debug = 3; }
pkg/server/serverpb/admin.proto
1
https://github.com/cockroachdb/cockroach/commit/977f72e4cc24f5d14424c278d6d6921016c3fc62
[ 0.011312483809888363, 0.0006289592711254954, 0.00016132583550643176, 0.00017111407942138612, 0.001435846439562738 ]
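The DrainResponse comments in the admin.proto record above describe an iterated client protocol: keep issuing DrainRequests with do_drain set until drain_remaining_indicator first reaches zero, after which it is safe to send a request with shutdown set. The sketch below is a minimal illustration of that loop, not the cockroach CLI's actual drain code; the generated Go identifiers (serverpb.AdminClient, DoDrain, Shutdown, DrainRemainingIndicator) and the package name admindrain are assumptions inferred from the proto definitions in this file.

package admindrain

import (
	"context"
	"io"

	"github.com/cockroachdb/cockroach/pkg/server/serverpb"
)

// drainAndShutdown drives the drain_remaining_indicator contract: drain in a
// loop until the node reports nothing left to shed, then terminate it.
func drainAndShutdown(ctx context.Context, c serverpb.AdminClient) error {
	for {
		stream, err := c.Drain(ctx, &serverpb.DrainRequest{DoDrain: true})
		if err != nil {
			return err
		}
		var remaining uint64
		for {
			resp, err := stream.Recv()
			if err == io.EOF {
				break
			}
			if err != nil {
				return err
			}
			remaining = resp.DrainRemainingIndicator
		}
		// Zero means no state is left to shed. Pre-20.1 nodes leave the
		// field unpopulated; this sketch does not handle that case.
		if remaining == 0 {
			break
		}
	}
	// Final call: ask the process to terminate without draining again.
	stream, err := c.Drain(ctx, &serverpb.DrainRequest{Shutdown: true})
	if err != nil {
		return err
	}
	_, _ = stream.Recv() // the connection may drop as the node exits
	return nil
}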
{ "id": 1, "code_window": [ "\t\t\tresultsByNodeID[nID] = serverpb.DecommissionPreCheckResponse_NodeCheckResult{\n", "\t\t\t\tNodeID: nID,\n", "\t\t\t\tDecommissionReadiness: serverpb.DecommissionPreCheckResponse_UNKNOWN,\n", "\t\t\t\tLivenessStatus: livenessStatus,\n", "\t\t\t}\n", "\t\t} else if livenessStatus == livenesspb.NodeLivenessStatus_DECOMMISSIONED {\n", "\t\t\tresultsByNodeID[nID] = serverpb.DecommissionPreCheckResponse_NodeCheckResult{\n", "\t\t\t\tNodeID: nID,\n", "\t\t\t\tDecommissionReadiness: serverpb.DecommissionPreCheckResponse_ALREADY_DECOMMISSIONED,\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/server/admin.go", "type": "replace", "edit_start_line_idx": 1000 }
use registry=a ---- ev tableid=1 indexid=1 key=key id=a duration=1 ---- # Add an event on a non-SQL key. evnonsql key=key id=a duration=1 ---- check ---- tableID=1 indexID=1 num contention events: 1 cumulative contention time: 1ns keys: /Table/1/1/"key" contending txns: id=a count=1 non-SQL key "\xff\x12key\x00\x01" contending txns: num contention events: 1 cumulative contention time: 1ns id=a count=1 # Add another event to the same key contending with a different txn and ensure # correct aggregation. ev tableid=1 indexid=1 key=key id=b duration=3 ---- # Add another event on the same non-SQL key contending with a different txn. evnonsql key=key id=b duration=3 ---- check ---- tableID=1 indexID=1 num contention events: 2 cumulative contention time: 4ns keys: /Table/1/1/"key" contending txns: id=b count=1 id=a count=1 non-SQL key "\xff\x12key\x00\x01" contending txns: num contention events: 2 cumulative contention time: 4ns id=b count=1 id=a count=1 # Add txn with an ID that was already encountered. ev tableid=1 indexid=1 key=key id=a duration=5 ---- # Add txn with an ID that was already encountered by non-SQL key. evnonsql key=key id=a duration=5 ---- check ---- tableID=1 indexID=1 num contention events: 3 cumulative contention time: 9ns keys: /Table/1/1/"key" contending txns: id=a count=2 id=b count=1 non-SQL key "\xff\x12key\x00\x01" contending txns: num contention events: 3 cumulative contention time: 9ns id=a count=2 id=b count=1 # Add txn on another key. ev tableid=1 indexid=1 key=keyc id=a duration=11 ---- # Add txn on another non-SQL key. evnonsql key=keyc id=a duration=11 ---- check ---- tableID=1 indexID=1 num contention events: 4 cumulative contention time: 20ns keys: /Table/1/1/"key" contending txns: id=a count=2 id=b count=1 /Table/1/1/"keyc" contending txns: id=a count=1 non-SQL key "\xff\x12key\x00\x01" contending txns: num contention events: 3 cumulative contention time: 9ns id=a count=2 id=b count=1 non-SQL key "\xff\x12keyc\x00\x01" contending txns: num contention events: 1 cumulative contention time: 11ns id=a count=1 # Ensure keys are in sorted order. ev tableid=1 indexid=1 key=keyb id=a duration=1 ---- # Ensure non-SQL keys are in sorted order. evnonsql key=keyb id=a duration=1 ---- check ---- tableID=1 indexID=1 num contention events: 5 cumulative contention time: 21ns keys: /Table/1/1/"key" contending txns: id=a count=2 id=b count=1 /Table/1/1/"keyb" contending txns: id=a count=1 /Table/1/1/"keyc" contending txns: id=a count=1 non-SQL key "\xff\x12key\x00\x01" contending txns: num contention events: 3 cumulative contention time: 9ns id=a count=2 id=b count=1 non-SQL key "\xff\x12keyb\x00\x01" contending txns: num contention events: 1 cumulative contention time: 1ns id=a count=1 non-SQL key "\xff\x12keyc\x00\x01" contending txns: num contention events: 1 cumulative contention time: 11ns id=a count=1 # Add another index on the same table. 
evcheck tableid=1 indexid=2 key=key id=a duration=1 ---- tableID=1 indexID=1 num contention events: 5 cumulative contention time: 21ns keys: /Table/1/1/"key" contending txns: id=a count=2 id=b count=1 /Table/1/1/"keyb" contending txns: id=a count=1 /Table/1/1/"keyc" contending txns: id=a count=1 tableID=1 indexID=2 num contention events: 1 cumulative contention time: 1ns keys: /Table/1/2/"key" contending txns: id=a count=1 non-SQL key "\xff\x12key\x00\x01" contending txns: num contention events: 3 cumulative contention time: 9ns id=a count=2 id=b count=1 non-SQL key "\xff\x12keyb\x00\x01" contending txns: num contention events: 1 cumulative contention time: 1ns id=a count=1 non-SQL key "\xff\x12keyc\x00\x01" contending txns: num contention events: 1 cumulative contention time: 11ns id=a count=1 # Add another table. evcheck tableid=2 indexid=1 key=key id=a duration=1 ---- tableID=1 indexID=1 num contention events: 5 cumulative contention time: 21ns keys: /Table/1/1/"key" contending txns: id=a count=2 id=b count=1 /Table/1/1/"keyb" contending txns: id=a count=1 /Table/1/1/"keyc" contending txns: id=a count=1 tableID=2 indexID=1 num contention events: 1 cumulative contention time: 1ns keys: /Table/2/1/"key" contending txns: id=a count=1 tableID=1 indexID=2 num contention events: 1 cumulative contention time: 1ns keys: /Table/1/2/"key" contending txns: id=a count=1 non-SQL key "\xff\x12key\x00\x01" contending txns: num contention events: 3 cumulative contention time: 9ns id=a count=2 id=b count=1 non-SQL key "\xff\x12keyb\x00\x01" contending txns: num contention events: 1 cumulative contention time: 1ns id=a count=1 non-SQL key "\xff\x12keyc\x00\x01" contending txns: num contention events: 1 cumulative contention time: 11ns id=a count=1 # Test merging the serialized representation of registries. use registry=b ---- # Add an event that overlaps with an event in registry a. ev tableid=1 indexid=1 key=key id=b duration=2 ---- # Add an event on non-SQL key that overlaps with an event in registry a. evnonsql key=key id=b duration=2 ---- check ---- tableID=1 indexID=1 num contention events: 1 cumulative contention time: 2ns keys: /Table/1/1/"key" contending txns: id=b count=1 non-SQL key "\xff\x12key\x00\x01" contending txns: num contention events: 1 cumulative contention time: 2ns id=b count=1 # Add an event that overlaps with a key but not with a txnID in registry a. ev tableid=1 indexid=1 key=key id=c duration=3 ---- # Add an event on non-SQL key that overlaps with a non-SQL key but not with a # txnID in registry a. evnonsql key=key id=c duration=3 ---- check ---- tableID=1 indexID=1 num contention events: 2 cumulative contention time: 5ns keys: /Table/1/1/"key" contending txns: id=c count=1 id=b count=1 non-SQL key "\xff\x12key\x00\x01" contending txns: num contention events: 2 cumulative contention time: 5ns id=c count=1 id=b count=1 # Add an event that doesn't overlap with events in registry a. ev tableid=1 indexid=1 key=new_key id=b duration=4 ---- # Add an event on non-SQL key that doesn't overlap with events in registry a. 
evnonsql key=new_key id=b duration=4 ---- check ---- tableID=1 indexID=1 num contention events: 3 cumulative contention time: 9ns keys: /Table/1/1/"key" contending txns: id=c count=1 id=b count=1 /Table/1/1/"new_key" contending txns: id=b count=1 non-SQL key "\xff\x12key\x00\x01" contending txns: num contention events: 2 cumulative contention time: 5ns id=c count=1 id=b count=1 non-SQL key "\xff\x12new_key\x00\x01" contending txns: num contention events: 1 cumulative contention time: 4ns id=b count=1 # Add yet another table. evcheck tableid=3 indexid=3 key=key id=c duration=1 ---- tableID=1 indexID=1 num contention events: 3 cumulative contention time: 9ns keys: /Table/1/1/"key" contending txns: id=c count=1 id=b count=1 /Table/1/1/"new_key" contending txns: id=b count=1 tableID=3 indexID=3 num contention events: 1 cumulative contention time: 1ns keys: /Table/3/3/"key" contending txns: id=c count=1 non-SQL key "\xff\x12key\x00\x01" contending txns: num contention events: 2 cumulative contention time: 5ns id=c count=1 id=b count=1 non-SQL key "\xff\x12new_key\x00\x01" contending txns: num contention events: 1 cumulative contention time: 4ns id=b count=1 merge first=a second=b ---- tableID=1 indexID=1 num contention events: 8 cumulative contention time: 30ns keys: /Table/1/1/"key" contending txns: id=a count=2 id=b count=2 id=c count=1 /Table/1/1/"keyb" contending txns: id=a count=1 /Table/1/1/"keyc" contending txns: id=a count=1 /Table/1/1/"new_key" contending txns: id=b count=1 tableID=2 indexID=1 num contention events: 1 cumulative contention time: 1ns keys: /Table/2/1/"key" contending txns: id=a count=1 tableID=1 indexID=2 num contention events: 1 cumulative contention time: 1ns keys: /Table/1/2/"key" contending txns: id=a count=1 tableID=3 indexID=3 num contention events: 1 cumulative contention time: 1ns keys: /Table/3/3/"key" contending txns: id=c count=1 non-SQL key "\xff\x12key\x00\x01" contending txns: num contention events: 5 cumulative contention time: 14ns id=a count=2 id=b count=2 id=c count=1 non-SQL key "\xff\x12keyb\x00\x01" contending txns: num contention events: 1 cumulative contention time: 1ns id=a count=1 non-SQL key "\xff\x12keyc\x00\x01" contending txns: num contention events: 1 cumulative contention time: 11ns id=a count=1 non-SQL key "\xff\x12new_key\x00\x01" contending txns: num contention events: 1 cumulative contention time: 4ns id=b count=1
pkg/sql/contention/testdata/contention_registry
0
https://github.com/cockroachdb/cockroach/commit/977f72e4cc24f5d14424c278d6d6921016c3fc62
[ 0.00017352608847431839, 0.00016996808699332178, 0.0001618462847545743, 0.00017039508384186774, 0.000002182018533858354 ]
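The contention_registry testdata above exercises per-key aggregation of contention events (counts per contending transaction, cumulative contention time) and the merge of two registries' serialized state. The following standalone sketch is only a simplified illustration of that aggregation, not the actual pkg/sql/contention implementation; the registry and keyStats types are invented for this example. The final merge reproduces the "5 events, 14ns, a:2 b:2 c:1" line from the testdata.

package main

import (
	"fmt"
	"time"
)

type keyStats struct {
	numEvents  int
	cumulative time.Duration
	txnCounts  map[string]int // contending txn ID -> count
}

type registry map[string]*keyStats // contended key -> aggregated stats

func (r registry) addEvent(key, txnID string, d time.Duration) {
	s, ok := r[key]
	if !ok {
		s = &keyStats{txnCounts: map[string]int{}}
		r[key] = s
	}
	s.numEvents++
	s.cumulative += d
	s.txnCounts[txnID]++
}

// merge folds other into r, summing event counts, cumulative contention
// time, and per-transaction counts for each key.
func (r registry) merge(other registry) {
	for key, os := range other {
		s, ok := r[key]
		if !ok {
			s = &keyStats{txnCounts: map[string]int{}}
			r[key] = s
		}
		s.numEvents += os.numEvents
		s.cumulative += os.cumulative
		for txn, n := range os.txnCounts {
			s.txnCounts[txn] += n
		}
	}
}

func main() {
	a, b := registry{}, registry{}
	a.addEvent("key", "a", 1)
	a.addEvent("key", "b", 3)
	a.addEvent("key", "a", 5)
	b.addEvent("key", "b", 2)
	b.addEvent("key", "c", 3)
	a.merge(b)
	s := a["key"]
	fmt.Println(s.numEvents, s.cumulative, s.txnCounts) // 5 14ns map[a:2 b:2 c:1]
}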
{ "id": 1, "code_window": [ "\t\t\tresultsByNodeID[nID] = serverpb.DecommissionPreCheckResponse_NodeCheckResult{\n", "\t\t\t\tNodeID: nID,\n", "\t\t\t\tDecommissionReadiness: serverpb.DecommissionPreCheckResponse_UNKNOWN,\n", "\t\t\t\tLivenessStatus: livenessStatus,\n", "\t\t\t}\n", "\t\t} else if livenessStatus == livenesspb.NodeLivenessStatus_DECOMMISSIONED {\n", "\t\t\tresultsByNodeID[nID] = serverpb.DecommissionPreCheckResponse_NodeCheckResult{\n", "\t\t\t\tNodeID: nID,\n", "\t\t\t\tDecommissionReadiness: serverpb.DecommissionPreCheckResponse_ALREADY_DECOMMISSIONED,\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/server/admin.go", "type": "replace", "edit_start_line_idx": 1000 }
// Copyright 2021 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt

package telemetryccl

import (
	"testing"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/sql/sqltestutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/skip"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
	"github.com/cockroachdb/cockroach/pkg/util/log"
)

func TestTelemetry(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	skip.UnderRace(t, "takes >1min under race")
	skip.UnderDeadlock(t, "takes >1min under deadlock")

	sqltestutils.TelemetryTest(
		t,
		[]base.TestServerArgs{
			{
				Locality: roachpb.Locality{
					Tiers: []roachpb.Tier{{Key: "region", Value: "us-east-1"}},
				},
			},
			{
				Locality: roachpb.Locality{
					Tiers: []roachpb.Tier{{Key: "region", Value: "ca-central-1"}},
				},
			},
			{
				Locality: roachpb.Locality{
					Tiers: []roachpb.Tier{{Key: "region", Value: "ap-southeast-2"}},
				},
			},
		},
		false, /* testTenant */
	)
}
pkg/ccl/telemetryccl/telemetry_test.go
0
https://github.com/cockroachdb/cockroach/commit/977f72e4cc24f5d14424c278d6d6921016c3fc62
[ 0.0001766678033163771, 0.0001716892293188721, 0.00016520527424290776, 0.0001725480251479894, 0.0000040052400436252356 ]
{ "id": 1, "code_window": [ "\t\t\tresultsByNodeID[nID] = serverpb.DecommissionPreCheckResponse_NodeCheckResult{\n", "\t\t\t\tNodeID: nID,\n", "\t\t\t\tDecommissionReadiness: serverpb.DecommissionPreCheckResponse_UNKNOWN,\n", "\t\t\t\tLivenessStatus: livenessStatus,\n", "\t\t\t}\n", "\t\t} else if livenessStatus == livenesspb.NodeLivenessStatus_DECOMMISSIONED {\n", "\t\t\tresultsByNodeID[nID] = serverpb.DecommissionPreCheckResponse_NodeCheckResult{\n", "\t\t\t\tNodeID: nID,\n", "\t\t\t\tDecommissionReadiness: serverpb.DecommissionPreCheckResponse_ALREADY_DECOMMISSIONED,\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/server/admin.go", "type": "replace", "edit_start_line_idx": 1000 }
// Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package opgen

import (
	"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop"
	"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
)

func init() {
	opRegistry.register((*scpb.IndexComment)(nil),
		toPublic(
			scpb.Status_ABSENT,
			to(scpb.Status_PUBLIC,
				emit(func(this *scpb.IndexComment) *scop.UpsertIndexComment {
					return &scop.UpsertIndexComment{
						TableID: this.TableID,
						IndexID: this.IndexID,
						Comment: this.Comment,
					}
				}),
			),
		),
		toTransientAbsentLikePublic(),
		toAbsent(
			scpb.Status_PUBLIC,
			to(scpb.Status_ABSENT,
				emit(func(this *scpb.IndexComment) *scop.RemoveIndexComment {
					return &scop.RemoveIndexComment{
						TableID: this.TableID,
						IndexID: this.IndexID,
					}
				}),
			),
		),
	)
}
pkg/sql/schemachanger/scplan/internal/opgen/opgen_index_comment.go
0
https://github.com/cockroachdb/cockroach/commit/977f72e4cc24f5d14424c278d6d6921016c3fc62
[ 0.00017842077068053186, 0.0001704571332084015, 0.00016432421398349106, 0.00016929190314840525, 0.000005157967734703561 ]
{ "id": 3, "code_window": [ "\n", "\t\tresultsByNodeID[nID] = serverpb.DecommissionPreCheckResponse_NodeCheckResult{\n", "\t\t\tNodeID: nID,\n", "\t\t\tDecommissionReadiness: readiness,\n", "\t\t\tLivenessStatus: livenessStatusByNodeID[nID],\n", "\t\t\tReplicaCount: int64(numReplicas),\n", "\t\t\tCheckedRanges: rangeCheckErrsByNode[nID],\n", "\t\t}\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/server/admin.go", "type": "replace", "edit_start_line_idx": 1051 }
// Copyright 2016 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. syntax = "proto3"; package cockroach.server.serverpb; option go_package = "github.com/cockroachdb/cockroach/pkg/server/serverpb"; import "config/zonepb/zone.proto"; import "util/tracing/tracingpb/tracing.proto"; import "jobs/jobspb/jobs.proto"; import "server/serverpb/status.proto"; import "storage/enginepb/mvcc.proto"; import "kv/kvserver/liveness/livenesspb/liveness.proto"; import "kv/kvserver/loqrecovery/loqrecoverypb/recovery.proto"; import "kv/kvserver/kvserverpb/range_log.proto"; import "kv/kvpb/api.proto"; import "roachpb/metadata.proto"; import "roachpb/data.proto"; import "ts/catalog/chart_catalog.proto"; import "util/metric/metric.proto"; import "util/tracing/tracingpb/recorded_span.proto"; import "gogoproto/gogo.proto"; import "google/api/annotations.proto"; import "google/protobuf/timestamp.proto"; // ZoneConfigurationLevel indicates, for objects with a Zone Configuration, // the object level at which the configuration is defined. This is needed // because objects without a specifically indicated Zone Configuration will // inherit the configuration of their "parent". enum ZoneConfigurationLevel { UNKNOWN = 0; // CLUSTER indicates that this object uses the cluster default Zone Configuration. CLUSTER = 1; // DATABASE indicates that this object uses a database-level Zone Configuration. DATABASE = 2; // TABLE indicates that this object uses a table-level Zone Configuration. TABLE = 3; } // DatabasesRequest requests a list of databases. message DatabasesRequest { } // DatabasesResponse contains a list of databases. message DatabasesResponse { repeated string databases = 1; } // DatabaseDetailsRequest requests detailed information about the specified // database message DatabaseDetailsRequest { // database is the name of the database we are querying. string database = 1; // Setting this flag includes a computationally-expensive stats field // in the response. bool include_stats = 2; } // DatabaseDetailsResponse contains grant information, table names, // zone configuration, and size statistics for a database. message DatabaseDetailsResponse { message Grant { // user is the user that this grant applies to. string user = 1; // privileges are the abilities this grant gives to the user. repeated string privileges = 2; } message Stats { // A table which exists in the database, but for which we could not load stats // during this request. message MissingTable { // The name of the table for which we could not load stats. string name = 1; // The error message that resulted when the request for this table failed. string error_message = 2; } // A list of tables that exist in the database, but for which stats could // not be loaded due to failures during this request. repeated MissingTable missing_tables = 1; // The number of ranges, as determined from a query of range meta keys, // across all tables. int64 range_count = 2; // An approximation of the disk space (in bytes) used for all replicas // of all tables across the cluster. uint64 approximate_disk_bytes = 3; // node_ids is the ordered list of node ids on which data is stored. 
repeated int32 node_ids = 4 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; int32 num_index_recommendations = 5; } // grants are the results of SHOW GRANTS for this database. repeated Grant grants = 1 [(gogoproto.nullable) = false]; // table_names contains the names of all tables in this database. Note that // all responses will be schema-qualified (schema.table) and that every schema // or table that contains a "sql unsafe character" such as uppercase letters // or dots will be surrounded with double quotes, such as "naughty schema".table. repeated string table_names = 2; // descriptor_id is an identifier used to uniquely identify this database. int64 descriptor_id = 3 [(gogoproto.customname) = "DescriptorID"]; // The zone configuration in effect for this database. cockroach.config.zonepb.ZoneConfig zone_config = 4 [(gogoproto.nullable) = false]; // The level at which this object's zone configuration is set. ZoneConfigurationLevel zone_config_level = 5; // Size information about the database, present only when explicitly requested. Stats stats = 6; } // TableDetailsRequest is a request for detailed information about a table. message TableDetailsRequest { // database is the name of the database that contains the table we're // interested in. string database = 1; // table is the name of the table that we're querying. Table may be // schema-qualified (schema.table) and each name component that contains // sql unsafe characters such as . or uppercase letters must be surrounded // in double quotes like "naughty schema".table. string table = 2; } // TableDetailsResponse contains grants, column names, and indexes for // a table. message TableDetailsResponse { // Grant is an entry from SHOW GRANTS. message Grant { // user is the user that this grant applies to. string user = 1; // privileges are the abilities this grant gives to the user. repeated string privileges = 2; } message Column { // name is the name of the column. string name = 1; // type is the SQL type (INT, STRING, etc.) of this column. string type = 2; // nullable is whether this column can contain NULL. bool nullable = 3; // default_value is the default value of this column. string default_value = 4; // generation_expression is the generator expression if the column is computed. string generation_expression = 5; // hidden is whether this column is hidden. bool hidden = 6; } message Index { // name is the name of this index. string name = 1; // unique is whether this a unique index (i.e. CREATE UNIQUE INDEX). bool unique = 2; // seq is an internal variable that's passed along. int64 seq = 3; // column is the column that this index indexes. string column = 4; // direction is either "ASC" (ascending) or "DESC" (descending). string direction = 5; // storing is an internal variable that's passed along. bool storing = 6; // implicit is an internal variable that's passed along. bool implicit = 7; } repeated Grant grants = 1 [(gogoproto.nullable) = false]; repeated Column columns = 2 [(gogoproto.nullable) = false]; repeated Index indexes = 3 [(gogoproto.nullable) = false]; // range_count is the size of the table in ranges. This provides a rough // estimate of the storage requirements for the table. // TODO(mrtracy): The TableStats method also returns a range_count field which // is more accurate than this one; TableDetails calculates this number using // a potentially faster method that is subject to cache staleness. 
We should // consider removing or renaming this field to reflect that difference. See // GitHub issue #5435 for more information. int64 range_count = 4; // create_table_statement is the output of "SHOW CREATE" for this table; // it is a SQL statement that would re-create the table's current schema if // executed. string create_table_statement = 5; // The zone configuration in effect for this table. cockroach.config.zonepb.ZoneConfig zone_config = 6 [(gogoproto.nullable) = false]; // The level at which this object's zone configuration is set. ZoneConfigurationLevel zone_config_level = 7; // descriptor_id is an identifier used to uniquely identify this table. int64 descriptor_id = 8 [(gogoproto.customname) = "DescriptorID"]; // configure_zone_statement is the output of "SHOW ZONE CONFIGURATION FOR TABLE" // for this table. It is a SQL statement that would re-configure the table's current // zone if executed. string configure_zone_statement = 9; // stats_last_created_at is the time at which statistics were last created. google.protobuf.Timestamp stats_last_created_at = 10 [(gogoproto.stdtime) = true]; // has_index_recommendations notifies if the there are index recommendations // on this table. bool has_index_recommendations = 11; // data_total_bytes is the size in bytes of live and non-live data on the table. int64 data_total_bytes = 12; // data_live_bytes is the size in bytes of live (non MVCC) data on the table. int64 data_live_bytes = 13; // data_live_percentage is the percentage of live (non MVCC) data on the table. float data_live_percentage = 14; } // TableStatsRequest is a request for detailed, computationally expensive // information about a table. message TableStatsRequest { // database is the name of the database that contains the table we're // interested in. string database = 1; // table is the name of the table that we're querying. Table may be // schema-qualified (schema.table) and each name component that contains // sql unsafe characters such as . or uppercase letters must be surrounded // in double quotes like "naughty schema".table. string table = 2; } // TableStatsResponse contains detailed, computationally expensive information // about a table. message TableStatsResponse { // range_count is the number of ranges, as determined from a query of range // meta keys. int64 range_count = 1; // replica_count is the number of replicas of any range of this table, as // found by querying nodes which are known to have replicas. When compared // with range_count, this can be used to estimate the current replication // factor of the table. int64 replica_count = 2; // node_count is the number of nodes which contain data for this table, // according to a query of range meta keys. int64 node_count = 3; // stats is the summation of MVCCStats for all replicas of this table // across the cluster. cockroach.storage.enginepb.MVCCStats stats = 4 [(gogoproto.nullable) = false]; // approximate_disk_bytes is an approximation of the disk space (in bytes) // used for all replicas of this table across the cluster. uint64 approximate_disk_bytes = 6; // MissingNode represents information on a node which should contain data // for this table, but could not be contacted during this request. message MissingNode { // The ID of the missing node. string node_id = 1 [(gogoproto.customname) = "NodeID"]; // The error message that resulted when the query sent to this node failed. 
string error_message = 2; } // A list of nodes which should contain data for this table (according to // cluster metadata), but could not be contacted during this request. repeated MissingNode missing_nodes = 5 [(gogoproto.nullable) = false]; // node_ids is the ordered list of node ids on which the table data is stored. repeated int32 node_ids = 7 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; } // NonTableStatsRequest requests statistics on cluster data ranges that do not // belong to SQL tables. message NonTableStatsRequest { } // NonTableStatsResponse returns statistics on various cluster data ranges // that do not belong to SQL tables. The statistics for each range are returned // as a TableStatsResponse. message NonTableStatsResponse { // Information on time series ranges. TableStatsResponse time_series_stats = 1; // Information for remaining (non-table, non-time-series) ranges. TableStatsResponse internal_use_stats = 2; } // UsersRequest requests a list of users. message UsersRequest { } // UsersResponse returns a list of users. message UsersResponse { // User is a CockroachDB user. message User { string username = 1; } // usernames is a list of users for the CockroachDB cluster. repeated User users = 1 [(gogoproto.nullable) = false]; } // EventsRequest is a request for event log entries, optionally filtered // by the specified event type. message EventsRequest { string type = 1; reserved 2; // limit is the total number of results that are retrieved by the query. If // this is omitted or set to 0, the default maximum number of results are // returned. When set to > 0, at most only that number of results are // returned. When set to < 0, an unlimited number of results are returned. int32 limit = 3; // unredacted_events indicates that the values in the events should // not be redacted. The default is to redact, so that older versions // of `cockroach zip` do not see un-redacted values by default. // For good security, this field is only obeyed by the server after // checking that the client of the RPC is an admin user. bool unredacted_events = 4; } // EventsResponse contains a set of event log entries. This is always limited // to the latest N entries (N is enforced in the associated endpoint). message EventsResponse { message Event { // timestamp is the time at which the event occurred. google.protobuf.Timestamp timestamp = 1 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; // event_type is the type of the event (e.g. "create_table", "drop_table". string event_type = 2; reserved 3; // reporting_id is the reporting ID for this event. int64 reporting_id = 4 [(gogoproto.customname) = "ReportingID"]; // info has more detailed information for the event. The contents vary // depending on the event. string info = 5; // unique_id is a unique identifier for this event. bytes unique_id = 6 [(gogoproto.customname) = "UniqueID"]; } repeated Event events = 1 [(gogoproto.nullable) = false]; } // SetUIDataRequest stores the given key/value pairs in the system.ui table. message SetUIDataRequest { // key_values is a map of keys to bytes values. Each key will be stored // with its corresponding value as a separate row in system.ui. map<string, bytes> key_values = 1; } // SetUIDataResponse is currently an empty response. message SetUIDataResponse { } // GETUIDataRequest requests the values for the given keys from the system.ui // table. 
message GetUIDataRequest { repeated string keys = 1; } // GetUIDataResponse contains the requested values and the times at which // the values were last updated. message GetUIDataResponse { message Value { // value is the value of the requested key. bytes value = 1; // last_updated is the time at which the value was last updated. google.protobuf.Timestamp last_updated = 2 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; } // key_values maps keys to their retrieved values. If this doesn't contain a // a requested key, that key was not found. map<string, Value> key_values = 1 [(gogoproto.nullable) = false]; } // ClusterRequest requests metadata for the cluster. message ClusterRequest { } // ClusterResponse contains metadata for the cluster. message ClusterResponse { // The unique ID used to identify this cluster. string cluster_id = 1 [(gogoproto.customname) = "ClusterID"]; // True if diagnostics reporting is enabled for the cluster. bool reporting_enabled = 2; // True if enterprise features are enabled for the cluster. bool enterprise_enabled = 3; } // DrainRequest instructs the receiving node to drain. message DrainRequest { reserved 1; reserved 2; // When true, terminates the process after the server has started draining. // Setting both shutdown and do_drain to false causes // the request to only operate as a probe. // Setting do_drain to false and shutdown to true causes // the server to shut down immediately without // first draining. bool shutdown = 3; // When true, perform the drain phase. See the comment above on // shutdown for an explanation of the interaction between the two. // do_drain is also implied by a non-nil deprecated_probe_indicator. bool do_drain = 4; // node_id is a string so that "local" can be used to specify that no // forwarding is necessary. // For compatibility with v21.2 nodes, an empty node_id is // interpreted as "local". This behavior might be removed // in subsequent versions. string node_id = 5; // When true, more detailed information is logged during the range lease drain phase. bool verbose = 6; } // DrainResponse is the response to a successful DrainRequest. message DrainResponse { // is_draining is set to true iff the server is currently draining. // This is set to true in response to a request where skip_drain // is false; but it can also be set to true in response // to a probe request (!shutdown && skip_drain) if another // drain request has been issued prior or asynchronously. bool is_draining = 2; // drain_remaining_indicator measures, at the time of starting to // process the corresponding drain request, how many actions to // fully drain the node were deemed to be necessary. Some, but not // all, of these actions may already have been carried out by the // time this indicator is received by the client. The client should // issue requests until this indicator first reaches zero, which // indicates that the node is fully drained. // // The API contract is the following: // // - upon a first Drain call with do_drain set, the remaining // indicator will have some value >=0. If >0, it indicates that // drain is pushing state away from the node. (What this state // precisely means is left unspecified for this field. See below // for details.) // // - upon a subsequent Drain call with do_drain set, the remaining // indicator should have reduced in value. 
The drain process does best // effort at shedding state away from the node; hopefully, all the // state is shed away upon the first call and the progress // indicator can be zero as early as the second call. However, // if there was a lot of state to shed, it is possible for // timeout to be encountered upon the first call. In that case, the // second call will do some more work and return a non-zero value // as well. // // - eventually, in an iterated sequence of DrainRequests with // do_drain set, the remaining indicator should reduce to zero. At // that point the client can conclude that no state is left to // shed, and it should be safe to shut down the node with a // DrainRequest with shutdown = true. // // Note that this field is left unpopulated (and thus remains at // zero) for pre-20.1 nodes. A client can recognize this by // observing is_draining to be false after a request with do_drain = // true: the is_draining field is also left unpopulated by pre-20.1 // nodes. uint64 drain_remaining_indicator = 3; // drain_remaining_description is an informal (= not // machine-parsable) string that explains the progress of the drain // process to human eyes. This is intended for use mainly for // troubleshooting. // // The field is only populated if do_drain is true in the // request. string drain_remaining_description = 4; reserved 1; } // DecommissionPreCheckRequest requests that preliminary checks be run to // ensure that the specified node(s) can be decommissioned successfully. message DecommissionPreCheckRequest { repeated int32 node_ids = 1 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // The maximum number of ranges for which to report errors. int32 num_replica_report = 2; // If true, all ranges on the checked nodes must only need replacement or // removal for decommissioning. bool strict_readiness = 3; // If true, collect traces for each range checked. // Requires num_replica_report > 0. bool collect_traces = 4; } // DecommissionPreCheckResponse returns the number of replicas that encountered // errors when running preliminary decommissioning checks, as well as the // associated error messages and traces, for each node. message DecommissionPreCheckResponse { enum NodeReadiness { option (gogoproto.goproto_enum_stringer) = false; UNKNOWN = 0; READY = 1; ALREADY_DECOMMISSIONED = 2; ALLOCATION_ERRORS = 3; } // The result of checking a range's readiness for the decommission. message RangeCheckResult { int32 range_id = 1 [ (gogoproto.customname) = "RangeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.RangeID"]; // The action determined by the allocator that is needed for the range. string action = 2; // All trace events collected while checking the range. repeated TraceEvent events = 3; // The error message from the allocator's processing, if any. string error = 4; } // The result of checking a single node's readiness for decommission. message NodeCheckResult { int32 node_id = 1 [ (gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // The node's decommission readiness status. NodeReadiness decommission_readiness = 2; // The liveness status of the given node. kv.kvserver.liveness.livenesspb.NodeLivenessStatus liveness_status = 3; // The number of total replicas on the node, computed by scanning range // descriptors. 
int64 replica_count = 4; // The details and recorded traces from preprocessing each range with a // replica on the checked nodes that resulted in error, up to the maximum // specified in the request. repeated RangeCheckResult checked_ranges = 5 [(gogoproto.nullable) = false]; } // Status of the preliminary decommission checks across nodes. repeated NodeCheckResult checked_nodes = 1 [(gogoproto.nullable) = false]; } // DecommissionStatusRequest requests the decommissioning status for the // specified or, if none are specified, all nodes. message DecommissionStatusRequest { repeated int32 node_ids = 1 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // The number of decommissioning replicas to be reported. int32 num_replica_report = 2; } // DecommissionRequest requests the server to set the membership status on // all nodes specified by NodeIDs to the value of TargetMembership. // // If no NodeIDs are given, it targets the recipient node. message DecommissionRequest { repeated int32 node_ids = 1 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; kv.kvserver.liveness.livenesspb.MembershipStatus target_membership = 2; // The number of decommissioning replicas to be reported. int32 num_replica_report = 3; } // DecommissionStatusResponse lists decommissioning statuses for a number of NodeIDs. message DecommissionStatusResponse { message Replica { int32 replica_id = 1 [ (gogoproto.customname) = "ReplicaID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.ReplicaID"]; int32 range_id = 2 [ (gogoproto.customname) = "RangeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.RangeID"]; } message Status { int32 node_id = 1 [ (gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; bool is_live = 2; // The number of replicas on the node, computed by scanning meta2 ranges. int64 replica_count = 3; // The membership status of the given node. kv.kvserver.liveness.livenesspb.MembershipStatus membership = 4; bool draining = 5; // Decommissioning replicas on the given node to be reported. // How many replicas are reported is determined by what was specified in the // request. repeated Replica reported_replicas = 6; } // Status of all affected nodes. repeated Status status = 2 [(gogoproto.nullable) = false]; } // SettingsRequest inquires what are the current settings in the cluster. message SettingsRequest { // The array of setting names to retrieve. // An empty keys array means "all". repeated string keys = 1; // Indicate whether to see unredacted setting values. // This is opt-in so that a previous version `cockroach zip` // does not start reporting values when this becomes active. // For good security, the server only obeys this after it checks // that the logger-in user has admin privilege. bool unredacted_values = 2; } // SettingsResponse is the response to SettingsRequest. message SettingsResponse { message Value { string value = 1; string type = 2; string description = 3; bool public = 4; google.protobuf.Timestamp last_updated = 5 [(gogoproto.nullable) = true, (gogoproto.stdtime) = true]; } map<string, Value> key_values = 1 [(gogoproto.nullable) = false]; } // HealthRequest requests a liveness or readiness check. // // A liveness check is triggered via ready set to false. 
In this mode, // an empty response is returned immediately, that is, the caller merely // learns that the process is running. // // A readiness check (ready == true) is suitable for determining whether // user traffic should be directed at a given node, for example by a load // balancer. In this mode, a successful response is returned only if the // node: // // - is not in the process of shutting down or booting up (including // waiting for cluster bootstrap); // - is regarded as healthy by the cluster via the recent broadcast of // a liveness beacon. Absent either of these conditions, an error // code will result. // // API: PUBLIC message HealthRequest { // ready specifies whether the client wants to know whether the // target node is ready to receive traffic. If a node is unready, an // error will be returned. // API: PUBLIC bool ready = 1; } // HealthResponse is the response to HealthRequest. It currently does not // contain any information. // API: PUBLIC message HealthResponse { } // LivenessRequest requests liveness data for all nodes on the cluster. message LivenessRequest { } // LivenessResponse contains the liveness status of each node on the cluster. message LivenessResponse { repeated kv.kvserver.liveness.livenesspb.Liveness livenesses = 1 [(gogoproto.nullable) = false]; map<int32, kv.kvserver.liveness.livenesspb.NodeLivenessStatus> statuses = 2 [ (gogoproto.nullable) = false, (gogoproto.castkey) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID" ]; } // JobsRequest requests system job information of the given status and type. message JobsRequest { int32 limit = 1; string status = 2; cockroach.sql.jobs.jobspb.Type type = 3; } // JobsResponse contains the job record for each matching job. message JobsResponse { repeated JobResponse jobs = 1 [(gogoproto.nullable) = false]; google.protobuf.Timestamp earliest_retained_time = 2 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; } // JobRequest requests system job information for the given job_id. message JobRequest { int64 job_id = 1; } // JobResponse contains the job record for a job. message JobResponse { int64 id = 1 [(gogoproto.customname) = "ID"]; string type = 2; string description = 3; string statement = 16; string username = 4; repeated uint32 descriptor_ids = 5 [ (gogoproto.customname) = "DescriptorIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID" ]; string status = 6; google.protobuf.Timestamp created = 7 [(gogoproto.stdtime) = true]; google.protobuf.Timestamp started = 8 [(gogoproto.stdtime) = true]; google.protobuf.Timestamp finished = 9 [(gogoproto.stdtime) = true]; google.protobuf.Timestamp modified = 10 [(gogoproto.stdtime) = true]; float fraction_completed = 11; string error = 12; // highwater_timestamp is the highwater timestamp returned as normal // timestamp. This is appropriate for display to humans. google.protobuf.Timestamp highwater_timestamp = 13 [(gogoproto.stdtime) = true]; // highwater_decimal is the highwater timestamp in the proprietary decimal // form used by logical timestamps internally. This is appropriate to pass // to a "AS OF SYSTEM TIME" SQL statement. string highwater_decimal = 14; string running_status = 15; google.protobuf.Timestamp last_run = 17 [(gogoproto.stdtime) = true]; google.protobuf.Timestamp next_run = 18 [(gogoproto.stdtime) = true]; int64 num_runs = 19; // ExecutionFailure corresponds to a failure to execute the job with the // attempt starting at start and ending at end. 
message ExecutionFailure { // Status is the status of the job during the execution. string status = 1; // Start is the time at which the execution started. google.protobuf.Timestamp start = 2 [(gogoproto.stdtime) = true]; // End is the time at which the error occurred. google.protobuf.Timestamp end = 3 [(gogoproto.stdtime) = true]; // Error is the error which occurred. string error = 4; } // ExecutionFailures is a log of execution failures of the job. It is not // guaranteed to contain all execution failures and some execution failures // may not contain an error or end. repeated ExecutionFailure execution_failures = 20; // coordinator_id identifies the node coordinating the job. This value will // only be present for jobs that are currently running or recently ran. int64 coordinator_id = 21 [(gogoproto.customname) = "CoordinatorID"]; } // LocationsRequest requests system locality location information. message LocationsRequest { } // JobsResponse contains the job record for each matching job. message LocationsResponse { message Location { string locality_key = 1; string locality_value = 2; double latitude = 3; double longitude = 4; } repeated Location locations = 1 [(gogoproto.nullable) = false]; } // RangeLogRequest request the history of a range from the range log. message RangeLogRequest { // TODO(tamird): use [(gogoproto.customname) = "RangeID"] below. Need to // figure out how to teach grpc-gateway about custom names. // If RangeID is 0, returns range log history without filtering by range. int64 range_id = 1; // limit is the total number of results that are retrieved by the query. If // this is omitted or set to 0, the default maximum number of results are // returned. When set to > 0, at most only that number of results are // returned. When set to < 0, an unlimited number of results are returned. int32 limit = 2; } // RangeLogResponse contains a list of entries from the range log table. message RangeLogResponse { // To avoid porting the pretty printing of keys and descriptors to // javascript, they will be precomputed on the serverside. message PrettyInfo { string updated_desc = 1; string new_desc = 2; string added_replica = 3; string removed_replica = 4; string reason = 5; string details = 6; } message Event { cockroach.kv.kvserver.storagepb.RangeLogEvent event = 1 [(gogoproto.nullable) = false]; PrettyInfo pretty_info = 2 [(gogoproto.nullable) = false]; } reserved 1; // Previously used. repeated Event events = 2 [(gogoproto.nullable) = false]; } // QueryPlanRequest requests the query plans for a SQL string. message QueryPlanRequest { // query is the SQL query string. string query = 1; } // QueryPlanResponse contains the query plans for a SQL string (currently only // the distsql physical query plan). message QueryPlanResponse { string distsql_physical_query_plan = 1 [(gogoproto.customname) = "DistSQLPhysicalQueryPlan"]; } message DataDistributionRequest { } message DataDistributionResponse { message ZoneConfig { // target is the object the zone config applies to, e.g. "DATABASE db" or // "PARTITION north_america OF TABLE users". string target = 1; config.zonepb.ZoneConfig config = 2 [(gogoproto.nullable) = false]; reserved 3; // config_sql is the SQL representation of config. 
string config_sql = 4 [(gogoproto.customname) = "ConfigSQL"]; } message TableInfo { map<int32, int64> replica_count_by_node_id = 1 [(gogoproto.castkey) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; int64 zone_config_id = 2; google.protobuf.Timestamp dropped_at = 3 [(gogoproto.stdtime) = true]; } message DatabaseInfo { // By table name. map<string, TableInfo> table_info = 1 [(gogoproto.nullable) = false]; } // By database name. map<string, DatabaseInfo> database_info = 1 [(gogoproto.nullable) = false]; reserved 2; // By zone name. map<string, ZoneConfig> zone_configs = 3 [(gogoproto.nullable) = false]; } // MetricMetadataRequest requests metadata for all metrics. message MetricMetadataRequest { } // MetricMetadataResponse contains the metadata for all metrics. message MetricMetadataResponse { map<string, cockroach.util.metric.Metadata> metadata = 1 [(gogoproto.nullable) = false]; } message EnqueueRangeRequest { // The node on which the queue should process the range. If node_id is 0, // the request will be forwarded to all other nodes. int32 node_id = 1 [(gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // The name of the replica queue to run the range through. Matched against // each queue's name field. See the implementation of baseQueue for details. string queue = 2; // The ID of the range to run through the queue. int32 range_id = 3 [(gogoproto.customname) = "RangeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.RangeID"]; // If set, run the queue's process method without first checking whether the // replica should be processed by calling shouldQueue. bool skip_should_queue = 4; } message EnqueueRangeResponse { message Details { int32 node_id = 1 [(gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // All trace events collected while processing the range in the queue. repeated TraceEvent events = 2; // The error message from the queue's processing, if any. string error = 3; } repeated Details details = 1; } // ChartCatalogRequest requests returns a catalog of Admin UI charts. message ChartCatalogRequest { } // ChartCatalogResponse returns a catalog of Admin UI charts useful for debugging. message ChartCatalogResponse { repeated cockroach.ts.catalog.ChartSection catalog = 1 [(gogoproto.nullable) = false]; } // CARequest requests the CA cert anchoring this service. message CARequest { } // CAResponse contains a PEM encoded copy of the CA cert for this service. message CAResponse { bytes ca_cert = 1; } // CertBundleRequest requests the bundle of initialization CAs for a new node. // It provides authentication in the form of a joinToken containing a // sharedSecret. message CertBundleRequest { string token_id = 1 [(gogoproto.customname) = "TokenID"]; bytes shared_secret = 2; } // CertBundleResponse contains a copy of all CAs needed to initialize TLS for // a new node. message CertBundleResponse { bytes bundle = 1; } message RecoveryCollectReplicaInfoRequest {} // RecoveryCollectReplicaRestartNodeStream is sent by collector node to client // if it experiences a transient failure collecting data from one of the nodes. // This message instructs client to drop any data that it collected locally // for specified node as streaming for this node would be restarted. // This mechanism is needed to avoid restarting the whole collection procedure // in large cluster if one of the nodes fails transiently. 
message RecoveryCollectReplicaRestartNodeStream { int32 node_id = 1 [ (gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; } message RecoveryCollectReplicaInfoResponse { oneof info { roachpb.RangeDescriptor range_descriptor = 1; cockroach.kv.kvserver.loqrecovery.loqrecoverypb.ReplicaInfo replica_info = 2; RecoveryCollectReplicaRestartNodeStream node_stream_restarted = 3; cockroach.kv.kvserver.loqrecovery.loqrecoverypb.ClusterMetadata metadata = 4; } } message RecoveryCollectLocalReplicaInfoRequest { } message RecoveryCollectLocalReplicaInfoResponse { cockroach.kv.kvserver.loqrecovery.loqrecoverypb.ReplicaInfo replica_info = 1; } message RecoveryStagePlanRequest { // Plan is replica update plan to stage for application on next restart. Plan // could be empty in that case existing plan is removed if present. cockroach.kv.kvserver.loqrecovery.loqrecoverypb.ReplicaUpdatePlan plan = 1; // If all nodes is true, then receiver should act as a coordinator and perform // a fan-out to stage plan on all nodes of the cluster. bool all_nodes = 2; // ForcePlan tells receiver to ignore any plan already staged on the node if it // is present and replace it with new plan (including empty one). bool force_plan = 3; // ForceLocalInternalVersion tells server to update internal component of plan // version to the one of active cluster version. This option needs to be set // if target cluster is stuck in recovery where only part of nodes were // successfully migrated. bool force_local_internal_version = 4; } message RecoveryStagePlanResponse { // Errors contain error messages happened during plan staging. repeated string errors = 1; } message RecoveryNodeStatusRequest { } message RecoveryNodeStatusResponse { cockroach.kv.kvserver.loqrecovery.loqrecoverypb.NodeRecoveryStatus status = 1 [ (gogoproto.nullable) = false]; } message RecoveryVerifyRequest { // PlanID is ID of the plan to verify. bytes plan_id = 1 [ (gogoproto.customname) = "PendingPlanID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/uuid.UUID"]; // DecommissionedNodeIDs is a set of nodes that should be marked as decommissioned in // the cluster when loss of quorum recovery successfully applies. repeated int32 decommissioned_node_ids = 2 [(gogoproto.customname) = "DecommissionedNodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // MaxReportedRanges is the maximum number of failed ranges to report. // If more unhealthy ranges are found, error will be returned alongside range // to indicate that ranges were cut short. int32 max_reported_ranges = 3; } message RecoveryVerifyResponse { message UnavailableRanges { // Ranges contains descriptors of ranges that failed health check. // If there are too many ranges to report, error would contain relevant // message. repeated cockroach.kv.kvserver.loqrecovery.loqrecoverypb.RangeRecoveryStatus ranges = 1 [ (gogoproto.nullable) = false]; // Error contains an optional error if ranges validation can't complete. string error = 2; } // Statuses contain a list of recovery statuses of nodes updated during recovery. It // also contains nodes that were expected to be live (not decommissioned by recovery) // but failed to return status response. repeated cockroach.kv.kvserver.loqrecovery.loqrecoverypb.NodeRecoveryStatus statuses = 1 [ (gogoproto.nullable) = false]; // UnavailableRanges contains information about ranges that failed health check. 
UnavailableRanges unavailable_ranges = 2 [(gogoproto.nullable) = false]; // DecommissionedNodeStatuses contains a map of requested IDs with their // corresponding liveness statuses. map<int32, kv.kvserver.liveness.livenesspb.MembershipStatus> decommissioned_node_statuses = 3 [ (gogoproto.castkey) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; } // Admin is the gRPC API for the admin UI. Through grpc-gateway, we offer // REST-style HTTP endpoints that locally proxy to the gRPC endpoints. service Admin { rpc RequestCA(CARequest) returns (CAResponse) { option (google.api.http) = { get : "/_join/v1/ca" }; } rpc RequestCertBundle(CertBundleRequest) returns (CertBundleResponse) { option (google.api.http) = { get : "/_join/v1/requestbundle" }; } // URL: /_admin/v1/users rpc Users(UsersRequest) returns (UsersResponse) { option (google.api.http) = { get: "/_admin/v1/users" }; } // URL: /_admin/v1/databases rpc Databases(DatabasesRequest) returns (DatabasesResponse) { option (google.api.http) = { get: "/_admin/v1/databases" }; } // Example URL: /_admin/v1/databases/system rpc DatabaseDetails(DatabaseDetailsRequest) returns (DatabaseDetailsResponse) { option (google.api.http) = { get: "/_admin/v1/databases/{database}" }; } // Example URL: /_admin/v1/databases/system/tables/ui rpc TableDetails(TableDetailsRequest) returns (TableDetailsResponse) { option (google.api.http) = { get: "/_admin/v1/databases/{database}/tables/{table}" }; } // Example URL: /_admin/v1/databases/system/tables/ui/stats rpc TableStats(TableStatsRequest) returns (TableStatsResponse) { option (google.api.http) = { get: "/_admin/v1/databases/{database}/tables/{table}/stats" }; } // Example URL: /_admin/v1/nontablestats rpc NonTableStats(NonTableStatsRequest) returns (NonTableStatsResponse) { option (google.api.http) = { get: "/_admin/v1/nontablestats" }; } // Example URLs: // Example URLs: // - /_admin/v1/events // - /_admin/v1/events?limit=100 // - /_admin/v1/events?type=create_table // - /_admin/v1/events?type=create_table&limit=100 rpc Events(EventsRequest) returns (EventsResponse) { option (google.api.http) = { get: "/_admin/v1/events" }; } // This requires a POST. Because of the libraries we're using, the POST body // must be in the following format: // // {"key_values": // { "key1": "base64_encoded_value1"}, // ... // { "keyN": "base64_encoded_valueN"}, // } // // Note that all keys are quoted strings and that all values are base64- // encoded. // // Together, SetUIData and GetUIData provide access to a "cookie jar" for the // admin UI. The structure of the underlying data is meant to be opaque to the // server. rpc SetUIData(SetUIDataRequest) returns (SetUIDataResponse) { option (google.api.http) = { post: "/_admin/v1/uidata" body: "*" }; } // Example URLs: // - /_admin/v1/uidata?keys=MYKEY // - /_admin/v1/uidata?keys=MYKEY1&keys=MYKEY2 // // Yes, it's a little odd that the query parameter is named "keys" instead of // "key". I would've preferred that the URL parameter be named "key". However, // it's clearer for the protobuf field to be named "keys," which makes the URL // parameter "keys" as well. rpc GetUIData(GetUIDataRequest) returns (GetUIDataResponse) { option (google.api.http) = { get: "/_admin/v1/uidata" }; } // Cluster returns metadata for the cluster. rpc Cluster(ClusterRequest) returns (ClusterResponse) { option (google.api.http) = { get: "/_admin/v1/cluster" }; } // Settings returns the cluster-wide settings for the cluster. 
rpc Settings(SettingsRequest) returns (SettingsResponse) { option (google.api.http) = { get: "/_admin/v1/settings" }; } // Health returns liveness for the node target of the request. // API: PUBLIC rpc Health(HealthRequest) returns (HealthResponse) { option (google.api.http) = { get: "/_admin/v1/health" additional_bindings {get : "/health"} }; } // Liveness returns the liveness state of all nodes on the cluster. rpc Liveness(LivenessRequest) returns (LivenessResponse) { option (google.api.http) = { get: "/_admin/v1/liveness" }; } // Jobs returns the job records for all jobs of the given status and type. rpc Jobs(JobsRequest) returns (JobsResponse) { option (google.api.http) = { get: "/_admin/v1/jobs" }; } // Job returns the job record for the job of the given job_id. rpc Job(JobRequest) returns (JobResponse) { option (google.api.http) = { get: "/_admin/v1/jobs/{job_id}" }; } // Locations returns the locality location records. rpc Locations(LocationsRequest) returns (LocationsResponse) { option (google.api.http) = { get: "/_admin/v1/locations" }; } // QueryPlan returns the query plans for a SQL string. rpc QueryPlan(QueryPlanRequest) returns (QueryPlanResponse) { option (google.api.http) = { get: "/_admin/v1/queryplan" }; } // Drain puts the node into the specified drain mode(s) and optionally // instructs the process to terminate. // We do not expose this via HTTP unless we have a way to authenticate // + authorize streaming RPC connections. See #42567. rpc Drain(DrainRequest) returns (stream DrainResponse) { } // DecommissionPreCheck requests that the server execute preliminary checks // to evaluate the possibility of successfully decommissioning a given node. rpc DecommissionPreCheck(DecommissionPreCheckRequest) returns (DecommissionPreCheckResponse) { } // Decommission puts the node(s) into the specified decommissioning state. // If this ever becomes exposed via HTTP, ensure that it performs // authorization. See #42567. rpc Decommission(DecommissionRequest) returns (DecommissionStatusResponse) { } // DecommissionStatus retrieves the decommissioning status of the specified nodes. // If this ever becomes exposed via HTTP, ensure that it performs // authorization. See #42567. rpc DecommissionStatus(DecommissionStatusRequest) returns (DecommissionStatusResponse) { } // URL: /_admin/v1/rangelog // URL: /_admin/v1/rangelog?limit=100 // URL: /_admin/v1/rangelog/1 // URL: /_admin/v1/rangelog/1?limit=100 rpc RangeLog(RangeLogRequest) returns (RangeLogResponse) { option (google.api.http) = { get: "/_admin/v1/rangelog" additional_bindings { get: "/_admin/v1/rangelog/{range_id}" } }; } rpc DataDistribution(DataDistributionRequest) returns (DataDistributionResponse) { option (google.api.http) = { get: "/_admin/v1/data_distribution" }; } // URL: /_admin/v1/metricmetadata rpc AllMetricMetadata(MetricMetadataRequest) returns (MetricMetadataResponse) { option (google.api.http) = { get: "/_admin/v1/metricmetadata" }; } // URL: /_admin/v1/chartcatalog rpc ChartCatalog(ChartCatalogRequest) returns (ChartCatalogResponse) { option (google.api.http) = { get: "/_admin/v1/chartcatalog" }; } // EnqueueRange runs the specified range through the specified queue on the // range's leaseholder store, returning the detailed trace and error // information from doing so. Parameters must be provided in the body of the // POST request. 
// For example: // // { // "queue": "raftlog", // "rangeId": 10 // } rpc EnqueueRange(EnqueueRangeRequest) returns (EnqueueRangeResponse) { option (google.api.http) = { post: "/_admin/v1/enqueue_range" body : "*" }; } // SendKVBatch proxies the given BatchRequest into KV, returning the // response. It is used by the CLI `debug send-kv-batch` command. rpc SendKVBatch(roachpb.BatchRequest) returns (roachpb.BatchResponse) { } // ListTracingSnapshots retrieves the list of snapshots of the Active Spans // Registry that the node currently has in memory. A new snapshot can be // captured with TakeTracingSnapshots. rpc ListTracingSnapshots(ListTracingSnapshotsRequest) returns (ListTracingSnapshotsResponse) { option (google.api.http) = { get: "/_admin/v1/trace_snapshots" }; } // TakeTracingSnapshot captures a new snapshot of the Active Spans Registry. // The new snapshot is returned, and also made available through // ListTracingSnapshots. rpc TakeTracingSnapshot(TakeTracingSnapshotRequest) returns (TakeTracingSnapshotResponse) { option (google.api.http) = { post: "/_admin/v1/trace_snapshots" }; } // GetTracingSnapshot returns a snapshot of the tracing spans in the active // spans registry previously generated through TakeTracingSnapshots. rpc GetTracingSnapshot(GetTracingSnapshotRequest) returns (GetTracingSnapshotResponse) { option (google.api.http) = { get: "/_admin/v1/trace_snapshots/{snapshot_id}" }; } // GetTrace returns the trace with a specified ID. Depending on the request, // the trace is returned either from a snapshot that was previously taken, or // directly from the active spans registry. rpc GetTrace(GetTraceRequest) returns (GetTraceResponse) { option (google.api.http) = { post: "/_admin/v1/traces" body: "*" }; } // SetTraceRecordingType sets the recording mode of all or some of the spans // in a trace. rpc SetTraceRecordingType(SetTraceRecordingTypeRequest) returns (SetTraceRecordingTypeResponse) { option (google.api.http) = { post: "/_admin/v1/settracerecordingtype" body: "*" }; } // RecoveryCollectReplicaInfo retrieves information about: // 1. range descriptors contained in cluster meta ranges if meta ranges // are readable; // 2. replica information from all live nodes that have connection to // the target node. rpc RecoveryCollectReplicaInfo(RecoveryCollectReplicaInfoRequest) returns (stream RecoveryCollectReplicaInfoResponse) {} // RecoveryCollectLocalReplicaInfo retrieve information about all local // replicas in all stores on the node. rpc RecoveryCollectLocalReplicaInfo(RecoveryCollectLocalReplicaInfoRequest) returns (stream RecoveryCollectLocalReplicaInfoResponse) {} // RecoveryStagePlan stages recovery plan on target or all nodes in cluster // depending on request content and marks nodes deleted in the plan as // decommissioned in each node's local node tombstone storage. rpc RecoveryStagePlan(RecoveryStagePlanRequest) returns (RecoveryStagePlanResponse) {} // RecoveryNodeStatus retrieves loss of quorum recovery status of a single // node. rpc RecoveryNodeStatus(RecoveryNodeStatusRequest) returns (RecoveryNodeStatusResponse) {} // RecoveryVerify verifies that recovery plan is applied on all necessary // nodes, ranges are available and nodes removed in plan are marked as // decommissioned. rpc RecoveryVerify(RecoveryVerifyRequest) returns (RecoveryVerifyResponse) {} // ListTenants returns a list of active tenants in the cluster. 
rpc ListTenants(ListTenantsRequest) returns (ListTenantsResponse) { option (google.api.http) = { get: "/_admin/v1/tenants" }; } } message ListTenantsRequest{} message ListTenantsResponse { repeated Tenant tenants = 1; } message Tenant { roachpb.TenantID tenant_id = 1; string tenant_name = 2; string sql_addr = 3; string rpc_addr = 4; } message ListTracingSnapshotsRequest {} message ListTracingSnapshotsResponse { repeated SnapshotInfo snapshots = 1; } message SnapshotInfo { // SnapshotID identifies a specific snapshot which can be requested via a // GetTracingSnapshotRequest. Negative IDs are used for "automatic" snapshots. int64 snapshot_id = 1 [(gogoproto.customname) = "SnapshotID"]; google.protobuf.Timestamp captured_at = 2 [(gogoproto.stdtime) = true]; } message TakeTracingSnapshotRequest {} message TakeTracingSnapshotResponse { SnapshotInfo snapshot = 1; } message GetTracingSnapshotRequest { // SnapshotId indicates which snapshot is requested. ID may be negative when // requesting an "automatic" snapshot; see ListTracingSnapshotsResponse. int64 snapshot_id = 1; } message GetTracingSnapshotResponse { TracingSnapshot snapshot = 1; } // GetTrace represents the request of the GetTrace RPC. message GetTraceRequest { // If a snapshot is specified, the trace information is returned from that // snapshot. If a snapshot is not specified, information about currently // opened spans is returned from the active spans registry. int64 snapshot_id = 1 [(gogoproto.customname) = "SnapshotID"]; uint64 trace_id = 2 [(gogoproto.nullable)=false, (gogoproto.customname) = "TraceID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.TraceID"]; util.tracing.tracingpb.RecordingMode recording_type = 3; } // GetTrace represents the response to the GetTrace RPC. message GetTraceResponse { // snapshot_id identifies the snapshot that the trace was retrieved from. If // 0, the trace was not retrieved from a registry, but directly from the // active spans registry. int64 snapshot_id = 1 [(gogoproto.customname) = "SnapshotID"]; uint64 trace_id = 2 [(gogoproto.nullable)=false, (gogoproto.customname) = "TraceID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.TraceID"]; // still_exists is set if any spans from this trace are currently present in // the active spans registry. // // If snapshot_id is 0, still_exists is always set. bool still_exists = 3; // serialized_recording represents the serialization of trace recording. We // return the recording already serialized as formatted string for easy // consumption in the browser. string serialized_recording = 4; } // TracingSnapshot represents a snapshot of the active spans registry, including // all the spans that were open at the time when the snapshot was taken. message TracingSnapshot { int64 snapshot_id = 1 [(gogoproto.customname) = "SnapshotID"]; google.protobuf.Timestamp captured_at = 2 [(gogoproto.stdtime) = true]; repeated TracingSpan spans = 3; // Ideally we'd use int64 to match the goroutine_id type // but unfortunately, the way that grpc-gateway parses // these objects into Javascript results in odd encodings // of Long JS types that are difficult to interact with // as map keys. Thus, we settle for string. map<string, string> stacks = 4; } message NamedOperationMetadata { string name = 1; util.tracing.tracingpb.OperationMetadata metadata = 2 [(gogoproto.nullable) = false]; } // TracingSpan represents a span, in a form slightly processed for the use of // the tracing UI. 
message TracingSpan { string operation = 1; uint64 trace_id = 2 [(gogoproto.customname) = "TraceID"]; uint64 span_id = 3 [(gogoproto.customname) = "SpanID"]; uint64 parent_span_id = 4 [(gogoproto.customname) = "ParentSpanID"]; google.protobuf.Timestamp start = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; uint64 goroutine_id = 6 [(gogoproto.customname) = "GoroutineID"]; repeated SpanTag processed_tags = 7; // current is set if the span is still alive (i.e. still present in the active // spans registry). bool current = 8; // current_recording_mode represents the span's current recording mode. This is // not set if current == false. util.tracing.tracingpb.RecordingMode current_recording_mode = 9; repeated NamedOperationMetadata children_metadata = 10; } // SpanTag represents a tag on a tracing span, in a form processed for the use // of the tracing UI. message SpanTag { string key = 1; string val = 2; string caption = 3; string link = 4; bool hidden = 5; bool highlight = 6; bool inherit = 7; bool inherited = 8; bool propagate_up = 9; bool copied_from_child = 10; repeated ChildSpanTag children = 11; // May be empty. } message ChildSpanTag { string key = 1; string val = 2; } // SetTraceRecordingTypeRequest is the request for SetTraceRecordingType, which // sets the recording mode of all or some of the spans in a trace. message SetTraceRecordingTypeRequest { // TraceID identifies the trace to toggle the recording of. It must always be // specified. uint64 trace_id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "TraceID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.TraceID"]; // SpanID, if not zero, controls which spans in the trace get their recording // mode set. If zero, all spans in the trace are updated. If not zero, only // the respective span and its descendants get updated. uint64 span_id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "SpanID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.SpanID"]; util.tracing.tracingpb.RecordingMode recording_mode = 3; } // SetTraceRecordingTypeRequest is the response for SetTraceRecordingType. message SetTraceRecordingTypeResponse{} // FeatureFlags within this struct are used within back-end/front-end code to show/hide features. message FeatureFlags { // isObservabiliyService is true when the server is an instance of the Observability Service bool is_observability_service = 1; // CanViewKVMetricDashboards is true when the logged in user is able to view KV-level metric dashboards. bool can_view_kv_metric_dashboards = 2; // DisableKVLevelAdvancedDebug is true when the UI should remove options to certain KV-level // debug operations. This is helpful in application tenant contexsts, where these requests // can only return errors since the tenant cannot perform the operations. bool disable_kv_level_advanced_debug = 3; }
pkg/server/serverpb/admin.proto
1
https://github.com/cockroachdb/cockroach/commit/977f72e4cc24f5d14424c278d6d6921016c3fc62
[ 0.02688104659318924, 0.0010579858208075166, 0.00016206265718210489, 0.0001719602005323395, 0.002835372928529978 ]
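The DrainRequest/DrainResponse comments in the admin.proto record above describe an iterative drain contract: a client repeats Drain calls with do_drain set until drain_remaining_indicator first reaches zero, and only then asks the process to shut down. The following is a minimal Go sketch of such a client loop, not part of the record itself; it assumes the generated gRPC client for the Admin service (serverpb.NewAdminClient), an already-established *grpc.ClientConn, and an illustrative package and function name.

package example

import (
	"context"
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/server/serverpb"
	"google.golang.org/grpc"
)

// drainNode repeatedly asks the target node to drain until the server
// reports that no work remains, mirroring the contract documented on
// DrainResponse.drain_remaining_indicator.
func drainNode(ctx context.Context, conn *grpc.ClientConn) error {
	admin := serverpb.NewAdminClient(conn)
	for {
		// Each Drain call performs one round of shedding leases and other
		// state; the response arrives on a server stream.
		stream, err := admin.Drain(ctx, &serverpb.DrainRequest{DoDrain: true})
		if err != nil {
			return err
		}
		resp, err := stream.Recv()
		if err != nil {
			return err
		}
		fmt.Printf("drain progress: %d remaining (%s)\n",
			resp.DrainRemainingIndicator, resp.DrainRemainingDescription)
		// Per the contract, iterate until the indicator first reaches zero.
		if resp.DrainRemainingIndicator == 0 {
			return nil
		}
	}
}

Once the loop exits, a final DrainRequest with shutdown set to true would request process termination; whether that last step is appropriate depends on how the node is being taken down.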
{ "id": 3, "code_window": [ "\n", "\t\tresultsByNodeID[nID] = serverpb.DecommissionPreCheckResponse_NodeCheckResult{\n", "\t\t\tNodeID: nID,\n", "\t\t\tDecommissionReadiness: readiness,\n", "\t\t\tLivenessStatus: livenessStatusByNodeID[nID],\n", "\t\t\tReplicaCount: int64(numReplicas),\n", "\t\t\tCheckedRanges: rangeCheckErrsByNode[nID],\n", "\t\t}\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/server/admin.go", "type": "replace", "edit_start_line_idx": 1051 }
# The directory Mix will write compiled artifacts to. /_build/ # If you run "mix test --cover", coverage assets end up here. /cover/ # The directory Mix downloads your dependencies sources to. /deps/ # Where third-party dependencies like ExDoc output generated docs. /doc/ # Ignore .fetch files in case you like to edit your project deps locally. /.fetch # If the VM crashes, it generates a dump, let's ignore it too. erl_crash.dump # Also ignore archive artifacts (built via "mix archive.build"). *.ez # Ignore package tarball (built via "mix hex.build"). debug-*.tar
pkg/acceptance/testdata/elixir/test_crdb/.gitignore
0
https://github.com/cockroachdb/cockroach/commit/977f72e4cc24f5d14424c278d6d6921016c3fc62
[ 0.00017789941921364516, 0.00017489127640146762, 0.00017256596765946597, 0.00017420845688320696, 0.000002230263589808601 ]
{ "id": 3, "code_window": [ "\n", "\t\tresultsByNodeID[nID] = serverpb.DecommissionPreCheckResponse_NodeCheckResult{\n", "\t\t\tNodeID: nID,\n", "\t\t\tDecommissionReadiness: readiness,\n", "\t\t\tLivenessStatus: livenessStatusByNodeID[nID],\n", "\t\t\tReplicaCount: int64(numReplicas),\n", "\t\t\tCheckedRanges: rangeCheckErrsByNode[nID],\n", "\t\t}\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/server/admin.go", "type": "replace", "edit_start_line_idx": 1051 }
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "prometheus", srcs = ["prometheus.go"], importpath = "github.com/cockroachdb/cockroach/pkg/roachprod/prometheus", visibility = ["//visibility:public"], deps = [ "//pkg/roachprod/install", "//pkg/roachprod/logger", "//pkg/roachprod/vm", "@com_github_cockroachdb_errors//:errors", "@com_github_prometheus_client_golang//api/prometheus/v1:prometheus", "@com_github_prometheus_common//model", "@in_gopkg_yaml_v2//:yaml_v2", ], ) go_test( name = "prometheus_test", srcs = ["prometheus_test.go"], args = ["-test.timeout=295s"], data = glob(["testdata/**"]), embed = [":prometheus"], deps = [ "//pkg/roachprod/install", "//pkg/testutils/datapathutils", "//pkg/testutils/echotest", "@com_github_stretchr_testify//require", ], )
pkg/roachprod/prometheus/BUILD.bazel
0
https://github.com/cockroachdb/cockroach/commit/977f72e4cc24f5d14424c278d6d6921016c3fc62
[ 0.00017651911184657365, 0.00017589675553608686, 0.00017514383944217116, 0.00017596202087588608, 4.997689302399522e-7 ]
{ "id": 3, "code_window": [ "\n", "\t\tresultsByNodeID[nID] = serverpb.DecommissionPreCheckResponse_NodeCheckResult{\n", "\t\t\tNodeID: nID,\n", "\t\t\tDecommissionReadiness: readiness,\n", "\t\t\tLivenessStatus: livenessStatusByNodeID[nID],\n", "\t\t\tReplicaCount: int64(numReplicas),\n", "\t\t\tCheckedRanges: rangeCheckErrsByNode[nID],\n", "\t\t}\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/server/admin.go", "type": "replace", "edit_start_line_idx": 1051 }
// Copyright 2016 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package kvserver import ( "container/list" "context" "fmt" "sync" "unsafe" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" ) const rangeIDChunkSize = 1000 // priorityIDsValue is a placeholder value for raftScheduler.priorityIDs. IntMap // requires an unsafe.Pointer value, but we don't care about the value (only // the key), so we can reuse the same allocation. var priorityIDsValue = unsafe.Pointer(new(bool)) type rangeIDChunk struct { // Valid contents are buf[rd:wr], read at buf[rd], write at buf[wr]. buf [rangeIDChunkSize]roachpb.RangeID rd, wr int } func (c *rangeIDChunk) PushBack(id roachpb.RangeID) bool { if c.WriteCap() == 0 { return false } c.buf[c.wr] = id c.wr++ return true } func (c *rangeIDChunk) PopFront() (roachpb.RangeID, bool) { if c.Len() == 0 { return 0, false } id := c.buf[c.rd] c.rd++ return id, true } func (c *rangeIDChunk) WriteCap() int { return len(c.buf) - c.wr } func (c *rangeIDChunk) Len() int { return c.wr - c.rd } // rangeIDQueue is a chunked queue of range IDs. Instead of a separate list // element for every range ID, it uses a rangeIDChunk to hold many range IDs, // amortizing the allocation/GC cost. Using a chunk queue avoids any copying // that would occur if a slice were used (the copying would occur on slice // reallocation). // // The queue implements a FIFO queueing policy with no prioritization of some // ranges over others. type rangeIDQueue struct { len int chunks list.List } func (q *rangeIDQueue) Push(id roachpb.RangeID) { q.len++ if q.chunks.Len() == 0 || q.back().WriteCap() == 0 { q.chunks.PushBack(&rangeIDChunk{}) } if !q.back().PushBack(id) { panic(fmt.Sprintf( "unable to push rangeID to chunk: len=%d, cap=%d", q.back().Len(), q.back().WriteCap())) } } func (q *rangeIDQueue) PopFront() (roachpb.RangeID, bool) { if q.len == 0 { return 0, false } q.len-- frontElem := q.chunks.Front() front := frontElem.Value.(*rangeIDChunk) id, ok := front.PopFront() if !ok { panic("encountered empty chunk") } if front.Len() == 0 && front.WriteCap() == 0 { q.chunks.Remove(frontElem) } return id, true } func (q *rangeIDQueue) Len() int { return q.len } func (q *rangeIDQueue) back() *rangeIDChunk { return q.chunks.Back().Value.(*rangeIDChunk) } type raftProcessor interface { // Process a raft.Ready struct containing entries and messages that are // ready to read, be saved to stable storage, committed, or sent to other // peers. // // This method does not take a ctx; the implementation is expected to use a // ctx annotated with the range information, according to RangeID. processReady(roachpb.RangeID) // Process all queued messages for the specified range. // Return true if the range should be queued for ready processing. processRequestQueue(context.Context, roachpb.RangeID) bool // Process a raft tick for the specified range. // Return true if the range should be queued for ready processing. 
processTick(context.Context, roachpb.RangeID) bool } type raftScheduleFlags int const ( stateQueued raftScheduleFlags = 1 << iota stateRaftReady stateRaftRequest stateRaftTick ) type raftScheduleState struct { flags raftScheduleFlags begin int64 // nanoseconds // The number of ticks queued. Usually it's 0 or 1, but may go above if the // scheduling or processing is slow. It is limited by raftScheduler.maxTicks, // so that the cost of processing all the ticks doesn't grow uncontrollably. // If ticks consistently reaches maxTicks, the node/range is too slow, and it // is safer to not deliver all the ticks as it may cause a cascading effect // (the range events take longer and longer to process). // TODO(pavelkalinnikov): add a node health metric for the ticks. // // INVARIANT: flags&stateRaftTick == 0 iff ticks == 0. ticks int } var raftSchedulerBatchPool = sync.Pool{ New: func() interface{} { return new(raftSchedulerBatch) }, } // raftSchedulerBatch is a batch of range IDs to enqueue. It enables // efficient per-shard enqueueing. type raftSchedulerBatch struct { rangeIDs [][]roachpb.RangeID // by shard priorityIDs map[roachpb.RangeID]bool } func newRaftSchedulerBatch(numShards int, priorityIDs *syncutil.IntMap) *raftSchedulerBatch { b := raftSchedulerBatchPool.Get().(*raftSchedulerBatch) if cap(b.rangeIDs) >= numShards { b.rangeIDs = b.rangeIDs[:numShards] } else { b.rangeIDs = make([][]roachpb.RangeID, numShards) } if b.priorityIDs == nil { b.priorityIDs = make(map[roachpb.RangeID]bool, 8) // expect few ranges, if any } // Cache the priority range IDs in an owned map, since we expect this to be // very small or empty and we do a lookup for every Add() call. priorityIDs.Range(func(id int64, _ unsafe.Pointer) bool { b.priorityIDs[roachpb.RangeID(id)] = true return true }) return b } func (b *raftSchedulerBatch) Add(id roachpb.RangeID) { shardIdx := shardIndex(id, len(b.rangeIDs), b.priorityIDs[id]) b.rangeIDs[shardIdx] = append(b.rangeIDs[shardIdx], id) } func (b *raftSchedulerBatch) Close() { for i := range b.rangeIDs { b.rangeIDs[i] = b.rangeIDs[i][:0] } for i := range b.priorityIDs { delete(b.priorityIDs, i) } raftSchedulerBatchPool.Put(b) } // shardIndex returns the raftScheduler shard index of the given range ID based // on the shard count and the range's priority. Priority ranges are assigned to // the reserved shard 0, other ranges are modulo range ID (ignoring shard 0). // numShards will always be 2 or more (1 priority, 1 regular). func shardIndex(id roachpb.RangeID, numShards int, priority bool) int { if priority { return 0 } return 1 + int(int64(id)%int64(numShards-1)) // int64s to avoid overflow } type raftScheduler struct { ambientContext log.AmbientContext processor raftProcessor metrics *StoreMetrics // shards contains scheduler shards. Ranges and workers are allocated to // separate shards to reduce contention at high worker counts. Allocation // is modulo range ID, with shard 0 reserved for priority ranges. 
shards []*raftSchedulerShard // 1 + RangeID % (len(shards) - 1) priorityIDs syncutil.IntMap done sync.WaitGroup } type raftSchedulerShard struct { syncutil.Mutex cond *sync.Cond queue rangeIDQueue state map[roachpb.RangeID]raftScheduleState numWorkers int maxTicks int stopped bool } func newRaftScheduler( ambient log.AmbientContext, metrics *StoreMetrics, processor raftProcessor, numWorkers int, shardSize int, priorityWorkers int, maxTicks int, ) *raftScheduler { s := &raftScheduler{ ambientContext: ambient, processor: processor, metrics: metrics, } // Priority shard at index 0. if priorityWorkers <= 0 { priorityWorkers = 1 } s.shards = append(s.shards, newRaftSchedulerShard(priorityWorkers, maxTicks)) // Regular shards, excluding priority shard. numShards := 1 if shardSize > 0 && numWorkers > shardSize { numShards = (numWorkers-1)/shardSize + 1 // ceiling division } for i := 0; i < numShards; i++ { shardWorkers := numWorkers / numShards if i < numWorkers%numShards { // distribute remainder shardWorkers++ } if shardWorkers <= 0 { shardWorkers = 1 // ensure we always have a worker } s.shards = append(s.shards, newRaftSchedulerShard(shardWorkers, maxTicks)) } return s } func newRaftSchedulerShard(numWorkers, maxTicks int) *raftSchedulerShard { shard := &raftSchedulerShard{ state: map[roachpb.RangeID]raftScheduleState{}, numWorkers: numWorkers, maxTicks: maxTicks, } shard.cond = sync.NewCond(&shard.Mutex) return shard } func (s *raftScheduler) Start(stopper *stop.Stopper) { ctx := s.ambientContext.AnnotateCtx(context.Background()) waitQuiesce := func(context.Context) { <-stopper.ShouldQuiesce() for _, shard := range s.shards { shard.Lock() shard.stopped = true shard.Unlock() shard.cond.Broadcast() } } if err := stopper.RunAsyncTaskEx(ctx, stop.TaskOpts{ TaskName: "raftsched-wait-quiesce", // This task doesn't reference a parent because it runs for the server's // lifetime. SpanOpt: stop.SterileRootSpan, }, waitQuiesce); err != nil { waitQuiesce(ctx) } for _, shard := range s.shards { s.done.Add(shard.numWorkers) shard := shard // pin loop variable for i := 0; i < shard.numWorkers; i++ { if err := stopper.RunAsyncTaskEx(ctx, stop.TaskOpts{ TaskName: "raft-worker", // This task doesn't reference a parent because it runs for the server's // lifetime. SpanOpt: stop.SterileRootSpan, }, func(ctx context.Context) { shard.worker(ctx, s.processor, s.metrics) s.done.Done() }, ); err != nil { s.done.Done() } } } } func (s *raftScheduler) Wait(context.Context) { s.done.Wait() } // AddPriorityID adds the given range ID to the set of priority ranges. func (s *raftScheduler) AddPriorityID(rangeID roachpb.RangeID) { s.priorityIDs.Store(int64(rangeID), priorityIDsValue) } // RemovePriorityID removes the given range ID from the set of priority ranges. func (s *raftScheduler) RemovePriorityID(rangeID roachpb.RangeID) { s.priorityIDs.Delete(int64(rangeID)) } // PriorityIDs returns the current priority ranges. func (s *raftScheduler) PriorityIDs() []roachpb.RangeID { var priorityIDs []roachpb.RangeID s.priorityIDs.Range(func(id int64, _ unsafe.Pointer) bool { priorityIDs = append(priorityIDs, roachpb.RangeID(id)) return true }) return priorityIDs } func (ss *raftSchedulerShard) worker( ctx context.Context, processor raftProcessor, metrics *StoreMetrics, ) { // We use a sync.Cond for worker notification instead of a buffered // channel. Buffered channels have internal overhead for maintaining the // buffer even when the elements are empty. 
And the buffer isn't necessary as // the raftScheduler work is already buffered on the internal queue. Lastly, // signaling a sync.Cond is significantly faster than selecting and sending // on a buffered channel. ss.Lock() for { var id roachpb.RangeID for { if ss.stopped { ss.Unlock() return } var ok bool if id, ok = ss.queue.PopFront(); ok { break } ss.cond.Wait() } // Grab and clear the existing state for the range ID. Note that we leave // the range ID marked as "queued" so that a concurrent Enqueue* will not // queue the range ID again. state := ss.state[id] ss.state[id] = raftScheduleState{flags: stateQueued} ss.Unlock() // Record the scheduling latency for the range. lat := nowNanos() - state.begin metrics.RaftSchedulerLatency.RecordValue(lat) // Process requests first. This avoids a scenario where a tick and a // "quiesce" message are processed in the same iteration and intervening // raft ready processing unquiesces the replica because the tick triggers // an election. if state.flags&stateRaftRequest != 0 { // processRequestQueue returns true if the range should perform ready // processing. Do not reorder this below the call to processReady. if processor.processRequestQueue(ctx, id) { state.flags |= stateRaftReady } } if util.RaceEnabled { // assert the ticks invariant if tick := state.flags&stateRaftTick != 0; tick != (state.ticks != 0) { log.Fatalf(ctx, "stateRaftTick is %v with ticks %v", tick, state.ticks) } } if state.flags&stateRaftTick != 0 { for t := state.ticks; t > 0; t-- { // processRaftTick returns true if the range should perform ready // processing. Do not reorder this below the call to processReady. if processor.processTick(ctx, id) { state.flags |= stateRaftReady } } } if state.flags&stateRaftReady != 0 { processor.processReady(id) } ss.Lock() state = ss.state[id] if state.flags == stateQueued { // No further processing required by the range ID, clear it from the // state map. delete(ss.state, id) } else { // There was a concurrent call to one of the Enqueue* methods. Queue // the range ID for further processing. // // Even though the Enqueue* method did not signal after detecting // that the range was being processed, there also is no need for us // to signal the condition variable. This is because this worker // goroutine will loop back around and continue working without ever // going back to sleep. // // We can prove this out through a short derivation. // - For optimal concurrency, we want: // awake_workers = min(max_workers, num_ranges) // - The condition variable / mutex structure ensures that: // awake_workers = cur_awake_workers + num_signals // - So we need the following number of signals for optimal concurrency: // num_signals = min(max_workers, num_ranges) - cur_awake_workers // - If we re-enqueue a range that's currently being processed, the // num_ranges does not change once the current iteration completes // and the worker does not go back to sleep between the current // iteration and the next iteration, so no change to num_signals // is needed. ss.queue.Push(id) } } } // NewEnqueueBatch creates a new range ID batch for enqueueing via // EnqueueRaft(Ticks|Requests). The caller must call Close() on the batch when // done. 
func (s *raftScheduler) NewEnqueueBatch() *raftSchedulerBatch { return newRaftSchedulerBatch(len(s.shards), &s.priorityIDs) } func (ss *raftSchedulerShard) enqueue1Locked( addFlags raftScheduleFlags, id roachpb.RangeID, now int64, ) int { ticks := int((addFlags & stateRaftTick) / stateRaftTick) // 0 or 1 prevState := ss.state[id] if prevState.flags&addFlags == addFlags && ticks == 0 { return 0 } var queued int newState := prevState newState.flags = newState.flags | addFlags newState.ticks += ticks if newState.ticks > ss.maxTicks { newState.ticks = ss.maxTicks } if newState.flags&stateQueued == 0 { newState.flags |= stateQueued queued++ ss.queue.Push(id) } if newState.begin == 0 { newState.begin = now } ss.state[id] = newState return queued } func (s *raftScheduler) enqueue1(addFlags raftScheduleFlags, id roachpb.RangeID) { now := nowNanos() _, hasPriority := s.priorityIDs.Load(int64(id)) shardIdx := shardIndex(id, len(s.shards), hasPriority) shard := s.shards[shardIdx] shard.Lock() n := shard.enqueue1Locked(addFlags, id, now) shard.Unlock() shard.signal(n) } func (ss *raftSchedulerShard) enqueueN(addFlags raftScheduleFlags, ids ...roachpb.RangeID) int { // Enqueue the ids in chunks to avoid holding mutex for too long. const enqueueChunkSize = 128 // Avoid locking for 0 new ranges. if len(ids) == 0 { return 0 } now := nowNanos() ss.Lock() var count int for i, id := range ids { count += ss.enqueue1Locked(addFlags, id, now) if (i+1)%enqueueChunkSize == 0 { ss.Unlock() now = nowNanos() ss.Lock() } } ss.Unlock() return count } func (s *raftScheduler) enqueueBatch(addFlags raftScheduleFlags, batch *raftSchedulerBatch) { for shardIdx, ids := range batch.rangeIDs { count := s.shards[shardIdx].enqueueN(addFlags, ids...) s.shards[shardIdx].signal(count) } } func (ss *raftSchedulerShard) signal(count int) { if count >= ss.numWorkers { ss.cond.Broadcast() } else { for i := 0; i < count; i++ { ss.cond.Signal() } } } func (s *raftScheduler) EnqueueRaftReady(id roachpb.RangeID) { s.enqueue1(stateRaftReady, id) } func (s *raftScheduler) EnqueueRaftRequest(id roachpb.RangeID) { s.enqueue1(stateRaftRequest, id) } func (s *raftScheduler) EnqueueRaftRequests(batch *raftSchedulerBatch) { s.enqueueBatch(stateRaftRequest, batch) } func (s *raftScheduler) EnqueueRaftTicks(batch *raftSchedulerBatch) { s.enqueueBatch(stateRaftTick, batch) } func nowNanos() int64 { return timeutil.Now().UnixNano() }
pkg/kv/kvserver/scheduler.go
0
https://github.com/cockroachdb/cockroach/commit/977f72e4cc24f5d14424c278d6d6921016c3fc62
[ 0.007494134828448296, 0.0003879683499690145, 0.00016308404156006873, 0.00017086698790080845, 0.0010295023676007986 ]
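Illustration for the scheduler.go record above: its comments describe how ranges and workers are partitioned across scheduler shards — shard 0 is reserved for priority ranges, every other range maps to a shard by range ID modulo (numShards - 1), and workers are split across shards with ceiling division plus a one-per-shard remainder. The following self-contained Go sketch re-derives that arithmetic for illustration only; shardIndex mirrors the function in the file, while splitWorkers and the sample numbers are names and values made up for this sketch, not part of the CockroachDB code.

package main

import "fmt"

// shardIndex mirrors the allocation rule documented in scheduler.go: priority
// ranges always map to the reserved shard 0, all other ranges are spread over
// shards 1..numShards-1 by range ID modulo (numShards-1).
func shardIndex(rangeID int64, numShards int, priority bool) int {
	if priority {
		return 0
	}
	return 1 + int(rangeID%int64(numShards-1))
}

// splitWorkers reproduces the worker distribution in newRaftScheduler: the
// regular shard count is the ceiling of numWorkers/shardSize, and the
// remainder workers are handed out one per shard, so shard sizes differ by at
// most one and are never zero.
func splitWorkers(numWorkers, shardSize int) []int {
	numShards := 1
	if shardSize > 0 && numWorkers > shardSize {
		numShards = (numWorkers-1)/shardSize + 1 // ceiling division
	}
	counts := make([]int, numShards)
	for i := range counts {
		counts[i] = numWorkers / numShards
		if i < numWorkers%numShards {
			counts[i]++ // distribute remainder
		}
		if counts[i] <= 0 {
			counts[i] = 1 // always at least one worker per shard
		}
	}
	return counts
}

func main() {
	// With 3 regular shards plus the priority shard, range 7 lands on 1 + 7%3 = 2,
	// unless it is marked priority, in which case it goes to shard 0.
	fmt.Println(shardIndex(7, 4, false), shardIndex(7, 4, true)) // 2 0
	// 16 workers with a shard size of 6 become 3 shards of 6, 5 and 5 workers.
	fmt.Println(splitWorkers(16, 6)) // [6 5 5]
}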
{ "id": 5, "code_window": [ "\trequire.Equal(t, serverpb.DecommissionPreCheckResponse_NodeCheckResult{\n", "\t\tNodeID: invalidDecommissioningNodeID,\n", "\t\tDecommissionReadiness: serverpb.DecommissionPreCheckResponse_UNKNOWN,\n", "\t\tLivenessStatus: livenesspb.NodeLivenessStatus_UNKNOWN,\n", "\t\tReplicaCount: 0,\n", "\t\tCheckedRanges: nil,\n", "\t}, resp.CheckedNodes[1])\n", "}\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/server/admin_test.go", "type": "replace", "edit_start_line_idx": 1360 }
// Copyright 2016 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. syntax = "proto3"; package cockroach.server.serverpb; option go_package = "github.com/cockroachdb/cockroach/pkg/server/serverpb"; import "config/zonepb/zone.proto"; import "util/tracing/tracingpb/tracing.proto"; import "jobs/jobspb/jobs.proto"; import "server/serverpb/status.proto"; import "storage/enginepb/mvcc.proto"; import "kv/kvserver/liveness/livenesspb/liveness.proto"; import "kv/kvserver/loqrecovery/loqrecoverypb/recovery.proto"; import "kv/kvserver/kvserverpb/range_log.proto"; import "kv/kvpb/api.proto"; import "roachpb/metadata.proto"; import "roachpb/data.proto"; import "ts/catalog/chart_catalog.proto"; import "util/metric/metric.proto"; import "util/tracing/tracingpb/recorded_span.proto"; import "gogoproto/gogo.proto"; import "google/api/annotations.proto"; import "google/protobuf/timestamp.proto"; // ZoneConfigurationLevel indicates, for objects with a Zone Configuration, // the object level at which the configuration is defined. This is needed // because objects without a specifically indicated Zone Configuration will // inherit the configuration of their "parent". enum ZoneConfigurationLevel { UNKNOWN = 0; // CLUSTER indicates that this object uses the cluster default Zone Configuration. CLUSTER = 1; // DATABASE indicates that this object uses a database-level Zone Configuration. DATABASE = 2; // TABLE indicates that this object uses a table-level Zone Configuration. TABLE = 3; } // DatabasesRequest requests a list of databases. message DatabasesRequest { } // DatabasesResponse contains a list of databases. message DatabasesResponse { repeated string databases = 1; } // DatabaseDetailsRequest requests detailed information about the specified // database message DatabaseDetailsRequest { // database is the name of the database we are querying. string database = 1; // Setting this flag includes a computationally-expensive stats field // in the response. bool include_stats = 2; } // DatabaseDetailsResponse contains grant information, table names, // zone configuration, and size statistics for a database. message DatabaseDetailsResponse { message Grant { // user is the user that this grant applies to. string user = 1; // privileges are the abilities this grant gives to the user. repeated string privileges = 2; } message Stats { // A table which exists in the database, but for which we could not load stats // during this request. message MissingTable { // The name of the table for which we could not load stats. string name = 1; // The error message that resulted when the request for this table failed. string error_message = 2; } // A list of tables that exist in the database, but for which stats could // not be loaded due to failures during this request. repeated MissingTable missing_tables = 1; // The number of ranges, as determined from a query of range meta keys, // across all tables. int64 range_count = 2; // An approximation of the disk space (in bytes) used for all replicas // of all tables across the cluster. uint64 approximate_disk_bytes = 3; // node_ids is the ordered list of node ids on which data is stored. 
repeated int32 node_ids = 4 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; int32 num_index_recommendations = 5; } // grants are the results of SHOW GRANTS for this database. repeated Grant grants = 1 [(gogoproto.nullable) = false]; // table_names contains the names of all tables in this database. Note that // all responses will be schema-qualified (schema.table) and that every schema // or table that contains a "sql unsafe character" such as uppercase letters // or dots will be surrounded with double quotes, such as "naughty schema".table. repeated string table_names = 2; // descriptor_id is an identifier used to uniquely identify this database. int64 descriptor_id = 3 [(gogoproto.customname) = "DescriptorID"]; // The zone configuration in effect for this database. cockroach.config.zonepb.ZoneConfig zone_config = 4 [(gogoproto.nullable) = false]; // The level at which this object's zone configuration is set. ZoneConfigurationLevel zone_config_level = 5; // Size information about the database, present only when explicitly requested. Stats stats = 6; } // TableDetailsRequest is a request for detailed information about a table. message TableDetailsRequest { // database is the name of the database that contains the table we're // interested in. string database = 1; // table is the name of the table that we're querying. Table may be // schema-qualified (schema.table) and each name component that contains // sql unsafe characters such as . or uppercase letters must be surrounded // in double quotes like "naughty schema".table. string table = 2; } // TableDetailsResponse contains grants, column names, and indexes for // a table. message TableDetailsResponse { // Grant is an entry from SHOW GRANTS. message Grant { // user is the user that this grant applies to. string user = 1; // privileges are the abilities this grant gives to the user. repeated string privileges = 2; } message Column { // name is the name of the column. string name = 1; // type is the SQL type (INT, STRING, etc.) of this column. string type = 2; // nullable is whether this column can contain NULL. bool nullable = 3; // default_value is the default value of this column. string default_value = 4; // generation_expression is the generator expression if the column is computed. string generation_expression = 5; // hidden is whether this column is hidden. bool hidden = 6; } message Index { // name is the name of this index. string name = 1; // unique is whether this a unique index (i.e. CREATE UNIQUE INDEX). bool unique = 2; // seq is an internal variable that's passed along. int64 seq = 3; // column is the column that this index indexes. string column = 4; // direction is either "ASC" (ascending) or "DESC" (descending). string direction = 5; // storing is an internal variable that's passed along. bool storing = 6; // implicit is an internal variable that's passed along. bool implicit = 7; } repeated Grant grants = 1 [(gogoproto.nullable) = false]; repeated Column columns = 2 [(gogoproto.nullable) = false]; repeated Index indexes = 3 [(gogoproto.nullable) = false]; // range_count is the size of the table in ranges. This provides a rough // estimate of the storage requirements for the table. // TODO(mrtracy): The TableStats method also returns a range_count field which // is more accurate than this one; TableDetails calculates this number using // a potentially faster method that is subject to cache staleness. 
We should // consider removing or renaming this field to reflect that difference. See // GitHub issue #5435 for more information. int64 range_count = 4; // create_table_statement is the output of "SHOW CREATE" for this table; // it is a SQL statement that would re-create the table's current schema if // executed. string create_table_statement = 5; // The zone configuration in effect for this table. cockroach.config.zonepb.ZoneConfig zone_config = 6 [(gogoproto.nullable) = false]; // The level at which this object's zone configuration is set. ZoneConfigurationLevel zone_config_level = 7; // descriptor_id is an identifier used to uniquely identify this table. int64 descriptor_id = 8 [(gogoproto.customname) = "DescriptorID"]; // configure_zone_statement is the output of "SHOW ZONE CONFIGURATION FOR TABLE" // for this table. It is a SQL statement that would re-configure the table's current // zone if executed. string configure_zone_statement = 9; // stats_last_created_at is the time at which statistics were last created. google.protobuf.Timestamp stats_last_created_at = 10 [(gogoproto.stdtime) = true]; // has_index_recommendations notifies if the there are index recommendations // on this table. bool has_index_recommendations = 11; // data_total_bytes is the size in bytes of live and non-live data on the table. int64 data_total_bytes = 12; // data_live_bytes is the size in bytes of live (non MVCC) data on the table. int64 data_live_bytes = 13; // data_live_percentage is the percentage of live (non MVCC) data on the table. float data_live_percentage = 14; } // TableStatsRequest is a request for detailed, computationally expensive // information about a table. message TableStatsRequest { // database is the name of the database that contains the table we're // interested in. string database = 1; // table is the name of the table that we're querying. Table may be // schema-qualified (schema.table) and each name component that contains // sql unsafe characters such as . or uppercase letters must be surrounded // in double quotes like "naughty schema".table. string table = 2; } // TableStatsResponse contains detailed, computationally expensive information // about a table. message TableStatsResponse { // range_count is the number of ranges, as determined from a query of range // meta keys. int64 range_count = 1; // replica_count is the number of replicas of any range of this table, as // found by querying nodes which are known to have replicas. When compared // with range_count, this can be used to estimate the current replication // factor of the table. int64 replica_count = 2; // node_count is the number of nodes which contain data for this table, // according to a query of range meta keys. int64 node_count = 3; // stats is the summation of MVCCStats for all replicas of this table // across the cluster. cockroach.storage.enginepb.MVCCStats stats = 4 [(gogoproto.nullable) = false]; // approximate_disk_bytes is an approximation of the disk space (in bytes) // used for all replicas of this table across the cluster. uint64 approximate_disk_bytes = 6; // MissingNode represents information on a node which should contain data // for this table, but could not be contacted during this request. message MissingNode { // The ID of the missing node. string node_id = 1 [(gogoproto.customname) = "NodeID"]; // The error message that resulted when the query sent to this node failed. 
string error_message = 2; } // A list of nodes which should contain data for this table (according to // cluster metadata), but could not be contacted during this request. repeated MissingNode missing_nodes = 5 [(gogoproto.nullable) = false]; // node_ids is the ordered list of node ids on which the table data is stored. repeated int32 node_ids = 7 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; } // NonTableStatsRequest requests statistics on cluster data ranges that do not // belong to SQL tables. message NonTableStatsRequest { } // NonTableStatsResponse returns statistics on various cluster data ranges // that do not belong to SQL tables. The statistics for each range are returned // as a TableStatsResponse. message NonTableStatsResponse { // Information on time series ranges. TableStatsResponse time_series_stats = 1; // Information for remaining (non-table, non-time-series) ranges. TableStatsResponse internal_use_stats = 2; } // UsersRequest requests a list of users. message UsersRequest { } // UsersResponse returns a list of users. message UsersResponse { // User is a CockroachDB user. message User { string username = 1; } // usernames is a list of users for the CockroachDB cluster. repeated User users = 1 [(gogoproto.nullable) = false]; } // EventsRequest is a request for event log entries, optionally filtered // by the specified event type. message EventsRequest { string type = 1; reserved 2; // limit is the total number of results that are retrieved by the query. If // this is omitted or set to 0, the default maximum number of results are // returned. When set to > 0, at most only that number of results are // returned. When set to < 0, an unlimited number of results are returned. int32 limit = 3; // unredacted_events indicates that the values in the events should // not be redacted. The default is to redact, so that older versions // of `cockroach zip` do not see un-redacted values by default. // For good security, this field is only obeyed by the server after // checking that the client of the RPC is an admin user. bool unredacted_events = 4; } // EventsResponse contains a set of event log entries. This is always limited // to the latest N entries (N is enforced in the associated endpoint). message EventsResponse { message Event { // timestamp is the time at which the event occurred. google.protobuf.Timestamp timestamp = 1 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; // event_type is the type of the event (e.g. "create_table", "drop_table". string event_type = 2; reserved 3; // reporting_id is the reporting ID for this event. int64 reporting_id = 4 [(gogoproto.customname) = "ReportingID"]; // info has more detailed information for the event. The contents vary // depending on the event. string info = 5; // unique_id is a unique identifier for this event. bytes unique_id = 6 [(gogoproto.customname) = "UniqueID"]; } repeated Event events = 1 [(gogoproto.nullable) = false]; } // SetUIDataRequest stores the given key/value pairs in the system.ui table. message SetUIDataRequest { // key_values is a map of keys to bytes values. Each key will be stored // with its corresponding value as a separate row in system.ui. map<string, bytes> key_values = 1; } // SetUIDataResponse is currently an empty response. message SetUIDataResponse { } // GETUIDataRequest requests the values for the given keys from the system.ui // table. 
message GetUIDataRequest { repeated string keys = 1; } // GetUIDataResponse contains the requested values and the times at which // the values were last updated. message GetUIDataResponse { message Value { // value is the value of the requested key. bytes value = 1; // last_updated is the time at which the value was last updated. google.protobuf.Timestamp last_updated = 2 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; } // key_values maps keys to their retrieved values. If this doesn't contain a // a requested key, that key was not found. map<string, Value> key_values = 1 [(gogoproto.nullable) = false]; } // ClusterRequest requests metadata for the cluster. message ClusterRequest { } // ClusterResponse contains metadata for the cluster. message ClusterResponse { // The unique ID used to identify this cluster. string cluster_id = 1 [(gogoproto.customname) = "ClusterID"]; // True if diagnostics reporting is enabled for the cluster. bool reporting_enabled = 2; // True if enterprise features are enabled for the cluster. bool enterprise_enabled = 3; } // DrainRequest instructs the receiving node to drain. message DrainRequest { reserved 1; reserved 2; // When true, terminates the process after the server has started draining. // Setting both shutdown and do_drain to false causes // the request to only operate as a probe. // Setting do_drain to false and shutdown to true causes // the server to shut down immediately without // first draining. bool shutdown = 3; // When true, perform the drain phase. See the comment above on // shutdown for an explanation of the interaction between the two. // do_drain is also implied by a non-nil deprecated_probe_indicator. bool do_drain = 4; // node_id is a string so that "local" can be used to specify that no // forwarding is necessary. // For compatibility with v21.2 nodes, an empty node_id is // interpreted as "local". This behavior might be removed // in subsequent versions. string node_id = 5; // When true, more detailed information is logged during the range lease drain phase. bool verbose = 6; } // DrainResponse is the response to a successful DrainRequest. message DrainResponse { // is_draining is set to true iff the server is currently draining. // This is set to true in response to a request where skip_drain // is false; but it can also be set to true in response // to a probe request (!shutdown && skip_drain) if another // drain request has been issued prior or asynchronously. bool is_draining = 2; // drain_remaining_indicator measures, at the time of starting to // process the corresponding drain request, how many actions to // fully drain the node were deemed to be necessary. Some, but not // all, of these actions may already have been carried out by the // time this indicator is received by the client. The client should // issue requests until this indicator first reaches zero, which // indicates that the node is fully drained. // // The API contract is the following: // // - upon a first Drain call with do_drain set, the remaining // indicator will have some value >=0. If >0, it indicates that // drain is pushing state away from the node. (What this state // precisely means is left unspecified for this field. See below // for details.) // // - upon a subsequent Drain call with do_drain set, the remaining // indicator should have reduced in value. 
The drain process does best // effort at shedding state away from the node; hopefully, all the // state is shed away upon the first call and the progress // indicator can be zero as early as the second call. However, // if there was a lot of state to shed, it is possible for // timeout to be encountered upon the first call. In that case, the // second call will do some more work and return a non-zero value // as well. // // - eventually, in an iterated sequence of DrainRequests with // do_drain set, the remaining indicator should reduce to zero. At // that point the client can conclude that no state is left to // shed, and it should be safe to shut down the node with a // DrainRequest with shutdown = true. // // Note that this field is left unpopulated (and thus remains at // zero) for pre-20.1 nodes. A client can recognize this by // observing is_draining to be false after a request with do_drain = // true: the is_draining field is also left unpopulated by pre-20.1 // nodes. uint64 drain_remaining_indicator = 3; // drain_remaining_description is an informal (= not // machine-parsable) string that explains the progress of the drain // process to human eyes. This is intended for use mainly for // troubleshooting. // // The field is only populated if do_drain is true in the // request. string drain_remaining_description = 4; reserved 1; } // DecommissionPreCheckRequest requests that preliminary checks be run to // ensure that the specified node(s) can be decommissioned successfully. message DecommissionPreCheckRequest { repeated int32 node_ids = 1 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // The maximum number of ranges for which to report errors. int32 num_replica_report = 2; // If true, all ranges on the checked nodes must only need replacement or // removal for decommissioning. bool strict_readiness = 3; // If true, collect traces for each range checked. // Requires num_replica_report > 0. bool collect_traces = 4; } // DecommissionPreCheckResponse returns the number of replicas that encountered // errors when running preliminary decommissioning checks, as well as the // associated error messages and traces, for each node. message DecommissionPreCheckResponse { enum NodeReadiness { option (gogoproto.goproto_enum_stringer) = false; UNKNOWN = 0; READY = 1; ALREADY_DECOMMISSIONED = 2; ALLOCATION_ERRORS = 3; } // The result of checking a range's readiness for the decommission. message RangeCheckResult { int32 range_id = 1 [ (gogoproto.customname) = "RangeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.RangeID"]; // The action determined by the allocator that is needed for the range. string action = 2; // All trace events collected while checking the range. repeated TraceEvent events = 3; // The error message from the allocator's processing, if any. string error = 4; } // The result of checking a single node's readiness for decommission. message NodeCheckResult { int32 node_id = 1 [ (gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // The node's decommission readiness status. NodeReadiness decommission_readiness = 2; // The liveness status of the given node. kv.kvserver.liveness.livenesspb.NodeLivenessStatus liveness_status = 3; // The number of total replicas on the node, computed by scanning range // descriptors. 
int64 replica_count = 4; // The details and recorded traces from preprocessing each range with a // replica on the checked nodes that resulted in error, up to the maximum // specified in the request. repeated RangeCheckResult checked_ranges = 5 [(gogoproto.nullable) = false]; } // Status of the preliminary decommission checks across nodes. repeated NodeCheckResult checked_nodes = 1 [(gogoproto.nullable) = false]; } // DecommissionStatusRequest requests the decommissioning status for the // specified or, if none are specified, all nodes. message DecommissionStatusRequest { repeated int32 node_ids = 1 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // The number of decommissioning replicas to be reported. int32 num_replica_report = 2; } // DecommissionRequest requests the server to set the membership status on // all nodes specified by NodeIDs to the value of TargetMembership. // // If no NodeIDs are given, it targets the recipient node. message DecommissionRequest { repeated int32 node_ids = 1 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; kv.kvserver.liveness.livenesspb.MembershipStatus target_membership = 2; // The number of decommissioning replicas to be reported. int32 num_replica_report = 3; } // DecommissionStatusResponse lists decommissioning statuses for a number of NodeIDs. message DecommissionStatusResponse { message Replica { int32 replica_id = 1 [ (gogoproto.customname) = "ReplicaID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.ReplicaID"]; int32 range_id = 2 [ (gogoproto.customname) = "RangeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.RangeID"]; } message Status { int32 node_id = 1 [ (gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; bool is_live = 2; // The number of replicas on the node, computed by scanning meta2 ranges. int64 replica_count = 3; // The membership status of the given node. kv.kvserver.liveness.livenesspb.MembershipStatus membership = 4; bool draining = 5; // Decommissioning replicas on the given node to be reported. // How many replicas are reported is determined by what was specified in the // request. repeated Replica reported_replicas = 6; } // Status of all affected nodes. repeated Status status = 2 [(gogoproto.nullable) = false]; } // SettingsRequest inquires what are the current settings in the cluster. message SettingsRequest { // The array of setting names to retrieve. // An empty keys array means "all". repeated string keys = 1; // Indicate whether to see unredacted setting values. // This is opt-in so that a previous version `cockroach zip` // does not start reporting values when this becomes active. // For good security, the server only obeys this after it checks // that the logger-in user has admin privilege. bool unredacted_values = 2; } // SettingsResponse is the response to SettingsRequest. message SettingsResponse { message Value { string value = 1; string type = 2; string description = 3; bool public = 4; google.protobuf.Timestamp last_updated = 5 [(gogoproto.nullable) = true, (gogoproto.stdtime) = true]; } map<string, Value> key_values = 1 [(gogoproto.nullable) = false]; } // HealthRequest requests a liveness or readiness check. // // A liveness check is triggered via ready set to false. 
In this mode, // an empty response is returned immediately, that is, the caller merely // learns that the process is running. // // A readiness check (ready == true) is suitable for determining whether // user traffic should be directed at a given node, for example by a load // balancer. In this mode, a successful response is returned only if the // node: // // - is not in the process of shutting down or booting up (including // waiting for cluster bootstrap); // - is regarded as healthy by the cluster via the recent broadcast of // a liveness beacon. Absent either of these conditions, an error // code will result. // // API: PUBLIC message HealthRequest { // ready specifies whether the client wants to know whether the // target node is ready to receive traffic. If a node is unready, an // error will be returned. // API: PUBLIC bool ready = 1; } // HealthResponse is the response to HealthRequest. It currently does not // contain any information. // API: PUBLIC message HealthResponse { } // LivenessRequest requests liveness data for all nodes on the cluster. message LivenessRequest { } // LivenessResponse contains the liveness status of each node on the cluster. message LivenessResponse { repeated kv.kvserver.liveness.livenesspb.Liveness livenesses = 1 [(gogoproto.nullable) = false]; map<int32, kv.kvserver.liveness.livenesspb.NodeLivenessStatus> statuses = 2 [ (gogoproto.nullable) = false, (gogoproto.castkey) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID" ]; } // JobsRequest requests system job information of the given status and type. message JobsRequest { int32 limit = 1; string status = 2; cockroach.sql.jobs.jobspb.Type type = 3; } // JobsResponse contains the job record for each matching job. message JobsResponse { repeated JobResponse jobs = 1 [(gogoproto.nullable) = false]; google.protobuf.Timestamp earliest_retained_time = 2 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; } // JobRequest requests system job information for the given job_id. message JobRequest { int64 job_id = 1; } // JobResponse contains the job record for a job. message JobResponse { int64 id = 1 [(gogoproto.customname) = "ID"]; string type = 2; string description = 3; string statement = 16; string username = 4; repeated uint32 descriptor_ids = 5 [ (gogoproto.customname) = "DescriptorIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID" ]; string status = 6; google.protobuf.Timestamp created = 7 [(gogoproto.stdtime) = true]; google.protobuf.Timestamp started = 8 [(gogoproto.stdtime) = true]; google.protobuf.Timestamp finished = 9 [(gogoproto.stdtime) = true]; google.protobuf.Timestamp modified = 10 [(gogoproto.stdtime) = true]; float fraction_completed = 11; string error = 12; // highwater_timestamp is the highwater timestamp returned as normal // timestamp. This is appropriate for display to humans. google.protobuf.Timestamp highwater_timestamp = 13 [(gogoproto.stdtime) = true]; // highwater_decimal is the highwater timestamp in the proprietary decimal // form used by logical timestamps internally. This is appropriate to pass // to a "AS OF SYSTEM TIME" SQL statement. string highwater_decimal = 14; string running_status = 15; google.protobuf.Timestamp last_run = 17 [(gogoproto.stdtime) = true]; google.protobuf.Timestamp next_run = 18 [(gogoproto.stdtime) = true]; int64 num_runs = 19; // ExecutionFailure corresponds to a failure to execute the job with the // attempt starting at start and ending at end. 
message ExecutionFailure { // Status is the status of the job during the execution. string status = 1; // Start is the time at which the execution started. google.protobuf.Timestamp start = 2 [(gogoproto.stdtime) = true]; // End is the time at which the error occurred. google.protobuf.Timestamp end = 3 [(gogoproto.stdtime) = true]; // Error is the error which occurred. string error = 4; } // ExecutionFailures is a log of execution failures of the job. It is not // guaranteed to contain all execution failures and some execution failures // may not contain an error or end. repeated ExecutionFailure execution_failures = 20; // coordinator_id identifies the node coordinating the job. This value will // only be present for jobs that are currently running or recently ran. int64 coordinator_id = 21 [(gogoproto.customname) = "CoordinatorID"]; } // LocationsRequest requests system locality location information. message LocationsRequest { } // JobsResponse contains the job record for each matching job. message LocationsResponse { message Location { string locality_key = 1; string locality_value = 2; double latitude = 3; double longitude = 4; } repeated Location locations = 1 [(gogoproto.nullable) = false]; } // RangeLogRequest request the history of a range from the range log. message RangeLogRequest { // TODO(tamird): use [(gogoproto.customname) = "RangeID"] below. Need to // figure out how to teach grpc-gateway about custom names. // If RangeID is 0, returns range log history without filtering by range. int64 range_id = 1; // limit is the total number of results that are retrieved by the query. If // this is omitted or set to 0, the default maximum number of results are // returned. When set to > 0, at most only that number of results are // returned. When set to < 0, an unlimited number of results are returned. int32 limit = 2; } // RangeLogResponse contains a list of entries from the range log table. message RangeLogResponse { // To avoid porting the pretty printing of keys and descriptors to // javascript, they will be precomputed on the serverside. message PrettyInfo { string updated_desc = 1; string new_desc = 2; string added_replica = 3; string removed_replica = 4; string reason = 5; string details = 6; } message Event { cockroach.kv.kvserver.storagepb.RangeLogEvent event = 1 [(gogoproto.nullable) = false]; PrettyInfo pretty_info = 2 [(gogoproto.nullable) = false]; } reserved 1; // Previously used. repeated Event events = 2 [(gogoproto.nullable) = false]; } // QueryPlanRequest requests the query plans for a SQL string. message QueryPlanRequest { // query is the SQL query string. string query = 1; } // QueryPlanResponse contains the query plans for a SQL string (currently only // the distsql physical query plan). message QueryPlanResponse { string distsql_physical_query_plan = 1 [(gogoproto.customname) = "DistSQLPhysicalQueryPlan"]; } message DataDistributionRequest { } message DataDistributionResponse { message ZoneConfig { // target is the object the zone config applies to, e.g. "DATABASE db" or // "PARTITION north_america OF TABLE users". string target = 1; config.zonepb.ZoneConfig config = 2 [(gogoproto.nullable) = false]; reserved 3; // config_sql is the SQL representation of config. 
string config_sql = 4 [(gogoproto.customname) = "ConfigSQL"]; } message TableInfo { map<int32, int64> replica_count_by_node_id = 1 [(gogoproto.castkey) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; int64 zone_config_id = 2; google.protobuf.Timestamp dropped_at = 3 [(gogoproto.stdtime) = true]; } message DatabaseInfo { // By table name. map<string, TableInfo> table_info = 1 [(gogoproto.nullable) = false]; } // By database name. map<string, DatabaseInfo> database_info = 1 [(gogoproto.nullable) = false]; reserved 2; // By zone name. map<string, ZoneConfig> zone_configs = 3 [(gogoproto.nullable) = false]; } // MetricMetadataRequest requests metadata for all metrics. message MetricMetadataRequest { } // MetricMetadataResponse contains the metadata for all metrics. message MetricMetadataResponse { map<string, cockroach.util.metric.Metadata> metadata = 1 [(gogoproto.nullable) = false]; } message EnqueueRangeRequest { // The node on which the queue should process the range. If node_id is 0, // the request will be forwarded to all other nodes. int32 node_id = 1 [(gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // The name of the replica queue to run the range through. Matched against // each queue's name field. See the implementation of baseQueue for details. string queue = 2; // The ID of the range to run through the queue. int32 range_id = 3 [(gogoproto.customname) = "RangeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.RangeID"]; // If set, run the queue's process method without first checking whether the // replica should be processed by calling shouldQueue. bool skip_should_queue = 4; } message EnqueueRangeResponse { message Details { int32 node_id = 1 [(gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // All trace events collected while processing the range in the queue. repeated TraceEvent events = 2; // The error message from the queue's processing, if any. string error = 3; } repeated Details details = 1; } // ChartCatalogRequest requests returns a catalog of Admin UI charts. message ChartCatalogRequest { } // ChartCatalogResponse returns a catalog of Admin UI charts useful for debugging. message ChartCatalogResponse { repeated cockroach.ts.catalog.ChartSection catalog = 1 [(gogoproto.nullable) = false]; } // CARequest requests the CA cert anchoring this service. message CARequest { } // CAResponse contains a PEM encoded copy of the CA cert for this service. message CAResponse { bytes ca_cert = 1; } // CertBundleRequest requests the bundle of initialization CAs for a new node. // It provides authentication in the form of a joinToken containing a // sharedSecret. message CertBundleRequest { string token_id = 1 [(gogoproto.customname) = "TokenID"]; bytes shared_secret = 2; } // CertBundleResponse contains a copy of all CAs needed to initialize TLS for // a new node. message CertBundleResponse { bytes bundle = 1; } message RecoveryCollectReplicaInfoRequest {} // RecoveryCollectReplicaRestartNodeStream is sent by collector node to client // if it experiences a transient failure collecting data from one of the nodes. // This message instructs client to drop any data that it collected locally // for specified node as streaming for this node would be restarted. // This mechanism is needed to avoid restarting the whole collection procedure // in large cluster if one of the nodes fails transiently. 
message RecoveryCollectReplicaRestartNodeStream { int32 node_id = 1 [ (gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; } message RecoveryCollectReplicaInfoResponse { oneof info { roachpb.RangeDescriptor range_descriptor = 1; cockroach.kv.kvserver.loqrecovery.loqrecoverypb.ReplicaInfo replica_info = 2; RecoveryCollectReplicaRestartNodeStream node_stream_restarted = 3; cockroach.kv.kvserver.loqrecovery.loqrecoverypb.ClusterMetadata metadata = 4; } } message RecoveryCollectLocalReplicaInfoRequest { } message RecoveryCollectLocalReplicaInfoResponse { cockroach.kv.kvserver.loqrecovery.loqrecoverypb.ReplicaInfo replica_info = 1; } message RecoveryStagePlanRequest { // Plan is replica update plan to stage for application on next restart. Plan // could be empty in that case existing plan is removed if present. cockroach.kv.kvserver.loqrecovery.loqrecoverypb.ReplicaUpdatePlan plan = 1; // If all nodes is true, then receiver should act as a coordinator and perform // a fan-out to stage plan on all nodes of the cluster. bool all_nodes = 2; // ForcePlan tells receiver to ignore any plan already staged on the node if it // is present and replace it with new plan (including empty one). bool force_plan = 3; // ForceLocalInternalVersion tells server to update internal component of plan // version to the one of active cluster version. This option needs to be set // if target cluster is stuck in recovery where only part of nodes were // successfully migrated. bool force_local_internal_version = 4; } message RecoveryStagePlanResponse { // Errors contain error messages happened during plan staging. repeated string errors = 1; } message RecoveryNodeStatusRequest { } message RecoveryNodeStatusResponse { cockroach.kv.kvserver.loqrecovery.loqrecoverypb.NodeRecoveryStatus status = 1 [ (gogoproto.nullable) = false]; } message RecoveryVerifyRequest { // PlanID is ID of the plan to verify. bytes plan_id = 1 [ (gogoproto.customname) = "PendingPlanID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/uuid.UUID"]; // DecommissionedNodeIDs is a set of nodes that should be marked as decommissioned in // the cluster when loss of quorum recovery successfully applies. repeated int32 decommissioned_node_ids = 2 [(gogoproto.customname) = "DecommissionedNodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // MaxReportedRanges is the maximum number of failed ranges to report. // If more unhealthy ranges are found, error will be returned alongside range // to indicate that ranges were cut short. int32 max_reported_ranges = 3; } message RecoveryVerifyResponse { message UnavailableRanges { // Ranges contains descriptors of ranges that failed health check. // If there are too many ranges to report, error would contain relevant // message. repeated cockroach.kv.kvserver.loqrecovery.loqrecoverypb.RangeRecoveryStatus ranges = 1 [ (gogoproto.nullable) = false]; // Error contains an optional error if ranges validation can't complete. string error = 2; } // Statuses contain a list of recovery statuses of nodes updated during recovery. It // also contains nodes that were expected to be live (not decommissioned by recovery) // but failed to return status response. repeated cockroach.kv.kvserver.loqrecovery.loqrecoverypb.NodeRecoveryStatus statuses = 1 [ (gogoproto.nullable) = false]; // UnavailableRanges contains information about ranges that failed health check. 
UnavailableRanges unavailable_ranges = 2 [(gogoproto.nullable) = false]; // DecommissionedNodeStatuses contains a map of requested IDs with their // corresponding liveness statuses. map<int32, kv.kvserver.liveness.livenesspb.MembershipStatus> decommissioned_node_statuses = 3 [ (gogoproto.castkey) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; } // Admin is the gRPC API for the admin UI. Through grpc-gateway, we offer // REST-style HTTP endpoints that locally proxy to the gRPC endpoints. service Admin { rpc RequestCA(CARequest) returns (CAResponse) { option (google.api.http) = { get : "/_join/v1/ca" }; } rpc RequestCertBundle(CertBundleRequest) returns (CertBundleResponse) { option (google.api.http) = { get : "/_join/v1/requestbundle" }; } // URL: /_admin/v1/users rpc Users(UsersRequest) returns (UsersResponse) { option (google.api.http) = { get: "/_admin/v1/users" }; } // URL: /_admin/v1/databases rpc Databases(DatabasesRequest) returns (DatabasesResponse) { option (google.api.http) = { get: "/_admin/v1/databases" }; } // Example URL: /_admin/v1/databases/system rpc DatabaseDetails(DatabaseDetailsRequest) returns (DatabaseDetailsResponse) { option (google.api.http) = { get: "/_admin/v1/databases/{database}" }; } // Example URL: /_admin/v1/databases/system/tables/ui rpc TableDetails(TableDetailsRequest) returns (TableDetailsResponse) { option (google.api.http) = { get: "/_admin/v1/databases/{database}/tables/{table}" }; } // Example URL: /_admin/v1/databases/system/tables/ui/stats rpc TableStats(TableStatsRequest) returns (TableStatsResponse) { option (google.api.http) = { get: "/_admin/v1/databases/{database}/tables/{table}/stats" }; } // Example URL: /_admin/v1/nontablestats rpc NonTableStats(NonTableStatsRequest) returns (NonTableStatsResponse) { option (google.api.http) = { get: "/_admin/v1/nontablestats" }; } // Example URLs: // Example URLs: // - /_admin/v1/events // - /_admin/v1/events?limit=100 // - /_admin/v1/events?type=create_table // - /_admin/v1/events?type=create_table&limit=100 rpc Events(EventsRequest) returns (EventsResponse) { option (google.api.http) = { get: "/_admin/v1/events" }; } // This requires a POST. Because of the libraries we're using, the POST body // must be in the following format: // // {"key_values": // { "key1": "base64_encoded_value1"}, // ... // { "keyN": "base64_encoded_valueN"}, // } // // Note that all keys are quoted strings and that all values are base64- // encoded. // // Together, SetUIData and GetUIData provide access to a "cookie jar" for the // admin UI. The structure of the underlying data is meant to be opaque to the // server. rpc SetUIData(SetUIDataRequest) returns (SetUIDataResponse) { option (google.api.http) = { post: "/_admin/v1/uidata" body: "*" }; } // Example URLs: // - /_admin/v1/uidata?keys=MYKEY // - /_admin/v1/uidata?keys=MYKEY1&keys=MYKEY2 // // Yes, it's a little odd that the query parameter is named "keys" instead of // "key". I would've preferred that the URL parameter be named "key". However, // it's clearer for the protobuf field to be named "keys," which makes the URL // parameter "keys" as well. rpc GetUIData(GetUIDataRequest) returns (GetUIDataResponse) { option (google.api.http) = { get: "/_admin/v1/uidata" }; } // Cluster returns metadata for the cluster. rpc Cluster(ClusterRequest) returns (ClusterResponse) { option (google.api.http) = { get: "/_admin/v1/cluster" }; } // Settings returns the cluster-wide settings for the cluster. 
rpc Settings(SettingsRequest) returns (SettingsResponse) { option (google.api.http) = { get: "/_admin/v1/settings" }; } // Health returns liveness for the node target of the request. // API: PUBLIC rpc Health(HealthRequest) returns (HealthResponse) { option (google.api.http) = { get: "/_admin/v1/health" additional_bindings {get : "/health"} }; } // Liveness returns the liveness state of all nodes on the cluster. rpc Liveness(LivenessRequest) returns (LivenessResponse) { option (google.api.http) = { get: "/_admin/v1/liveness" }; } // Jobs returns the job records for all jobs of the given status and type. rpc Jobs(JobsRequest) returns (JobsResponse) { option (google.api.http) = { get: "/_admin/v1/jobs" }; } // Job returns the job record for the job of the given job_id. rpc Job(JobRequest) returns (JobResponse) { option (google.api.http) = { get: "/_admin/v1/jobs/{job_id}" }; } // Locations returns the locality location records. rpc Locations(LocationsRequest) returns (LocationsResponse) { option (google.api.http) = { get: "/_admin/v1/locations" }; } // QueryPlan returns the query plans for a SQL string. rpc QueryPlan(QueryPlanRequest) returns (QueryPlanResponse) { option (google.api.http) = { get: "/_admin/v1/queryplan" }; } // Drain puts the node into the specified drain mode(s) and optionally // instructs the process to terminate. // We do not expose this via HTTP unless we have a way to authenticate // + authorize streaming RPC connections. See #42567. rpc Drain(DrainRequest) returns (stream DrainResponse) { } // DecommissionPreCheck requests that the server execute preliminary checks // to evaluate the possibility of successfully decommissioning a given node. rpc DecommissionPreCheck(DecommissionPreCheckRequest) returns (DecommissionPreCheckResponse) { } // Decommission puts the node(s) into the specified decommissioning state. // If this ever becomes exposed via HTTP, ensure that it performs // authorization. See #42567. rpc Decommission(DecommissionRequest) returns (DecommissionStatusResponse) { } // DecommissionStatus retrieves the decommissioning status of the specified nodes. // If this ever becomes exposed via HTTP, ensure that it performs // authorization. See #42567. rpc DecommissionStatus(DecommissionStatusRequest) returns (DecommissionStatusResponse) { } // URL: /_admin/v1/rangelog // URL: /_admin/v1/rangelog?limit=100 // URL: /_admin/v1/rangelog/1 // URL: /_admin/v1/rangelog/1?limit=100 rpc RangeLog(RangeLogRequest) returns (RangeLogResponse) { option (google.api.http) = { get: "/_admin/v1/rangelog" additional_bindings { get: "/_admin/v1/rangelog/{range_id}" } }; } rpc DataDistribution(DataDistributionRequest) returns (DataDistributionResponse) { option (google.api.http) = { get: "/_admin/v1/data_distribution" }; } // URL: /_admin/v1/metricmetadata rpc AllMetricMetadata(MetricMetadataRequest) returns (MetricMetadataResponse) { option (google.api.http) = { get: "/_admin/v1/metricmetadata" }; } // URL: /_admin/v1/chartcatalog rpc ChartCatalog(ChartCatalogRequest) returns (ChartCatalogResponse) { option (google.api.http) = { get: "/_admin/v1/chartcatalog" }; } // EnqueueRange runs the specified range through the specified queue on the // range's leaseholder store, returning the detailed trace and error // information from doing so. Parameters must be provided in the body of the // POST request. 
// For example: // // { // "queue": "raftlog", // "rangeId": 10 // } rpc EnqueueRange(EnqueueRangeRequest) returns (EnqueueRangeResponse) { option (google.api.http) = { post: "/_admin/v1/enqueue_range" body : "*" }; } // SendKVBatch proxies the given BatchRequest into KV, returning the // response. It is used by the CLI `debug send-kv-batch` command. rpc SendKVBatch(roachpb.BatchRequest) returns (roachpb.BatchResponse) { } // ListTracingSnapshots retrieves the list of snapshots of the Active Spans // Registry that the node currently has in memory. A new snapshot can be // captured with TakeTracingSnapshots. rpc ListTracingSnapshots(ListTracingSnapshotsRequest) returns (ListTracingSnapshotsResponse) { option (google.api.http) = { get: "/_admin/v1/trace_snapshots" }; } // TakeTracingSnapshot captures a new snapshot of the Active Spans Registry. // The new snapshot is returned, and also made available through // ListTracingSnapshots. rpc TakeTracingSnapshot(TakeTracingSnapshotRequest) returns (TakeTracingSnapshotResponse) { option (google.api.http) = { post: "/_admin/v1/trace_snapshots" }; } // GetTracingSnapshot returns a snapshot of the tracing spans in the active // spans registry previously generated through TakeTracingSnapshots. rpc GetTracingSnapshot(GetTracingSnapshotRequest) returns (GetTracingSnapshotResponse) { option (google.api.http) = { get: "/_admin/v1/trace_snapshots/{snapshot_id}" }; } // GetTrace returns the trace with a specified ID. Depending on the request, // the trace is returned either from a snapshot that was previously taken, or // directly from the active spans registry. rpc GetTrace(GetTraceRequest) returns (GetTraceResponse) { option (google.api.http) = { post: "/_admin/v1/traces" body: "*" }; } // SetTraceRecordingType sets the recording mode of all or some of the spans // in a trace. rpc SetTraceRecordingType(SetTraceRecordingTypeRequest) returns (SetTraceRecordingTypeResponse) { option (google.api.http) = { post: "/_admin/v1/settracerecordingtype" body: "*" }; } // RecoveryCollectReplicaInfo retrieves information about: // 1. range descriptors contained in cluster meta ranges if meta ranges // are readable; // 2. replica information from all live nodes that have connection to // the target node. rpc RecoveryCollectReplicaInfo(RecoveryCollectReplicaInfoRequest) returns (stream RecoveryCollectReplicaInfoResponse) {} // RecoveryCollectLocalReplicaInfo retrieve information about all local // replicas in all stores on the node. rpc RecoveryCollectLocalReplicaInfo(RecoveryCollectLocalReplicaInfoRequest) returns (stream RecoveryCollectLocalReplicaInfoResponse) {} // RecoveryStagePlan stages recovery plan on target or all nodes in cluster // depending on request content and marks nodes deleted in the plan as // decommissioned in each node's local node tombstone storage. rpc RecoveryStagePlan(RecoveryStagePlanRequest) returns (RecoveryStagePlanResponse) {} // RecoveryNodeStatus retrieves loss of quorum recovery status of a single // node. rpc RecoveryNodeStatus(RecoveryNodeStatusRequest) returns (RecoveryNodeStatusResponse) {} // RecoveryVerify verifies that recovery plan is applied on all necessary // nodes, ranges are available and nodes removed in plan are marked as // decommissioned. rpc RecoveryVerify(RecoveryVerifyRequest) returns (RecoveryVerifyResponse) {} // ListTenants returns a list of active tenants in the cluster. 
rpc ListTenants(ListTenantsRequest) returns (ListTenantsResponse) { option (google.api.http) = { get: "/_admin/v1/tenants" }; } } message ListTenantsRequest{} message ListTenantsResponse { repeated Tenant tenants = 1; } message Tenant { roachpb.TenantID tenant_id = 1; string tenant_name = 2; string sql_addr = 3; string rpc_addr = 4; } message ListTracingSnapshotsRequest {} message ListTracingSnapshotsResponse { repeated SnapshotInfo snapshots = 1; } message SnapshotInfo { // SnapshotID identifies a specific snapshot which can be requested via a // GetTracingSnapshotRequest. Negative IDs are used for "automatic" snapshots. int64 snapshot_id = 1 [(gogoproto.customname) = "SnapshotID"]; google.protobuf.Timestamp captured_at = 2 [(gogoproto.stdtime) = true]; } message TakeTracingSnapshotRequest {} message TakeTracingSnapshotResponse { SnapshotInfo snapshot = 1; } message GetTracingSnapshotRequest { // SnapshotId indicates which snapshot is requested. ID may be negative when // requesting an "automatic" snapshot; see ListTracingSnapshotsResponse. int64 snapshot_id = 1; } message GetTracingSnapshotResponse { TracingSnapshot snapshot = 1; } // GetTrace represents the request of the GetTrace RPC. message GetTraceRequest { // If a snapshot is specified, the trace information is returned from that // snapshot. If a snapshot is not specified, information about currently // opened spans is returned from the active spans registry. int64 snapshot_id = 1 [(gogoproto.customname) = "SnapshotID"]; uint64 trace_id = 2 [(gogoproto.nullable)=false, (gogoproto.customname) = "TraceID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.TraceID"]; util.tracing.tracingpb.RecordingMode recording_type = 3; } // GetTrace represents the response to the GetTrace RPC. message GetTraceResponse { // snapshot_id identifies the snapshot that the trace was retrieved from. If // 0, the trace was not retrieved from a registry, but directly from the // active spans registry. int64 snapshot_id = 1 [(gogoproto.customname) = "SnapshotID"]; uint64 trace_id = 2 [(gogoproto.nullable)=false, (gogoproto.customname) = "TraceID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.TraceID"]; // still_exists is set if any spans from this trace are currently present in // the active spans registry. // // If snapshot_id is 0, still_exists is always set. bool still_exists = 3; // serialized_recording represents the serialization of trace recording. We // return the recording already serialized as formatted string for easy // consumption in the browser. string serialized_recording = 4; } // TracingSnapshot represents a snapshot of the active spans registry, including // all the spans that were open at the time when the snapshot was taken. message TracingSnapshot { int64 snapshot_id = 1 [(gogoproto.customname) = "SnapshotID"]; google.protobuf.Timestamp captured_at = 2 [(gogoproto.stdtime) = true]; repeated TracingSpan spans = 3; // Ideally we'd use int64 to match the goroutine_id type // but unfortunately, the way that grpc-gateway parses // these objects into Javascript results in odd encodings // of Long JS types that are difficult to interact with // as map keys. Thus, we settle for string. map<string, string> stacks = 4; } message NamedOperationMetadata { string name = 1; util.tracing.tracingpb.OperationMetadata metadata = 2 [(gogoproto.nullable) = false]; } // TracingSpan represents a span, in a form slightly processed for the use of // the tracing UI. 
message TracingSpan { string operation = 1; uint64 trace_id = 2 [(gogoproto.customname) = "TraceID"]; uint64 span_id = 3 [(gogoproto.customname) = "SpanID"]; uint64 parent_span_id = 4 [(gogoproto.customname) = "ParentSpanID"]; google.protobuf.Timestamp start = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; uint64 goroutine_id = 6 [(gogoproto.customname) = "GoroutineID"]; repeated SpanTag processed_tags = 7; // current is set if the span is still alive (i.e. still present in the active // spans registry). bool current = 8; // current_recording_mode represents the span's current recording mode. This is // not set if current == false. util.tracing.tracingpb.RecordingMode current_recording_mode = 9; repeated NamedOperationMetadata children_metadata = 10; } // SpanTag represents a tag on a tracing span, in a form processed for the use // of the tracing UI. message SpanTag { string key = 1; string val = 2; string caption = 3; string link = 4; bool hidden = 5; bool highlight = 6; bool inherit = 7; bool inherited = 8; bool propagate_up = 9; bool copied_from_child = 10; repeated ChildSpanTag children = 11; // May be empty. } message ChildSpanTag { string key = 1; string val = 2; } // SetTraceRecordingTypeRequest is the request for SetTraceRecordingType, which // sets the recording mode of all or some of the spans in a trace. message SetTraceRecordingTypeRequest { // TraceID identifies the trace to toggle the recording of. It must always be // specified. uint64 trace_id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "TraceID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.TraceID"]; // SpanID, if not zero, controls which spans in the trace get their recording // mode set. If zero, all spans in the trace are updated. If not zero, only // the respective span and its descendants get updated. uint64 span_id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "SpanID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.SpanID"]; util.tracing.tracingpb.RecordingMode recording_mode = 3; } // SetTraceRecordingTypeRequest is the response for SetTraceRecordingType. message SetTraceRecordingTypeResponse{} // FeatureFlags within this struct are used within back-end/front-end code to show/hide features. message FeatureFlags { // isObservabiliyService is true when the server is an instance of the Observability Service bool is_observability_service = 1; // CanViewKVMetricDashboards is true when the logged in user is able to view KV-level metric dashboards. bool can_view_kv_metric_dashboards = 2; // DisableKVLevelAdvancedDebug is true when the UI should remove options to certain KV-level // debug operations. This is helpful in application tenant contexsts, where these requests // can only return errors since the tenant cannot perform the operations. bool disable_kv_level_advanced_debug = 3; }
pkg/server/serverpb/admin.proto
1
https://github.com/cockroachdb/cockroach/commit/977f72e4cc24f5d14424c278d6d6921016c3fc62
[ 0.01775066740810871, 0.0010241479612886906, 0.00016002464690245688, 0.0001717632112558931, 0.0024853493086993694 ]
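The Admin service definitions above expose several RPCs as plain HTTP GET endpoints, e.g. Health at /_admin/v1/health (also aliased at /health). As a minimal illustrative sketch of probing that endpoint from a client, assuming an insecure local node listening on 127.0.0.1:8080 (both the address and the absence of TLS are assumptions, not part of the definitions above):

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	// Assumed address of a locally running node serving the HTTP API.
	const healthURL = "http://127.0.0.1:8080/_admin/v1/health"

	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get(healthURL)
	if err != nil {
		fmt.Println("health probe failed:", err)
		return
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("reading response failed:", err)
		return
	}
	// A 200 status means the node target of the request considers itself live.
	fmt.Println(resp.StatusCode, string(body))
}

A secure cluster would additionally require session or certificate authentication on this request; the sketch omits that deliberately.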
{ "id": 5, "code_window": [ "\trequire.Equal(t, serverpb.DecommissionPreCheckResponse_NodeCheckResult{\n", "\t\tNodeID: invalidDecommissioningNodeID,\n", "\t\tDecommissionReadiness: serverpb.DecommissionPreCheckResponse_UNKNOWN,\n", "\t\tLivenessStatus: livenesspb.NodeLivenessStatus_UNKNOWN,\n", "\t\tReplicaCount: 0,\n", "\t\tCheckedRanges: nil,\n", "\t}, resp.CheckedNodes[1])\n", "}\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/server/admin_test.go", "type": "replace", "edit_start_line_idx": 1360 }
load("@io_bazel_rules_go//go:def.bzl", "go_test") go_test( name = "backuprand_test", srcs = [ "backup_rand_test.go", "main_test.go", ], args = ["-test.timeout=295s"], data = ["//c-deps:libgeos"], tags = ["ccl_test"], deps = [ "//pkg/base", "//pkg/ccl", "//pkg/ccl/backupccl/backuptestutils", "//pkg/ccl/storageccl", "//pkg/internal/sqlsmith", "//pkg/security/securityassets", "//pkg/security/securitytest", "//pkg/server", "//pkg/sql/randgen", "//pkg/sql/sem/tree", "//pkg/testutils", "//pkg/testutils/fingerprintutils", "//pkg/testutils/serverutils", "//pkg/testutils/skip", "//pkg/testutils/sqlutils", "//pkg/testutils/testcluster", "//pkg/util/leaktest", "//pkg/util/log", "//pkg/util/randutil", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", ], )
pkg/ccl/backupccl/backuprand/BUILD.bazel
0
https://github.com/cockroachdb/cockroach/commit/977f72e4cc24f5d14424c278d6d6921016c3fc62
[ 0.0001758496800903231, 0.00017170370847452432, 0.00016793832764960825, 0.00017151341307908297, 0.0000032250234198727412 ]
{ "id": 5, "code_window": [ "\trequire.Equal(t, serverpb.DecommissionPreCheckResponse_NodeCheckResult{\n", "\t\tNodeID: invalidDecommissioningNodeID,\n", "\t\tDecommissionReadiness: serverpb.DecommissionPreCheckResponse_UNKNOWN,\n", "\t\tLivenessStatus: livenesspb.NodeLivenessStatus_UNKNOWN,\n", "\t\tReplicaCount: 0,\n", "\t\tCheckedRanges: nil,\n", "\t}, resp.CheckedNodes[1])\n", "}\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/server/admin_test.go", "type": "replace", "edit_start_line_idx": 1360 }
{ "annotations": { "list": [ { "builtIn": 1, "datasource": "-- Grafana --", "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", "type": "dashboard" } ] }, "editable": true, "gnetId": null, "graphTooltip": 0, "iteration": 1623960872107, "links": [], "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fieldConfig": { "defaults": {}, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 7, "w": 24, "x": 0, "y": 0 }, "hiddenSeries": false, "id": 2, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "exemplar": true, "expr": "sum(requests_slow_raft{job=\"cockroachdb\",cluster=\"$cluster\"})", "interval": "", "legendFormat": "Slow Raft Proposals", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Slow Raft Proposals", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "$$hashKey": "object:141", "format": "short", "label": "proposals", "logBase": 1, "max": null, "min": "0", "show": true }, { "$$hashKey": "object:142", "format": "short", "label": "", "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fieldConfig": { "defaults": {}, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 7, "w": 24, "x": 0, "y": 7 }, "hiddenSeries": false, "id": 4, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "exemplar": true, "expr": "sum(requests_slow_distsender{job=\"cockroachdb\",cluster=\"$cluster\"})", "interval": "", "legendFormat": "Slow DistSender RPCs", "queryType": "randomWalk", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Slow DistSender RPCs", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "$$hashKey": "object:88", "format": "short", "label": "proposals", "logBase": 1, "max": null, "min": "0", "show": true }, { "$$hashKey": "object:89", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fieldConfig": { "defaults": {}, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 7, "w": 24, "x": 0, "y": 14 }, "hiddenSeries": false, "id": 6, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, 
"lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "exemplar": true, "expr": "sum(requests_slow_lease{job=\"cockroachdb\",cluster=\"$cluster\"})", "interval": "", "legendFormat": "Slow Lease Acquisitions", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Slow Lease Acquisitions", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "$$hashKey": "object:245", "format": "short", "label": "lease acquisitions", "logBase": 1, "max": null, "min": "0", "show": true }, { "$$hashKey": "object:246", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fieldConfig": { "defaults": {}, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 7, "w": 24, "x": 0, "y": 21 }, "hiddenSeries": false, "id": 8, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "exemplar": true, "expr": "sum(requests_slow_latch{job=\"cockroachdb\",cluster=\"$cluster\"})", "interval": "", "legendFormat": "Slow Latch Acquisitions", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Slow Latch Acquisitions", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "$$hashKey": "object:298", "format": "short", "label": "latch acquisitions", "logBase": 1, "max": null, "min": "0", "show": true }, { "$$hashKey": "object:299", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } } ], "schemaVersion": 27, "style": "dark", "tags": [], "templating": { "list": [ { "current": { "selected": false, "text": "Prometheus", "value": "Prometheus" }, "description": null, "error": null, "hide": 0, "includeAll": false, "label": "datasource", "multi": false, "name": "DS_PROMETHEUS", "options": [], "query": "prometheus", "refresh": 1, "regex": "", "skipUrlSync": false, "type": "datasource" }, { "allValue": null, "current": { "selected": false, "text": "my-cockroachdb-cluster", "value": "my-cockroachdb-cluster" }, "datasource": "${DS_PROMETHEUS}", "definition": "sys_uptime{job=\"cockroachdb\"}", "description": null, "error": null, "hide": 0, "includeAll": false, "label": "cluster", "multi": false, "name": "cluster", "options": [], "query": { "query": "sys_uptime{job=\"cockroachdb\"}", "refId": "Prometheus-cluster-Variable-Query" }, "refresh": 1, "regex": "/cluster=\"([^\"]+)\"/", "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", "tags": [], "tagsQuery": "", "type": "query", "useTags": false }, { "allValue": ".*", "current": { 
"selected": false, "text": "All", "value": "$__all" }, "datasource": "${DS_PROMETHEUS}", "definition": "label_values(sys_uptime{job=\"cockroachdb\",cluster=\"$cluster\"},instance)", "description": null, "error": null, "hide": 0, "includeAll": true, "label": "Node", "multi": false, "name": "node", "options": [], "query": { "query": "label_values(sys_uptime{job=\"cockroachdb\",cluster=\"$cluster\"},instance)", "refId": "Prometheus-node-Variable-Query" }, "refresh": 1, "regex": "", "skipUrlSync": false, "sort": 3, "tagValuesQuery": "", "tags": [], "tagsQuery": "", "type": "query", "useTags": false }, { "auto": false, "auto_count": 30, "auto_min": "10s", "current": { "selected": false, "text": "30s", "value": "30s" }, "description": null, "error": null, "hide": 0, "label": "Rate Interval", "name": "rate_interval", "options": [ { "selected": true, "text": "30s", "value": "30s" }, { "selected": false, "text": "1m", "value": "1m" }, { "selected": false, "text": "5m", "value": "5m" }, { "selected": false, "text": "10m", "value": "10m" }, { "selected": false, "text": "30m", "value": "30m" }, { "selected": false, "text": "1h", "value": "1h" }, { "selected": false, "text": "6h", "value": "6h" }, { "selected": false, "text": "12h", "value": "12h" }, { "selected": false, "text": "1d", "value": "1d" } ], "query": "30s,1m,5m,10m,30m,1h,6h,12h,1d", "refresh": 2, "skipUrlSync": false, "type": "interval" } ] }, "time": { "from": "now-1h", "to": "now" }, "timepicker": {}, "timezone": "utc", "title": "CRDB Console: Slow Requests", "uid": "crdb-console-slow-request", "version": 2 }
monitoring/grafana-dashboards/slow_request.json
0
https://github.com/cockroachdb/cockroach/commit/977f72e4cc24f5d14424c278d6d6921016c3fc62
[ 0.00017718908202368766, 0.00017332912830170244, 0.00016993963799905032, 0.00017304063658230007, 0.0000018557506109573296 ]
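The dashboard above graphs the slow-request counters (requests_slow_raft, requests_slow_distsender, requests_slow_lease, requests_slow_latch) that Prometheus scrapes from each node. A rough sketch of reading the same raw series straight from a node's metrics endpoint, assuming an insecure local node exposing Prometheus-format metrics at /_status/vars (address and port are assumptions):

package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Assumed metrics endpoint of a locally running node.
	resp, err := http.Get("http://127.0.0.1:8080/_status/vars")
	if err != nil {
		fmt.Println("scrape failed:", err)
		return
	}
	defer resp.Body.Close()

	// Print only the slow-request counters charted by the dashboard.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasPrefix(line, "requests_slow_") {
			fmt.Println(line)
		}
	}
	if err := scanner.Err(); err != nil {
		fmt.Println("read failed:", err)
	}
}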
{ "id": 5, "code_window": [ "\trequire.Equal(t, serverpb.DecommissionPreCheckResponse_NodeCheckResult{\n", "\t\tNodeID: invalidDecommissioningNodeID,\n", "\t\tDecommissionReadiness: serverpb.DecommissionPreCheckResponse_UNKNOWN,\n", "\t\tLivenessStatus: livenesspb.NodeLivenessStatus_UNKNOWN,\n", "\t\tReplicaCount: 0,\n", "\t\tCheckedRanges: nil,\n", "\t}, resp.CheckedNodes[1])\n", "}\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/server/admin_test.go", "type": "replace", "edit_start_line_idx": 1360 }
// Copyright 2021 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. syntax = "proto3"; package cockroach.util.log.eventpb; option go_package = "github.com/cockroachdb/cockroach/pkg/util/log/eventpb"; import "gogoproto/gogo.proto"; import "sql/catalog/descpb/structured.proto"; import "util/log/eventpb/events.proto"; import "util/log/eventpb/sql_audit_events.proto"; import "util/log/logpb/event.proto"; // Category: Telemetry events // Channel: TELEMETRY // Notes to CockroachDB maintainers: refer to doc.go at the package // level for more details. Beware that JSON compatibility rules apply // here, not protobuf. // The comment at the top has a specific format for the doc generator. // *Really look at doc.go before modifying this file.* // SampledQuery is the SQL query event logged to the telemetry channel. It // contains common SQL event/execution details. message SampledQuery { CommonEventDetails common = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; CommonSQLEventDetails sql = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; CommonSQLExecDetails exec = 3 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; // skipped_queries indicate how many SQL statements were not // considered for sampling prior to this one. If the field is // omitted, or its value is zero, this indicates that no statement // was omitted since the last event. uint64 skipped_queries = 4 [(gogoproto.jsontag) = ",omitempty"]; // Cost of the query as estimated by the optimizer. double cost_estimate = 5 [(gogoproto.jsontag) = ",omitempty"]; // The distribution of the DistSQL query plan (local, full, or partial). string distribution = 6 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // The query's plan gist bytes as a base64 encoded string. string plan_gist = 7 [(gogoproto.jsontag) = ',omitempty', (gogoproto.moretags) = "redact:\"nonsensitive\""]; // SessionID is the ID of the session that initiated the query. string session_id = 8 [(gogoproto.customname) = "SessionID", (gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // Name of the database that initiated the query. string database = 9 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // Statement ID of the query. string statement_id = 10 [(gogoproto.customname) = "StatementID", (gogoproto.jsontag) = ',omitempty', (gogoproto.moretags) = "redact:\"nonsensitive\""]; // Transaction ID of the query. string transaction_id = 11 [(gogoproto.customname) = "TransactionID", (gogoproto.jsontag) = ',omitempty', (gogoproto.moretags) = "redact:\"nonsensitive\""]; // Statement fingerprint ID of the query. uint64 statement_fingerprint_id = 13 [(gogoproto.customname) = "StatementFingerprintID", (gogoproto.jsontag) = ',omitempty']; // Maximum number of rows scanned by a full scan, as estimated by the // optimizer. double max_full_scan_rows_estimate = 14 [(gogoproto.jsontag) = ",omitempty"]; // Total number of rows read by all scans in the query, as estimated by the // optimizer. 
double total_scan_rows_estimate = 15 [(gogoproto.jsontag) = ",omitempty"]; // The number of rows output by the query, as estimated by the optimizer. double output_rows_estimate = 16 [(gogoproto.jsontag) = ",omitempty"]; // Whether table statistics were available to the optimizer when planning the // query. bool stats_available = 17 [(gogoproto.jsontag) = ",omitempty"]; // The maximum number of nanoseconds that have passed since stats were // collected on any table scanned by this query. int64 nanos_since_stats_collected = 18 [(gogoproto.jsontag) = ",omitempty"]; // The number of bytes read from disk. int64 bytes_read = 19 [(gogoproto.jsontag) = ",omitempty"]; // The number of rows read from disk. int64 rows_read = 20 [(gogoproto.jsontag) = ",omitempty"]; // The number of rows written. int64 rows_written = 21 [(gogoproto.jsontag) = ",omitempty"]; // The number of inner joins in the query plan. int64 inner_join_count = 22 [(gogoproto.jsontag) = ",omitempty"]; // The number of left (or right) outer joins in the query plan. int64 left_outer_join_count = 23 [(gogoproto.jsontag) = ",omitempty"]; // The number of full outer joins in the query plan. int64 full_outer_join_count = 24 [(gogoproto.jsontag) = ",omitempty"]; // The number of semi joins in the query plan. int64 semi_join_count = 25 [(gogoproto.jsontag) = ",omitempty"]; // The number of anti joins in the query plan. int64 anti_join_count = 26 [(gogoproto.jsontag) = ",omitempty"]; // The number of intersect all joins in the query plan. int64 intersect_all_join_count = 27 [(gogoproto.jsontag) = ",omitempty"]; // The number of except all joins in the query plan. int64 except_all_join_count = 28 [(gogoproto.jsontag) = ",omitempty"]; // The number of hash joins in the query plan. int64 hash_join_count = 29 [(gogoproto.jsontag) = ",omitempty"]; // The number of cross joins in the query plan. int64 cross_join_count = 30 [(gogoproto.jsontag) = ",omitempty"]; // The number of index joins in the query plan. int64 index_join_count = 31 [(gogoproto.jsontag) = ",omitempty"]; // The number of lookup joins in the query plan. int64 lookup_join_count = 32 [(gogoproto.jsontag) = ",omitempty"]; // The number of merge joins in the query plan. int64 merge_join_count = 33 [(gogoproto.jsontag) = ",omitempty"]; // The number of inverted joins in the query plan. int64 inverted_join_count = 34 [(gogoproto.jsontag) = ",omitempty"]; // The number of apply joins in the query plan. int64 apply_join_count = 35 [(gogoproto.jsontag) = ",omitempty"]; // The number of zig zag joins in the query plan. int64 zig_zag_join_count = 36 [(gogoproto.jsontag) = ",omitempty"]; // The duration of time in nanoseconds that the query experienced contention. int64 contention_nanos = 37 [(gogoproto.jsontag) = ',omitempty']; // The regions of the nodes where SQL processors ran. repeated string regions = 38 [(gogoproto.jsontag) = ',omitempty', (gogoproto.moretags) = "redact:\"nonsensitive\""]; // The number of network bytes sent by nodes for this query. int64 network_bytes_sent = 39 [(gogoproto.jsontag) = ',omitempty']; // The maximum amount of memory usage by nodes for this query. int64 max_mem_usage = 40 [(gogoproto.jsontag) = ',omitempty']; // The maximum amount of disk usage by nodes for this query. int64 max_disk_usage = 41 [(gogoproto.jsontag) = ',omitempty']; // The number of bytes read at the KV layer for this query. 
int64 kv_bytes_read = 42 [(gogoproto.customname) = "KVBytesRead", (gogoproto.jsontag) = ',omitempty']; // The number of key-value pairs read at the KV layer for this query. int64 kv_pairs_read = 75 [(gogoproto.customname) = "KVPairsRead", (gogoproto.jsontag) = ',omitempty']; // The number of rows read at the KV layer for this query. int64 kv_rows_read = 43 [(gogoproto.customname) = "KVRowsRead", (gogoproto.jsontag) = ',omitempty']; // The number of network messages sent by nodes for this query. int64 network_messages = 44 [(gogoproto.jsontag) = ',omitempty']; // Generated index recommendations for this query. repeated string index_recommendations = 45 [(gogoproto.jsontag) = ',omitempty', (gogoproto.moretags) = "redact:\"nonsensitive\""]; // The number of scans in the query plan. int64 scan_count = 46 [(gogoproto.jsontag) = ",omitempty"]; // The number of scans using statistics (including forecasted statistics) in // the query plan. int64 scan_with_stats_count = 47 [(gogoproto.jsontag) = ",omitempty"]; // The number of scans using forecasted statistics in the query plan. int64 scan_with_stats_forecast_count = 48 [(gogoproto.jsontag) = ",omitempty"]; // Total number of rows read by all scans in the query, as estimated by the // optimizer without using forecasts. double total_scan_rows_without_forecasts_estimate = 49 [(gogoproto.jsontag) = ",omitempty"]; // The greatest quantity of nanoseconds that have passed since the forecast // time (or until the forecast time, if it is in the future, in which case it // will be negative) for any table with forecasted stats scanned by this // query. int64 nanos_since_stats_forecasted = 50 [(gogoproto.jsontag) = ",omitempty"]; // The list of indexes used by this query. repeated string indexes = 51 [(gogoproto.jsontag) = ',omitempty', (gogoproto.moretags) = "redact:\"nonsensitive\""]; // Collects the cumulative CPU time spent executing SQL operations in // nanoseconds. Currently, it is only collected for statements without // mutations that have a vectorized plan. int64 cpu_time_nanos = 52 [(gogoproto.jsontag) = ",omitempty"]; // The number of grpc calls done to get data form KV nodes int64 kv_grpc_calls = 53 [(gogoproto.jsontag) = ",omitempty"]; // Cumulated time spent waiting for a KV request. This includes disk IO time // and potentially network time (if any of the keys are not local). int64 kv_time_nanos = 54 [(gogoproto.jsontag) = ",omitempty"]; // The time to service the query, from start of parse to end of execute. int64 service_latency_nanos = 56 [(gogoproto.jsontag) = ",omitempty"]; // The difference between service latency and the sum of parse latency + plan latency + run latency . int64 overhead_latency_nanos = 57 [(gogoproto.jsontag) = ",omitempty"]; // The time to run the query and fetch or compute the result rows. int64 run_latency_nanos = 58 [(gogoproto.jsontag) = ",omitempty"]; // The time to transform the AST into a logical query plan. int64 plan_latency_nanos = 59 [(gogoproto.jsontag) = ",omitempty"]; // The time between statement executions in a transaction int64 idle_latency_nanos = 60 [(gogoproto.jsontag) = ",omitempty"]; // The time to transform the SQL string into an abstract syntax tree (AST). int64 parse_latency_nanos = 61 [(gogoproto.jsontag) = ",omitempty"]; // StepCount collects the number of times the iterator moved forward or backward over the // DB's underlying storage keyspace. // For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. 
int64 mvcc_step_count = 62 [(gogoproto.jsontag) = ",omitempty"]; // StepCountInternal collects the number of times the iterator moved forward or backward // over LSM internal keys. // For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. int64 mvcc_step_count_internal = 63 [(gogoproto.jsontag) = ",omitempty"]; // SeekCount collects the number of times the iterator moved to a specific key/value pair // in the DB's underlying storage keyspace. // For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. int64 mvcc_seek_count = 64 [(gogoproto.jsontag) = ",omitempty"]; // SeekCountInternal collects the number of times the iterator moved to a specific LSM // internal key. // For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. int64 mvcc_seek_count_internal = 65 [(gogoproto.jsontag) = ",omitempty"]; // BlockBytes collects the bytes in the loaded SSTable data blocks. // For details, see pebble.InternalIteratorStats. int64 mvcc_block_bytes = 66 [(gogoproto.jsontag) = ",omitempty"]; // BlockBytesInCache collects the subset of BlockBytes in the block cache. // For details, see pebble.InternalIteratorStats. int64 mvcc_block_bytes_in_cache = 67 [(gogoproto.jsontag) = ",omitempty"]; // KeyBytes collects the bytes in keys that were iterated over. // For details, see pebble.InternalIteratorStats. int64 mvcc_key_bytes = 68 [(gogoproto.jsontag) = ",omitempty"]; // ValueBytes collects the bytes in values that were iterated over. // For details, see pebble.InternalIteratorStats. int64 mvcc_value_bytes = 69 [(gogoproto.jsontag) = ",omitempty"]; // PointCount collects the count of point keys iterated over. // For details, see pebble.InternalIteratorStats. int64 mvcc_point_count = 70 [(gogoproto.jsontag) = ",omitempty"]; // PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that // were covered by range tombstones. // For details, see pebble.InternalIteratorStats and // https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/mvcc-range-tombstones.md. int64 mvcc_points_covered_by_range_tombstones = 71 [(gogoproto.jsontag) = ",omitempty"]; // RangeKeyCount collects the count of range keys encountered during iteration. // For details, see pebble.RangeKeyIteratorStats and // https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/mvcc-range-tombstones.md. int64 mvcc_range_key_count = 72 [(gogoproto.jsontag) = ",omitempty"]; // RangeKeyContainedPoints collects the count of point keys encountered within the bounds of // a range key. // For details, see pebble.RangeKeyIteratorStats and // https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/mvcc-range-tombstones.md. int64 mvcc_range_key_contained_points = 73 [(gogoproto.jsontag) = ",omitempty"]; // RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that // were skipped during iteration due to range-key masking. // For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and // https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/mvcc-range-tombstones.md. int64 mvcc_range_key_skipped_points = 74 [(gogoproto.jsontag) = ",omitempty"]; // SchemaChangerMode is the mode that was used to execute the schema change, // if any. string schema_changer_mode = 76 [(gogoproto.jsontag) = ',omitempty', (gogoproto.moretags) = "redact:\"nonsensitive\""]; reserved 12; // Next available ID: 77. 
} // CapturedIndexUsageStats message CapturedIndexUsageStats { CommonEventDetails common = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; // Couldn't use roachpb.CollectedIndexUsageStatistics due to circular dependency. // TotalReadCount is the number of times the index has been read. uint64 total_read_count = 2; // LastRead is the timestamp at which the index was last read. string last_read = 3 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // TableID is the ID of the table on which the index was created. This is same as // descpb.TableID and is unique within the cluster. uint32 table_id = 4 [(gogoproto.customname) = "TableID"]; // IndexID is the ID of the index within the scope of the given table. uint32 index_id = 5 [(gogoproto.customname) = "IndexID"]; // DatabaseName is the name of the database in which the index was created. string database_name = 6 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // TableName is the name of the table on which the index was created. string table_name = 7 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // IndexName is the name of the index within the scope of the given table. string index_name = 8 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // IndexType is the type of the index. Index types include "primary" and "secondary". string index_type = 9 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // IsUnique indicates if the index has a UNIQUE constraint. bool is_unique = 10 [(gogoproto.jsontag) = ",omitempty"]; // IsInverted indicates if the index is an inverted index. bool is_inverted = 11 [(gogoproto.jsontag) = ",omitempty"]; // CreatedAt is the timestamp at which the index was created. string created_at = 12 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // SchemaName is the name of the schema in which the index was created. string schema_name = 13 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; } // CreateChangefeed is an event for any CREATE CHANGEFEED query that // successfully starts running. Failed CREATE statements will show up as // ChangefeedFailed events. message CreateChangefeed { CommonChangefeedEventDetails common = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; bool transformation = 2 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; } // ChangefeedFailed is an event for any Changefeed failure since the plan hook // was triggered. message ChangefeedFailed { CommonChangefeedEventDetails common = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; // The reason / environment with which the changefeed failed (ex: // connection_closed, changefeed_behind) string failure_type = 2 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; } // ChangefeedEmittedBytes is an event representing the bytes emitted by a changefeed over an interval. message ChangefeedEmittedBytes { CommonChangefeedEventDetails common = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; // The job id for enterprise changefeeds. int64 job_id = 2 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // The number of bytes emitted. 
int64 emitted_bytes = 3 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // The time period in nanoseconds between emitting telemetry events of this type (per-aggregator). int64 logging_interval = 4 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // Flag to indicate that the changefeed is closing. bool closing = 5 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; } // RecoveryEvent is an event that is logged on every invocation of BACKUP, // RESTORE, and on every BACKUP schedule creation, with the appropriate subset // of fields populated depending on the type of event. This event is is also // logged whenever a BACKUP and RESTORE job completes or fails. message RecoveryEvent { // Common fields for all events. CommonEventDetails common = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; // RecoveryType is the type of recovery described by this event, which is one of // - backup // - scheduled_backup // - create_schedule // - restore // // It can also be a job event corresponding to the recovery, which is one of // - backup_job // - scheduled_backup_job // - restore_job string recovery_type = 2 [(gogoproto.jsontag) = ",omitempty", (gogoproto.customtype) = "RecoveryEventType", (gogoproto.nullable) = false, (gogoproto.moretags) = "redact:\"nonsensitive\""]; // Fields that are common to BACKUP and RESTORE statements. // TargetScope is the largest scope of the targets that the user is backing up // or restoring based on the following order: // table < schema < database < full cluster. string target_scope = 3 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // IsMultiregionTarget is true if any of the targets contain objects with // multi-region primitives. bool is_multiregion_target = 4 [(gogoproto.jsontag) = ",omitempty"]; // TargetCount is the number of targets the in the BACKUP/RESTORE. uint32 target_count = 5 [(gogoproto.jsontag) = ",omitempty"]; // DestinationSubdirType is // - latest: if using the latest subdir // - standard: if using a date-based subdir // - custom: if using a custom subdir that's not date-based string destination_subdir_type = 6 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // DestinationStorageTypes are the types of storage that the user is backing // up to or restoring from. repeated string destination_storage_types = 7 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // DestinationAuthTypes are the types of authentication methods that the user // is using to access the destination storage. repeated string destination_auth_types = 8 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // IsLocalityAware indicates if the BACKUP or RESTORE is locality aware. bool is_locality_aware = 9 [(gogoproto.jsontag) = ",omitempty"]; // AsOfInterval is the time interval in nanoseconds between the statement // timestamp and the timestamp resolved by the AS OF SYSTEM TIME expression. // The interval is expressed in nanoseconds. int64 as_of_interval = 10 [(gogoproto.jsontag) = ",omitempty"]; // WithRevisionHistory is true if the BACKUP includes revision history. bool with_revision_history = 11 [(gogoproto.jsontag) = ",omitempty"]; // HasEncryptionPassphrase is true if the user provided an encryption // passphrase to encrypt/decrypt their backup. 
bool has_encryption_passphrase = 13 [(gogoproto.jsontag) = ",omitempty"]; // KMSType is the type of KMS the user is using to encrypt/decrypt their // backup. string kms_type = 14 [(gogoproto.jsontag) = ",omitempty", (gogoproto.customname) = "KMSType", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // KMSCount is the number of KMS the user is using. uint32 kms_count = 15 [(gogoproto.jsontag) = ",omitempty", (gogoproto.customname) = "KMSCount"]; // Options contain all the names of the options specified by the user in the // BACKUP or RESTORE statement. For options that are accompanied by a value, // only those with non-empty values will be present. // // It's important to note that there are no option values anywhere in the // event payload. Future changes to telemetry should refrain from adding // values to the payload unless they are properly redacted. repeated string options = 16 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // DebugPauseOn is the type of event that the restore should pause on for // debugging purposes. Currently only "error" is supported. string debug_pause_on = 17 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // Fields that are common to BACKUP and RESTORE jobs. // JobID is the ID of the BACKUP/RESTORE job. uint64 job_id = 18 [(gogoproto.jsontag) = ",omitempty", (gogoproto.customname) = "JobID"]; // ResultStatus indicates whether the job succeeded or failed. string result_status = 20 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // ErrorText is the text of the error that caused the job to fail. string error_text = 21 [(gogoproto.jsontag) = ",omitempty", (gogoproto.customtype) = "github.com/cockroachdb/redact.RedactableString", (gogoproto.nullable) = false, (gogoproto.moretags) = "redact:\"mixed\""]; // Fields only for BACKUP schedules. // RecurringCron is the crontab for the incremental backup. string recurring_cron = 24 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // FullBackupCron is the crontab for the full backup. string full_backup_cron = 25 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // CustomFirstRunTime is the timestamp for the user configured first run time. // Expressed as nanoseconds since the Unix epoch. int64 custom_first_run_time = 26 [(gogoproto.jsontag) = ",omitempty"]; // OnExecutionFailure describes the desired behavior if the schedule fails to // execute. string on_execution_failure = 27 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // OnPreviousRunning describes the desired behavior if the previously // scheduled BACKUP is still running. string on_previous_running = 28 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // IgnoreExistingBackup is true iff the BACKUP schedule should still be // created even if a backup is already present in its destination. bool ignore_existing_backup = 29 [(gogoproto.jsontag) = ",omitempty"]; // The application name for the session where recovery event was created. string application_name = 30 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // NumRows is the number of rows successfully imported, backed up or restored. 
int64 num_rows = 31 [(gogoproto.jsontag) = ",omitempty"]; } // SchemaDescriptor is an event for schema telemetry, whose purpose is // to take periodic snapshots of the cluster's SQL schema and publish them in // the telemetry log channel. For all intents and purposes, the data in such a // snapshot can be thought of the outer join of certain system tables: // namespace, descriptor, and at some point perhaps zones, etc. // // Snapshots are too large to conveniently be published as a single log event, // so instead they're broken down into SchemaDescriptor events which // contain the data in one record of this outer join projection. These events // are prefixed by a header (a SchemaSnapshotMetadata event). message SchemaDescriptor { CommonEventDetails common = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; // SnapshotID is the unique identifier of the snapshot that this event // is part of. string snapshot_id = 2 [(gogoproto.customname) = "SnapshotID", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // ParentDatabaseID matches the same key column in system.namespace. uint32 parent_database_id = 3 [(gogoproto.customname) = "ParentDatabaseID", (gogoproto.jsontag) = ",omitempty"]; // ParentSchemaID matches the same key column in system.namespace. uint32 parent_schema_id = 4 [(gogoproto.customname) = "ParentSchemaID", (gogoproto.jsontag) = ",omitempty"]; // Name matches the same key column in system.namespace. string name = 5 [(gogoproto.moretags) = "redact:\"nonsensitive\""]; // DescID matches the 'id' column in system.namespace and system.descriptor. uint32 desc_id = 6 [(gogoproto.customname) = "DescID", (gogoproto.jsontag) = ",omitempty"]; // Desc matches the 'descriptor' column in system.descriptor. // Some contents of the descriptor may be redacted to prevent leaking PII. cockroach.sql.sqlbase.Descriptor desc = 7 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // TODO(postamar): include system.zones data. } // SchemaSnapshotMetadata is an event describing a schema snapshot, which // is a set of SchemaDescriptor messages sharing the same SnapshotID. message SchemaSnapshotMetadata { CommonEventDetails common = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; // SnapshotID is the unique identifier of this snapshot. string snapshot_id = 2 [(gogoproto.customname) = "SnapshotID", (gogoproto.moretags) = "redact:\"nonsensitive\""]; // NumRecords is how many SchemaDescriptor events are in the // snapshot. uint32 num_records = 3; // AsOfTimestamp is when the snapshot was taken. // This is equivalent to the timestamp given in the AS OF SYSTEM TIME // clause when querying the namespace and descriptor tables in the // system database. // Expressed as nanoseconds since the Unix epoch. int64 as_of_timestamp = 4 [(gogoproto.jsontag) = ",omitempty"]; // Errors records any errors encountered when post-processing this snapshot, // which includes the redaction of any potential PII. repeated string errors = 5 [(gogoproto.jsontag) = ",omitempty"]; } // HotRangesStats message HotRangesStats { CommonEventDetails common = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; int64 range_id = 2 [(gogoproto.jsontag) = ",omitempty", (gogoproto.customname) = "RangeID"]; double qps = 3 [(gogoproto.jsontag) = ",includeempty"]; // DatabaseName is the name of the database in which the index was created. 
string database_name = 4 [(gogoproto.jsontag) = ",omitempty"]; // TableName is the name of the table on which the index was created. string table_name = 5 [(gogoproto.jsontag) = ",omitempty"]; // IndexName is the name of the index within the scope of the given table. string index_name = 6 [(gogoproto.jsontag) = ",omitempty"]; // SchemaName is the name of the schema in which the index was created. string schema_name = 7 [(gogoproto.jsontag) = ",omitempty"]; // LeaseholderNodeID indicates the Node ID that is the current leaseholder for the given range. int32 leaseholder_node_id = 8 [(gogoproto.customname) = "LeaseholderNodeID", (gogoproto.jsontag) = ",omitempty"]; // Writes per second is the recent number of keys written per second on // this range. double writes_per_second = 9 [(gogoproto.jsontag) = ",omitempty"]; // Reads per second is the recent number of keys read per second on // this range. double reads_per_second = 10 [(gogoproto.jsontag) = ",omitempty"]; // Write bytes per second is the recent number of bytes written per second on // this range. double write_bytes_per_second = 11 [(gogoproto.jsontag) = ",omitempty"]; // Read bytes per second is the recent number of bytes read per second on // this range. double read_bytes_per_second = 12 [(gogoproto.jsontag) = ",omitempty"]; // CPU time per second is the recent cpu usage in nanoseconds of this range. double cpu_time_per_second = 13 [(gogoproto.customname) = "CPUTimePerSecond", (gogoproto.jsontag) = ",omitempty"]; }
pkg/util/log/eventpb/telemetry.proto
0
https://github.com/cockroachdb/cockroach/commit/977f72e4cc24f5d14424c278d6d6921016c3fc62
[ 0.00017817545449361205, 0.00017000120715238154, 0.00016398815205320716, 0.0001701976580079645, 0.0000034954196053149644 ]
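The telemetry events above are logged as JSON on the TELEMETRY channel, and most fields carry ",omitempty" so zero values are simply absent from the payload. A small illustrative sketch of decoding a hand-picked subset of SampledQuery fields from such an entry; the struct below is written by hand for this example (field names follow the Go names implied by the proto above) and is not the generated type:

package main

import (
	"encoding/json"
	"fmt"
)

// Hand-written subset of the SampledQuery fields defined above, for
// illustration only; the real type is generated from the proto file.
type sampledQuery struct {
	SkippedQueries uint64  `json:"SkippedQueries,omitempty"`
	CostEstimate   float64 `json:"CostEstimate,omitempty"`
	Distribution   string  `json:"Distribution,omitempty"`
	Database       string  `json:"Database,omitempty"`
	PlanGist       string  `json:"PlanGist,omitempty"`
}

func main() {
	// Example payload shaped like a telemetry-channel entry; fields tagged
	// ",omitempty" simply never appear when they are zero-valued.
	raw := `{"CostEstimate": 12.5, "Distribution": "local", "Database": "defaultdb"}`

	var q sampledQuery
	if err := json.Unmarshal([]byte(raw), &q); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("%+v\n", q)
}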
{ "id": 6, "code_window": [ " (gogoproto.casttype) = \"github.com/cockroachdb/cockroach/pkg/roachpb.NodeID\"];\n", "\n", " // The node's decommission readiness status.\n", " NodeReadiness decommission_readiness = 2;\n", "\n", " // The liveness status of the given node.\n", " kv.kvserver.liveness.livenesspb.NodeLivenessStatus liveness_status = 3;\n", "\n", " // The number of total replicas on the node, computed by scanning range\n", " // descriptors.\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep" ], "after_edit": [ " reserved 3; // Previously used\n" ], "file_path": "pkg/server/serverpb/admin.proto", "type": "replace", "edit_start_line_idx": 534 }
// Copyright 2016 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. syntax = "proto3"; package cockroach.server.serverpb; option go_package = "github.com/cockroachdb/cockroach/pkg/server/serverpb"; import "config/zonepb/zone.proto"; import "util/tracing/tracingpb/tracing.proto"; import "jobs/jobspb/jobs.proto"; import "server/serverpb/status.proto"; import "storage/enginepb/mvcc.proto"; import "kv/kvserver/liveness/livenesspb/liveness.proto"; import "kv/kvserver/loqrecovery/loqrecoverypb/recovery.proto"; import "kv/kvserver/kvserverpb/range_log.proto"; import "kv/kvpb/api.proto"; import "roachpb/metadata.proto"; import "roachpb/data.proto"; import "ts/catalog/chart_catalog.proto"; import "util/metric/metric.proto"; import "util/tracing/tracingpb/recorded_span.proto"; import "gogoproto/gogo.proto"; import "google/api/annotations.proto"; import "google/protobuf/timestamp.proto"; // ZoneConfigurationLevel indicates, for objects with a Zone Configuration, // the object level at which the configuration is defined. This is needed // because objects without a specifically indicated Zone Configuration will // inherit the configuration of their "parent". enum ZoneConfigurationLevel { UNKNOWN = 0; // CLUSTER indicates that this object uses the cluster default Zone Configuration. CLUSTER = 1; // DATABASE indicates that this object uses a database-level Zone Configuration. DATABASE = 2; // TABLE indicates that this object uses a table-level Zone Configuration. TABLE = 3; } // DatabasesRequest requests a list of databases. message DatabasesRequest { } // DatabasesResponse contains a list of databases. message DatabasesResponse { repeated string databases = 1; } // DatabaseDetailsRequest requests detailed information about the specified // database message DatabaseDetailsRequest { // database is the name of the database we are querying. string database = 1; // Setting this flag includes a computationally-expensive stats field // in the response. bool include_stats = 2; } // DatabaseDetailsResponse contains grant information, table names, // zone configuration, and size statistics for a database. message DatabaseDetailsResponse { message Grant { // user is the user that this grant applies to. string user = 1; // privileges are the abilities this grant gives to the user. repeated string privileges = 2; } message Stats { // A table which exists in the database, but for which we could not load stats // during this request. message MissingTable { // The name of the table for which we could not load stats. string name = 1; // The error message that resulted when the request for this table failed. string error_message = 2; } // A list of tables that exist in the database, but for which stats could // not be loaded due to failures during this request. repeated MissingTable missing_tables = 1; // The number of ranges, as determined from a query of range meta keys, // across all tables. int64 range_count = 2; // An approximation of the disk space (in bytes) used for all replicas // of all tables across the cluster. uint64 approximate_disk_bytes = 3; // node_ids is the ordered list of node ids on which data is stored. 
repeated int32 node_ids = 4 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; int32 num_index_recommendations = 5; } // grants are the results of SHOW GRANTS for this database. repeated Grant grants = 1 [(gogoproto.nullable) = false]; // table_names contains the names of all tables in this database. Note that // all responses will be schema-qualified (schema.table) and that every schema // or table that contains a "sql unsafe character" such as uppercase letters // or dots will be surrounded with double quotes, such as "naughty schema".table. repeated string table_names = 2; // descriptor_id is an identifier used to uniquely identify this database. int64 descriptor_id = 3 [(gogoproto.customname) = "DescriptorID"]; // The zone configuration in effect for this database. cockroach.config.zonepb.ZoneConfig zone_config = 4 [(gogoproto.nullable) = false]; // The level at which this object's zone configuration is set. ZoneConfigurationLevel zone_config_level = 5; // Size information about the database, present only when explicitly requested. Stats stats = 6; } // TableDetailsRequest is a request for detailed information about a table. message TableDetailsRequest { // database is the name of the database that contains the table we're // interested in. string database = 1; // table is the name of the table that we're querying. Table may be // schema-qualified (schema.table) and each name component that contains // sql unsafe characters such as . or uppercase letters must be surrounded // in double quotes like "naughty schema".table. string table = 2; } // TableDetailsResponse contains grants, column names, and indexes for // a table. message TableDetailsResponse { // Grant is an entry from SHOW GRANTS. message Grant { // user is the user that this grant applies to. string user = 1; // privileges are the abilities this grant gives to the user. repeated string privileges = 2; } message Column { // name is the name of the column. string name = 1; // type is the SQL type (INT, STRING, etc.) of this column. string type = 2; // nullable is whether this column can contain NULL. bool nullable = 3; // default_value is the default value of this column. string default_value = 4; // generation_expression is the generator expression if the column is computed. string generation_expression = 5; // hidden is whether this column is hidden. bool hidden = 6; } message Index { // name is the name of this index. string name = 1; // unique is whether this a unique index (i.e. CREATE UNIQUE INDEX). bool unique = 2; // seq is an internal variable that's passed along. int64 seq = 3; // column is the column that this index indexes. string column = 4; // direction is either "ASC" (ascending) or "DESC" (descending). string direction = 5; // storing is an internal variable that's passed along. bool storing = 6; // implicit is an internal variable that's passed along. bool implicit = 7; } repeated Grant grants = 1 [(gogoproto.nullable) = false]; repeated Column columns = 2 [(gogoproto.nullable) = false]; repeated Index indexes = 3 [(gogoproto.nullable) = false]; // range_count is the size of the table in ranges. This provides a rough // estimate of the storage requirements for the table. // TODO(mrtracy): The TableStats method also returns a range_count field which // is more accurate than this one; TableDetails calculates this number using // a potentially faster method that is subject to cache staleness. 
We should // consider removing or renaming this field to reflect that difference. See // GitHub issue #5435 for more information. int64 range_count = 4; // create_table_statement is the output of "SHOW CREATE" for this table; // it is a SQL statement that would re-create the table's current schema if // executed. string create_table_statement = 5; // The zone configuration in effect for this table. cockroach.config.zonepb.ZoneConfig zone_config = 6 [(gogoproto.nullable) = false]; // The level at which this object's zone configuration is set. ZoneConfigurationLevel zone_config_level = 7; // descriptor_id is an identifier used to uniquely identify this table. int64 descriptor_id = 8 [(gogoproto.customname) = "DescriptorID"]; // configure_zone_statement is the output of "SHOW ZONE CONFIGURATION FOR TABLE" // for this table. It is a SQL statement that would re-configure the table's current // zone if executed. string configure_zone_statement = 9; // stats_last_created_at is the time at which statistics were last created. google.protobuf.Timestamp stats_last_created_at = 10 [(gogoproto.stdtime) = true]; // has_index_recommendations notifies if the there are index recommendations // on this table. bool has_index_recommendations = 11; // data_total_bytes is the size in bytes of live and non-live data on the table. int64 data_total_bytes = 12; // data_live_bytes is the size in bytes of live (non MVCC) data on the table. int64 data_live_bytes = 13; // data_live_percentage is the percentage of live (non MVCC) data on the table. float data_live_percentage = 14; } // TableStatsRequest is a request for detailed, computationally expensive // information about a table. message TableStatsRequest { // database is the name of the database that contains the table we're // interested in. string database = 1; // table is the name of the table that we're querying. Table may be // schema-qualified (schema.table) and each name component that contains // sql unsafe characters such as . or uppercase letters must be surrounded // in double quotes like "naughty schema".table. string table = 2; } // TableStatsResponse contains detailed, computationally expensive information // about a table. message TableStatsResponse { // range_count is the number of ranges, as determined from a query of range // meta keys. int64 range_count = 1; // replica_count is the number of replicas of any range of this table, as // found by querying nodes which are known to have replicas. When compared // with range_count, this can be used to estimate the current replication // factor of the table. int64 replica_count = 2; // node_count is the number of nodes which contain data for this table, // according to a query of range meta keys. int64 node_count = 3; // stats is the summation of MVCCStats for all replicas of this table // across the cluster. cockroach.storage.enginepb.MVCCStats stats = 4 [(gogoproto.nullable) = false]; // approximate_disk_bytes is an approximation of the disk space (in bytes) // used for all replicas of this table across the cluster. uint64 approximate_disk_bytes = 6; // MissingNode represents information on a node which should contain data // for this table, but could not be contacted during this request. message MissingNode { // The ID of the missing node. string node_id = 1 [(gogoproto.customname) = "NodeID"]; // The error message that resulted when the query sent to this node failed. 
string error_message = 2; } // A list of nodes which should contain data for this table (according to // cluster metadata), but could not be contacted during this request. repeated MissingNode missing_nodes = 5 [(gogoproto.nullable) = false]; // node_ids is the ordered list of node ids on which the table data is stored. repeated int32 node_ids = 7 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; } // NonTableStatsRequest requests statistics on cluster data ranges that do not // belong to SQL tables. message NonTableStatsRequest { } // NonTableStatsResponse returns statistics on various cluster data ranges // that do not belong to SQL tables. The statistics for each range are returned // as a TableStatsResponse. message NonTableStatsResponse { // Information on time series ranges. TableStatsResponse time_series_stats = 1; // Information for remaining (non-table, non-time-series) ranges. TableStatsResponse internal_use_stats = 2; } // UsersRequest requests a list of users. message UsersRequest { } // UsersResponse returns a list of users. message UsersResponse { // User is a CockroachDB user. message User { string username = 1; } // usernames is a list of users for the CockroachDB cluster. repeated User users = 1 [(gogoproto.nullable) = false]; } // EventsRequest is a request for event log entries, optionally filtered // by the specified event type. message EventsRequest { string type = 1; reserved 2; // limit is the total number of results that are retrieved by the query. If // this is omitted or set to 0, the default maximum number of results are // returned. When set to > 0, at most only that number of results are // returned. When set to < 0, an unlimited number of results are returned. int32 limit = 3; // unredacted_events indicates that the values in the events should // not be redacted. The default is to redact, so that older versions // of `cockroach zip` do not see un-redacted values by default. // For good security, this field is only obeyed by the server after // checking that the client of the RPC is an admin user. bool unredacted_events = 4; } // EventsResponse contains a set of event log entries. This is always limited // to the latest N entries (N is enforced in the associated endpoint). message EventsResponse { message Event { // timestamp is the time at which the event occurred. google.protobuf.Timestamp timestamp = 1 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; // event_type is the type of the event (e.g. "create_table", "drop_table". string event_type = 2; reserved 3; // reporting_id is the reporting ID for this event. int64 reporting_id = 4 [(gogoproto.customname) = "ReportingID"]; // info has more detailed information for the event. The contents vary // depending on the event. string info = 5; // unique_id is a unique identifier for this event. bytes unique_id = 6 [(gogoproto.customname) = "UniqueID"]; } repeated Event events = 1 [(gogoproto.nullable) = false]; } // SetUIDataRequest stores the given key/value pairs in the system.ui table. message SetUIDataRequest { // key_values is a map of keys to bytes values. Each key will be stored // with its corresponding value as a separate row in system.ui. map<string, bytes> key_values = 1; } // SetUIDataResponse is currently an empty response. message SetUIDataResponse { } // GETUIDataRequest requests the values for the given keys from the system.ui // table. 
message GetUIDataRequest { repeated string keys = 1; } // GetUIDataResponse contains the requested values and the times at which // the values were last updated. message GetUIDataResponse { message Value { // value is the value of the requested key. bytes value = 1; // last_updated is the time at which the value was last updated. google.protobuf.Timestamp last_updated = 2 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; } // key_values maps keys to their retrieved values. If this doesn't contain a // a requested key, that key was not found. map<string, Value> key_values = 1 [(gogoproto.nullable) = false]; } // ClusterRequest requests metadata for the cluster. message ClusterRequest { } // ClusterResponse contains metadata for the cluster. message ClusterResponse { // The unique ID used to identify this cluster. string cluster_id = 1 [(gogoproto.customname) = "ClusterID"]; // True if diagnostics reporting is enabled for the cluster. bool reporting_enabled = 2; // True if enterprise features are enabled for the cluster. bool enterprise_enabled = 3; } // DrainRequest instructs the receiving node to drain. message DrainRequest { reserved 1; reserved 2; // When true, terminates the process after the server has started draining. // Setting both shutdown and do_drain to false causes // the request to only operate as a probe. // Setting do_drain to false and shutdown to true causes // the server to shut down immediately without // first draining. bool shutdown = 3; // When true, perform the drain phase. See the comment above on // shutdown for an explanation of the interaction between the two. // do_drain is also implied by a non-nil deprecated_probe_indicator. bool do_drain = 4; // node_id is a string so that "local" can be used to specify that no // forwarding is necessary. // For compatibility with v21.2 nodes, an empty node_id is // interpreted as "local". This behavior might be removed // in subsequent versions. string node_id = 5; // When true, more detailed information is logged during the range lease drain phase. bool verbose = 6; } // DrainResponse is the response to a successful DrainRequest. message DrainResponse { // is_draining is set to true iff the server is currently draining. // This is set to true in response to a request where skip_drain // is false; but it can also be set to true in response // to a probe request (!shutdown && skip_drain) if another // drain request has been issued prior or asynchronously. bool is_draining = 2; // drain_remaining_indicator measures, at the time of starting to // process the corresponding drain request, how many actions to // fully drain the node were deemed to be necessary. Some, but not // all, of these actions may already have been carried out by the // time this indicator is received by the client. The client should // issue requests until this indicator first reaches zero, which // indicates that the node is fully drained. // // The API contract is the following: // // - upon a first Drain call with do_drain set, the remaining // indicator will have some value >=0. If >0, it indicates that // drain is pushing state away from the node. (What this state // precisely means is left unspecified for this field. See below // for details.) // // - upon a subsequent Drain call with do_drain set, the remaining // indicator should have reduced in value. 
The drain process does best // effort at shedding state away from the node; hopefully, all the // state is shed away upon the first call and the progress // indicator can be zero as early as the second call. However, // if there was a lot of state to shed, it is possible for // timeout to be encountered upon the first call. In that case, the // second call will do some more work and return a non-zero value // as well. // // - eventually, in an iterated sequence of DrainRequests with // do_drain set, the remaining indicator should reduce to zero. At // that point the client can conclude that no state is left to // shed, and it should be safe to shut down the node with a // DrainRequest with shutdown = true. // // Note that this field is left unpopulated (and thus remains at // zero) for pre-20.1 nodes. A client can recognize this by // observing is_draining to be false after a request with do_drain = // true: the is_draining field is also left unpopulated by pre-20.1 // nodes. uint64 drain_remaining_indicator = 3; // drain_remaining_description is an informal (= not // machine-parsable) string that explains the progress of the drain // process to human eyes. This is intended for use mainly for // troubleshooting. // // The field is only populated if do_drain is true in the // request. string drain_remaining_description = 4; reserved 1; } // DecommissionPreCheckRequest requests that preliminary checks be run to // ensure that the specified node(s) can be decommissioned successfully. message DecommissionPreCheckRequest { repeated int32 node_ids = 1 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // The maximum number of ranges for which to report errors. int32 num_replica_report = 2; // If true, all ranges on the checked nodes must only need replacement or // removal for decommissioning. bool strict_readiness = 3; // If true, collect traces for each range checked. // Requires num_replica_report > 0. bool collect_traces = 4; } // DecommissionPreCheckResponse returns the number of replicas that encountered // errors when running preliminary decommissioning checks, as well as the // associated error messages and traces, for each node. message DecommissionPreCheckResponse { enum NodeReadiness { option (gogoproto.goproto_enum_stringer) = false; UNKNOWN = 0; READY = 1; ALREADY_DECOMMISSIONED = 2; ALLOCATION_ERRORS = 3; } // The result of checking a range's readiness for the decommission. message RangeCheckResult { int32 range_id = 1 [ (gogoproto.customname) = "RangeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.RangeID"]; // The action determined by the allocator that is needed for the range. string action = 2; // All trace events collected while checking the range. repeated TraceEvent events = 3; // The error message from the allocator's processing, if any. string error = 4; } // The result of checking a single node's readiness for decommission. message NodeCheckResult { int32 node_id = 1 [ (gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // The node's decommission readiness status. NodeReadiness decommission_readiness = 2; // The liveness status of the given node. kv.kvserver.liveness.livenesspb.NodeLivenessStatus liveness_status = 3; // The number of total replicas on the node, computed by scanning range // descriptors. 
int64 replica_count = 4; // The details and recorded traces from preprocessing each range with a // replica on the checked nodes that resulted in error, up to the maximum // specified in the request. repeated RangeCheckResult checked_ranges = 5 [(gogoproto.nullable) = false]; } // Status of the preliminary decommission checks across nodes. repeated NodeCheckResult checked_nodes = 1 [(gogoproto.nullable) = false]; } // DecommissionStatusRequest requests the decommissioning status for the // specified or, if none are specified, all nodes. message DecommissionStatusRequest { repeated int32 node_ids = 1 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // The number of decommissioning replicas to be reported. int32 num_replica_report = 2; } // DecommissionRequest requests the server to set the membership status on // all nodes specified by NodeIDs to the value of TargetMembership. // // If no NodeIDs are given, it targets the recipient node. message DecommissionRequest { repeated int32 node_ids = 1 [(gogoproto.customname) = "NodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; kv.kvserver.liveness.livenesspb.MembershipStatus target_membership = 2; // The number of decommissioning replicas to be reported. int32 num_replica_report = 3; } // DecommissionStatusResponse lists decommissioning statuses for a number of NodeIDs. message DecommissionStatusResponse { message Replica { int32 replica_id = 1 [ (gogoproto.customname) = "ReplicaID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.ReplicaID"]; int32 range_id = 2 [ (gogoproto.customname) = "RangeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.RangeID"]; } message Status { int32 node_id = 1 [ (gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; bool is_live = 2; // The number of replicas on the node, computed by scanning meta2 ranges. int64 replica_count = 3; // The membership status of the given node. kv.kvserver.liveness.livenesspb.MembershipStatus membership = 4; bool draining = 5; // Decommissioning replicas on the given node to be reported. // How many replicas are reported is determined by what was specified in the // request. repeated Replica reported_replicas = 6; } // Status of all affected nodes. repeated Status status = 2 [(gogoproto.nullable) = false]; } // SettingsRequest inquires what are the current settings in the cluster. message SettingsRequest { // The array of setting names to retrieve. // An empty keys array means "all". repeated string keys = 1; // Indicate whether to see unredacted setting values. // This is opt-in so that a previous version `cockroach zip` // does not start reporting values when this becomes active. // For good security, the server only obeys this after it checks // that the logger-in user has admin privilege. bool unredacted_values = 2; } // SettingsResponse is the response to SettingsRequest. message SettingsResponse { message Value { string value = 1; string type = 2; string description = 3; bool public = 4; google.protobuf.Timestamp last_updated = 5 [(gogoproto.nullable) = true, (gogoproto.stdtime) = true]; } map<string, Value> key_values = 1 [(gogoproto.nullable) = false]; } // HealthRequest requests a liveness or readiness check. // // A liveness check is triggered via ready set to false. 
In this mode, // an empty response is returned immediately, that is, the caller merely // learns that the process is running. // // A readiness check (ready == true) is suitable for determining whether // user traffic should be directed at a given node, for example by a load // balancer. In this mode, a successful response is returned only if the // node: // // - is not in the process of shutting down or booting up (including // waiting for cluster bootstrap); // - is regarded as healthy by the cluster via the recent broadcast of // a liveness beacon. Absent either of these conditions, an error // code will result. // // API: PUBLIC message HealthRequest { // ready specifies whether the client wants to know whether the // target node is ready to receive traffic. If a node is unready, an // error will be returned. // API: PUBLIC bool ready = 1; } // HealthResponse is the response to HealthRequest. It currently does not // contain any information. // API: PUBLIC message HealthResponse { } // LivenessRequest requests liveness data for all nodes on the cluster. message LivenessRequest { } // LivenessResponse contains the liveness status of each node on the cluster. message LivenessResponse { repeated kv.kvserver.liveness.livenesspb.Liveness livenesses = 1 [(gogoproto.nullable) = false]; map<int32, kv.kvserver.liveness.livenesspb.NodeLivenessStatus> statuses = 2 [ (gogoproto.nullable) = false, (gogoproto.castkey) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID" ]; } // JobsRequest requests system job information of the given status and type. message JobsRequest { int32 limit = 1; string status = 2; cockroach.sql.jobs.jobspb.Type type = 3; } // JobsResponse contains the job record for each matching job. message JobsResponse { repeated JobResponse jobs = 1 [(gogoproto.nullable) = false]; google.protobuf.Timestamp earliest_retained_time = 2 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; } // JobRequest requests system job information for the given job_id. message JobRequest { int64 job_id = 1; } // JobResponse contains the job record for a job. message JobResponse { int64 id = 1 [(gogoproto.customname) = "ID"]; string type = 2; string description = 3; string statement = 16; string username = 4; repeated uint32 descriptor_ids = 5 [ (gogoproto.customname) = "DescriptorIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID" ]; string status = 6; google.protobuf.Timestamp created = 7 [(gogoproto.stdtime) = true]; google.protobuf.Timestamp started = 8 [(gogoproto.stdtime) = true]; google.protobuf.Timestamp finished = 9 [(gogoproto.stdtime) = true]; google.protobuf.Timestamp modified = 10 [(gogoproto.stdtime) = true]; float fraction_completed = 11; string error = 12; // highwater_timestamp is the highwater timestamp returned as normal // timestamp. This is appropriate for display to humans. google.protobuf.Timestamp highwater_timestamp = 13 [(gogoproto.stdtime) = true]; // highwater_decimal is the highwater timestamp in the proprietary decimal // form used by logical timestamps internally. This is appropriate to pass // to a "AS OF SYSTEM TIME" SQL statement. string highwater_decimal = 14; string running_status = 15; google.protobuf.Timestamp last_run = 17 [(gogoproto.stdtime) = true]; google.protobuf.Timestamp next_run = 18 [(gogoproto.stdtime) = true]; int64 num_runs = 19; // ExecutionFailure corresponds to a failure to execute the job with the // attempt starting at start and ending at end. 
message ExecutionFailure { // Status is the status of the job during the execution. string status = 1; // Start is the time at which the execution started. google.protobuf.Timestamp start = 2 [(gogoproto.stdtime) = true]; // End is the time at which the error occurred. google.protobuf.Timestamp end = 3 [(gogoproto.stdtime) = true]; // Error is the error which occurred. string error = 4; } // ExecutionFailures is a log of execution failures of the job. It is not // guaranteed to contain all execution failures and some execution failures // may not contain an error or end. repeated ExecutionFailure execution_failures = 20; // coordinator_id identifies the node coordinating the job. This value will // only be present for jobs that are currently running or recently ran. int64 coordinator_id = 21 [(gogoproto.customname) = "CoordinatorID"]; } // LocationsRequest requests system locality location information. message LocationsRequest { } // JobsResponse contains the job record for each matching job. message LocationsResponse { message Location { string locality_key = 1; string locality_value = 2; double latitude = 3; double longitude = 4; } repeated Location locations = 1 [(gogoproto.nullable) = false]; } // RangeLogRequest request the history of a range from the range log. message RangeLogRequest { // TODO(tamird): use [(gogoproto.customname) = "RangeID"] below. Need to // figure out how to teach grpc-gateway about custom names. // If RangeID is 0, returns range log history without filtering by range. int64 range_id = 1; // limit is the total number of results that are retrieved by the query. If // this is omitted or set to 0, the default maximum number of results are // returned. When set to > 0, at most only that number of results are // returned. When set to < 0, an unlimited number of results are returned. int32 limit = 2; } // RangeLogResponse contains a list of entries from the range log table. message RangeLogResponse { // To avoid porting the pretty printing of keys and descriptors to // javascript, they will be precomputed on the serverside. message PrettyInfo { string updated_desc = 1; string new_desc = 2; string added_replica = 3; string removed_replica = 4; string reason = 5; string details = 6; } message Event { cockroach.kv.kvserver.storagepb.RangeLogEvent event = 1 [(gogoproto.nullable) = false]; PrettyInfo pretty_info = 2 [(gogoproto.nullable) = false]; } reserved 1; // Previously used. repeated Event events = 2 [(gogoproto.nullable) = false]; } // QueryPlanRequest requests the query plans for a SQL string. message QueryPlanRequest { // query is the SQL query string. string query = 1; } // QueryPlanResponse contains the query plans for a SQL string (currently only // the distsql physical query plan). message QueryPlanResponse { string distsql_physical_query_plan = 1 [(gogoproto.customname) = "DistSQLPhysicalQueryPlan"]; } message DataDistributionRequest { } message DataDistributionResponse { message ZoneConfig { // target is the object the zone config applies to, e.g. "DATABASE db" or // "PARTITION north_america OF TABLE users". string target = 1; config.zonepb.ZoneConfig config = 2 [(gogoproto.nullable) = false]; reserved 3; // config_sql is the SQL representation of config. 
string config_sql = 4 [(gogoproto.customname) = "ConfigSQL"]; } message TableInfo { map<int32, int64> replica_count_by_node_id = 1 [(gogoproto.castkey) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; int64 zone_config_id = 2; google.protobuf.Timestamp dropped_at = 3 [(gogoproto.stdtime) = true]; } message DatabaseInfo { // By table name. map<string, TableInfo> table_info = 1 [(gogoproto.nullable) = false]; } // By database name. map<string, DatabaseInfo> database_info = 1 [(gogoproto.nullable) = false]; reserved 2; // By zone name. map<string, ZoneConfig> zone_configs = 3 [(gogoproto.nullable) = false]; } // MetricMetadataRequest requests metadata for all metrics. message MetricMetadataRequest { } // MetricMetadataResponse contains the metadata for all metrics. message MetricMetadataResponse { map<string, cockroach.util.metric.Metadata> metadata = 1 [(gogoproto.nullable) = false]; } message EnqueueRangeRequest { // The node on which the queue should process the range. If node_id is 0, // the request will be forwarded to all other nodes. int32 node_id = 1 [(gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // The name of the replica queue to run the range through. Matched against // each queue's name field. See the implementation of baseQueue for details. string queue = 2; // The ID of the range to run through the queue. int32 range_id = 3 [(gogoproto.customname) = "RangeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.RangeID"]; // If set, run the queue's process method without first checking whether the // replica should be processed by calling shouldQueue. bool skip_should_queue = 4; } message EnqueueRangeResponse { message Details { int32 node_id = 1 [(gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // All trace events collected while processing the range in the queue. repeated TraceEvent events = 2; // The error message from the queue's processing, if any. string error = 3; } repeated Details details = 1; } // ChartCatalogRequest requests returns a catalog of Admin UI charts. message ChartCatalogRequest { } // ChartCatalogResponse returns a catalog of Admin UI charts useful for debugging. message ChartCatalogResponse { repeated cockroach.ts.catalog.ChartSection catalog = 1 [(gogoproto.nullable) = false]; } // CARequest requests the CA cert anchoring this service. message CARequest { } // CAResponse contains a PEM encoded copy of the CA cert for this service. message CAResponse { bytes ca_cert = 1; } // CertBundleRequest requests the bundle of initialization CAs for a new node. // It provides authentication in the form of a joinToken containing a // sharedSecret. message CertBundleRequest { string token_id = 1 [(gogoproto.customname) = "TokenID"]; bytes shared_secret = 2; } // CertBundleResponse contains a copy of all CAs needed to initialize TLS for // a new node. message CertBundleResponse { bytes bundle = 1; } message RecoveryCollectReplicaInfoRequest {} // RecoveryCollectReplicaRestartNodeStream is sent by collector node to client // if it experiences a transient failure collecting data from one of the nodes. // This message instructs client to drop any data that it collected locally // for specified node as streaming for this node would be restarted. // This mechanism is needed to avoid restarting the whole collection procedure // in large cluster if one of the nodes fails transiently. 
message RecoveryCollectReplicaRestartNodeStream { int32 node_id = 1 [ (gogoproto.customname) = "NodeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; } message RecoveryCollectReplicaInfoResponse { oneof info { roachpb.RangeDescriptor range_descriptor = 1; cockroach.kv.kvserver.loqrecovery.loqrecoverypb.ReplicaInfo replica_info = 2; RecoveryCollectReplicaRestartNodeStream node_stream_restarted = 3; cockroach.kv.kvserver.loqrecovery.loqrecoverypb.ClusterMetadata metadata = 4; } } message RecoveryCollectLocalReplicaInfoRequest { } message RecoveryCollectLocalReplicaInfoResponse { cockroach.kv.kvserver.loqrecovery.loqrecoverypb.ReplicaInfo replica_info = 1; } message RecoveryStagePlanRequest { // Plan is replica update plan to stage for application on next restart. Plan // could be empty in that case existing plan is removed if present. cockroach.kv.kvserver.loqrecovery.loqrecoverypb.ReplicaUpdatePlan plan = 1; // If all nodes is true, then receiver should act as a coordinator and perform // a fan-out to stage plan on all nodes of the cluster. bool all_nodes = 2; // ForcePlan tells receiver to ignore any plan already staged on the node if it // is present and replace it with new plan (including empty one). bool force_plan = 3; // ForceLocalInternalVersion tells server to update internal component of plan // version to the one of active cluster version. This option needs to be set // if target cluster is stuck in recovery where only part of nodes were // successfully migrated. bool force_local_internal_version = 4; } message RecoveryStagePlanResponse { // Errors contain error messages happened during plan staging. repeated string errors = 1; } message RecoveryNodeStatusRequest { } message RecoveryNodeStatusResponse { cockroach.kv.kvserver.loqrecovery.loqrecoverypb.NodeRecoveryStatus status = 1 [ (gogoproto.nullable) = false]; } message RecoveryVerifyRequest { // PlanID is ID of the plan to verify. bytes plan_id = 1 [ (gogoproto.customname) = "PendingPlanID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/uuid.UUID"]; // DecommissionedNodeIDs is a set of nodes that should be marked as decommissioned in // the cluster when loss of quorum recovery successfully applies. repeated int32 decommissioned_node_ids = 2 [(gogoproto.customname) = "DecommissionedNodeIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; // MaxReportedRanges is the maximum number of failed ranges to report. // If more unhealthy ranges are found, error will be returned alongside range // to indicate that ranges were cut short. int32 max_reported_ranges = 3; } message RecoveryVerifyResponse { message UnavailableRanges { // Ranges contains descriptors of ranges that failed health check. // If there are too many ranges to report, error would contain relevant // message. repeated cockroach.kv.kvserver.loqrecovery.loqrecoverypb.RangeRecoveryStatus ranges = 1 [ (gogoproto.nullable) = false]; // Error contains an optional error if ranges validation can't complete. string error = 2; } // Statuses contain a list of recovery statuses of nodes updated during recovery. It // also contains nodes that were expected to be live (not decommissioned by recovery) // but failed to return status response. repeated cockroach.kv.kvserver.loqrecovery.loqrecoverypb.NodeRecoveryStatus statuses = 1 [ (gogoproto.nullable) = false]; // UnavailableRanges contains information about ranges that failed health check. 
UnavailableRanges unavailable_ranges = 2 [(gogoproto.nullable) = false]; // DecommissionedNodeStatuses contains a map of requested IDs with their // corresponding liveness statuses. map<int32, kv.kvserver.liveness.livenesspb.MembershipStatus> decommissioned_node_statuses = 3 [ (gogoproto.castkey) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; } // Admin is the gRPC API for the admin UI. Through grpc-gateway, we offer // REST-style HTTP endpoints that locally proxy to the gRPC endpoints. service Admin { rpc RequestCA(CARequest) returns (CAResponse) { option (google.api.http) = { get : "/_join/v1/ca" }; } rpc RequestCertBundle(CertBundleRequest) returns (CertBundleResponse) { option (google.api.http) = { get : "/_join/v1/requestbundle" }; } // URL: /_admin/v1/users rpc Users(UsersRequest) returns (UsersResponse) { option (google.api.http) = { get: "/_admin/v1/users" }; } // URL: /_admin/v1/databases rpc Databases(DatabasesRequest) returns (DatabasesResponse) { option (google.api.http) = { get: "/_admin/v1/databases" }; } // Example URL: /_admin/v1/databases/system rpc DatabaseDetails(DatabaseDetailsRequest) returns (DatabaseDetailsResponse) { option (google.api.http) = { get: "/_admin/v1/databases/{database}" }; } // Example URL: /_admin/v1/databases/system/tables/ui rpc TableDetails(TableDetailsRequest) returns (TableDetailsResponse) { option (google.api.http) = { get: "/_admin/v1/databases/{database}/tables/{table}" }; } // Example URL: /_admin/v1/databases/system/tables/ui/stats rpc TableStats(TableStatsRequest) returns (TableStatsResponse) { option (google.api.http) = { get: "/_admin/v1/databases/{database}/tables/{table}/stats" }; } // Example URL: /_admin/v1/nontablestats rpc NonTableStats(NonTableStatsRequest) returns (NonTableStatsResponse) { option (google.api.http) = { get: "/_admin/v1/nontablestats" }; } // Example URLs: // Example URLs: // - /_admin/v1/events // - /_admin/v1/events?limit=100 // - /_admin/v1/events?type=create_table // - /_admin/v1/events?type=create_table&limit=100 rpc Events(EventsRequest) returns (EventsResponse) { option (google.api.http) = { get: "/_admin/v1/events" }; } // This requires a POST. Because of the libraries we're using, the POST body // must be in the following format: // // {"key_values": // { "key1": "base64_encoded_value1"}, // ... // { "keyN": "base64_encoded_valueN"}, // } // // Note that all keys are quoted strings and that all values are base64- // encoded. // // Together, SetUIData and GetUIData provide access to a "cookie jar" for the // admin UI. The structure of the underlying data is meant to be opaque to the // server. rpc SetUIData(SetUIDataRequest) returns (SetUIDataResponse) { option (google.api.http) = { post: "/_admin/v1/uidata" body: "*" }; } // Example URLs: // - /_admin/v1/uidata?keys=MYKEY // - /_admin/v1/uidata?keys=MYKEY1&keys=MYKEY2 // // Yes, it's a little odd that the query parameter is named "keys" instead of // "key". I would've preferred that the URL parameter be named "key". However, // it's clearer for the protobuf field to be named "keys," which makes the URL // parameter "keys" as well. rpc GetUIData(GetUIDataRequest) returns (GetUIDataResponse) { option (google.api.http) = { get: "/_admin/v1/uidata" }; } // Cluster returns metadata for the cluster. rpc Cluster(ClusterRequest) returns (ClusterResponse) { option (google.api.http) = { get: "/_admin/v1/cluster" }; } // Settings returns the cluster-wide settings for the cluster. 
rpc Settings(SettingsRequest) returns (SettingsResponse) { option (google.api.http) = { get: "/_admin/v1/settings" }; } // Health returns liveness for the node target of the request. // API: PUBLIC rpc Health(HealthRequest) returns (HealthResponse) { option (google.api.http) = { get: "/_admin/v1/health" additional_bindings {get : "/health"} }; } // Liveness returns the liveness state of all nodes on the cluster. rpc Liveness(LivenessRequest) returns (LivenessResponse) { option (google.api.http) = { get: "/_admin/v1/liveness" }; } // Jobs returns the job records for all jobs of the given status and type. rpc Jobs(JobsRequest) returns (JobsResponse) { option (google.api.http) = { get: "/_admin/v1/jobs" }; } // Job returns the job record for the job of the given job_id. rpc Job(JobRequest) returns (JobResponse) { option (google.api.http) = { get: "/_admin/v1/jobs/{job_id}" }; } // Locations returns the locality location records. rpc Locations(LocationsRequest) returns (LocationsResponse) { option (google.api.http) = { get: "/_admin/v1/locations" }; } // QueryPlan returns the query plans for a SQL string. rpc QueryPlan(QueryPlanRequest) returns (QueryPlanResponse) { option (google.api.http) = { get: "/_admin/v1/queryplan" }; } // Drain puts the node into the specified drain mode(s) and optionally // instructs the process to terminate. // We do not expose this via HTTP unless we have a way to authenticate // + authorize streaming RPC connections. See #42567. rpc Drain(DrainRequest) returns (stream DrainResponse) { } // DecommissionPreCheck requests that the server execute preliminary checks // to evaluate the possibility of successfully decommissioning a given node. rpc DecommissionPreCheck(DecommissionPreCheckRequest) returns (DecommissionPreCheckResponse) { } // Decommission puts the node(s) into the specified decommissioning state. // If this ever becomes exposed via HTTP, ensure that it performs // authorization. See #42567. rpc Decommission(DecommissionRequest) returns (DecommissionStatusResponse) { } // DecommissionStatus retrieves the decommissioning status of the specified nodes. // If this ever becomes exposed via HTTP, ensure that it performs // authorization. See #42567. rpc DecommissionStatus(DecommissionStatusRequest) returns (DecommissionStatusResponse) { } // URL: /_admin/v1/rangelog // URL: /_admin/v1/rangelog?limit=100 // URL: /_admin/v1/rangelog/1 // URL: /_admin/v1/rangelog/1?limit=100 rpc RangeLog(RangeLogRequest) returns (RangeLogResponse) { option (google.api.http) = { get: "/_admin/v1/rangelog" additional_bindings { get: "/_admin/v1/rangelog/{range_id}" } }; } rpc DataDistribution(DataDistributionRequest) returns (DataDistributionResponse) { option (google.api.http) = { get: "/_admin/v1/data_distribution" }; } // URL: /_admin/v1/metricmetadata rpc AllMetricMetadata(MetricMetadataRequest) returns (MetricMetadataResponse) { option (google.api.http) = { get: "/_admin/v1/metricmetadata" }; } // URL: /_admin/v1/chartcatalog rpc ChartCatalog(ChartCatalogRequest) returns (ChartCatalogResponse) { option (google.api.http) = { get: "/_admin/v1/chartcatalog" }; } // EnqueueRange runs the specified range through the specified queue on the // range's leaseholder store, returning the detailed trace and error // information from doing so. Parameters must be provided in the body of the // POST request. 
// For example: // // { // "queue": "raftlog", // "rangeId": 10 // } rpc EnqueueRange(EnqueueRangeRequest) returns (EnqueueRangeResponse) { option (google.api.http) = { post: "/_admin/v1/enqueue_range" body : "*" }; } // SendKVBatch proxies the given BatchRequest into KV, returning the // response. It is used by the CLI `debug send-kv-batch` command. rpc SendKVBatch(roachpb.BatchRequest) returns (roachpb.BatchResponse) { } // ListTracingSnapshots retrieves the list of snapshots of the Active Spans // Registry that the node currently has in memory. A new snapshot can be // captured with TakeTracingSnapshots. rpc ListTracingSnapshots(ListTracingSnapshotsRequest) returns (ListTracingSnapshotsResponse) { option (google.api.http) = { get: "/_admin/v1/trace_snapshots" }; } // TakeTracingSnapshot captures a new snapshot of the Active Spans Registry. // The new snapshot is returned, and also made available through // ListTracingSnapshots. rpc TakeTracingSnapshot(TakeTracingSnapshotRequest) returns (TakeTracingSnapshotResponse) { option (google.api.http) = { post: "/_admin/v1/trace_snapshots" }; } // GetTracingSnapshot returns a snapshot of the tracing spans in the active // spans registry previously generated through TakeTracingSnapshots. rpc GetTracingSnapshot(GetTracingSnapshotRequest) returns (GetTracingSnapshotResponse) { option (google.api.http) = { get: "/_admin/v1/trace_snapshots/{snapshot_id}" }; } // GetTrace returns the trace with a specified ID. Depending on the request, // the trace is returned either from a snapshot that was previously taken, or // directly from the active spans registry. rpc GetTrace(GetTraceRequest) returns (GetTraceResponse) { option (google.api.http) = { post: "/_admin/v1/traces" body: "*" }; } // SetTraceRecordingType sets the recording mode of all or some of the spans // in a trace. rpc SetTraceRecordingType(SetTraceRecordingTypeRequest) returns (SetTraceRecordingTypeResponse) { option (google.api.http) = { post: "/_admin/v1/settracerecordingtype" body: "*" }; } // RecoveryCollectReplicaInfo retrieves information about: // 1. range descriptors contained in cluster meta ranges if meta ranges // are readable; // 2. replica information from all live nodes that have connection to // the target node. rpc RecoveryCollectReplicaInfo(RecoveryCollectReplicaInfoRequest) returns (stream RecoveryCollectReplicaInfoResponse) {} // RecoveryCollectLocalReplicaInfo retrieve information about all local // replicas in all stores on the node. rpc RecoveryCollectLocalReplicaInfo(RecoveryCollectLocalReplicaInfoRequest) returns (stream RecoveryCollectLocalReplicaInfoResponse) {} // RecoveryStagePlan stages recovery plan on target or all nodes in cluster // depending on request content and marks nodes deleted in the plan as // decommissioned in each node's local node tombstone storage. rpc RecoveryStagePlan(RecoveryStagePlanRequest) returns (RecoveryStagePlanResponse) {} // RecoveryNodeStatus retrieves loss of quorum recovery status of a single // node. rpc RecoveryNodeStatus(RecoveryNodeStatusRequest) returns (RecoveryNodeStatusResponse) {} // RecoveryVerify verifies that recovery plan is applied on all necessary // nodes, ranges are available and nodes removed in plan are marked as // decommissioned. rpc RecoveryVerify(RecoveryVerifyRequest) returns (RecoveryVerifyResponse) {} // ListTenants returns a list of active tenants in the cluster. 
rpc ListTenants(ListTenantsRequest) returns (ListTenantsResponse) { option (google.api.http) = { get: "/_admin/v1/tenants" }; } } message ListTenantsRequest{} message ListTenantsResponse { repeated Tenant tenants = 1; } message Tenant { roachpb.TenantID tenant_id = 1; string tenant_name = 2; string sql_addr = 3; string rpc_addr = 4; } message ListTracingSnapshotsRequest {} message ListTracingSnapshotsResponse { repeated SnapshotInfo snapshots = 1; } message SnapshotInfo { // SnapshotID identifies a specific snapshot which can be requested via a // GetTracingSnapshotRequest. Negative IDs are used for "automatic" snapshots. int64 snapshot_id = 1 [(gogoproto.customname) = "SnapshotID"]; google.protobuf.Timestamp captured_at = 2 [(gogoproto.stdtime) = true]; } message TakeTracingSnapshotRequest {} message TakeTracingSnapshotResponse { SnapshotInfo snapshot = 1; } message GetTracingSnapshotRequest { // SnapshotId indicates which snapshot is requested. ID may be negative when // requesting an "automatic" snapshot; see ListTracingSnapshotsResponse. int64 snapshot_id = 1; } message GetTracingSnapshotResponse { TracingSnapshot snapshot = 1; } // GetTrace represents the request of the GetTrace RPC. message GetTraceRequest { // If a snapshot is specified, the trace information is returned from that // snapshot. If a snapshot is not specified, information about currently // opened spans is returned from the active spans registry. int64 snapshot_id = 1 [(gogoproto.customname) = "SnapshotID"]; uint64 trace_id = 2 [(gogoproto.nullable)=false, (gogoproto.customname) = "TraceID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.TraceID"]; util.tracing.tracingpb.RecordingMode recording_type = 3; } // GetTrace represents the response to the GetTrace RPC. message GetTraceResponse { // snapshot_id identifies the snapshot that the trace was retrieved from. If // 0, the trace was not retrieved from a registry, but directly from the // active spans registry. int64 snapshot_id = 1 [(gogoproto.customname) = "SnapshotID"]; uint64 trace_id = 2 [(gogoproto.nullable)=false, (gogoproto.customname) = "TraceID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.TraceID"]; // still_exists is set if any spans from this trace are currently present in // the active spans registry. // // If snapshot_id is 0, still_exists is always set. bool still_exists = 3; // serialized_recording represents the serialization of trace recording. We // return the recording already serialized as formatted string for easy // consumption in the browser. string serialized_recording = 4; } // TracingSnapshot represents a snapshot of the active spans registry, including // all the spans that were open at the time when the snapshot was taken. message TracingSnapshot { int64 snapshot_id = 1 [(gogoproto.customname) = "SnapshotID"]; google.protobuf.Timestamp captured_at = 2 [(gogoproto.stdtime) = true]; repeated TracingSpan spans = 3; // Ideally we'd use int64 to match the goroutine_id type // but unfortunately, the way that grpc-gateway parses // these objects into Javascript results in odd encodings // of Long JS types that are difficult to interact with // as map keys. Thus, we settle for string. map<string, string> stacks = 4; } message NamedOperationMetadata { string name = 1; util.tracing.tracingpb.OperationMetadata metadata = 2 [(gogoproto.nullable) = false]; } // TracingSpan represents a span, in a form slightly processed for the use of // the tracing UI. 
message TracingSpan { string operation = 1; uint64 trace_id = 2 [(gogoproto.customname) = "TraceID"]; uint64 span_id = 3 [(gogoproto.customname) = "SpanID"]; uint64 parent_span_id = 4 [(gogoproto.customname) = "ParentSpanID"]; google.protobuf.Timestamp start = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; uint64 goroutine_id = 6 [(gogoproto.customname) = "GoroutineID"]; repeated SpanTag processed_tags = 7; // current is set if the span is still alive (i.e. still present in the active // spans registry). bool current = 8; // current_recording_mode represents the span's current recording mode. This is // not set if current == false. util.tracing.tracingpb.RecordingMode current_recording_mode = 9; repeated NamedOperationMetadata children_metadata = 10; } // SpanTag represents a tag on a tracing span, in a form processed for the use // of the tracing UI. message SpanTag { string key = 1; string val = 2; string caption = 3; string link = 4; bool hidden = 5; bool highlight = 6; bool inherit = 7; bool inherited = 8; bool propagate_up = 9; bool copied_from_child = 10; repeated ChildSpanTag children = 11; // May be empty. } message ChildSpanTag { string key = 1; string val = 2; } // SetTraceRecordingTypeRequest is the request for SetTraceRecordingType, which // sets the recording mode of all or some of the spans in a trace. message SetTraceRecordingTypeRequest { // TraceID identifies the trace to toggle the recording of. It must always be // specified. uint64 trace_id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "TraceID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.TraceID"]; // SpanID, if not zero, controls which spans in the trace get their recording // mode set. If zero, all spans in the trace are updated. If not zero, only // the respective span and its descendants get updated. uint64 span_id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "SpanID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.SpanID"]; util.tracing.tracingpb.RecordingMode recording_mode = 3; } // SetTraceRecordingTypeRequest is the response for SetTraceRecordingType. message SetTraceRecordingTypeResponse{} // FeatureFlags within this struct are used within back-end/front-end code to show/hide features. message FeatureFlags { // isObservabiliyService is true when the server is an instance of the Observability Service bool is_observability_service = 1; // CanViewKVMetricDashboards is true when the logged in user is able to view KV-level metric dashboards. bool can_view_kv_metric_dashboards = 2; // DisableKVLevelAdvancedDebug is true when the UI should remove options to certain KV-level // debug operations. This is helpful in application tenant contexsts, where these requests // can only return errors since the tenant cannot perform the operations. bool disable_kv_level_advanced_debug = 3; }
pkg/server/serverpb/admin.proto
1
https://github.com/cockroachdb/cockroach/commit/977f72e4cc24f5d14424c278d6d6921016c3fc62
[ 0.9985815286636353, 0.034970417618751526, 0.00016244183643721044, 0.0002355801552766934, 0.15831175446510315 ]
{ "id": 6, "code_window": [ " (gogoproto.casttype) = \"github.com/cockroachdb/cockroach/pkg/roachpb.NodeID\"];\n", "\n", " // The node's decommission readiness status.\n", " NodeReadiness decommission_readiness = 2;\n", "\n", " // The liveness status of the given node.\n", " kv.kvserver.liveness.livenesspb.NodeLivenessStatus liveness_status = 3;\n", "\n", " // The number of total replicas on the node, computed by scanning range\n", " // descriptors.\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep" ], "after_edit": [ " reserved 3; // Previously used\n" ], "file_path": "pkg/server/serverpb/admin.proto", "type": "replace", "edit_start_line_idx": 534 }
# This is a dummy rule that always triggers an alert
groups:
- name: rules/dummy.rules
  rules:
  - alert: TestAlertManager
    expr: vector(1)
monitoring/rules/dummy.rules.yml
0
https://github.com/cockroachdb/cockroach/commit/977f72e4cc24f5d14424c278d6d6921016c3fc62
[ 0.00017574004596099257, 0.00017574004596099257, 0.00017574004596099257, 0.00017574004596099257, 0 ]
{ "id": 6, "code_window": [ " (gogoproto.casttype) = \"github.com/cockroachdb/cockroach/pkg/roachpb.NodeID\"];\n", "\n", " // The node's decommission readiness status.\n", " NodeReadiness decommission_readiness = 2;\n", "\n", " // The liveness status of the given node.\n", " kv.kvserver.liveness.livenesspb.NodeLivenessStatus liveness_status = 3;\n", "\n", " // The number of total replicas on the node, computed by scanning range\n", " // descriptors.\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep" ], "after_edit": [ " reserved 3; // Previously used\n" ], "file_path": "pkg/server/serverpb/admin.proto", "type": "replace", "edit_start_line_idx": 534 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package timeutil import ( "time" "github.com/cockroachdb/cockroach/pkg/util/grunning" "github.com/cockroachdb/cockroach/pkg/util/syncutil" ) // StopWatch is a utility stop watch that can be safely started and stopped // multiple times and can be used concurrently. type StopWatch struct { mu struct { syncutil.Mutex // started is true if the stop watch has been started and haven't been // stopped after that. started bool // startedAt is the time when the stop watch was started. startedAt time.Time // elapsed is the total time measured by the stop watch (i.e. between // all Starts and Stops). elapsed time.Duration // timeSource is the source of time used by the stop watch. It is always // timeutil.Now except for tests. timeSource func() time.Time // cpuStopWatch is used to track CPU usage. It may be nil, in which case any // operations on it are no-ops. cpuStopWatch *cpuStopWatch } } // NewStopWatch creates a new StopWatch. func NewStopWatch() *StopWatch { return newStopWatch(Now) } // NewStopWatchWithCPU creates a new StopWatch that will track CPU usage in // addition to wall-clock time. func NewStopWatchWithCPU() *StopWatch { w := newStopWatch(Now) if grunning.Supported() { w.mu.cpuStopWatch = &cpuStopWatch{} } return w } // NewTestStopWatch create a new StopWatch with the given time source. It is // used for testing only. func NewTestStopWatch(timeSource func() time.Time) *StopWatch { return newStopWatch(timeSource) } func newStopWatch(timeSource func() time.Time) *StopWatch { w := &StopWatch{} w.mu.timeSource = timeSource return w } // Start starts the stop watch if it hasn't already been started. func (w *StopWatch) Start() { w.mu.Lock() defer w.mu.Unlock() if !w.mu.started { w.mu.started = true w.mu.startedAt = w.mu.timeSource() w.mu.cpuStopWatch.start() } } // Stop stops the stop watch if it hasn't already been stopped and accumulates // the duration that elapsed since it was started. If the stop watch has // already been stopped, it is a noop. func (w *StopWatch) Stop() { w.mu.Lock() defer w.mu.Unlock() if w.mu.started { w.mu.started = false w.mu.elapsed += w.mu.timeSource().Sub(w.mu.startedAt) w.mu.cpuStopWatch.stop() } } // Elapsed returns the total time measured by the stop watch so far. func (w *StopWatch) Elapsed() time.Duration { w.mu.Lock() defer w.mu.Unlock() return w.mu.elapsed } // ElapsedCPU returns the total CPU time measured by the stop watch so far. It // returns zero if cpuStopWatch is nil (which is the case if NewStopWatchWithCPU // was not called or the platform does not support grunning). func (w *StopWatch) ElapsedCPU() time.Duration { w.mu.Lock() defer w.mu.Unlock() return w.mu.cpuStopWatch.elapsed() } // LastStartedAt returns the time the stopwatch was last started, and a bool // indicating if the stopwatch is currently started. func (w *StopWatch) LastStartedAt() (startedAt time.Time, started bool) { w.mu.Lock() defer w.mu.Unlock() return w.mu.startedAt, w.mu.started } // TestTimeSource is a source of time that remembers when it was created (in // terms of the real time) and returns the time based on its creation time and // the number of "advances" it has had. It is used for testing only. 
type TestTimeSource struct { initTime time.Time counter int64 } // NewTestTimeSource create a new TestTimeSource. func NewTestTimeSource() *TestTimeSource { return &TestTimeSource{initTime: Now()} } // Now tells the current time according to t. func (t *TestTimeSource) Now() time.Time { return t.initTime.Add(time.Duration(t.counter)) } // Advance advances the current time according to t by 1 nanosecond. func (t *TestTimeSource) Advance() { t.counter++ } // Elapsed returns how much time has passed since t has been created. Note that // it is equal to the number of advances in nanoseconds. func (t *TestTimeSource) Elapsed() time.Duration { return time.Duration(t.counter) }
pkg/util/timeutil/stopwatch.go
0
https://github.com/cockroachdb/cockroach/commit/977f72e4cc24f5d14424c278d6d6921016c3fc62
[ 0.00021302214008755982, 0.00017596599354874343, 0.00016642727132420987, 0.00017388320702593774, 0.000010388854207121767 ]
{ "id": 6, "code_window": [ " (gogoproto.casttype) = \"github.com/cockroachdb/cockroach/pkg/roachpb.NodeID\"];\n", "\n", " // The node's decommission readiness status.\n", " NodeReadiness decommission_readiness = 2;\n", "\n", " // The liveness status of the given node.\n", " kv.kvserver.liveness.livenesspb.NodeLivenessStatus liveness_status = 3;\n", "\n", " // The number of total replicas on the node, computed by scanning range\n", " // descriptors.\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep" ], "after_edit": [ " reserved 3; // Previously used\n" ], "file_path": "pkg/server/serverpb/admin.proto", "type": "replace", "edit_start_line_idx": 534 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package pgcode // Code is a wrapper around a string to ensure that pgcodes are used in // different pgerror functions by avoiding accidental string input. type Code struct { code string } // MakeCode converts a string into a Code. func MakeCode(s string) Code { return Code{code: s} } // String returns the underlying pgcode string. func (c Code) String() string { return c.code } // PG error codes from: http://www.postgresql.org/docs/9.5/static/errcodes-appendix.html. // Specifically, errcodes.txt is copied from from Postgres' src/backend/utils/errcodes.txt. // // The error definitions were generated using the generate.sh script, // with a bit of manual tweaking performed afterwards. var ( // Section: Class 00 - Successful Completion SuccessfulCompletion = MakeCode("00000") // Section: Class 01 - Warning Warning = MakeCode("01000") WarningDynamicResultSetsReturned = MakeCode("0100C") WarningImplicitZeroBitPadding = MakeCode("01008") WarningNullValueEliminatedInSetFunction = MakeCode("01003") WarningPrivilegeNotGranted = MakeCode("01007") WarningPrivilegeNotRevoked = MakeCode("01006") WarningStringDataRightTruncation = MakeCode("01004") WarningDeprecatedFeature = MakeCode("01P01") // Section: Class 02 - No Data (this is also a warning class per the SQL standard) NoData = MakeCode("02000") NoAdditionalDynamicResultSetsReturned = MakeCode("02001") // Section: Class 03 - SQL Statement Not Yet Complete SQLStatementNotYetComplete = MakeCode("03000") // Section: Class 08 - Connection Exception ConnectionException = MakeCode("08000") ConnectionDoesNotExist = MakeCode("08003") ConnectionFailure = MakeCode("08006") SQLclientUnableToEstablishSQLconnection = MakeCode("08001") SQLserverRejectedEstablishmentOfSQLconnection = MakeCode("08004") TransactionResolutionUnknown = MakeCode("08007") ProtocolViolation = MakeCode("08P01") // Section: Class 09 - Triggered Action Exception TriggeredActionException = MakeCode("09000") // Section: Class 0A - Feature Not Supported FeatureNotSupported = MakeCode("0A000") // Section: Class 0B - Invalid Transaction Initiation InvalidTransactionInitiation = MakeCode("0B000") // Section: Class 0F - Locator Exception LocatorException = MakeCode("0F000") InvalidLocatorSpecification = MakeCode("0F001") // Section: Class 0L - Invalid Grantor InvalidGrantor = MakeCode("0L000") InvalidGrantOperation = MakeCode("0LP01") // Section: Class 0P - Invalid Role Specification InvalidRoleSpecification = MakeCode("0P000") // Section: Class 0Z - Diagnostics Exception DiagnosticsException = MakeCode("0Z000") StackedDiagnosticsAccessedWithoutActiveHandler = MakeCode("0Z002") // Section: Class 20 - Case Not Found CaseNotFound = MakeCode("20000") // Section: Class 21 - Cardinality Violation CardinalityViolation = MakeCode("21000") // Section: Class 22 - Data Exception DataException = MakeCode("22000") ArraySubscript = MakeCode("2202E") CharacterNotInRepertoire = MakeCode("22021") DatetimeFieldOverflow = MakeCode("22008") DivisionByZero = MakeCode("22012") InvalidWindowFrameOffset = MakeCode("22013") ErrorInAssignment = MakeCode("22005") EscapeCharacterConflict = MakeCode("2200B") IndicatorOverflow = MakeCode("22022") 
IntervalFieldOverflow = MakeCode("22015") InvalidArgumentForLogarithm = MakeCode("2201E") InvalidArgumentForNtileFunction = MakeCode("22014") InvalidArgumentForNthValueFunction = MakeCode("22016") InvalidArgumentForPowerFunction = MakeCode("2201F") InvalidArgumentForWidthBucketFunction = MakeCode("2201G") InvalidCharacterValueForCast = MakeCode("22018") InvalidDatetimeFormat = MakeCode("22007") InvalidEscapeCharacter = MakeCode("22019") InvalidEscapeOctet = MakeCode("2200D") InvalidEscapeSequence = MakeCode("22025") NonstandardUseOfEscapeCharacter = MakeCode("22P06") InvalidIndicatorParameterValue = MakeCode("22010") InvalidParameterValue = MakeCode("22023") InvalidRegularExpression = MakeCode("2201B") InvalidRowCountInLimitClause = MakeCode("2201W") InvalidRowCountInResultOffsetClause = MakeCode("2201X") InvalidTimeZoneDisplacementValue = MakeCode("22009") InvalidUseOfEscapeCharacter = MakeCode("2200C") MostSpecificTypeMismatch = MakeCode("2200G") NullValueNotAllowed = MakeCode("22004") NullValueNoIndicatorParameter = MakeCode("22002") NumericValueOutOfRange = MakeCode("22003") SequenceGeneratorLimitExceeded = MakeCode("2200H") StringDataLengthMismatch = MakeCode("22026") StringDataRightTruncation = MakeCode("22001") Substring = MakeCode("22011") Trim = MakeCode("22027") UnterminatedCString = MakeCode("22024") ZeroLengthCharacterString = MakeCode("2200F") FloatingPointException = MakeCode("22P01") InvalidTextRepresentation = MakeCode("22P02") InvalidBinaryRepresentation = MakeCode("22P03") BadCopyFileFormat = MakeCode("22P04") UntranslatableCharacter = MakeCode("22P05") NotAnXMLDocument = MakeCode("2200L") InvalidXMLDocument = MakeCode("2200M") InvalidXMLContent = MakeCode("2200N") InvalidXMLComment = MakeCode("2200S") InvalidXMLProcessingInstruction = MakeCode("2200T") // Section: Class 23 - Integrity Constraint Violation IntegrityConstraintViolation = MakeCode("23000") RestrictViolation = MakeCode("23001") NotNullViolation = MakeCode("23502") ForeignKeyViolation = MakeCode("23503") UniqueViolation = MakeCode("23505") CheckViolation = MakeCode("23514") ExclusionViolation = MakeCode("23P01") // Section: Class 24 - Invalid Cursor State InvalidCursorState = MakeCode("24000") // Section: Class 25 - Invalid Transaction State InvalidTransactionState = MakeCode("25000") ActiveSQLTransaction = MakeCode("25001") BranchTransactionAlreadyActive = MakeCode("25002") HeldCursorRequiresSameIsolationLevel = MakeCode("25008") InappropriateAccessModeForBranchTransaction = MakeCode("25003") InappropriateIsolationLevelForBranchTransaction = MakeCode("25004") NoActiveSQLTransactionForBranchTransaction = MakeCode("25005") ReadOnlySQLTransaction = MakeCode("25006") SchemaAndDataStatementMixingNotSupported = MakeCode("25007") NoActiveSQLTransaction = MakeCode("25P01") InFailedSQLTransaction = MakeCode("25P02") // Section: Class 26 - Invalid SQL Statement Name InvalidSQLStatementName = MakeCode("26000") // Section: Class 27 - Triggered Data Change Violation TriggeredDataChangeViolation = MakeCode("27000") // Section: Class 28 - Invalid Authorization Specification InvalidAuthorizationSpecification = MakeCode("28000") InvalidPassword = MakeCode("28P01") // Section: Class 2B - Dependent Privilege Descriptors Still Exist DependentPrivilegeDescriptorsStillExist = MakeCode("2B000") DependentObjectsStillExist = MakeCode("2BP01") // Section: Class 2D - Invalid Transaction Termination InvalidTransactionTermination = MakeCode("2D000") // Section: Class 2F - SQL Routine Exception 
RoutineExceptionFunctionExecutedNoReturnStatement = MakeCode("2F005") RoutineExceptionModifyingSQLDataNotPermitted = MakeCode("2F002") RoutineExceptionProhibitedSQLStatementAttempted = MakeCode("2F003") RoutineExceptionReadingSQLDataNotPermitted = MakeCode("2F004") // Section: Class 34 - Invalid Cursor Name InvalidCursorName = MakeCode("34000") // Section: Class 38 - External Routine Exception ExternalRoutineException = MakeCode("38000") ExternalRoutineContainingSQLNotPermitted = MakeCode("38001") ExternalRoutineModifyingSQLDataNotPermitted = MakeCode("38002") ExternalRoutineProhibitedSQLStatementAttempted = MakeCode("38003") ExternalRoutineReadingSQLDataNotPermitted = MakeCode("38004") // Section: Class 39 - External Routine Invocation Exception ExternalRoutineInvocationException = MakeCode("39000") ExternalRoutineInvalidSQLstateReturned = MakeCode("39001") ExternalRoutineNullValueNotAllowed = MakeCode("39004") ExternalRoutineTriggerProtocolViolated = MakeCode("39P01") ExternalRoutineSrfProtocolViolated = MakeCode("39P02") // Section: Class 3B - Savepoint Exception SavepointException = MakeCode("3B000") InvalidSavepointSpecification = MakeCode("3B001") // Section: Class 3D - Invalid Catalog Name InvalidCatalogName = MakeCode("3D000") // Section: Class 3F - Invalid Schema Name InvalidSchemaName = MakeCode("3F000") // Section: Class 40 - Transaction Rollback TransactionRollback = MakeCode("40000") TransactionIntegrityConstraintViolation = MakeCode("40002") SerializationFailure = MakeCode("40001") StatementCompletionUnknown = MakeCode("40003") DeadlockDetected = MakeCode("40P01") // Section: Class 42 - Syntax Error or Access Rule Violation SyntaxErrorOrAccessRuleViolation = MakeCode("42000") Syntax = MakeCode("42601") InsufficientPrivilege = MakeCode("42501") CannotCoerce = MakeCode("42846") Grouping = MakeCode("42803") Windowing = MakeCode("42P20") InvalidRecursion = MakeCode("42P19") InvalidForeignKey = MakeCode("42830") InvalidName = MakeCode("42602") NameTooLong = MakeCode("42622") ReservedName = MakeCode("42939") DatatypeMismatch = MakeCode("42804") IndeterminateDatatype = MakeCode("42P18") CollationMismatch = MakeCode("42P21") IndeterminateCollation = MakeCode("42P22") WrongObjectType = MakeCode("42809") GeneratedAlways = MakeCode("428C9") UndefinedColumn = MakeCode("42703") UndefinedCursor = MakeCode("34000") UndefinedDatabase = MakeCode("3D000") UndefinedFunction = MakeCode("42883") UndefinedPreparedStatement = MakeCode("26000") UndefinedSchema = MakeCode("3F000") UndefinedTable = MakeCode("42P01") UndefinedParameter = MakeCode("42P02") UndefinedObject = MakeCode("42704") DuplicateColumn = MakeCode("42701") DuplicateCursor = MakeCode("42P03") DuplicateDatabase = MakeCode("42P04") DuplicateFunction = MakeCode("42723") DuplicatePreparedStatement = MakeCode("42P05") DuplicateSchema = MakeCode("42P06") DuplicateRelation = MakeCode("42P07") DuplicateAlias = MakeCode("42712") DuplicateObject = MakeCode("42710") AmbiguousColumn = MakeCode("42702") AmbiguousFunction = MakeCode("42725") AmbiguousParameter = MakeCode("42P08") AmbiguousAlias = MakeCode("42P09") InvalidColumnReference = MakeCode("42P10") InvalidColumnDefinition = MakeCode("42611") InvalidCursorDefinition = MakeCode("42P11") InvalidDatabaseDefinition = MakeCode("42P12") InvalidFunctionDefinition = MakeCode("42P13") InvalidPreparedStatementDefinition = MakeCode("42P14") InvalidSchemaDefinition = MakeCode("42P15") InvalidTableDefinition = MakeCode("42P16") InvalidObjectDefinition = MakeCode("42P17") FileAlreadyExists = 
MakeCode("42C01") // Section: Class 44 - WITH CHECK OPTION Violation WithCheckOptionViolation = MakeCode("44000") // Section: Class 53 - Insufficient Resources InsufficientResources = MakeCode("53000") DiskFull = MakeCode("53100") OutOfMemory = MakeCode("53200") TooManyConnections = MakeCode("53300") ConfigurationLimitExceeded = MakeCode("53400") // Section: Class 54 - Program Limit Exceeded ProgramLimitExceeded = MakeCode("54000") StatementTooComplex = MakeCode("54001") TooManyColumns = MakeCode("54011") TooManyArguments = MakeCode("54023") // Section: Class 55 - Object Not In Prerequisite State ObjectNotInPrerequisiteState = MakeCode("55000") ObjectInUse = MakeCode("55006") CantChangeRuntimeParam = MakeCode("55P02") LockNotAvailable = MakeCode("55P03") // Section: Class 57 - Operator Intervention OperatorIntervention = MakeCode("57000") QueryCanceled = MakeCode("57014") AdminShutdown = MakeCode("57P01") CrashShutdown = MakeCode("57P02") CannotConnectNow = MakeCode("57P03") DatabaseDropped = MakeCode("57P04") // Section: Class 58 - System Error System = MakeCode("58000") Io = MakeCode("58030") UndefinedFile = MakeCode("58P01") DuplicateFile = MakeCode("58P02") // Section: Class F0 - Configuration File Error ConfigFile = MakeCode("F0000") LockFileExists = MakeCode("F0001") // Section: Class HV - Foreign Data Wrapper Error (SQL/MED) FdwError = MakeCode("HV000") FdwColumnNameNotFound = MakeCode("HV005") FdwDynamicParameterValueNeeded = MakeCode("HV002") FdwFunctionSequenceError = MakeCode("HV010") FdwInconsistentDescriptorInformation = MakeCode("HV021") FdwInvalidAttributeValue = MakeCode("HV024") FdwInvalidColumnName = MakeCode("HV007") FdwInvalidColumnNumber = MakeCode("HV008") FdwInvalidDataType = MakeCode("HV004") FdwInvalidDataTypeDescriptors = MakeCode("HV006") FdwInvalidDescriptorFieldIdentifier = MakeCode("HV091") FdwInvalidHandle = MakeCode("HV00B") FdwInvalidOptionIndex = MakeCode("HV00C") FdwInvalidOptionName = MakeCode("HV00D") FdwInvalidStringLengthOrBufferLength = MakeCode("HV090") FdwInvalidStringFormat = MakeCode("HV00A") FdwInvalidUseOfNullPointer = MakeCode("HV009") FdwTooManyHandles = MakeCode("HV014") FdwOutOfMemory = MakeCode("HV001") FdwNoSchemas = MakeCode("HV00P") FdwOptionNameNotFound = MakeCode("HV00J") FdwReplyHandle = MakeCode("HV00K") FdwSchemaNotFound = MakeCode("HV00Q") FdwTableNotFound = MakeCode("HV00R") FdwUnableToCreateExecution = MakeCode("HV00L") FdwUnableToCreateReply = MakeCode("HV00M") FdwUnableToEstablishConnection = MakeCode("HV00N") // Section: Class P0 - PL/pgSQL Error PLpgSQL = MakeCode("P0000") RaiseException = MakeCode("P0001") NoDataFound = MakeCode("P0002") TooManyRows = MakeCode("P0003") AssertFailure = MakeCode("P0004") // Section: Class XX - Internal Error Internal = MakeCode("XX000") DataCorrupted = MakeCode("XX001") IndexCorrupted = MakeCode("XX002") ) // The following errors are CockroachDB-specific. var ( // Uncategorized is used for errors that flow out to a client // when there's no code known yet. Uncategorized = MakeCode("XXUUU") // CCLRequired signals that a CCL binary is required to complete this // task. CCLRequired = MakeCode("XXC01") // CCLValidLicenseRequired signals that a valid CCL license is // required to complete this task. CCLValidLicenseRequired = MakeCode("XXC02") // TransactionCommittedWithSchemaChangeFailure signals that the // non-DDL payload of a transaction was committed successfully but // some DDL operation failed, without rolling back the rest of the // transaction. 
// // We define a separate code instead of reusing a code from // PostgreSQL (like StatementCompletionUnknown) because that makes // it easier to document the error (this code only occurs in this // particular situation) in a way that's unique to CockroachDB. // // We also use a "XX" code for this for several reasons: // - it needs to override any other pg code set "underneath" in the cause. // - it forces implementers of logic tests to be mindful about // this situation. The logic test runner will remind the implementer // that: // serious error with code "XXA00" occurred; if expected, // must use 'error pgcode XXA00 ...' TransactionCommittedWithSchemaChangeFailure = MakeCode("XXA00") // Class 22C - Semantic errors in the structure of a SQL statement. // ScalarOperationCannotRunWithoutFullSessionContext signals that an // operator or built-in function was used that requires a full session // context and thus cannot be run in a background job or away from the SQL // gateway. ScalarOperationCannotRunWithoutFullSessionContext = MakeCode("22C01") // Class 55C - Object Not In Prerequisite State (Cockroach extension) // SchemaChangeOccurred signals that a DDL change to the targets of a // CHANGEFEED has lead to its termination. If this error code is received // the CHANGEFEED will have previously emitted a resolved timestamp which // precedes the hlc timestamp of the relevant DDL transaction. SchemaChangeOccurred = MakeCode("55C01") // NoPrimaryKey signals that a table descriptor is invalid because the table // does not have a primary key. NoPrimaryKey = MakeCode("55C02") // Class 58C - System errors related to CockroachDB node problems. // RangeUnavailable signals that some data from the cluster cannot be // accessed (e.g. because all replicas awol). RangeUnavailable = MakeCode("58C00") // InternalConnectionFailure refers to a networking error encountered // internally on a connection between different Cockroach nodes. InternalConnectionFailure = MakeCode("58C01") // ProxyConnectionError is returned by the sqlproxyccl and it indicates a // problem establishing the connection through the proxy. ProxyConnectionError = MakeCode("08C00") // Class XC - cockroach extension. // CockroachDB distributed system related errors. // UnsatisfiableBoundedStaleness signals that the bounded staleness query // cannot be satisfied. UnsatisfiableBoundedStaleness = MakeCode("XCUBS") // QueryNotRunningInHomeRegion signals that a query is not running in its // home region. QueryNotRunningInHomeRegion = MakeCode("XCHR1") // QueryHasNoHomeRegion signals that a query has no home region. QueryHasNoHomeRegion = MakeCode("XCHR2") // ExperimentalFeature signals that a feature we supported experimentally is being // used without the session variable being enabled. ExperimentalFeature = MakeCode("XCEXF") )
pkg/sql/pgwire/pgcode/codes.go
0
https://github.com/cockroachdb/cockroach/commit/977f72e4cc24f5d14424c278d6d6921016c3fc62
[ 0.00030254852026700974, 0.0001748240611050278, 0.00016287968901451677, 0.00017043198749888688, 0.000021627603928209282 ]
{ "id": 3, "code_window": [ "\n", "// validates that the service created, updated by REST\n", "// has correct ClusterIPs related fields\n", "func isValidClusterIPFields(t *testing.T, storage *REST, pre *api.Service, post *api.Service) {\n", "\t// valid for gate off/on scenarios\n", "\t// ClusterIP\n", "\tif len(post.Spec.ClusterIP) == 0 {\n", "\t\tt.Fatalf(\"service must have clusterIP : %+v\", post)\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tt.Helper()\n", "\n" ], "file_path": "pkg/registry/core/service/storage/rest_test.go", "type": "add", "edit_start_line_idx": 3374 }
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package storage import ( "context" "fmt" "math/rand" "net" "net/http" "net/url" "strconv" "k8s.io/apimachinery/pkg/api/errors" metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" utilnet "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/util/dryrun" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" apiservice "k8s.io/kubernetes/pkg/api/service" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/features" registry "k8s.io/kubernetes/pkg/registry/core/service" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" "k8s.io/kubernetes/pkg/registry/core/service/portallocator" netutils "k8s.io/utils/net" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" ) // REST adapts a service registry into apiserver's RESTStorage model. type REST struct { strategy rest.RESTCreateUpdateStrategy services ServiceStorage endpoints EndpointsStorage serviceIPAllocatorsByFamily map[api.IPFamily]ipallocator.Interface defaultServiceIPFamily api.IPFamily // --service-cluster-ip-range[0] serviceNodePorts portallocator.Interface proxyTransport http.RoundTripper pods rest.Getter } // ServiceNodePort includes protocol and port number of a service NodePort. type ServiceNodePort struct { // The IP protocol for this port. Supports "TCP" and "UDP". Protocol api.Protocol // The port on each node on which this service is exposed. // Default is to auto-allocate a port if the ServiceType of this Service requires one. NodePort int32 } type ServiceStorage interface { rest.Scoper rest.Getter rest.Lister rest.CreaterUpdater rest.GracefulDeleter rest.Watcher rest.StorageVersionProvider rest.ResetFieldsStrategy } type EndpointsStorage interface { rest.Getter rest.GracefulDeleter } // NewREST returns a wrapper around the underlying generic storage and performs // allocations and deallocations of various service related resources like ports. // TODO: all transactional behavior should be supported from within generic storage // or the strategy. 
func NewREST( services ServiceStorage, endpoints EndpointsStorage, pods rest.Getter, serviceIPs ipallocator.Interface, secondaryServiceIPs ipallocator.Interface, serviceNodePorts portallocator.Interface, proxyTransport http.RoundTripper, ) (*REST, *registry.ProxyREST) { strategy, _ := registry.StrategyForServiceCIDRs(serviceIPs.CIDR(), secondaryServiceIPs != nil) byIPFamily := make(map[api.IPFamily]ipallocator.Interface) // detect this cluster default Service IPFamily (ipfamily of --service-cluster-ip-range[0]) serviceIPFamily := api.IPv4Protocol cidr := serviceIPs.CIDR() if netutils.IsIPv6CIDR(&cidr) { serviceIPFamily = api.IPv6Protocol } // add primary family byIPFamily[serviceIPFamily] = serviceIPs if secondaryServiceIPs != nil { // process secondary family secondaryServiceIPFamily := api.IPv6Protocol // get family of secondary if serviceIPFamily == api.IPv6Protocol { secondaryServiceIPFamily = api.IPv4Protocol } // add it byIPFamily[secondaryServiceIPFamily] = secondaryServiceIPs } klog.V(0).Infof("the default service ipfamily for this cluster is: %s", string(serviceIPFamily)) rest := &REST{ strategy: strategy, services: services, endpoints: endpoints, serviceIPAllocatorsByFamily: byIPFamily, serviceNodePorts: serviceNodePorts, defaultServiceIPFamily: serviceIPFamily, proxyTransport: proxyTransport, pods: pods, } return rest, &registry.ProxyREST{Redirector: rest, ProxyTransport: proxyTransport} } var ( _ ServiceStorage = &REST{} _ rest.CategoriesProvider = &REST{} _ rest.ShortNamesProvider = &REST{} _ rest.StorageVersionProvider = &REST{} ) func (rs *REST) StorageVersion() runtime.GroupVersioner { return rs.services.StorageVersion() } // ShortNames implements the ShortNamesProvider interface. Returns a list of short names for a resource. func (rs *REST) ShortNames() []string { return []string{"svc"} } // Categories implements the CategoriesProvider interface. Returns a list of categories a resource is part of. 
func (rs *REST) Categories() []string { return []string{"all"} } func (rs *REST) NamespaceScoped() bool { return rs.services.NamespaceScoped() } func (rs *REST) New() runtime.Object { return rs.services.New() } func (rs *REST) NewList() runtime.Object { return rs.services.NewList() } func (rs *REST) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) { return rs.services.Get(ctx, name, options) } func (rs *REST) List(ctx context.Context, options *metainternalversion.ListOptions) (runtime.Object, error) { return rs.services.List(ctx, options) } func (rs *REST) Watch(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error) { return rs.services.Watch(ctx, options) } func (rs *REST) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) { service := obj.(*api.Service) // bag of clusterIPs allocated in the process of creation // failed allocation will automatically trigger release var toReleaseClusterIPs map[api.IPFamily]string if err := rest.BeforeCreate(rs.strategy, ctx, obj); err != nil { return nil, err } // TODO: this should probably move to strategy.PrepareForCreate() defer func() { released, err := rs.releaseClusterIPs(toReleaseClusterIPs) if err != nil { klog.Warningf("failed to release clusterIPs for failed new service:%v allocated:%v released:%v error:%v", service.Name, toReleaseClusterIPs, released, err) } }() // try set ip families (for missing ip families) // we do it here, since we want this to be visible // even when dryRun == true if err := rs.tryDefaultValidateServiceClusterIPFields(nil, service); err != nil { return nil, err } var err error if !dryrun.IsDryRun(options.DryRun) { toReleaseClusterIPs, err = rs.allocServiceClusterIPs(service) if err != nil { return nil, err } } nodePortOp := portallocator.StartOperation(rs.serviceNodePorts, dryrun.IsDryRun(options.DryRun)) defer nodePortOp.Finish() if service.Spec.Type == api.ServiceTypeNodePort || service.Spec.Type == api.ServiceTypeLoadBalancer { if err := initNodePorts(service, nodePortOp); err != nil { return nil, err } } // Handle ExternalTraffic related fields during service creation. if apiservice.NeedsHealthCheck(service) { if err := allocateHealthCheckNodePort(service, nodePortOp); err != nil { return nil, errors.NewInternalError(err) } } if errs := validation.ValidateServiceExternalTrafficFieldsCombination(service); len(errs) > 0 { return nil, errors.NewInvalid(api.Kind("Service"), service.Name, errs) } out, err := rs.services.Create(ctx, service, createValidation, options) if err != nil { err = rest.CheckGeneratedNameError(rs.strategy, err, service) } if err == nil { el := nodePortOp.Commit() if el != nil { // these should be caught by an eventual reconciliation / restart utilruntime.HandleError(fmt.Errorf("error(s) committing service node-ports changes: %v", el)) } // no clusterips to release toReleaseClusterIPs = nil } return out, err } func (rs *REST) Delete(ctx context.Context, id string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) { // TODO: handle graceful obj, _, err := rs.services.Delete(ctx, id, deleteValidation, options) if err != nil { return nil, false, err } svc := obj.(*api.Service) // (khenidak) double check that this is in fact the best place for this // delete strategy handles graceful delete only. It expects strategy // to implement Graceful-Delete related interface. 
Hence we are not doing // the below there. instead we are doing it locally. Until strategy.BeforeDelete works without // having to implement graceful delete management // set ClusterIPs based on ClusterIP // because we depend on ClusterIPs and data might be saved without ClusterIPs .. if svc.Spec.ClusterIPs == nil && len(svc.Spec.ClusterIP) > 0 { svc.Spec.ClusterIPs = []string{svc.Spec.ClusterIP} } // Only perform the cleanup if this is a non-dryrun deletion if !dryrun.IsDryRun(options.DryRun) { // TODO: can leave dangling endpoints, and potentially return incorrect // endpoints if a new service is created with the same name _, _, err = rs.endpoints.Delete(ctx, id, rest.ValidateAllObjectFunc, &metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { return nil, false, err } rs.releaseAllocatedResources(svc) } // TODO: this is duplicated from the generic storage, when this wrapper is fully removed we can drop this details := &metav1.StatusDetails{ Name: svc.Name, UID: svc.UID, } if info, ok := genericapirequest.RequestInfoFrom(ctx); ok { details.Group = info.APIGroup details.Kind = info.Resource // legacy behavior } status := &metav1.Status{Status: metav1.StatusSuccess, Details: details} return status, true, nil } func (rs *REST) releaseAllocatedResources(svc *api.Service) { rs.releaseServiceClusterIPs(svc) for _, nodePort := range collectServiceNodePorts(svc) { err := rs.serviceNodePorts.Release(nodePort) if err != nil { // these should be caught by an eventual reconciliation / restart utilruntime.HandleError(fmt.Errorf("Error releasing service %s node port %d: %v", svc.Name, nodePort, err)) } } if apiservice.NeedsHealthCheck(svc) { nodePort := svc.Spec.HealthCheckNodePort if nodePort > 0 { err := rs.serviceNodePorts.Release(int(nodePort)) if err != nil { // these should be caught by an eventual reconciliation / restart utilruntime.HandleError(fmt.Errorf("Error releasing service %s health check node port %d: %v", svc.Name, nodePort, err)) } } } } func shouldAllocateNodePorts(service *api.Service) bool { if service.Spec.Type == api.ServiceTypeNodePort { return true } if service.Spec.Type == api.ServiceTypeLoadBalancer { if utilfeature.DefaultFeatureGate.Enabled(features.ServiceLBNodePortControl) { return *service.Spec.AllocateLoadBalancerNodePorts } return true } return false } // externalTrafficPolicyUpdate adjusts ExternalTrafficPolicy during service update if needed. // It is necessary because we default ExternalTrafficPolicy field to different values. // (NodePort / LoadBalancer: default is Global; Other types: default is empty.) func externalTrafficPolicyUpdate(oldService, service *api.Service) { var neededExternalTraffic, needsExternalTraffic bool if oldService.Spec.Type == api.ServiceTypeNodePort || oldService.Spec.Type == api.ServiceTypeLoadBalancer { neededExternalTraffic = true } if service.Spec.Type == api.ServiceTypeNodePort || service.Spec.Type == api.ServiceTypeLoadBalancer { needsExternalTraffic = true } if neededExternalTraffic && !needsExternalTraffic { // Clear ExternalTrafficPolicy to prevent confusion from ineffective field. service.Spec.ExternalTrafficPolicy = api.ServiceExternalTrafficPolicyType("") } } // healthCheckNodePortUpdate handles HealthCheckNodePort allocation/release // and adjusts HealthCheckNodePort during service update if needed. 
func (rs *REST) healthCheckNodePortUpdate(oldService, service *api.Service, nodePortOp *portallocator.PortAllocationOperation) (bool, error) { neededHealthCheckNodePort := apiservice.NeedsHealthCheck(oldService) oldHealthCheckNodePort := oldService.Spec.HealthCheckNodePort needsHealthCheckNodePort := apiservice.NeedsHealthCheck(service) newHealthCheckNodePort := service.Spec.HealthCheckNodePort switch { // Case 1: Transition from don't need HealthCheckNodePort to needs HealthCheckNodePort. // Allocate a health check node port or attempt to reserve the user-specified one if provided. // Insert health check node port into the service's HealthCheckNodePort field if needed. case !neededHealthCheckNodePort && needsHealthCheckNodePort: klog.Infof("Transition to LoadBalancer type service with ExternalTrafficPolicy=Local") if err := allocateHealthCheckNodePort(service, nodePortOp); err != nil { return false, errors.NewInternalError(err) } // Case 2: Transition from needs HealthCheckNodePort to don't need HealthCheckNodePort. // Free the existing healthCheckNodePort and clear the HealthCheckNodePort field. case neededHealthCheckNodePort && !needsHealthCheckNodePort: klog.Infof("Transition to non LoadBalancer type service or LoadBalancer type service with ExternalTrafficPolicy=Global") klog.V(4).Infof("Releasing healthCheckNodePort: %d", oldHealthCheckNodePort) nodePortOp.ReleaseDeferred(int(oldHealthCheckNodePort)) // Clear the HealthCheckNodePort field. service.Spec.HealthCheckNodePort = 0 // Case 3: Remain in needs HealthCheckNodePort. // Reject changing the value of the HealthCheckNodePort field. case neededHealthCheckNodePort && needsHealthCheckNodePort: if oldHealthCheckNodePort != newHealthCheckNodePort { klog.Warningf("Attempt to change value of health check node port DENIED") fldPath := field.NewPath("spec", "healthCheckNodePort") el := field.ErrorList{field.Invalid(fldPath, newHealthCheckNodePort, "cannot change healthCheckNodePort on loadBalancer service with externalTraffic=Local during update")} return false, errors.NewInvalid(api.Kind("Service"), service.Name, el) } } return true, nil } func (rs *REST) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) { oldObj, err := rs.services.Get(ctx, name, &metav1.GetOptions{}) if err != nil { // Support create on update, if forced to. 
if forceAllowCreate { obj, err := objInfo.UpdatedObject(ctx, nil) if err != nil { return nil, false, err } createdObj, err := rs.Create(ctx, obj, createValidation, &metav1.CreateOptions{DryRun: options.DryRun}) if err != nil { return nil, false, err } return createdObj, true, nil } return nil, false, err } oldService := oldObj.(*api.Service) obj, err := objInfo.UpdatedObject(ctx, oldService) if err != nil { return nil, false, err } service := obj.(*api.Service) if !rest.ValidNamespace(ctx, &service.ObjectMeta) { return nil, false, errors.NewConflict(api.Resource("services"), service.Namespace, fmt.Errorf("Service.Namespace does not match the provided context")) } // Copy over non-user fields if err := rest.BeforeUpdate(rs.strategy, ctx, service, oldService); err != nil { return nil, false, err } var allocated map[api.IPFamily]string var toReleaseIPs map[api.IPFamily]string performRelease := false // when set, any clusterIP that should be released will be released // cleanup // on failure: Any allocated ip must be released back // on failure: any ip that should be released, will *not* be released // on success: any ip that should be released, will be released defer func() { // release the allocated, this is expected to be cleared if the entire function ran to success if allocated_released, err := rs.releaseClusterIPs(allocated); err != nil { klog.V(4).Infof("service %v/%v failed to clean up after failed service update error:%v. Allocated/Released:%v/%v", service.Namespace, service.Name, err, allocated, allocated_released) } // performRelease is set when the enture function ran to success if performRelease { if toReleaseIPs_released, err := rs.releaseClusterIPs(toReleaseIPs); err != nil { klog.V(4).Infof("service %v/%v failed to clean up after failed service update error:%v. ShouldRelease/Released:%v/%v", service.Namespace, service.Name, err, toReleaseIPs, toReleaseIPs_released) } } }() nodePortOp := portallocator.StartOperation(rs.serviceNodePorts, dryrun.IsDryRun(options.DryRun)) defer nodePortOp.Finish() // try set ip families (for missing ip families) if err := rs.tryDefaultValidateServiceClusterIPFields(oldService, service); err != nil { return nil, false, err } if !dryrun.IsDryRun(options.DryRun) { allocated, toReleaseIPs, err = rs.handleClusterIPsForUpdatedService(oldService, service) if err != nil { return nil, false, err } } // Update service from NodePort or LoadBalancer to ExternalName or ClusterIP, should release NodePort if exists. if (oldService.Spec.Type == api.ServiceTypeNodePort || oldService.Spec.Type == api.ServiceTypeLoadBalancer) && (service.Spec.Type == api.ServiceTypeExternalName || service.Spec.Type == api.ServiceTypeClusterIP) { releaseNodePorts(oldService, nodePortOp) } // Update service from any type to NodePort or LoadBalancer, should update NodePort. if service.Spec.Type == api.ServiceTypeNodePort || service.Spec.Type == api.ServiceTypeLoadBalancer { if err := updateNodePorts(oldService, service, nodePortOp); err != nil { return nil, false, err } } // Update service from LoadBalancer to non-LoadBalancer, should remove any LoadBalancerStatus. if service.Spec.Type != api.ServiceTypeLoadBalancer { // Although loadbalancer delete is actually asynchronous, we don't need to expose the user to that complexity. service.Status.LoadBalancer = api.LoadBalancerStatus{} } // Handle ExternalTraffic related updates. 
success, err := rs.healthCheckNodePortUpdate(oldService, service, nodePortOp) if !success || err != nil { return nil, false, err } externalTrafficPolicyUpdate(oldService, service) if errs := validation.ValidateServiceExternalTrafficFieldsCombination(service); len(errs) > 0 { return nil, false, errors.NewInvalid(api.Kind("Service"), service.Name, errs) } out, created, err := rs.services.Update(ctx, service.Name, rest.DefaultUpdatedObjectInfo(service), createValidation, updateValidation, forceAllowCreate, options) if err == nil { el := nodePortOp.Commit() if el != nil { // problems should be fixed by an eventual reconciliation / restart utilruntime.HandleError(fmt.Errorf("error(s) committing NodePorts changes: %v", el)) } } // all good allocated = nil // if something was allocated, keep it allocated performRelease = true // if something that should be released then go ahead and release it return out, created, err } // GetResetFields implements rest.ResetFieldsStrategy func (rs *REST) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set { return rs.services.GetResetFields() } // Implement Redirector. var _ = rest.Redirector(&REST{}) // ResourceLocation returns a URL to which one can send traffic for the specified service. func (rs *REST) ResourceLocation(ctx context.Context, id string) (*url.URL, http.RoundTripper, error) { // Allow ID as "svcname", "svcname:port", or "scheme:svcname:port". svcScheme, svcName, portStr, valid := utilnet.SplitSchemeNamePort(id) if !valid { return nil, nil, errors.NewBadRequest(fmt.Sprintf("invalid service request %q", id)) } // If a port *number* was specified, find the corresponding service port name if portNum, err := strconv.ParseInt(portStr, 10, 64); err == nil { obj, err := rs.services.Get(ctx, svcName, &metav1.GetOptions{}) if err != nil { return nil, nil, err } svc := obj.(*api.Service) found := false for _, svcPort := range svc.Spec.Ports { if int64(svcPort.Port) == portNum { // use the declared port's name portStr = svcPort.Name found = true break } } if !found { return nil, nil, errors.NewServiceUnavailable(fmt.Sprintf("no service port %d found for service %q", portNum, svcName)) } } obj, err := rs.endpoints.Get(ctx, svcName, &metav1.GetOptions{}) if err != nil { return nil, nil, err } eps := obj.(*api.Endpoints) if len(eps.Subsets) == 0 { return nil, nil, errors.NewServiceUnavailable(fmt.Sprintf("no endpoints available for service %q", svcName)) } // Pick a random Subset to start searching from. ssSeed := rand.Intn(len(eps.Subsets)) // Find a Subset that has the port. for ssi := 0; ssi < len(eps.Subsets); ssi++ { ss := &eps.Subsets[(ssSeed+ssi)%len(eps.Subsets)] if len(ss.Addresses) == 0 { continue } for i := range ss.Ports { if ss.Ports[i].Name == portStr { addrSeed := rand.Intn(len(ss.Addresses)) // This is a little wonky, but it's expensive to test for the presence of a Pod // So we repeatedly try at random and validate it, this means that for an invalid // service with a lot of endpoints we're going to potentially make a lot of calls, // but in the expected case we'll only make one. 
for try := 0; try < len(ss.Addresses); try++ { addr := ss.Addresses[(addrSeed+try)%len(ss.Addresses)] if err := isValidAddress(ctx, &addr, rs.pods); err != nil { utilruntime.HandleError(fmt.Errorf("Address %v isn't valid (%v)", addr, err)) continue } ip := addr.IP port := int(ss.Ports[i].Port) return &url.URL{ Scheme: svcScheme, Host: net.JoinHostPort(ip, strconv.Itoa(port)), }, rs.proxyTransport, nil } utilruntime.HandleError(fmt.Errorf("Failed to find a valid address, skipping subset: %v", ss)) } } } return nil, nil, errors.NewServiceUnavailable(fmt.Sprintf("no endpoints available for service %q", id)) } func (r *REST) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) { return r.services.ConvertToTable(ctx, object, tableOptions) } func (rs *REST) allocClusterIPs(service *api.Service, toAlloc map[api.IPFamily]string) (map[api.IPFamily]string, error) { allocated := make(map[api.IPFamily]string) for family, ip := range toAlloc { allocator := rs.serviceIPAllocatorsByFamily[family] // should always be there, as we pre validate if ip == "" { allocatedIP, err := allocator.AllocateNext() if err != nil { return allocated, errors.NewInternalError(fmt.Errorf("failed to allocate a serviceIP: %v", err)) } allocated[family] = allocatedIP.String() } else { parsedIP := netutils.ParseIPSloppy(ip) if err := allocator.Allocate(parsedIP); err != nil { el := field.ErrorList{field.Invalid(field.NewPath("spec", "clusterIPs"), service.Spec.ClusterIPs, fmt.Sprintf("failed to allocate IP %v: %v", ip, err))} return allocated, errors.NewInvalid(api.Kind("Service"), service.Name, el) } allocated[family] = ip } } return allocated, nil } // releases clusterIPs per family func (rs *REST) releaseClusterIPs(toRelease map[api.IPFamily]string) (map[api.IPFamily]string, error) { if toRelease == nil { return nil, nil } released := make(map[api.IPFamily]string) for family, ip := range toRelease { allocator, ok := rs.serviceIPAllocatorsByFamily[family] if !ok { // cluster was configured for dual stack, then single stack klog.V(4).Infof("delete service. Not releasing ClusterIP:%v because IPFamily:%v is no longer configured on server", ip, family) continue } parsedIP := netutils.ParseIPSloppy(ip) if err := allocator.Release(parsedIP); err != nil { return released, err } released[family] = ip } return released, nil } // standard allocator for dualstackgate==Off, hard wired dependency // and ignores policy, families and clusterIPs func (rs *REST) allocServiceClusterIP(service *api.Service) (map[api.IPFamily]string, error) { toAlloc := make(map[api.IPFamily]string) // get clusterIP.. 
empty string if user did not specify an ip toAlloc[rs.defaultServiceIPFamily] = service.Spec.ClusterIP // alloc allocated, err := rs.allocClusterIPs(service, toAlloc) // set if err == nil { service.Spec.ClusterIP = allocated[rs.defaultServiceIPFamily] service.Spec.ClusterIPs = []string{allocated[rs.defaultServiceIPFamily]} } return allocated, err } // allocates ClusterIPs for a service func (rs *REST) allocServiceClusterIPs(service *api.Service) (map[api.IPFamily]string, error) { // external name don't get ClusterIPs if service.Spec.Type == api.ServiceTypeExternalName { return nil, nil } // headless don't get ClusterIPs if len(service.Spec.ClusterIPs) > 0 && service.Spec.ClusterIPs[0] == api.ClusterIPNone { return nil, nil } if !utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack) { return rs.allocServiceClusterIP(service) } toAlloc := make(map[api.IPFamily]string) // at this stage, the only fact we know is that service has correct ip families // assigned to it. It may have partial assigned ClusterIPs (Upgrade to dual stack) // may have no ips at all. The below loop is meant to fix this // (we also know that this cluster has these families) // if there is no slice to work with if service.Spec.ClusterIPs == nil { service.Spec.ClusterIPs = make([]string, 0, len(service.Spec.IPFamilies)) } for i, ipFamily := range service.Spec.IPFamilies { if i > (len(service.Spec.ClusterIPs) - 1) { service.Spec.ClusterIPs = append(service.Spec.ClusterIPs, "" /* just a marker */) } toAlloc[ipFamily] = service.Spec.ClusterIPs[i] } // allocate allocated, err := rs.allocClusterIPs(service, toAlloc) // set if successful if err == nil { for family, ip := range allocated { for i, check := range service.Spec.IPFamilies { if family == check { service.Spec.ClusterIPs[i] = ip // while we technically don't need to do that testing rest does not // go through conversion logic but goes through validation *sigh*. // so we set ClusterIP here as well // because the testing code expects valid (as they are output-ed from conversion) // as it patches fields if i == 0 { service.Spec.ClusterIP = ip } } } } } return allocated, err } // handles type change/upgrade/downgrade change type for an update service // this func does not perform actual release of clusterIPs. it returns // a map[family]ip for the caller to release when everything else has // executed successfully func (rs *REST) handleClusterIPsForUpdatedService(oldService *api.Service, service *api.Service) (allocated map[api.IPFamily]string, toRelease map[api.IPFamily]string, err error) { // We don't want to upgrade (add an IP) or downgrade (remove an IP) // following a cluster downgrade/upgrade to/from dual-stackness // a PreferDualStack service following principle of least surprise // That means: PreferDualStack service will only be upgraded // if: // - changes type to RequireDualStack // - manually adding or removing ClusterIP (secondary) // - manually adding or removing IPFamily (secondary) if isMatchingPreferDualStackClusterIPFields(oldService, service) { return allocated, toRelease, nil // nothing more to do. } // use cases: // A: service changing types from ExternalName TO ClusterIP types ==> allocate all new // B: service changing types from ClusterIP types TO ExternalName ==> release all allocated // C: Service upgrading to dual stack ==> partial allocation // D: service downgrading from dual stack ==> partial release // CASE A: // Update service from ExternalName to non-ExternalName, should initialize ClusterIP. 
if oldService.Spec.Type == api.ServiceTypeExternalName && service.Spec.Type != api.ServiceTypeExternalName { allocated, err := rs.allocServiceClusterIPs(service) return allocated, nil, err } // CASE B: // Update service from non-ExternalName to ExternalName, should release ClusterIP if exists. if oldService.Spec.Type != api.ServiceTypeExternalName && service.Spec.Type == api.ServiceTypeExternalName { toRelease = make(map[api.IPFamily]string) if !utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack) { // for non dual stack enabled cluster we use clusterIPs toRelease[rs.defaultServiceIPFamily] = oldService.Spec.ClusterIP } else { // dual stack is enabled, collect ClusterIPs by families for i, family := range oldService.Spec.IPFamilies { toRelease[family] = oldService.Spec.ClusterIPs[i] } } return nil, toRelease, nil } // if headless service then we bail out early (no clusterIPs management needed) if len(oldService.Spec.ClusterIPs) > 0 && oldService.Spec.ClusterIPs[0] == api.ClusterIPNone { return nil, nil, nil } // upgrade and downgrade are specific to dualstack if !utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack) { return nil, nil, nil } upgraded := len(oldService.Spec.IPFamilies) == 1 && len(service.Spec.IPFamilies) == 2 downgraded := len(oldService.Spec.IPFamilies) == 2 && len(service.Spec.IPFamilies) == 1 // CASE C: if upgraded { toAllocate := make(map[api.IPFamily]string) // if secondary ip was named, just get it. if not add a marker if len(service.Spec.ClusterIPs) < 2 { service.Spec.ClusterIPs = append(service.Spec.ClusterIPs, "" /* marker */) } toAllocate[service.Spec.IPFamilies[1]] = service.Spec.ClusterIPs[1] // allocate allocated, err := rs.allocClusterIPs(service, toAllocate) // set if successful if err == nil { service.Spec.ClusterIPs[1] = allocated[service.Spec.IPFamilies[1]] } return allocated, nil, err } // CASE D: if downgraded { toRelease = make(map[api.IPFamily]string) toRelease[oldService.Spec.IPFamilies[1]] = oldService.Spec.ClusterIPs[1] // note: we don't release clusterIP, this is left to clean up in the action itself return nil, toRelease, err } // it was not an upgrade nor downgrade return nil, nil, nil } // for pre dual stack (gate == off). 
Hardwired to ClusterIP and ignores all new fields func (rs *REST) releaseServiceClusterIP(service *api.Service) (released map[api.IPFamily]string, err error) { toRelease := make(map[api.IPFamily]string) // we need to do that to handle cases where allocator is no longer configured on // cluster if netutils.IsIPv6String(service.Spec.ClusterIP) { toRelease[api.IPv6Protocol] = service.Spec.ClusterIP } else { toRelease[api.IPv4Protocol] = service.Spec.ClusterIP } return rs.releaseClusterIPs(toRelease) } // releases allocated ClusterIPs for service that is about to be deleted func (rs *REST) releaseServiceClusterIPs(service *api.Service) (released map[api.IPFamily]string, err error) { // external name don't get ClusterIPs if service.Spec.Type == api.ServiceTypeExternalName { return nil, nil } // headless don't get ClusterIPs if len(service.Spec.ClusterIPs) > 0 && service.Spec.ClusterIPs[0] == api.ClusterIPNone { return nil, nil } if !utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack) { return rs.releaseServiceClusterIP(service) } toRelease := make(map[api.IPFamily]string) for _, ip := range service.Spec.ClusterIPs { if netutils.IsIPv6String(ip) { toRelease[api.IPv6Protocol] = ip } else { toRelease[api.IPv4Protocol] = ip } } return rs.releaseClusterIPs(toRelease) } // tests if two preferred dual-stack service have matching ClusterIPFields // assumption: old service is a valid, default service (e.g., loaded from store) func isMatchingPreferDualStackClusterIPFields(oldService, service *api.Service) bool { if oldService == nil { return false } if service.Spec.IPFamilyPolicy == nil { return false } // if type mutated then it is an update // that needs to run through the entire process. if oldService.Spec.Type != service.Spec.Type { return false } // both must be type that gets an IP assigned if service.Spec.Type != api.ServiceTypeClusterIP && service.Spec.Type != api.ServiceTypeNodePort && service.Spec.Type != api.ServiceTypeLoadBalancer { return false } // both must be of IPFamilyPolicy==PreferDualStack if service.Spec.IPFamilyPolicy != nil && *(service.Spec.IPFamilyPolicy) != api.IPFamilyPolicyPreferDualStack { return false } if oldService.Spec.IPFamilyPolicy != nil && *(oldService.Spec.IPFamilyPolicy) != api.IPFamilyPolicyPreferDualStack { return false } // compare ClusterIPs lengths. // due to validation. if len(service.Spec.ClusterIPs) != len(oldService.Spec.ClusterIPs) { return false } for i, ip := range service.Spec.ClusterIPs { if oldService.Spec.ClusterIPs[i] != ip { return false } } // compare IPFamilies if len(service.Spec.IPFamilies) != len(oldService.Spec.IPFamilies) { return false } for i, family := range service.Spec.IPFamilies { if oldService.Spec.IPFamilies[i] != family { return false } } // they match on // Policy: preferDualStack // ClusterIPs // IPFamilies return true } // attempts to default service ip families according to cluster configuration // while ensuring that provided families are configured on cluster. func (rs *REST) tryDefaultValidateServiceClusterIPFields(oldService, service *api.Service) error { // can not do anything here if service.Spec.Type == api.ServiceTypeExternalName { return nil } // gate off. 
We don't need to validate or default new fields // we totally depend on existing validation in apis/validation if !utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack) { return nil } // We don't want to upgrade (add an IP) or downgrade (remove an IP) // following a cluster downgrade/upgrade to/from dual-stackness // a PreferDualStack service following principle of least surprise // That means: PreferDualStack service will only be upgraded // if: // - changes type to RequireDualStack // - manually adding or removing ClusterIP (secondary) // - manually adding or removing IPFamily (secondary) if isMatchingPreferDualStackClusterIPFields(oldService, service) { return nil // nothing more to do. } // two families or two IPs with SingleStack if service.Spec.IPFamilyPolicy != nil { el := make(field.ErrorList, 0) if *(service.Spec.IPFamilyPolicy) == api.IPFamilyPolicySingleStack { if len(service.Spec.ClusterIPs) == 2 { el = append(el, field.Invalid(field.NewPath("spec", "ipFamilyPolicy"), service.Spec.IPFamilyPolicy, "must be RequireDualStack or PreferDualStack when multiple 'clusterIPs' are specified")) } if len(service.Spec.IPFamilies) == 2 { el = append(el, field.Invalid(field.NewPath("spec", "ipFamilyPolicy"), service.Spec.IPFamilyPolicy, "must be RequireDualStack or PreferDualStack when multiple 'ipFamilies' are specified")) } } if len(el) > 0 { return errors.NewInvalid(api.Kind("Service"), service.Name, el) } } // Infer IPFamilies[] from ClusterIPs[]. for i, ip := range service.Spec.ClusterIPs { if ip == api.ClusterIPNone { break } // we have previously validated for ip correctness and if family exist it will match ip family // so the following is safe to do isIPv6 := netutils.IsIPv6String(ip) // Family is not specified yet. if i >= len(service.Spec.IPFamilies) { if isIPv6 { // first make sure that family(ip) is configured if _, found := rs.serviceIPAllocatorsByFamily[api.IPv6Protocol]; !found { el := field.ErrorList{field.Invalid(field.NewPath("spec", "clusterIPs").Index(i), service.Spec.ClusterIPs, "may not use IPv6 on a cluster which is not configured for it")} return errors.NewInvalid(api.Kind("Service"), service.Name, el) } service.Spec.IPFamilies = append(service.Spec.IPFamilies, api.IPv6Protocol) } else { // first make sure that family(ip) is configured if _, found := rs.serviceIPAllocatorsByFamily[api.IPv4Protocol]; !found { el := field.ErrorList{field.Invalid(field.NewPath("spec", "clusterIPs").Index(i), service.Spec.ClusterIPs, "may not use IPv4 on a cluster which is not configured for it")} return errors.NewInvalid(api.Kind("Service"), service.Name, el) } service.Spec.IPFamilies = append(service.Spec.IPFamilies, api.IPv4Protocol) } } } // Infer IPFamilyPolicy from IPFamilies[]. This block does not handle the // final defaulting - that happens a bit later, after special-cases. if service.Spec.IPFamilyPolicy == nil && len(service.Spec.IPFamilies) == 2 { requireDualStack := api.IPFamilyPolicyRequireDualStack service.Spec.IPFamilyPolicy = &requireDualStack } // Special-case: headless+selectorless if len(service.Spec.ClusterIPs) > 0 && service.Spec.ClusterIPs[0] == api.ClusterIPNone && len(service.Spec.Selector) == 0 { // If the use said nothing about policy and we can't infer it, they get dual-stack if service.Spec.IPFamilyPolicy == nil { requireDualStack := api.IPFamilyPolicyRequireDualStack service.Spec.IPFamilyPolicy = &requireDualStack } // If IPFamilies was not set by the user, start with the default // family. 
if len(service.Spec.IPFamilies) == 0 { service.Spec.IPFamilies = []api.IPFamily{rs.defaultServiceIPFamily} } // this follows headful services. With one exception on a single stack // cluster the user is allowed to create headless services that has multi families // the validation allows it if len(service.Spec.IPFamilies) < 2 { if *(service.Spec.IPFamilyPolicy) != api.IPFamilyPolicySingleStack { // add the alt ipfamily if service.Spec.IPFamilies[0] == api.IPv4Protocol { service.Spec.IPFamilies = append(service.Spec.IPFamilies, api.IPv6Protocol) } else { service.Spec.IPFamilies = append(service.Spec.IPFamilies, api.IPv4Protocol) } } } // nothing more needed here return nil } // ipfamily check // the following applies on all type of services including headless w/ selector el := make(field.ErrorList, 0) // asking for dual stack on a non dual stack cluster // should fail without assigning any family if service.Spec.IPFamilyPolicy != nil && *(service.Spec.IPFamilyPolicy) == api.IPFamilyPolicyRequireDualStack && len(rs.serviceIPAllocatorsByFamily) < 2 { el = append(el, field.Invalid(field.NewPath("spec", "ipFamilyPolicy"), service.Spec.IPFamilyPolicy, "Cluster is not configured for dual stack services")) } // if there is a family requested then it has to be configured on cluster for i, ipFamily := range service.Spec.IPFamilies { if _, found := rs.serviceIPAllocatorsByFamily[ipFamily]; !found { el = append(el, field.Invalid(field.NewPath("spec", "ipFamilies").Index(i), service.Spec.ClusterIPs, fmt.Sprintf("ipfamily %v is not configured on cluster", ipFamily))) } } // if we have validation errors return them and bail out if len(el) > 0 { return errors.NewInvalid(api.Kind("Service"), service.Name, el) } // Finally, if IPFamilyPolicy is *still* not set, we can default it to // SingleStack. If there are any webhooks, they have already run. if service.Spec.IPFamilyPolicy == nil { singleStack := api.IPFamilyPolicySingleStack service.Spec.IPFamilyPolicy = &singleStack } // nil families, gets cluster default (if feature flag is not in effect, the strategy will take care of removing it) if len(service.Spec.IPFamilies) == 0 { service.Spec.IPFamilies = []api.IPFamily{rs.defaultServiceIPFamily} } // is this service looking for dual stack, and this cluster does have two families? 
// if so, then append the missing family if *(service.Spec.IPFamilyPolicy) != api.IPFamilyPolicySingleStack && len(service.Spec.IPFamilies) == 1 && len(rs.serviceIPAllocatorsByFamily) == 2 { if service.Spec.IPFamilies[0] == api.IPv4Protocol { service.Spec.IPFamilies = append(service.Spec.IPFamilies, api.IPv6Protocol) } if service.Spec.IPFamilies[0] == api.IPv6Protocol { service.Spec.IPFamilies = append(service.Spec.IPFamilies, api.IPv4Protocol) } } return nil } func isValidAddress(ctx context.Context, addr *api.EndpointAddress, pods rest.Getter) error { if addr.TargetRef == nil { return fmt.Errorf("Address has no target ref, skipping: %v", addr) } if genericapirequest.NamespaceValue(ctx) != addr.TargetRef.Namespace { return fmt.Errorf("Address namespace doesn't match context namespace") } obj, err := pods.Get(ctx, addr.TargetRef.Name, &metav1.GetOptions{}) if err != nil { return err } pod, ok := obj.(*api.Pod) if !ok { return fmt.Errorf("failed to cast to pod: %v", obj) } if pod == nil { return fmt.Errorf("pod is missing, skipping (%s/%s)", addr.TargetRef.Namespace, addr.TargetRef.Name) } for _, podIP := range pod.Status.PodIPs { if podIP.IP == addr.IP { return nil } } return fmt.Errorf("pod ip(s) doesn't match endpoint ip, skipping: %v vs %s (%s/%s)", pod.Status.PodIPs, addr.IP, addr.TargetRef.Namespace, addr.TargetRef.Name) } // This is O(N), but we expect haystack to be small; // so small that we expect a linear search to be faster func containsNumber(haystack []int, needle int) bool { for _, v := range haystack { if v == needle { return true } } return false } // This is O(N), but we expect serviceNodePorts to be small; // so small that we expect a linear search to be faster func containsNodePort(serviceNodePorts []ServiceNodePort, serviceNodePort ServiceNodePort) bool { for _, snp := range serviceNodePorts { if snp == serviceNodePort { return true } } return false } // Loop through the service ports list, find one with the same port number and // NodePort specified, return this NodePort otherwise return 0. func findRequestedNodePort(port int, servicePorts []api.ServicePort) int { for i := range servicePorts { servicePort := servicePorts[i] if port == int(servicePort.Port) && servicePort.NodePort != 0 { return int(servicePort.NodePort) } } return 0 } // allocateHealthCheckNodePort allocates health check node port to service. func allocateHealthCheckNodePort(service *api.Service, nodePortOp *portallocator.PortAllocationOperation) error { healthCheckNodePort := service.Spec.HealthCheckNodePort if healthCheckNodePort != 0 { // If the request has a health check nodePort in mind, attempt to reserve it. err := nodePortOp.Allocate(int(healthCheckNodePort)) if err != nil { return fmt.Errorf("failed to allocate requested HealthCheck NodePort %v: %v", healthCheckNodePort, err) } klog.V(4).Infof("Reserved user requested healthCheckNodePort: %d", healthCheckNodePort) } else { // If the request has no health check nodePort specified, allocate any. 
healthCheckNodePort, err := nodePortOp.AllocateNext() if err != nil { return fmt.Errorf("failed to allocate a HealthCheck NodePort %v: %v", healthCheckNodePort, err) } service.Spec.HealthCheckNodePort = int32(healthCheckNodePort) klog.V(4).Infof("Reserved allocated healthCheckNodePort: %d", healthCheckNodePort) } return nil } func initNodePorts(service *api.Service, nodePortOp *portallocator.PortAllocationOperation) error { svcPortToNodePort := map[int]int{} for i := range service.Spec.Ports { servicePort := &service.Spec.Ports[i] if servicePort.NodePort == 0 && !shouldAllocateNodePorts(service) { // Don't allocate new ports, but do respect specific requests. continue } allocatedNodePort := svcPortToNodePort[int(servicePort.Port)] if allocatedNodePort == 0 { // This will only scan forward in the service.Spec.Ports list because any matches // before the current port would have been found in svcPortToNodePort. This is really // looking for any user provided values. np := findRequestedNodePort(int(servicePort.Port), service.Spec.Ports) if np != 0 { err := nodePortOp.Allocate(np) if err != nil { // TODO: when validation becomes versioned, this gets more complicated. el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), np, err.Error())} return errors.NewInvalid(api.Kind("Service"), service.Name, el) } servicePort.NodePort = int32(np) svcPortToNodePort[int(servicePort.Port)] = np } else { nodePort, err := nodePortOp.AllocateNext() if err != nil { // TODO: what error should be returned here? It's not a // field-level validation failure (the field is valid), and it's // not really an internal error. return errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err)) } servicePort.NodePort = int32(nodePort) svcPortToNodePort[int(servicePort.Port)] = nodePort } } else if int(servicePort.NodePort) != allocatedNodePort { // TODO(xiangpengzhao): do we need to allocate a new NodePort in this case? // Note: the current implementation is better, because it saves a NodePort. if servicePort.NodePort == 0 { servicePort.NodePort = int32(allocatedNodePort) } else { err := nodePortOp.Allocate(int(servicePort.NodePort)) if err != nil { // TODO: when validation becomes versioned, this gets more complicated. el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), servicePort.NodePort, err.Error())} return errors.NewInvalid(api.Kind("Service"), service.Name, el) } } } } return nil } func updateNodePorts(oldService, newService *api.Service, nodePortOp *portallocator.PortAllocationOperation) error { oldNodePortsNumbers := collectServiceNodePorts(oldService) newNodePorts := []ServiceNodePort{} portAllocated := map[int]bool{} for i := range newService.Spec.Ports { servicePort := &newService.Spec.Ports[i] if servicePort.NodePort == 0 && !shouldAllocateNodePorts(newService) { // Don't allocate new ports, but do respect specific requests. 
continue } nodePort := ServiceNodePort{Protocol: servicePort.Protocol, NodePort: servicePort.NodePort} if nodePort.NodePort != 0 { if !containsNumber(oldNodePortsNumbers, int(nodePort.NodePort)) && !portAllocated[int(nodePort.NodePort)] { err := nodePortOp.Allocate(int(nodePort.NodePort)) if err != nil { el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), nodePort.NodePort, err.Error())} return errors.NewInvalid(api.Kind("Service"), newService.Name, el) } portAllocated[int(nodePort.NodePort)] = true } } else { nodePortNumber, err := nodePortOp.AllocateNext() if err != nil { // TODO: what error should be returned here? It's not a // field-level validation failure (the field is valid), and it's // not really an internal error. return errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err)) } servicePort.NodePort = int32(nodePortNumber) nodePort.NodePort = servicePort.NodePort } if containsNodePort(newNodePorts, nodePort) { return fmt.Errorf("duplicate nodePort: %v", nodePort) } newNodePorts = append(newNodePorts, nodePort) } newNodePortsNumbers := collectServiceNodePorts(newService) // The comparison loops are O(N^2), but we don't expect N to be huge // (there's a hard-limit at 2^16, because they're ports; and even 4 ports would be a lot) for _, oldNodePortNumber := range oldNodePortsNumbers { if containsNumber(newNodePortsNumbers, oldNodePortNumber) { continue } nodePortOp.ReleaseDeferred(int(oldNodePortNumber)) } return nil } func releaseNodePorts(service *api.Service, nodePortOp *portallocator.PortAllocationOperation) { nodePorts := collectServiceNodePorts(service) for _, nodePort := range nodePorts { nodePortOp.ReleaseDeferred(nodePort) } } func collectServiceNodePorts(service *api.Service) []int { servicePorts := []int{} for i := range service.Spec.Ports { servicePort := &service.Spec.Ports[i] if servicePort.NodePort != 0 { servicePorts = append(servicePorts, int(servicePort.NodePort)) } } return servicePorts }
pkg/registry/core/service/storage/rest.go
1
https://github.com/kubernetes/kubernetes/commit/75dea6b8bc2b7fb889cd41957bc5d061ab614ff4
[ 0.9953840374946594, 0.23208406567573547, 0.00016194679483305663, 0.002276231534779072, 0.36972546577453613 ]
{ "id": 3, "code_window": [ "\n", "// validates that the service created, updated by REST\n", "// has correct ClusterIPs related fields\n", "func isValidClusterIPFields(t *testing.T, storage *REST, pre *api.Service, post *api.Service) {\n", "\t// valid for gate off/on scenarios\n", "\t// ClusterIP\n", "\tif len(post.Spec.ClusterIP) == 0 {\n", "\t\tt.Fatalf(\"service must have clusterIP : %+v\", post)\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tt.Helper()\n", "\n" ], "file_path": "pkg/registry/core/service/storage/rest_test.go", "type": "add", "edit_start_line_idx": 3374 }
/* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package network import "github.com/onsi/ginkgo" // SIGDescribe annotates the test with the SIG label. func SIGDescribe(text string, body func()) bool { return ginkgo.Describe("[sig-network] "+text, body) }
test/e2e/cloud/gcp/network/framework.go
0
https://github.com/kubernetes/kubernetes/commit/75dea6b8bc2b7fb889cd41957bc5d061ab614ff4
[ 0.00017834424215834588, 0.0001749686780385673, 0.00016894456348381937, 0.00017761722847353667, 0.0000042700198719103355 ]
{ "id": 3, "code_window": [ "\n", "// validates that the service created, updated by REST\n", "// has correct ClusterIPs related fields\n", "func isValidClusterIPFields(t *testing.T, storage *REST, pre *api.Service, post *api.Service) {\n", "\t// valid for gate off/on scenarios\n", "\t// ClusterIP\n", "\tif len(post.Spec.ClusterIP) == 0 {\n", "\t\tt.Fatalf(\"service must have clusterIP : %+v\", post)\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tt.Helper()\n", "\n" ], "file_path": "pkg/registry/core/service/storage/rest_test.go", "type": "add", "edit_start_line_idx": 3374 }
Additional IP Rights Grant (Patents) "This implementation" means the copyrightable works distributed by Google as part of the Go project. Google hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, transfer and otherwise run, modify and propagate the contents of this implementation of Go, where such license applies only to those patent claims, both currently owned or controlled by Google and acquired in the future, licensable by Google that are necessarily infringed by this implementation of Go. This grant does not include claims that would be infringed only as a consequence of further modification of this implementation. If you or your agent or exclusive licensee institute or order or agree to the institution of patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that this implementation of Go or any code incorporated within this implementation of Go constitutes direct or contributory patent infringement, or inducement of patent infringement, then any patent rights granted to you under this License for this implementation of Go shall terminate as of the date such litigation is filed.
vendor/golang.org/x/term/PATENTS
0
https://github.com/kubernetes/kubernetes/commit/75dea6b8bc2b7fb889cd41957bc5d061ab614ff4
[ 0.00017627001216169447, 0.00017552472127135843, 0.00017454999033361673, 0.0001757541176630184, 7.20689854460943e-7 ]
{ "id": 3, "code_window": [ "\n", "// validates that the service created, updated by REST\n", "// has correct ClusterIPs related fields\n", "func isValidClusterIPFields(t *testing.T, storage *REST, pre *api.Service, post *api.Service) {\n", "\t// valid for gate off/on scenarios\n", "\t// ClusterIP\n", "\tif len(post.Spec.ClusterIP) == 0 {\n", "\t\tt.Fatalf(\"service must have clusterIP : %+v\", post)\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tt.Helper()\n", "\n" ], "file_path": "pkg/registry/core/service/storage/rest_test.go", "type": "add", "edit_start_line_idx": 3374 }
package migration import ( "errors" "github.com/coredns/corefile-migration/migration/corefile" ) type plugin struct { status string replacedBy string additional string namedOptions map[string]option patternOptions map[string]option action pluginActionFn // action affecting this plugin only add serverActionFn // action to add a new plugin to the server block downAction pluginActionFn // downgrade action affecting this plugin only } type option struct { name string status string replacedBy string additional string action optionActionFn // action affecting this option only add pluginActionFn // action to add the option to the plugin downAction optionActionFn // downgrade action affecting this option only } type corefileAction func(*corefile.Corefile) (*corefile.Corefile, error) type serverActionFn func(*corefile.Server) (*corefile.Server, error) type pluginActionFn func(*corefile.Plugin) (*corefile.Plugin, error) type optionActionFn func(*corefile.Option) (*corefile.Option, error) // plugins holds a map of plugin names and their migration rules per "version". "Version" here is meaningless outside // of the context of this code. Each change in options or migration actions for a plugin requires a new "version" // containing those new/removed options and migration actions. Plugins in CoreDNS are not versioned. var plugins = map[string]map[string]plugin{ "kubernetes": { "v1": plugin{ namedOptions: map[string]option{ "resyncperiod": {}, "endpoint": {}, "tls": {}, "namespaces": {}, "labels": {}, "pods": {}, "endpoint_pod_names": {}, "upstream": {}, "ttl": {}, "noendpoints": {}, "transfer": {}, "fallthrough": {}, "ignore": {}, }, }, "v2": plugin{ namedOptions: map[string]option{ "resyncperiod": {}, "endpoint": {}, "tls": {}, "namespaces": {}, "labels": {}, "pods": {}, "endpoint_pod_names": {}, "upstream": {}, "ttl": {}, "noendpoints": {}, "transfer": {}, "fallthrough": {}, "ignore": {}, "kubeconfig": {}, // new option }, }, "v3": plugin{ namedOptions: map[string]option{ "resyncperiod": {}, "endpoint": { // new deprecation status: SevDeprecated, action: useFirstArgumentOnly, }, "tls": {}, "kubeconfig": {}, "namespaces": {}, "labels": {}, "pods": {}, "endpoint_pod_names": {}, "upstream": {}, "ttl": {}, "noendpoints": {}, "transfer": {}, "fallthrough": {}, "ignore": {}, }, }, "v4": plugin{ namedOptions: map[string]option{ "resyncperiod": {}, "endpoint": { status: SevIgnored, action: useFirstArgumentOnly, }, "tls": {}, "kubeconfig": {}, "namespaces": {}, "labels": {}, "pods": {}, "endpoint_pod_names": {}, "upstream": { // new deprecation status: SevDeprecated, action: removeOption, }, "ttl": {}, "noendpoints": {}, "transfer": {}, "fallthrough": {}, "ignore": {}, }, }, "v5": plugin{ namedOptions: map[string]option{ "resyncperiod": { // new deprecation status: SevDeprecated, action: removeOption, }, "endpoint": { status: SevIgnored, action: useFirstArgumentOnly, }, "tls": {}, "kubeconfig": {}, "namespaces": {}, "labels": {}, "pods": {}, "endpoint_pod_names": {}, "upstream": { status: SevIgnored, action: removeOption, }, "ttl": {}, "noendpoints": {}, "transfer": {}, "fallthrough": {}, "ignore": {}, }, }, "v6": plugin{ namedOptions: map[string]option{ "resyncperiod": { // now ignored status: SevIgnored, action: removeOption, }, "endpoint": { status: SevIgnored, action: useFirstArgumentOnly, }, "tls": {}, "kubeconfig": {}, "namespaces": {}, "labels": {}, "pods": {}, "endpoint_pod_names": {}, "upstream": { status: SevIgnored, action: removeOption, }, "ttl": {}, "noendpoints": {}, "transfer": {}, 
"fallthrough": {}, "ignore": {}, }, }, "v7": plugin{ namedOptions: map[string]option{ "resyncperiod": { // new removal status: SevRemoved, action: removeOption, }, "endpoint": { status: SevIgnored, action: useFirstArgumentOnly, }, "tls": {}, "kubeconfig": {}, "namespaces": {}, "labels": {}, "pods": {}, "endpoint_pod_names": {}, "upstream": { // new removal status: SevRemoved, action: removeOption, }, "ttl": {}, "noendpoints": {}, "transfer": {}, "fallthrough": {}, "ignore": {}, }, }, "v8 remove transfer option": plugin{ namedOptions: map[string]option{ "endpoint": { status: SevIgnored, action: useFirstArgumentOnly, }, "tls": {}, "kubeconfig": {}, "namespaces": {}, "labels": {}, "pods": {}, "endpoint_pod_names": {}, "ttl": {}, "noendpoints": {}, "transfer": { status: SevRemoved, action: removeOption, }, "fallthrough": {}, "ignore": {}, }, }, "v8": plugin{ namedOptions: map[string]option{ "endpoint": { status: SevIgnored, action: useFirstArgumentOnly, }, "tls": {}, "kubeconfig": {}, "namespaces": {}, "labels": {}, "pods": {}, "endpoint_pod_names": {}, "ttl": {}, "noendpoints": {}, "fallthrough": {}, "ignore": {}, }, }, }, "errors": { "v1": plugin{}, "v2": plugin{ namedOptions: map[string]option{ "consolidate": {}, }, }, }, "health": { "v1": plugin{ namedOptions: map[string]option{ "lameduck": {}, }, }, "v1 add lameduck": plugin{ namedOptions: map[string]option{ "lameduck": { status: SevNewDefault, add: func(c *corefile.Plugin) (*corefile.Plugin, error) { return addOptionToPlugin(c, &corefile.Option{Name: "lameduck 5s"}) }, downAction: removeOption, }, }, }, }, "hosts": { "v1": plugin{ namedOptions: map[string]option{ "ttl": {}, "no_reverse": {}, "reload": {}, "fallthrough": {}, }, patternOptions: map[string]option{ `\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}`: {}, // close enough `[0-9A-Fa-f]{1,4}:[:0-9A-Fa-f]+:[0-9A-Fa-f]{1,4}`: {}, // less close, but still close enough }, }, }, "rewrite": { "v1": plugin{ namedOptions: map[string]option{ "type": {}, "class": {}, "name": {}, "answer name": {}, "edns0": {}, }, }, "v2": plugin{ namedOptions: map[string]option{ "type": {}, "class": {}, "name": {}, "answer name": {}, "edns0": {}, "ttl": {}, // new option }, }, }, "log": { "v1": plugin{ namedOptions: map[string]option{ "class": {}, }, }, }, "cache": { "v1": plugin{ namedOptions: map[string]option{ "success": {}, "denial": {}, "prefetch": {}, }, }, "v2": plugin{ namedOptions: map[string]option{ "success": {}, "denial": {}, "prefetch": {}, "serve_stale": {}, // new option }, }, }, "forward": { "v1": plugin{ namedOptions: map[string]option{ "except": {}, "force_tcp": {}, "expire": {}, "max_fails": {}, "tls": {}, "tls_servername": {}, "policy": {}, "health_check": {}, }, }, "v2": plugin{ namedOptions: map[string]option{ "except": {}, "force_tcp": {}, "prefer_udp": {}, "expire": {}, "max_fails": {}, "tls": {}, "tls_servername": {}, "policy": {}, "health_check": {}, }, }, "v3": plugin{ namedOptions: map[string]option{ "except": {}, "force_tcp": {}, "prefer_udp": {}, "expire": {}, "max_fails": {}, "tls": {}, "tls_servername": {}, "policy": {}, "health_check": {}, "max_concurrent": {}, }, }, "v3 add max_concurrent": plugin{ namedOptions: map[string]option{ "except": {}, "force_tcp": {}, "prefer_udp": {}, "expire": {}, "max_fails": {}, "tls": {}, "tls_servername": {}, "policy": {}, "health_check": {}, "max_concurrent": { // new option status: SevNewDefault, add: func(c *corefile.Plugin) (*corefile.Plugin, error) { return addOptionToPlugin(c, &corefile.Option{Name: "max_concurrent 1000"}) }, downAction: 
removeOption, }, }, }, }, "k8s_external": { "v1": plugin{ namedOptions: map[string]option{ "apex": {}, "ttl": {}, }, }, }, "proxy": { "v1": plugin{ namedOptions: map[string]option{ "policy": {}, "fail_timeout": {}, "max_fails": {}, "health_check": {}, "except": {}, "spray": {}, "protocol": { // https_google option ignored status: SevIgnored, action: proxyRemoveHttpsGoogleProtocol, }, }, }, "v2": plugin{ namedOptions: map[string]option{ "policy": {}, "fail_timeout": {}, "max_fails": {}, "health_check": {}, "except": {}, "spray": {}, "protocol": { // https_google option removed status: SevRemoved, action: proxyRemoveHttpsGoogleProtocol, }, }, }, "deprecation": plugin{ // proxy -> forward deprecation migration status: SevDeprecated, replacedBy: "forward", action: proxyToForwardPluginAction, namedOptions: proxyToForwardOptionsMigrations, }, "removal": plugin{ // proxy -> forward forced migration status: SevRemoved, replacedBy: "forward", action: proxyToForwardPluginAction, namedOptions: proxyToForwardOptionsMigrations, }, }, "transfer": { "v1": plugin{ namedOptions: map[string]option{ "to": {}, }, }, }, } func removePlugin(*corefile.Plugin) (*corefile.Plugin, error) { return nil, nil } func removeOption(*corefile.Option) (*corefile.Option, error) { return nil, nil } func renamePlugin(p *corefile.Plugin, to string) (*corefile.Plugin, error) { p.Name = to return p, nil } func addToServerBlockWithPlugins(sb *corefile.Server, newPlugin *corefile.Plugin, with []string) (*corefile.Server, error) { if len(with) == 0 { // add to all blocks sb.Plugins = append(sb.Plugins, newPlugin) return sb, nil } for _, p := range sb.Plugins { for _, w := range with { if w == p.Name { // add to this block sb.Plugins = append(sb.Plugins, newPlugin) return sb, nil } } } return sb, nil } func copyKubernetesTransferOptToPlugin(cf *corefile.Corefile) (*corefile.Corefile, error) { for _, s := range cf.Servers { var ( to []string zone string ) for _, p := range s.Plugins { if p.Name != "kubernetes" { continue } zone = p.Args[0] for _, o := range p.Options { if o.Name != "transfer" { continue } to = o.Args } } if len(to) < 2 { continue } s.Plugins = append(s.Plugins, &corefile.Plugin{ Name: "transfer", Args: []string{zone}, Options: []*corefile.Option{{Name: "to", Args: to[1:]}}, }) } return cf, nil } func addToKubernetesServerBlocks(sb *corefile.Server, newPlugin *corefile.Plugin) (*corefile.Server, error) { return addToServerBlockWithPlugins(sb, newPlugin, []string{"kubernetes"}) } func addToForwardingServerBlocks(sb *corefile.Server, newPlugin *corefile.Plugin) (*corefile.Server, error) { return addToServerBlockWithPlugins(sb, newPlugin, []string{"forward", "proxy"}) } func addToAllServerBlocks(sb *corefile.Server, newPlugin *corefile.Plugin) (*corefile.Server, error) { return addToServerBlockWithPlugins(sb, newPlugin, []string{}) } func addOptionToPlugin(pl *corefile.Plugin, newOption *corefile.Option) (*corefile.Plugin, error) { pl.Options = append(pl.Options, newOption) return pl, nil } var proxyToForwardOptionsMigrations = map[string]option{ "policy": { action: func(o *corefile.Option) (*corefile.Option, error) { if len(o.Args) == 1 && o.Args[0] == "least_conn" { o.Name = "force_tcp" o.Args = nil } return o, nil }, }, "except": {}, "fail_timeout": {action: removeOption}, "max_fails": {action: removeOption}, "health_check": {action: removeOption}, "spray": {action: removeOption}, "protocol": { action: func(o *corefile.Option) (*corefile.Option, error) { if len(o.Args) >= 2 && o.Args[0] == "force_tcp" { o.Name = 
"force_tcp" o.Args = nil return o, nil } return nil, nil }, }, } var proxyToForwardPluginAction = func(p *corefile.Plugin) (*corefile.Plugin, error) { return renamePlugin(p, "forward") } var useFirstArgumentOnly = func(o *corefile.Option) (*corefile.Option, error) { if len(o.Args) < 1 { return o, nil } o.Args = o.Args[:1] return o, nil } var proxyRemoveHttpsGoogleProtocol = func(o *corefile.Option) (*corefile.Option, error) { if len(o.Args) > 0 && o.Args[0] == "https_google" { return nil, nil } return o, nil } func breakForwardStubDomainsIntoServerBlocks(cf *corefile.Corefile) (*corefile.Corefile, error) { for _, sb := range cf.Servers { for j, fwd := range sb.Plugins { if fwd.Name != "forward" { continue } if len(fwd.Args) == 0 { return nil, errors.New("found invalid forward plugin declaration") } if fwd.Args[0] == "." { // dont move the default upstream continue } if len(sb.DomPorts) != 1 { return cf, errors.New("unhandled migration of multi-domain/port server block") } if sb.DomPorts[0] != "." && sb.DomPorts[0] != ".:53" { return cf, errors.New("unhandled migration of non-default domain/port server block") } newSb := &corefile.Server{} // create a new server block newSb.DomPorts = []string{fwd.Args[0]} // copy the forward zone to the server block domain fwd.Args[0] = "." // the plugin's zone changes to "." for brevity newSb.Plugins = append(newSb.Plugins, fwd) // add the plugin to its new server block // Add appropriate addtl plugins to new server block newSb.Plugins = append(newSb.Plugins, &corefile.Plugin{Name: "loop"}) newSb.Plugins = append(newSb.Plugins, &corefile.Plugin{Name: "errors"}) newSb.Plugins = append(newSb.Plugins, &corefile.Plugin{Name: "cache", Args: []string{"30"}}) //add new server block to corefile cf.Servers = append(cf.Servers, newSb) //remove the forward plugin from the original server block sb.Plugins = append(sb.Plugins[:j], sb.Plugins[j+1:]...) } } return cf, nil }
vendor/github.com/coredns/corefile-migration/migration/plugins.go
0
https://github.com/kubernetes/kubernetes/commit/75dea6b8bc2b7fb889cd41957bc5d061ab614ff4
[ 0.00032671380904503167, 0.0001743636094033718, 0.0001642339921090752, 0.0001712075900286436, 0.000021384359570220113 ]
{ "id": 0, "code_window": [ "dev-dynamic-mem: BUILD_TAGS+=memprofiler\n", "dev-dynamic-mem: dev-dynamic\n", "\n", "# Creates a Docker image by adding the compiled linux/amd64 binary found in ./bin.\n", "# The resulting image is tagged \"vault:dev\". \n", "docker-dev: prep\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ "# The resulting image is tagged \"vault:dev\".\n" ], "file_path": "Makefile", "type": "replace", "edit_start_line_idx": 53 }
# Multi-stage builder to avoid polluting users environment with wrong # architecture binaries. Since this binary is used in an alpine container, # we're explicitly compiling for 'linux/amd64' FROM debian:buster AS builder ARG VERSION=1.13.10 ARG CGO_ENABLED=0 ARG BUILD_TAGS ENV JOBS=2 RUN apt-get update -y && apt-get install --no-install-recommends -y -q \ curl \ zip \ build-essential \ gcc-multilib \ g++-multilib \ ca-certificates \ git mercurial bzr \ gnupg \ libltdl-dev \ libltdl7 RUN curl -sL https://deb.nodesource.com/setup_10.x | bash - RUN curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list RUN apt-get update -y && apt-get install -y -q nodejs yarn RUN rm -rf /var/lib/apt/lists/* RUN mkdir /goroot && mkdir /go RUN curl https://storage.googleapis.com/golang/go${VERSION}.linux-amd64.tar.gz \ | tar xvzf - -C /goroot --strip-components=1 ENV GOPATH /go ENV GOROOT /goroot ENV PATH $GOROOT/bin:$GOPATH/bin:$PATH WORKDIR /go/src/github.com/hashicorp/vault COPY . . RUN make bootstrap static-dist \ && CGO_ENABLED=$CGO_ENABLED BUILD_TAGS='$BUILD_TAGS ui' VAULT_DEV_BUILD=1 XC_OSARCH='linux/amd64' sh -c "'./scripts/build.sh'" # Docker Image FROM alpine:3.10 # Create a vault user and group first so the IDs get set the same way, # even as the rest of this may change over time. RUN addgroup vault && \ adduser -S -G vault vault # Set up certificates, our base tools, and Vault. RUN set -eux; \ apk add --no-cache ca-certificates libcap su-exec dumb-init tzdata COPY --from=builder /go/bin/vault /bin/vault # /vault/logs is made available to use as a location to store audit logs, if # desired; /vault/file is made available to use as a location with the file # storage backend, if desired; the server will be started with /vault/config as # the configuration directory so you can add additional config files in that # location. RUN mkdir -p /vault/logs && \ mkdir -p /vault/file && \ mkdir -p /vault/config && \ chown -R vault:vault /vault # Expose the logs directory as a volume since there's potentially long-running # state in there VOLUME /vault/logs # Expose the file directory as a volume since there's potentially long-running # state in there VOLUME /vault/file # 8200/tcp is the primary interface that applications use to interact with # Vault. EXPOSE 8200 # The entry point script uses dumb-init as the top-level process to reap any # zombie processes created by Vault sub-processes. # # For production derivatives of this container, you should add the IPC_LOCK # capability so that Vault can mlock memory. COPY ./scripts/docker/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh ENTRYPOINT ["docker-entrypoint.sh"] # By default you'll get a single-node development server that stores everything # in RAM and bootstraps itself. Don't use this configuration for production. CMD ["server", "-dev"]
scripts/docker/Dockerfile.ui
1
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.007558389566838741, 0.0015452387742698193, 0.00016920603229664266, 0.00041821127524599433, 0.0024299626238644123 ]
{ "id": 0, "code_window": [ "dev-dynamic-mem: BUILD_TAGS+=memprofiler\n", "dev-dynamic-mem: dev-dynamic\n", "\n", "# Creates a Docker image by adding the compiled linux/amd64 binary found in ./bin.\n", "# The resulting image is tagged \"vault:dev\". \n", "docker-dev: prep\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ "# The resulting image is tagged \"vault:dev\".\n" ], "file_path": "Makefile", "type": "replace", "edit_start_line_idx": 53 }
/* * Copyright 2018 - Present Okta, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // AUTO-GENERATED! DO NOT EDIT FILE DIRECTLY package okta import () type ApplicationCredentialsSigningUse string
vendor/github.com/okta/okta-sdk-golang/v2/okta/applicationCredentialsSigningUse.go
0
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.00021085147454869002, 0.00018672480655368418, 0.0001743598550092429, 0.00017496307555120438, 0.000017061911421478726 ]
{ "id": 0, "code_window": [ "dev-dynamic-mem: BUILD_TAGS+=memprofiler\n", "dev-dynamic-mem: dev-dynamic\n", "\n", "# Creates a Docker image by adding the compiled linux/amd64 binary found in ./bin.\n", "# The resulting image is tagged \"vault:dev\". \n", "docker-dev: prep\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ "# The resulting image is tagged \"vault:dev\".\n" ], "file_path": "Makefile", "type": "replace", "edit_start_line_idx": 53 }
// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package yaml import ( "bytes" "encoding" "encoding/json" "reflect" "sort" "strings" "sync" "unicode" "unicode/utf8" ) // indirect walks down v allocating pointers as needed, // until it gets to a non-pointer. // if it encounters an Unmarshaler, indirect stops and returns that. // if decodingNull is true, indirect stops at the last pointer so it can be set to nil. func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { // If v is a named type and is addressable, // start with its address, so that if the type has pointer methods, // we find them. if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { v = v.Addr() } for { // Load value from interface, but only if the result will be // usefully addressable. if v.Kind() == reflect.Interface && !v.IsNil() { e := v.Elem() if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { v = e continue } } if v.Kind() != reflect.Ptr { break } if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { break } if v.IsNil() { if v.CanSet() { v.Set(reflect.New(v.Type().Elem())) } else { v = reflect.New(v.Type().Elem()) } } if v.Type().NumMethod() > 0 { if u, ok := v.Interface().(json.Unmarshaler); ok { return u, nil, reflect.Value{} } if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { return nil, u, reflect.Value{} } } v = v.Elem() } return nil, nil, v } // A field represents a single field found in a struct. type field struct { name string nameBytes []byte // []byte(name) equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent tag bool index []int typ reflect.Type omitEmpty bool quoted bool } func fillField(f field) field { f.nameBytes = []byte(f.name) f.equalFold = foldFunc(f.nameBytes) return f } // byName sorts field by name, breaking ties with depth, // then breaking ties with "name came from json tag", then // breaking ties with index sequence. type byName []field func (x byName) Len() int { return len(x) } func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x byName) Less(i, j int) bool { if x[i].name != x[j].name { return x[i].name < x[j].name } if len(x[i].index) != len(x[j].index) { return len(x[i].index) < len(x[j].index) } if x[i].tag != x[j].tag { return x[i].tag } return byIndex(x).Less(i, j) } // byIndex sorts field by index sequence. type byIndex []field func (x byIndex) Len() int { return len(x) } func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x byIndex) Less(i, j int) bool { for k, xik := range x[i].index { if k >= len(x[j].index) { return false } if xik != x[j].index[k] { return xik < x[j].index[k] } } return len(x[i].index) < len(x[j].index) } // typeFields returns a list of fields that JSON should recognize for the given type. // The algorithm is breadth-first search over the set of structs to include - the top struct // and then any reachable anonymous structs. func typeFields(t reflect.Type) []field { // Anonymous fields to explore at the current level and the next. current := []field{} next := []field{{typ: t}} // Count of queued names for current level and the next. count := map[reflect.Type]int{} nextCount := map[reflect.Type]int{} // Types already visited at an earlier level. visited := map[reflect.Type]bool{} // Fields found. 
var fields []field for len(next) > 0 { current, next = next, current[:0] count, nextCount = nextCount, map[reflect.Type]int{} for _, f := range current { if visited[f.typ] { continue } visited[f.typ] = true // Scan f.typ for fields to include. for i := 0; i < f.typ.NumField(); i++ { sf := f.typ.Field(i) if sf.PkgPath != "" { // unexported continue } tag := sf.Tag.Get("json") if tag == "-" { continue } name, opts := parseTag(tag) if !isValidTag(name) { name = "" } index := make([]int, len(f.index)+1) copy(index, f.index) index[len(f.index)] = i ft := sf.Type if ft.Name() == "" && ft.Kind() == reflect.Ptr { // Follow pointer. ft = ft.Elem() } // Record found field and index sequence. if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { tagged := name != "" if name == "" { name = sf.Name } fields = append(fields, fillField(field{ name: name, tag: tagged, index: index, typ: ft, omitEmpty: opts.Contains("omitempty"), quoted: opts.Contains("string"), })) if count[f.typ] > 1 { // If there were multiple instances, add a second, // so that the annihilation code will see a duplicate. // It only cares about the distinction between 1 or 2, // so don't bother generating any more copies. fields = append(fields, fields[len(fields)-1]) } continue } // Record new anonymous struct to explore in next round. nextCount[ft]++ if nextCount[ft] == 1 { next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) } } } } sort.Sort(byName(fields)) // Delete all fields that are hidden by the Go rules for embedded fields, // except that fields with JSON tags are promoted. // The fields are sorted in primary order of name, secondary order // of field index length. Loop over names; for each name, delete // hidden fields by choosing the one dominant field that survives. out := fields[:0] for advance, i := 0, 0; i < len(fields); i += advance { // One iteration per name. // Find the sequence of fields with the name of this first field. fi := fields[i] name := fi.name for advance = 1; i+advance < len(fields); advance++ { fj := fields[i+advance] if fj.name != name { break } } if advance == 1 { // Only one field with this name out = append(out, fi) continue } dominant, ok := dominantField(fields[i : i+advance]) if ok { out = append(out, dominant) } } fields = out sort.Sort(byIndex(fields)) return fields } // dominantField looks through the fields, all of which are known to // have the same name, to find the single field that dominates the // others using Go's embedding rules, modified by the presence of // JSON tags. If there are multiple top-level fields, the boolean // will be false: This condition is an error in Go and we skip all // the fields. func dominantField(fields []field) (field, bool) { // The fields are sorted in increasing index-length order. The winner // must therefore be one with the shortest index length. Drop all // longer entries, which is easy: just truncate the slice. length := len(fields[0].index) tagged := -1 // Index of first tagged field. for i, f := range fields { if len(f.index) > length { fields = fields[:i] break } if f.tag { if tagged >= 0 { // Multiple tagged fields at the same level: conflict. // Return no field. return field{}, false } tagged = i } } if tagged >= 0 { return fields[tagged], true } // All remaining fields have the same length. If there's more than one, // we have a conflict (two fields named "X" at the same level) and we // return no field. 
if len(fields) > 1 { return field{}, false } return fields[0], true } var fieldCache struct { sync.RWMutex m map[reflect.Type][]field } // cachedTypeFields is like typeFields but uses a cache to avoid repeated work. func cachedTypeFields(t reflect.Type) []field { fieldCache.RLock() f := fieldCache.m[t] fieldCache.RUnlock() if f != nil { return f } // Compute fields without lock. // Might duplicate effort but won't hold other computations back. f = typeFields(t) if f == nil { f = []field{} } fieldCache.Lock() if fieldCache.m == nil { fieldCache.m = map[reflect.Type][]field{} } fieldCache.m[t] = f fieldCache.Unlock() return f } func isValidTag(s string) bool { if s == "" { return false } for _, c := range s { switch { case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): // Backslash and quote chars are reserved, but // otherwise any punctuation chars are allowed // in a tag name. default: if !unicode.IsLetter(c) && !unicode.IsDigit(c) { return false } } } return true } const ( caseMask = ^byte(0x20) // Mask to ignore case in ASCII. kelvin = '\u212a' smallLongEss = '\u017f' ) // foldFunc returns one of four different case folding equivalence // functions, from most general (and slow) to fastest: // // 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 // 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') // 3) asciiEqualFold, no special, but includes non-letters (including _) // 4) simpleLetterEqualFold, no specials, no non-letters. // // The letters S and K are special because they map to 3 runes, not just 2: // * S maps to s and to U+017F 'ſ' Latin small letter long s // * k maps to K and to U+212A 'K' Kelvin sign // See http://play.golang.org/p/tTxjOc0OGo // // The returned function is specialized for matching against s and // should only be given s. It's not curried for performance reasons. func foldFunc(s []byte) func(s, t []byte) bool { nonLetter := false special := false // special letter for _, b := range s { if b >= utf8.RuneSelf { return bytes.EqualFold } upper := b & caseMask if upper < 'A' || upper > 'Z' { nonLetter = true } else if upper == 'K' || upper == 'S' { // See above for why these letters are special. special = true } } if special { return equalFoldRight } if nonLetter { return asciiEqualFold } return simpleLetterEqualFold } // equalFoldRight is a specialization of bytes.EqualFold when s is // known to be all ASCII (including punctuation), but contains an 's', // 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. // See comments on foldFunc. func equalFoldRight(s, t []byte) bool { for _, sb := range s { if len(t) == 0 { return false } tb := t[0] if tb < utf8.RuneSelf { if sb != tb { sbUpper := sb & caseMask if 'A' <= sbUpper && sbUpper <= 'Z' { if sbUpper != tb&caseMask { return false } } else { return false } } t = t[1:] continue } // sb is ASCII and t is not. t must be either kelvin // sign or long s; sb must be s, S, k, or K. tr, size := utf8.DecodeRune(t) switch sb { case 's', 'S': if tr != smallLongEss { return false } case 'k', 'K': if tr != kelvin { return false } default: return false } t = t[size:] } if len(t) > 0 { return false } return true } // asciiEqualFold is a specialization of bytes.EqualFold for use when // s is all ASCII (but may contain non-letters) and contains no // special-folding letters. // See comments on foldFunc. 
func asciiEqualFold(s, t []byte) bool { if len(s) != len(t) { return false } for i, sb := range s { tb := t[i] if sb == tb { continue } if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { if sb&caseMask != tb&caseMask { return false } } else { return false } } return true } // simpleLetterEqualFold is a specialization of bytes.EqualFold for // use when s is all ASCII letters (no underscores, etc) and also // doesn't contain 'k', 'K', 's', or 'S'. // See comments on foldFunc. func simpleLetterEqualFold(s, t []byte) bool { if len(s) != len(t) { return false } for i, b := range s { if b&caseMask != t[i]&caseMask { return false } } return true } // tagOptions is the string following a comma in a struct field's "json" // tag, or the empty string. It does not include the leading comma. type tagOptions string // parseTag splits a struct field's json tag into its name and // comma-separated options. func parseTag(tag string) (string, tagOptions) { if idx := strings.Index(tag, ","); idx != -1 { return tag[:idx], tagOptions(tag[idx+1:]) } return tag, tagOptions("") } // Contains reports whether a comma-separated list of options // contains a particular substr flag. substr must be surrounded by a // string boundary or commas. func (o tagOptions) Contains(optionName string) bool { if len(o) == 0 { return false } s := string(o) for s != "" { var next string i := strings.Index(s, ",") if i >= 0 { s, next = s[:i], s[i+1:] } if s == optionName { return true } s = next } return false }
vendor/github.com/ghodss/yaml/fields.go
0
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.0008049817406572402, 0.00018926554184872657, 0.00016430855612270534, 0.00017117918469011784, 0.00009256261546397582 ]
{ "id": 0, "code_window": [ "dev-dynamic-mem: BUILD_TAGS+=memprofiler\n", "dev-dynamic-mem: dev-dynamic\n", "\n", "# Creates a Docker image by adding the compiled linux/amd64 binary found in ./bin.\n", "# The resulting image is tagged \"vault:dev\". \n", "docker-dev: prep\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ "# The resulting image is tagged \"vault:dev\".\n" ], "file_path": "Makefile", "type": "replace", "edit_start_line_idx": 53 }
syntax = "proto3"; option go_package = "types"; package github_com_hashicorp_go_raftchunking_types; message ChunkInfo { // OpNum is the ID of the op, used to ensure values are applied to the // right operation uint64 op_num = 1; // SequenceNum is the current number of the ops; when applying we should // see this start at zero and increment by one without skips uint32 sequence_num = 2; // NumChunks is used to check whether all chunks have been received and // reconstruction should be attempted uint32 num_chunks = 3; // NextExtensions holds inner extensions information for the next layer // down of Apply bytes next_extensions = 4; }
vendor/github.com/hashicorp/go-raftchunking/types/types.proto
0
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.00016935034363996238, 0.0001685442985035479, 0.0001675439125392586, 0.00016873863933142275, 7.501664640585659e-7 ]
{ "id": 1, "code_window": [ "docker-dev: prep\n", "\tdocker build -f scripts/docker/Dockerfile -t vault:dev .\n", "\n", "docker-dev-ui: prep\n" ], "labels": [ "keep", "replace", "keep", "keep" ], "after_edit": [ "\tdocker build --build-arg VERSION=$(GO_VERSION_MIN) -f scripts/docker/Dockerfile -t vault:dev .\n" ], "file_path": "Makefile", "type": "replace", "edit_start_line_idx": 55 }
# Multi-stage builder to avoid polluting users environment with wrong # architecture binaries. Since this binary is used in an alpine container, # we're explicitly compiling for 'linux/amd64' ARG VERSION=1.13.10 FROM golang:${VERSION} AS builder ARG CGO_ENABLED=0 ARG BUILD_TAGS WORKDIR /go/src/github.com/hashicorp/vault COPY . . RUN make bootstrap \ && CGO_ENABLED=$CGO_ENABLED BUILD_TAGS='$BUILD_TAGS' VAULT_DEV_BUILD=1 XC_OSARCH='linux/amd64' sh -c "'./scripts/build.sh'" # Docker Image FROM alpine:3.10 # Create a vault user and group first so the IDs get set the same way, # even as the rest of this may change over time. RUN addgroup vault && \ adduser -S -G vault vault # Set up certificates, our base tools, and Vault. RUN set -eux; \ apk add --no-cache ca-certificates libcap su-exec dumb-init tzdata COPY --from=builder /go/bin/vault /bin/vault # /vault/logs is made available to use as a location to store audit logs, if # desired; /vault/file is made available to use as a location with the file # storage backend, if desired; the server will be started with /vault/config as # the configuration directory so you can add additional config files in that # location. RUN mkdir -p /vault/logs && \ mkdir -p /vault/file && \ mkdir -p /vault/config && \ chown -R vault:vault /vault # Expose the logs directory as a volume since there's potentially long-running # state in there VOLUME /vault/logs # Expose the file directory as a volume since there's potentially long-running # state in there VOLUME /vault/file # 8200/tcp is the primary interface that applications use to interact with # Vault. EXPOSE 8200 # The entry point script uses dumb-init as the top-level process to reap any # zombie processes created by Vault sub-processes. # # For production derivatives of this container, you should add the IPC_LOCK # capability so that Vault can mlock memory. COPY ./scripts/docker/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh ENTRYPOINT ["docker-entrypoint.sh"] # By default you'll get a single-node development server that stores everything # in RAM and bootstraps itself. Don't use this configuration for production. CMD ["server", "-dev"]
scripts/docker/Dockerfile
1
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.011588783003389835, 0.0019492844585329294, 0.0001701565779512748, 0.0003214319294784218, 0.003939106594771147 ]
{ "id": 1, "code_window": [ "docker-dev: prep\n", "\tdocker build -f scripts/docker/Dockerfile -t vault:dev .\n", "\n", "docker-dev-ui: prep\n" ], "labels": [ "keep", "replace", "keep", "keep" ], "after_edit": [ "\tdocker build --build-arg VERSION=$(GO_VERSION_MIN) -f scripts/docker/Dockerfile -t vault:dev .\n" ], "file_path": "Makefile", "type": "replace", "edit_start_line_idx": 55 }
package command import ( "io/ioutil" "os" "strings" "testing" "github.com/mitchellh/cli" ) func testPolicyFmtCommand(tb testing.TB) (*cli.MockUi, *PolicyFmtCommand) { tb.Helper() ui := cli.NewMockUi() return ui, &PolicyFmtCommand{ BaseCommand: &BaseCommand{ UI: ui, }, } } func TestPolicyFmtCommand_Run(t *testing.T) { t.Parallel() cases := []struct { name string args []string out string code int }{ { "not_enough_args", []string{}, "Not enough arguments", 1, }, { "too_many_args", []string{"foo", "bar"}, "Too many arguments", 1, }, } t.Run("validations", func(t *testing.T) { t.Parallel() for _, tc := range cases { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() client, closer := testVaultServer(t) defer closer() ui, cmd := testPolicyFmtCommand(t) cmd.client = client code := cmd.Run(tc.args) if code != tc.code { t.Errorf("expected %d to be %d", code, tc.code) } combined := ui.OutputWriter.String() + ui.ErrorWriter.String() if !strings.Contains(combined, tc.out) { t.Errorf("expected %q to contain %q", combined, tc.out) } }) } }) t.Run("default", func(t *testing.T) { t.Parallel() policy := strings.TrimSpace(` path "secret" { capabilities = ["create", "update","delete"] } `) f, err := ioutil.TempFile("", "") if err != nil { t.Fatal(err) } defer os.Remove(f.Name()) if _, err := f.Write([]byte(policy)); err != nil { t.Fatal(err) } f.Close() client, closer := testVaultServer(t) defer closer() _, cmd := testPolicyFmtCommand(t) cmd.client = client code := cmd.Run([]string{ f.Name(), }) if exp := 0; code != exp { t.Errorf("expected %d to be %d", code, exp) } expected := strings.TrimSpace(` path "secret" { capabilities = ["create", "update", "delete"] } `) + "\n" contents, err := ioutil.ReadFile(f.Name()) if err != nil { t.Fatal(err) } if string(contents) != expected { t.Errorf("expected %q to be %q", string(contents), expected) } }) t.Run("bad_hcl", func(t *testing.T) { t.Parallel() policy := `dafdaf` f, err := ioutil.TempFile("", "") if err != nil { t.Fatal(err) } defer os.Remove(f.Name()) if _, err := f.Write([]byte(policy)); err != nil { t.Fatal(err) } f.Close() client, closer := testVaultServer(t) defer closer() ui, cmd := testPolicyFmtCommand(t) cmd.client = client code := cmd.Run([]string{ f.Name(), }) if exp := 1; code != exp { t.Errorf("expected %d to be %d", code, exp) } stderr := ui.ErrorWriter.String() expected := "failed to parse policy" if !strings.Contains(stderr, expected) { t.Errorf("expected %q to include %q", stderr, expected) } }) t.Run("bad_policy", func(t *testing.T) { t.Parallel() policy := `banana "foo" {}` f, err := ioutil.TempFile("", "") if err != nil { t.Fatal(err) } defer os.Remove(f.Name()) if _, err := f.Write([]byte(policy)); err != nil { t.Fatal(err) } f.Close() client, closer := testVaultServer(t) defer closer() ui, cmd := testPolicyFmtCommand(t) cmd.client = client code := cmd.Run([]string{ f.Name(), }) if exp := 1; code != exp { t.Errorf("expected %d to be %d", code, exp) } stderr := ui.ErrorWriter.String() expected := "failed to parse policy" if !strings.Contains(stderr, expected) { t.Errorf("expected %q to include %q", stderr, expected) } }) t.Run("bad_policy", func(t *testing.T) { t.Parallel() policy := `path "secret/" { capabilities = ["bogus"] }` f, err := ioutil.TempFile("", "") if err != nil { t.Fatal(err) } defer os.Remove(f.Name()) if _, err := f.Write([]byte(policy)); err != nil { t.Fatal(err) } f.Close() client, closer := testVaultServer(t) defer closer() ui, cmd := testPolicyFmtCommand(t) cmd.client = client code := cmd.Run([]string{ f.Name(), }) 
if exp := 1; code != exp { t.Errorf("expected %d to be %d", code, exp) } stderr := ui.ErrorWriter.String() expected := "failed to parse policy" if !strings.Contains(stderr, expected) { t.Errorf("expected %q to include %q", stderr, expected) } }) t.Run("no_tabs", func(t *testing.T) { t.Parallel() _, cmd := testPolicyFmtCommand(t) assertNoTabs(t, cmd) }) }
command/policy_fmt_test.go
0
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.0011431919410824776, 0.00028072320856153965, 0.0001679191191215068, 0.00017065418069250882, 0.00025028048548847437 ]
{ "id": 1, "code_window": [ "docker-dev: prep\n", "\tdocker build -f scripts/docker/Dockerfile -t vault:dev .\n", "\n", "docker-dev-ui: prep\n" ], "labels": [ "keep", "replace", "keep", "keep" ], "after_edit": [ "\tdocker build --build-arg VERSION=$(GO_VERSION_MIN) -f scripts/docker/Dockerfile -t vault:dev .\n" ], "file_path": "Makefile", "type": "replace", "edit_start_line_idx": 55 }
module github.com/prometheus/procfs go 1.12 require ( github.com/google/go-cmp v0.3.1 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e )
vendor/github.com/prometheus/procfs/go.mod
0
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.00016793017857708037, 0.00016793017857708037, 0.00016793017857708037, 0.00016793017857708037, 0 ]
{ "id": 1, "code_window": [ "docker-dev: prep\n", "\tdocker build -f scripts/docker/Dockerfile -t vault:dev .\n", "\n", "docker-dev-ui: prep\n" ], "labels": [ "keep", "replace", "keep", "keep" ], "after_edit": [ "\tdocker build --build-arg VERSION=$(GO_VERSION_MIN) -f scripts/docker/Dockerfile -t vault:dev .\n" ], "file_path": "Makefile", "type": "replace", "edit_start_line_idx": 55 }
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
vendor/github.com/aws/aws-sdk-go/LICENSE.txt
0
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.00017648209177423269, 0.00017084965656977147, 0.00016237696399912238, 0.00017128008767031133, 0.0000032077659852802753 ]
{ "id": 2, "code_window": [ "\n", "docker-dev-ui: prep\n", "\tdocker build -f scripts/docker/Dockerfile.ui -t vault:dev-ui .\n", "\n", "# test runs the unit tests and vets the code\n", "test: prep\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tdocker build --build-arg VERSION=$(GO_VERSION_MIN) -f scripts/docker/Dockerfile.ui -t vault:dev-ui .\n" ], "file_path": "Makefile", "type": "replace", "edit_start_line_idx": 58 }
# Determine this makefile's path. # Be sure to place this BEFORE `include` directives, if any. THIS_FILE := $(lastword $(MAKEFILE_LIST)) TEST?=$$($(GO_CMD) list ./... | grep -v /vendor/ | grep -v /integ) TEST_TIMEOUT?=45m EXTENDED_TEST_TIMEOUT=60m INTEG_TEST_TIMEOUT=120m VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr EXTERNAL_TOOLS_CI=\ github.com/elazarl/go-bindata-assetfs/... \ github.com/hashicorp/go-bindata/... \ github.com/mitchellh/gox \ golang.org/x/tools/cmd/goimports EXTERNAL_TOOLS=\ github.com/client9/misspell/cmd/misspell GOFMT_FILES?=$$(find . -name '*.go' | grep -v pb.go | grep -v vendor) GO_VERSION_MIN=1.14.7 GO_CMD?=go CGO_ENABLED?=0 ifneq ($(FDB_ENABLED), ) CGO_ENABLED=1 BUILD_TAGS+=foundationdb endif default: dev # bin generates the releasable binaries for Vault bin: prep @CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS) ui' sh -c "'$(CURDIR)/scripts/build.sh'" # dev creates binaries for testing Vault locally. These are put # into ./bin/ as well as $GOPATH/bin dev: prep @CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" dev-ui: assetcheck prep @CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS) ui' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" dev-dynamic: prep @CGO_ENABLED=1 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" # *-mem variants will enable memory profiling which will write snapshots of heap usage # to $TMP/vaultprof every 5 minutes. These can be analyzed using `$ go tool pprof <profile_file>`. # Note that any build can have profiling added via: `$ BUILD_TAGS=memprofiler make ...` dev-mem: BUILD_TAGS+=memprofiler dev-mem: dev dev-ui-mem: BUILD_TAGS+=memprofiler dev-ui-mem: assetcheck dev-ui dev-dynamic-mem: BUILD_TAGS+=memprofiler dev-dynamic-mem: dev-dynamic # Creates a Docker image by adding the compiled linux/amd64 binary found in ./bin. # The resulting image is tagged "vault:dev". docker-dev: prep docker build -f scripts/docker/Dockerfile -t vault:dev . docker-dev-ui: prep docker build -f scripts/docker/Dockerfile.ui -t vault:dev-ui . # test runs the unit tests and vets the code test: prep @CGO_ENABLED=$(CGO_ENABLED) \ VAULT_ADDR= \ VAULT_TOKEN= \ VAULT_DEV_ROOT_TOKEN_ID= \ VAULT_ACC= \ $(GO_CMD) test -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -timeout=$(TEST_TIMEOUT) -parallel=20 testcompile: prep @for pkg in $(TEST) ; do \ $(GO_CMD) test -v -c -tags='$(BUILD_TAGS)' $$pkg -parallel=4 ; \ done # testacc runs acceptance tests testacc: prep @if [ "$(TEST)" = "./..." ]; then \ echo "ERROR: Set TEST to a specific package"; \ exit 1; \ fi VAULT_ACC=1 $(GO_CMD) test -tags='$(BUILD_TAGS)' $(TEST) -v $(TESTARGS) -timeout=$(EXTENDED_TEST_TIMEOUT) # testrace runs the race checker testrace: prep @CGO_ENABLED=1 \ VAULT_ADDR= \ VAULT_TOKEN= \ VAULT_DEV_ROOT_TOKEN_ID= \ VAULT_ACC= \ $(GO_CMD) test -tags='$(BUILD_TAGS)' -race $(TEST) $(TESTARGS) -timeout=$(EXTENDED_TEST_TIMEOUT) -parallel=20 cover: ./scripts/coverage.sh --html # vet runs the Go source code static analysis tool `vet` to find # any common errors. vet: @$(GO_CMD) list -f '{{.Dir}}' ./... | grep -v /vendor/ \ | grep -v '.*github.com/hashicorp/vault$$' \ | xargs $(GO_CMD) vet ; if [ $$? -eq 1 ]; then \ echo ""; \ echo "Vet found suspicious constructs. 
Please check the reported constructs"; \ echo "and fix them if necessary before submitting the code for reviewal."; \ fi # lint runs vet plus a number of other checkers, it is more comprehensive, but louder lint: @$(GO_CMD) list -f '{{.Dir}}' ./... | grep -v /vendor/ \ | xargs golangci-lint run; if [ $$? -eq 1 ]; then \ echo ""; \ echo "Lint found suspicious constructs. Please check the reported constructs"; \ echo "and fix them if necessary before submitting the code for reviewal."; \ fi # for ci jobs, runs lint against the changed packages in the commit ci-lint: @golangci-lint run --deadline 10m --new-from-rev=HEAD~ # prep runs `go generate` to build the dynamically generated # source files. prep: fmtcheck @sh -c "'$(CURDIR)/scripts/goversioncheck.sh' '$(GO_VERSION_MIN)'" @$(GO_CMD) generate $($(GO_CMD) list ./... | grep -v /vendor/) @if [ -d .git/hooks ]; then cp .hooks/* .git/hooks/; fi # bootstrap the build by downloading additional tools needed to build ci-bootstrap: @for tool in $(EXTERNAL_TOOLS_CI) ; do \ echo "Installing/Updating $$tool" ; \ GO111MODULE=off $(GO_CMD) get -u $$tool; \ done # bootstrap the build by downloading additional tools that may be used by devs bootstrap: ci-bootstrap go generate -tags tools tools/tools.go # Note: if you have plugins in GOPATH you can update all of them via something like: # for i in $(ls | grep vault-plugin-); do cd $i; git remote update; git reset --hard origin/master; dep ensure -update; git add .; git commit; git push; cd ..; done update-plugins: grep vault-plugin- vendor/vendor.json | cut -d '"' -f 4 | xargs govendor fetch static-assets-dir: @mkdir -p ./pkg/web_ui static-assets: static-assets-dir @echo "--> Generating static assets" @go-bindata-assetfs -o bindata_assetfs.go -pkg http -prefix pkg -modtime 1480000000 -tags ui ./pkg/web_ui/... @mv bindata_assetfs.go http @$(MAKE) -f $(THIS_FILE) fmt test-ember: @echo "--> Installing JavaScript assets" @cd ui && yarn --ignore-optional @echo "--> Running ember tests" @cd ui && yarn run test:oss ember-ci-test: # Deprecated, to be removed soon. @echo "ember-ci-test is deprecated in favour of test-ui-browserstack" @exit 1 check-vault-in-path: @VAULT_BIN=$$(command -v vault) || { echo "vault command not found"; exit 1; }; \ [ -x "$$VAULT_BIN" ] || { echo "$$VAULT_BIN not executable"; exit 1; }; \ printf "Using Vault at %s:\n\$$ vault version\n%s\n" "$$VAULT_BIN" "$$(vault version)" check-browserstack-creds: @[ -n "$$BROWSERSTACK_ACCESS_KEY" ] || { echo "BROWSERSTACK_ACCESS_KEY not set"; exit 1; } @[ -n "$$BROWSERSTACK_USERNAME" ] || { echo "BROWSERSTACK_USERNAME not set"; exit 1; } test-ui-browserstack: check-vault-in-path check-browserstack-creds @echo "--> Installing JavaScript assets" @cd ui && yarn --ignore-optional @echo "--> Running ember tests in Browserstack" @cd ui && yarn run test:browserstack ember-dist: @echo "--> Installing JavaScript assets" @cd ui && yarn --ignore-optional @cd ui && npm rebuild node-sass @echo "--> Building Ember application" @cd ui && yarn run build @rm -rf ui/if-you-need-to-delete-this-open-an-issue-async-disk-cache ember-dist-dev: @echo "--> Installing JavaScript assets" @cd ui && yarn --ignore-optional @cd ui && npm rebuild node-sass @echo "--> Building Ember application" @cd ui && yarn run build-dev static-dist: ember-dist static-assets static-dist-dev: ember-dist-dev static-assets proto: protoc vault/*.proto --go_out=plugins=grpc,paths=source_relative:. protoc vault/activity/activity_log.proto --go_out=plugins=grpc,paths=source_relative:. 
protoc helper/storagepacker/types.proto --go_out=plugins=grpc,paths=source_relative:. protoc helper/forwarding/types.proto --go_out=plugins=grpc,paths=source_relative:. protoc sdk/logical/*.proto --go_out=plugins=grpc,paths=source_relative:. protoc physical/raft/types.proto --go_out=plugins=grpc,paths=source_relative:. protoc helper/identity/mfa/types.proto --go_out=plugins=grpc,paths=source_relative:. protoc helper/identity/types.proto --go_out=plugins=grpc,paths=source_relative:. protoc sdk/database/dbplugin/*.proto --go_out=plugins=grpc,paths=source_relative:. protoc sdk/database/newdbplugin/proto/*.proto --go_out=plugins=grpc,paths=source_relative:. protoc sdk/plugin/pb/*.proto --go_out=plugins=grpc,paths=source_relative:. sed -i -e 's/Id/ID/' vault/request_forwarding_service.pb.go sed -i -e 's/Idp/IDP/' -e 's/Url/URL/' -e 's/Id/ID/' -e 's/IDentity/Identity/' -e 's/EntityId/EntityID/' -e 's/Api/API/' -e 's/Qr/QR/' -e 's/Totp/TOTP/' -e 's/Mfa/MFA/' -e 's/Pingid/PingID/' -e 's/protobuf:"/sentinel:"" protobuf:"/' -e 's/namespaceId/namespaceID/' -e 's/Ttl/TTL/' -e 's/BoundCidrs/BoundCIDRs/' helper/identity/types.pb.go helper/identity/mfa/types.pb.go helper/storagepacker/types.pb.go sdk/plugin/pb/backend.pb.go sdk/logical/identity.pb.go vault/activity/activity_log.pb.go fmtcheck: @true #@sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'" fmt: goimports -w $(GOFMT_FILES) assetcheck: @echo "==> Checking compiled UI assets..." @sh -c "'$(CURDIR)/scripts/assetcheck.sh'" spellcheck: @echo "==> Spell checking website..." @misspell -error -source=text website/source mysql-database-plugin: @CGO_ENABLED=0 $(GO_CMD) build -o bin/mysql-database-plugin ./plugins/database/mysql/mysql-database-plugin mysql-legacy-database-plugin: @CGO_ENABLED=0 $(GO_CMD) build -o bin/mysql-legacy-database-plugin ./plugins/database/mysql/mysql-legacy-database-plugin cassandra-database-plugin: @CGO_ENABLED=0 $(GO_CMD) build -o bin/cassandra-database-plugin ./plugins/database/cassandra/cassandra-database-plugin influxdb-database-plugin: @CGO_ENABLED=0 $(GO_CMD) build -o bin/influxdb-database-plugin ./plugins/database/influxdb/influxdb-database-plugin postgresql-database-plugin: @CGO_ENABLED=0 $(GO_CMD) build -o bin/postgresql-database-plugin ./plugins/database/postgresql/postgresql-database-plugin mssql-database-plugin: @CGO_ENABLED=0 $(GO_CMD) build -o bin/mssql-database-plugin ./plugins/database/mssql/mssql-database-plugin hana-database-plugin: @CGO_ENABLED=0 $(GO_CMD) build -o bin/hana-database-plugin ./plugins/database/hana/hana-database-plugin mongodb-database-plugin: @CGO_ENABLED=0 $(GO_CMD) build -o bin/mongodb-database-plugin ./plugins/database/mongodb/mongodb-database-plugin # Tell packagespec where to write its CircleCI config. PACKAGESPEC_CIRCLECI_CONFIG := .circleci/config/@build-release.yml # Tell packagespec to re-run 'make ci-config' whenever updating its own CI config. 
PACKAGESPEC_HOOK_POST_CI_CONFIG := $(MAKE) ci-config .PHONY: ci-config ci-config: @$(MAKE) -C .circleci ci-config .PHONY: ci-verify ci-verify: @$(MAKE) -C .circleci ci-verify .PHONY: bin default prep test vet bootstrap ci-bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin influxdb-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin static-assets ember-dist ember-dist-dev static-dist static-dist-dev assetcheck check-vault-in-path check-browserstack-creds test-ui-browserstack packages build build-ci .NOTPARALLEL: ember-dist ember-dist-dev static-assets -include packagespec.mk
Makefile
1
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.0051825931295752525, 0.0006063089822418988, 0.00016493233852088451, 0.0001707655901554972, 0.0010831530671566725 ]
{ "id": 2, "code_window": [ "\n", "docker-dev-ui: prep\n", "\tdocker build -f scripts/docker/Dockerfile.ui -t vault:dev-ui .\n", "\n", "# test runs the unit tests and vets the code\n", "test: prep\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tdocker build --build-arg VERSION=$(GO_VERSION_MIN) -f scripts/docker/Dockerfile.ui -t vault:dev-ui .\n" ], "file_path": "Makefile", "type": "replace", "edit_start_line_idx": 58 }
package mock import ( "context" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) // pathInternal is used to test viewing internal backend values. In this case, // it is used to test the invalidate func. func pathInternal(b *backend) *framework.Path { return &framework.Path{ Pattern: "internal", Fields: map[string]*framework.FieldSchema{ "value": &framework.FieldSchema{Type: framework.TypeString}, }, Callbacks: map[logical.Operation]framework.OperationFunc{ logical.UpdateOperation: b.pathInternalUpdate, logical.ReadOperation: b.pathInternalRead, }, } } func (b *backend) pathInternalUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { value := data.Get("value").(string) b.internal = value // Return the secret return nil, nil } func (b *backend) pathInternalRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { // Return the secret return &logical.Response{ Data: map[string]interface{}{ "value": b.internal, }, }, nil }
vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_internal.go
0
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.00017286321963183582, 0.00016964029055088758, 0.00016649696044623852, 0.00016960050561465323, 0.0000022607903247262584 ]
{ "id": 2, "code_window": [ "\n", "docker-dev-ui: prep\n", "\tdocker build -f scripts/docker/Dockerfile.ui -t vault:dev-ui .\n", "\n", "# test runs the unit tests and vets the code\n", "test: prep\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tdocker build --build-arg VERSION=$(GO_VERSION_MIN) -f scripts/docker/Dockerfile.ui -t vault:dev-ui .\n" ], "file_path": "Makefile", "type": "replace", "edit_start_line_idx": 58 }
import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; import { settled, render, click, fillIn, findAll, find, triggerEvent, waitUntil } from '@ember/test-helpers'; import hbs from 'htmlbars-inline-precompile'; let file; const fileEvent = () => { const data = { some: 'content' }; file = new Blob([JSON.stringify(data, null, 2)], { type: 'application/json' }); file.name = 'file.json'; return ['change', { files: [file] }]; }; module('Integration | Component | pgp file', function(hooks) { setupRenderingTest(hooks); hooks.beforeEach(function() { file = null; this.lastOnChangeCall = null; this.set('change', (index, key) => { this.lastOnChangeCall = [index, key]; this.set('key', key); }); }); test('it renders', async function(assert) { this.set('key', { value: '' }); this.set('index', 0); await render(hbs`{{pgp-file index=index key=key onChange=(action change)}}`); assert.equal(find('[data-test-pgp-label]').textContent.trim(), 'PGP KEY 1'); assert.equal(find('[data-test-pgp-file-input-label]').textContent.trim(), 'Choose a file…'); }); test('it accepts files', async function(assert) { const key = { value: '' }; const event = fileEvent(); this.set('key', key); this.set('index', 0); await render(hbs`{{pgp-file index=index key=key onChange=(action change)}}`); triggerEvent('[data-test-pgp-file-input]', ...event); // FileReader is async, but then we need extra run loop wait to re-render await waitUntil(() => { return !!this.lastOnChangeCall; }); assert.equal( find('[data-test-pgp-file-input-label]').textContent.trim(), file.name, 'the file input shows the file name' ); assert.notDeepEqual(this.lastOnChangeCall[1].value, key.value, 'onChange was called with the new key'); assert.equal(this.lastOnChangeCall[0], 0, 'onChange is called with the index value'); await click('[data-test-pgp-clear]'); assert.equal(this.lastOnChangeCall[1].value, key.value, 'the key gets reset when the input is cleared'); }); test('it allows for text entry', async function(assert) { const key = { value: '' }; const text = 'a really long pgp key'; this.set('key', key); this.set('index', 0); await render(hbs`{{pgp-file index=index key=key onChange=(action change)}}`); await click('[data-test-text-toggle]'); assert.equal(findAll('[data-test-pgp-file-textarea]').length, 1, 'renders the textarea on toggle'); fillIn('[data-test-pgp-file-textarea]', text); await waitUntil(() => { return !!this.lastOnChangeCall; }); assert.equal(this.lastOnChangeCall[1].value, text, 'the key value is passed to onChange'); }); test('toggling back and forth', async function(assert) { const key = { value: '' }; const event = fileEvent(); this.set('key', key); this.set('index', 0); await render(hbs`{{pgp-file index=index key=key onChange=(action change)}}`); await triggerEvent('[data-test-pgp-file-input]', ...event); await settled(); await click('[data-test-text-toggle]'); assert.equal(findAll('[data-test-pgp-file-textarea]').length, 1, 'renders the textarea on toggle'); assert.equal( find('[data-test-pgp-file-textarea]').textContent.trim(), this.lastOnChangeCall[1].value, 'textarea shows the value of the base64d key' ); }); });
ui/tests/integration/components/pgp-file-test.js
0
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.00017551833298057318, 0.0001698935084277764, 0.0001655687519814819, 0.00016978816711343825, 0.0000024214846234826837 ]
{ "id": 2, "code_window": [ "\n", "docker-dev-ui: prep\n", "\tdocker build -f scripts/docker/Dockerfile.ui -t vault:dev-ui .\n", "\n", "# test runs the unit tests and vets the code\n", "test: prep\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tdocker build --build-arg VERSION=$(GO_VERSION_MIN) -f scripts/docker/Dockerfile.ui -t vault:dev-ui .\n" ], "file_path": "Makefile", "type": "replace", "edit_start_line_idx": 58 }
// +build !enterprise package vault type sentinelPolicy struct{}
vault/policy_util.go
0
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.0001958275679498911, 0.0001958275679498911, 0.0001958275679498911, 0.0001958275679498911, 0 ]
{ "id": 3, "code_window": [ "# Multi-stage builder to avoid polluting users environment with wrong \n", "# architecture binaries. Since this binary is used in an alpine container, \n", "# we're explicitly compiling for 'linux/amd64'\n" ], "labels": [ "replace", "replace", "keep" ], "after_edit": [ "# Multi-stage builder to avoid polluting users environment with wrong\n", "# architecture binaries. Since this binary is used in an alpine container,\n" ], "file_path": "scripts/docker/Dockerfile", "type": "replace", "edit_start_line_idx": 0 }
# Determine this makefile's path. # Be sure to place this BEFORE `include` directives, if any. THIS_FILE := $(lastword $(MAKEFILE_LIST)) TEST?=$$($(GO_CMD) list ./... | grep -v /vendor/ | grep -v /integ) TEST_TIMEOUT?=45m EXTENDED_TEST_TIMEOUT=60m INTEG_TEST_TIMEOUT=120m VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr EXTERNAL_TOOLS_CI=\ github.com/elazarl/go-bindata-assetfs/... \ github.com/hashicorp/go-bindata/... \ github.com/mitchellh/gox \ golang.org/x/tools/cmd/goimports EXTERNAL_TOOLS=\ github.com/client9/misspell/cmd/misspell GOFMT_FILES?=$$(find . -name '*.go' | grep -v pb.go | grep -v vendor) GO_VERSION_MIN=1.14.7 GO_CMD?=go CGO_ENABLED?=0 ifneq ($(FDB_ENABLED), ) CGO_ENABLED=1 BUILD_TAGS+=foundationdb endif default: dev # bin generates the releasable binaries for Vault bin: prep @CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS) ui' sh -c "'$(CURDIR)/scripts/build.sh'" # dev creates binaries for testing Vault locally. These are put # into ./bin/ as well as $GOPATH/bin dev: prep @CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" dev-ui: assetcheck prep @CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS) ui' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" dev-dynamic: prep @CGO_ENABLED=1 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" # *-mem variants will enable memory profiling which will write snapshots of heap usage # to $TMP/vaultprof every 5 minutes. These can be analyzed using `$ go tool pprof <profile_file>`. # Note that any build can have profiling added via: `$ BUILD_TAGS=memprofiler make ...` dev-mem: BUILD_TAGS+=memprofiler dev-mem: dev dev-ui-mem: BUILD_TAGS+=memprofiler dev-ui-mem: assetcheck dev-ui dev-dynamic-mem: BUILD_TAGS+=memprofiler dev-dynamic-mem: dev-dynamic # Creates a Docker image by adding the compiled linux/amd64 binary found in ./bin. # The resulting image is tagged "vault:dev". docker-dev: prep docker build -f scripts/docker/Dockerfile -t vault:dev . docker-dev-ui: prep docker build -f scripts/docker/Dockerfile.ui -t vault:dev-ui . # test runs the unit tests and vets the code test: prep @CGO_ENABLED=$(CGO_ENABLED) \ VAULT_ADDR= \ VAULT_TOKEN= \ VAULT_DEV_ROOT_TOKEN_ID= \ VAULT_ACC= \ $(GO_CMD) test -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -timeout=$(TEST_TIMEOUT) -parallel=20 testcompile: prep @for pkg in $(TEST) ; do \ $(GO_CMD) test -v -c -tags='$(BUILD_TAGS)' $$pkg -parallel=4 ; \ done # testacc runs acceptance tests testacc: prep @if [ "$(TEST)" = "./..." ]; then \ echo "ERROR: Set TEST to a specific package"; \ exit 1; \ fi VAULT_ACC=1 $(GO_CMD) test -tags='$(BUILD_TAGS)' $(TEST) -v $(TESTARGS) -timeout=$(EXTENDED_TEST_TIMEOUT) # testrace runs the race checker testrace: prep @CGO_ENABLED=1 \ VAULT_ADDR= \ VAULT_TOKEN= \ VAULT_DEV_ROOT_TOKEN_ID= \ VAULT_ACC= \ $(GO_CMD) test -tags='$(BUILD_TAGS)' -race $(TEST) $(TESTARGS) -timeout=$(EXTENDED_TEST_TIMEOUT) -parallel=20 cover: ./scripts/coverage.sh --html # vet runs the Go source code static analysis tool `vet` to find # any common errors. vet: @$(GO_CMD) list -f '{{.Dir}}' ./... | grep -v /vendor/ \ | grep -v '.*github.com/hashicorp/vault$$' \ | xargs $(GO_CMD) vet ; if [ $$? -eq 1 ]; then \ echo ""; \ echo "Vet found suspicious constructs. 
Please check the reported constructs"; \ echo "and fix them if necessary before submitting the code for reviewal."; \ fi # lint runs vet plus a number of other checkers, it is more comprehensive, but louder lint: @$(GO_CMD) list -f '{{.Dir}}' ./... | grep -v /vendor/ \ | xargs golangci-lint run; if [ $$? -eq 1 ]; then \ echo ""; \ echo "Lint found suspicious constructs. Please check the reported constructs"; \ echo "and fix them if necessary before submitting the code for reviewal."; \ fi # for ci jobs, runs lint against the changed packages in the commit ci-lint: @golangci-lint run --deadline 10m --new-from-rev=HEAD~ # prep runs `go generate` to build the dynamically generated # source files. prep: fmtcheck @sh -c "'$(CURDIR)/scripts/goversioncheck.sh' '$(GO_VERSION_MIN)'" @$(GO_CMD) generate $($(GO_CMD) list ./... | grep -v /vendor/) @if [ -d .git/hooks ]; then cp .hooks/* .git/hooks/; fi # bootstrap the build by downloading additional tools needed to build ci-bootstrap: @for tool in $(EXTERNAL_TOOLS_CI) ; do \ echo "Installing/Updating $$tool" ; \ GO111MODULE=off $(GO_CMD) get -u $$tool; \ done # bootstrap the build by downloading additional tools that may be used by devs bootstrap: ci-bootstrap go generate -tags tools tools/tools.go # Note: if you have plugins in GOPATH you can update all of them via something like: # for i in $(ls | grep vault-plugin-); do cd $i; git remote update; git reset --hard origin/master; dep ensure -update; git add .; git commit; git push; cd ..; done update-plugins: grep vault-plugin- vendor/vendor.json | cut -d '"' -f 4 | xargs govendor fetch static-assets-dir: @mkdir -p ./pkg/web_ui static-assets: static-assets-dir @echo "--> Generating static assets" @go-bindata-assetfs -o bindata_assetfs.go -pkg http -prefix pkg -modtime 1480000000 -tags ui ./pkg/web_ui/... @mv bindata_assetfs.go http @$(MAKE) -f $(THIS_FILE) fmt test-ember: @echo "--> Installing JavaScript assets" @cd ui && yarn --ignore-optional @echo "--> Running ember tests" @cd ui && yarn run test:oss ember-ci-test: # Deprecated, to be removed soon. @echo "ember-ci-test is deprecated in favour of test-ui-browserstack" @exit 1 check-vault-in-path: @VAULT_BIN=$$(command -v vault) || { echo "vault command not found"; exit 1; }; \ [ -x "$$VAULT_BIN" ] || { echo "$$VAULT_BIN not executable"; exit 1; }; \ printf "Using Vault at %s:\n\$$ vault version\n%s\n" "$$VAULT_BIN" "$$(vault version)" check-browserstack-creds: @[ -n "$$BROWSERSTACK_ACCESS_KEY" ] || { echo "BROWSERSTACK_ACCESS_KEY not set"; exit 1; } @[ -n "$$BROWSERSTACK_USERNAME" ] || { echo "BROWSERSTACK_USERNAME not set"; exit 1; } test-ui-browserstack: check-vault-in-path check-browserstack-creds @echo "--> Installing JavaScript assets" @cd ui && yarn --ignore-optional @echo "--> Running ember tests in Browserstack" @cd ui && yarn run test:browserstack ember-dist: @echo "--> Installing JavaScript assets" @cd ui && yarn --ignore-optional @cd ui && npm rebuild node-sass @echo "--> Building Ember application" @cd ui && yarn run build @rm -rf ui/if-you-need-to-delete-this-open-an-issue-async-disk-cache ember-dist-dev: @echo "--> Installing JavaScript assets" @cd ui && yarn --ignore-optional @cd ui && npm rebuild node-sass @echo "--> Building Ember application" @cd ui && yarn run build-dev static-dist: ember-dist static-assets static-dist-dev: ember-dist-dev static-assets proto: protoc vault/*.proto --go_out=plugins=grpc,paths=source_relative:. protoc vault/activity/activity_log.proto --go_out=plugins=grpc,paths=source_relative:. 
protoc helper/storagepacker/types.proto --go_out=plugins=grpc,paths=source_relative:. protoc helper/forwarding/types.proto --go_out=plugins=grpc,paths=source_relative:. protoc sdk/logical/*.proto --go_out=plugins=grpc,paths=source_relative:. protoc physical/raft/types.proto --go_out=plugins=grpc,paths=source_relative:. protoc helper/identity/mfa/types.proto --go_out=plugins=grpc,paths=source_relative:. protoc helper/identity/types.proto --go_out=plugins=grpc,paths=source_relative:. protoc sdk/database/dbplugin/*.proto --go_out=plugins=grpc,paths=source_relative:. protoc sdk/database/newdbplugin/proto/*.proto --go_out=plugins=grpc,paths=source_relative:. protoc sdk/plugin/pb/*.proto --go_out=plugins=grpc,paths=source_relative:. sed -i -e 's/Id/ID/' vault/request_forwarding_service.pb.go sed -i -e 's/Idp/IDP/' -e 's/Url/URL/' -e 's/Id/ID/' -e 's/IDentity/Identity/' -e 's/EntityId/EntityID/' -e 's/Api/API/' -e 's/Qr/QR/' -e 's/Totp/TOTP/' -e 's/Mfa/MFA/' -e 's/Pingid/PingID/' -e 's/protobuf:"/sentinel:"" protobuf:"/' -e 's/namespaceId/namespaceID/' -e 's/Ttl/TTL/' -e 's/BoundCidrs/BoundCIDRs/' helper/identity/types.pb.go helper/identity/mfa/types.pb.go helper/storagepacker/types.pb.go sdk/plugin/pb/backend.pb.go sdk/logical/identity.pb.go vault/activity/activity_log.pb.go fmtcheck: @true #@sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'" fmt: goimports -w $(GOFMT_FILES) assetcheck: @echo "==> Checking compiled UI assets..." @sh -c "'$(CURDIR)/scripts/assetcheck.sh'" spellcheck: @echo "==> Spell checking website..." @misspell -error -source=text website/source mysql-database-plugin: @CGO_ENABLED=0 $(GO_CMD) build -o bin/mysql-database-plugin ./plugins/database/mysql/mysql-database-plugin mysql-legacy-database-plugin: @CGO_ENABLED=0 $(GO_CMD) build -o bin/mysql-legacy-database-plugin ./plugins/database/mysql/mysql-legacy-database-plugin cassandra-database-plugin: @CGO_ENABLED=0 $(GO_CMD) build -o bin/cassandra-database-plugin ./plugins/database/cassandra/cassandra-database-plugin influxdb-database-plugin: @CGO_ENABLED=0 $(GO_CMD) build -o bin/influxdb-database-plugin ./plugins/database/influxdb/influxdb-database-plugin postgresql-database-plugin: @CGO_ENABLED=0 $(GO_CMD) build -o bin/postgresql-database-plugin ./plugins/database/postgresql/postgresql-database-plugin mssql-database-plugin: @CGO_ENABLED=0 $(GO_CMD) build -o bin/mssql-database-plugin ./plugins/database/mssql/mssql-database-plugin hana-database-plugin: @CGO_ENABLED=0 $(GO_CMD) build -o bin/hana-database-plugin ./plugins/database/hana/hana-database-plugin mongodb-database-plugin: @CGO_ENABLED=0 $(GO_CMD) build -o bin/mongodb-database-plugin ./plugins/database/mongodb/mongodb-database-plugin # Tell packagespec where to write its CircleCI config. PACKAGESPEC_CIRCLECI_CONFIG := .circleci/config/@build-release.yml # Tell packagespec to re-run 'make ci-config' whenever updating its own CI config. 
PACKAGESPEC_HOOK_POST_CI_CONFIG := $(MAKE) ci-config .PHONY: ci-config ci-config: @$(MAKE) -C .circleci ci-config .PHONY: ci-verify ci-verify: @$(MAKE) -C .circleci ci-verify .PHONY: bin default prep test vet bootstrap ci-bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin influxdb-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin static-assets ember-dist ember-dist-dev static-dist static-dist-dev assetcheck check-vault-in-path check-browserstack-creds test-ui-browserstack packages build build-ci .NOTPARALLEL: ember-dist ember-dist-dev static-assets -include packagespec.mk
Makefile
1
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.004244254436343908, 0.0004955537151545286, 0.00016564250108785927, 0.0001994109625229612, 0.0008269241661764681 ]
{ "id": 3, "code_window": [ "# Multi-stage builder to avoid polluting users environment with wrong \n", "# architecture binaries. Since this binary is used in an alpine container, \n", "# we're explicitly compiling for 'linux/amd64'\n" ], "labels": [ "replace", "replace", "keep" ], "after_edit": [ "# Multi-stage builder to avoid polluting users environment with wrong\n", "# architecture binaries. Since this binary is used in an alpine container,\n" ], "file_path": "scripts/docker/Dockerfile", "type": "replace", "edit_start_line_idx": 0 }
--- layout: guides page_title: 'Identity: Entities and Groups - Guides' sidebar_title: Identity - Entities & Groups description: |- This guide demonstrates the commands to create entities, entity aliases, and groups. For the purpose of the demonstration, userpass auth method will be used. --- # Identity - Entities and Groups Vault supports multiple authentication methods and also allows enabling the same type of authentication method on different mount paths. Each Vault client may have multiple accounts with various identity providers that are enabled on the Vault server. Vault clients can be mapped as **_entities_** and their corresponding accounts with authentication providers can be mapped as **_aliases_**. In essence, each entity is made up of zero or more aliases. Identity secrets engine internally maintains the clients who are recognized by Vault. ## Reference Material - [Identity Secrets Engine](/docs/secrets/identity) - [Identity Secrets Engine (API)](/api/secret/identity) - [External vs Internal Groups](/docs/secrets/identity#external-vs-internal-groups) ~> **NOTE:** An [interactive tutorial](https://www.katacoda.com/hashicorp/scenarios/vault-identity) is also available if you do not have a Vault environment to perform the steps described in this guide. ## Estimated Time to Complete 10 minutes ## Personas The steps described in this guide are typically performed by **operations** persona. ## Challenge Bob has accounts in both Github and LDAP. Both Github and LDAP auth methods are enabled on the Vault server that he can authenticate using either one of his accounts. Although both accounts belong to Bob, there is no association between the two accounts to set some common properties. ## Solution Create an _entity_ representing Bob, and associate aliases representing each of his accounts as the entity member. You can set additional policies and metadata on the entity level so that both accounts can inherit. When Bob authenticates using either one of his accounts, the entity identifier will be tied to the authenticated token. When such tokens are put to use, their entity identifiers are audit logged, marking a trail of actions performed by specific users. ## Prerequisites To perform the tasks described in this guide, you need to have a Vault environment. Refer to the [Getting Started](/intro/getting-started/install) guide to install Vault. Make sure that your Vault server has been [initialized and unsealed](/intro/getting-started/deploy). ### Policy requirements -> **NOTE:** For the purpose of this guide, you can use the **`root`** token to work with Vault. However, it is recommended that root tokens are used for just enough initial setup or in emergencies. As a best practice, use tokens with an appropriate set of policies based on your role in the organization. 
To perform all tasks demonstrated in this guide, your policy must include the following permissions: ```shell # Configure auth methods path "sys/auth" { capabilities = [ "read", "list" ] } # Configure auth methods path "sys/auth/*" { capabilities = [ "create", "update", "read", "delete", "list", "sudo" ] } # Manage userpass auth methods path "auth/userpass/*" { capabilities = [ "create", "read", "update", "delete" ] } # Manage github auth methods path "auth/github/*" { capabilities = [ "create", "read", "update", "delete" ] } # Display the Policies tab in UI path "sys/policies" { capabilities = [ "read", "list" ] } # Create and manage ACL policies from UI path "sys/policies/acl/*" { capabilities = [ "create", "read", "update", "delete", "list" ] } # Create and manage policies path "sys/policy" { capabilities = [ "read", "list" ] } # Create and manage policies path "sys/policy/*" { capabilities = [ "create", "read", "update", "delete", "list" ] } # List available secret engines to retrieve accessor ID path "sys/mounts" { capabilities = [ "read" ] } # Create and manage entities and groups path "identity/*" { capabilities = [ "create", "read", "update", "delete", "list" ] } ``` If you are not familiar with policies, complete the [policies](/guides/identity/policies) guide. ## Steps In this lab, you are going to learn the API-based commands to create entities, entity aliases, and groups. For the purpose of the training, you are going to leverage the userpass auth method. The challenge exercise walks you through creating an external group by mapping a GitHub group to an identity group. 1. [Create an Entity with Alias](#step1) 2. [Test the Entity](#step2) 3. [Create an Internal Group](#step3) 4. [Create an External Group](#step4) ### Step 1: Create an Entity with Alias ((#step1)) You are going to create a new entity with base policy assigned. The entity defines two entity aliases with each has a different policy assigned. **Scenario:** A user, Bob Smith at ACME Inc. happened to have two sets of credentials: `bob` and `bsmith`. He can authenticate with Vault using either one of his accounts. To manage his accounts and link them to identity `Bob Smith` in QA team, you are going to create an entity for Bob. ![Entity Bob Smith](/img/vault-entity-1.png) -> For the simplicity of this guide, you are going to work with the `userpass` auth method. However, in reality, the user `bob` might be a username exists in Active Directory, and `bsmith` might be Bob's username in GitHub. #### Scenario Policies **`base.hcl`** ```hcl path "secret/training_*" { capabilities = ["create", "read"] } ``` **`test.hcl`** ```hcl path "secret/test" { capabilities = [ "create", "read", "update", "delete" ] } ``` **`team-qa.hcl`** ```hcl path "secret/team-qa" { capabilities = [ "create", "read", "update", "delete" ] } ``` ~> **NOTE:** If you are running [K/V Secrets Engine v2](/api/secret/kv/kv-v2) at `secret`, set the policies path accordingly: `secret/data/training_*`, `secret/data/test`, and `secret/data/team-qa`. Now, you are going to create `bob` and `bsmith` users with appropriate policies attached. #### CLI command 1. Create policies: `base`, `test`, and `team-qa`. ```shell # Create base policy $ vault policy write base base.hcl # Create test policy $ vault policy write test test.hcl # Create team-qa policy $ vault policy write team-qa team-qa.hcl # List all policies to verify that 'base', 'test' and 'team-qa' policies exist $ vault policy list base default team-qa test root ``` 1. Enable the `userpass` auth method. 
```plaintext $ vault auth enable userpass ``` 1. Create a new user in userpass: - username: bob - password: training - policy: test ```plaintext $ vault write auth/userpass/users/bob password="training" policies="test" ``` 1. Create another user in userpass: - username: bsmith - password: training - policy: team-qa ```plaintext $ vault write auth/userpass/users/bsmith password="training" policies="team-qa" ``` 1. Execute the following command to discover the mount accessor for the userpass auth method: ```plaintext $ vault auth list -detailed Path Type Accessor ... ---- ---- -------- ... token/ token auth_token_bec8530a ... userpass/ userpass auth_userpass_70eba76b ... ``` In the output, locate the **Accessor** value for `userpass`. Run the following command to store the userpass accessor value in a file named, `accessor.txt`. ```plaintext $ vault auth list -format=json | jq -r '.["userpass/"].accessor' > accessor.txt ``` 1. Create an entity for `bob-smith`. ```plaintext $ vault write identity/entity name="bob-smith" policies="base" \ metadata=organization="ACME Inc." \ metadata=team="QA" Key Value --- ----- aliases <nil> id 631256b1-8523-9838-5501-d0a1e2cdad9c ``` -> Make a note of the generated entity ID (**`id`**). 1. Now, add the user `bob` to the `bob-smith` entity by creating an entity alias: ```plaintext $ vault write identity/entity-alias name="bob" \ canonical_id=<entity_id> \ mount_accessor=<userpass_accessor> ``` The `<userpass_accessor>` value is stored in `accessor.txt`. **Example:** ```plaintext $ vault write identity/entity-alias name="bob" \ canonical_id="631256b1-8523-9838-5501-d0a1e2cdad9c" \ mount_accessor=$(cat accessor.txt) Key Value --- ----- canonical_id 631256b1-8523-9838-5501-d0a1e2cdad9c id 873f7b12-dec8-c182-024e-e3f065d8a9f1 ``` 1. Repeat the step to add user `bsmith` to the `bob-smith` entity. **Example:** ```plaintext $ vault write identity/entity-alias name="bsmith" \ canonical_id="631256b1-8523-9838-5501-d0a1e2cdad9c" \ mount_accessor=$(cat accessor.txt) Key Value --- ----- canonical_id 631256b1-8523-9838-5501-d0a1e2cdad9c id 55d46747-b99e-6a82-05f5-61bb60fd7d15 ``` 1. Review the entity details. ```plaintext $ vault read identity/entity/id/<entity_id> ``` The output should include the entity aliases, metadata (organization, and team), and base policy. #### API call using cURL 1. Create policies: `base`, `test`, and `team-qa`. To create a policy, use the `/sys/policy` endpoint: ```shell $ curl --header "X-Vault-Token: <TOKEN>" \ --request PUT \ --data <PAYLOAD> \ <VAULT_ADDRESS>/v1/sys/policy/<POLICY_NAME> ``` Where `<TOKEN>` is your valid token, and `<PAYLOAD>` includes the policy name and stringified policy. **Example:** ```shell # Create the API request payload, payload-1.json $ tee payload-1.json <<EOF { "policy": "path \"secret/training_*\" {\n capabilities = [\"create\", \"read\"]\n}" } EOF # Create base policy $ curl --header "X-Vault-Token: ..." \ --request PUT \ --data @payload-1.json \ http://127.0.0.1:8200/v1/sys/policy/base # Create the API request payload, payload-2.json $ tee payload-2.json <<EOF { "policy": "path \"secret/test\" {\n capabilities = [ \"create\", \"read\", \"update\", \"delete\" ]\n }" } EOF # Create base policy $ curl --header "X-Vault-Token: ..." 
\ --request PUT \ --data @payload-2.json \ http://127.0.0.1:8200/v1/sys/policy/test # Create the API request payload, payload-1.json $ tee payload-3.json <<EOF { "policy": "path \"secret/team-qa\" {\n capabilities = [ \"create\", \"read\", \"update\", \"delete\" ]\n }" } EOF # Create base policy $ curl --header "X-Vault-Token: ..." \ --request PUT \ --data @payload-3.json \ http://127.0.0.1:8200/v1/sys/policy/team-qa # List all policies to verify that 'base', 'test' and 'team-qa' policies exist $ curl --header "X-Vault-Token: ..." \ http://127.0.0.1:8200/v1/sys/policy | jq ``` 1. Enable the `userpass` auth method. ```plaintext $ curl --header "X-Vault-Token: ..." \ --request POST \ --data '{"type": "userpass"}' \ http://127.0.0.1:8200/v1/sys/auth/userpass ``` 1. Create a new user in userpass: - username: bob - password: training - policy: test ```plaintext $ curl --header "X-Vault-Token: ..." \ --request POST \ --data '{"password": "training", "policies": "test"}' \ http://127.0.0.1:8200/v1/auth/userpass/users/bob ``` 1. Create another user in userpass: - username: bsmith - password: training - policy: team-qa ```plaintext $ curl --header "X-Vault-Token: ..." \ --request POST \ --data '{"password": "training", "policies": "team-qa"}' \ http://127.0.0.1:8200/v1/auth/userpass/users/bsmith ``` 1. Execute the following command to discover the mount accessor for the userpass auth method. ```plaintext $ curl --header "X-Vault-Token: ..." \ http://127.0.0.1:8200/v1/sys/auth | jq { ... "userpass/": { "accessor": "auth_userpass_9b6cd254", ... }, ... ``` -> Make a note of the userpass accessor value (**`auth_userpass_XXXXX`**). 1. Create an entity for bob-smith. ```plaintext $ tee payload.json <<EOF { "name": "bob-smith", "metadata": { "organization": "ACME Inc.", "team": "QA" }, "policies": ["base"] } EOF $ curl --header "X-Vault-Token: ..." \ --request POST \ --data @payload.json \ http://127.0.0.1:8200/v1/identity/entity { "request_id": "4d4d340f-f4c9-0201-c87e-42cc140a383a", "lease_id": "", "renewable": false, "lease_duration": 0, "data": { "aliases": null, "id": "6ded4d31-481f-040b-11ad-c6db0cb4d211" }, ... ``` -> Make a note of the generated entity ID (**`id`**). 1. Now, add the user `bob` to the `bob-smith` entity by creating an entity alias. In the request body, you need to pass the userpass name as `name`, the userpass accessor value as `mount_accessor`, and the entity id as `canonical_id`. **Example:** ```shell-session $ tee payload-bob.json <<EOF { "name": "bob", "canonical_id": "6ded4d31-481f-040b-11ad-c6db0cb4d211", "mount_accessor": "auth_userpass_9b6cd254" } EOF $ curl --header "X-Vault-Token: ..." \ --request POST \ --data @payload-bob.json \ http://127.0.0.1:8200/v1/identity/entity-alias ``` 1. Repeat the step to add user `bsmith` to the `bob-smith` entity. **Example:** ```plaintext $ tee payload-bsmith.json <<EOF { "name": "bsmith", "canonical_id": "6ded4d31-481f-040b-11ad-c6db0cb4d211", "mount_accessor": "auth_userpass_9b6cd254" } EOF $ curl --header "X-Vault-Token: ..." \ --request POST \ --data @payload-bsmith.json \ http://127.0.0.1:8200/v1/identity/entity-alias ``` 1. Review the entity details. (**NOTE:** Be sure to enter the entity ID matching your environment.) ```plaintext $ curl --header "X-Vault-Token: ..." \ http://127.0.0.1:8200/v1/identity/entity/id/<ENTITY_ID> { "request_id": "cc0793bf-fafe-4b2c-fd82-88855712845c", "lease_id": "", "renewable": false, "lease_duration": 0, "data": { "aliases": [ { "canonical_id": "6ded4d31-481f-040b-11ad-c6db0cb4d211", ... 
"mount_type": "userpass", "name": "bob" }, { "canonical_id": "6ded4d31-481f-040b-11ad-c6db0cb4d211", ... "mount_type": "userpass", "name": "bsmith" } ], ... ``` The `bob` and `bsmith` users should appear in the entity alias list. #### Web UI 1. Open a web browser and launch the Vault UI (e.g. http://127.0.01:8200/ui) and then login. 1. Click the **Policies** tab, and then select **Create ACL policy**. 1. Enter **`base`** in the **Name** field, and paste in the [`base.hcl` policy rules](#scenario-policies) in the **Policy** text editor. ![Create Policy](/img/vault-policy-2.png) 1. Click **Create Policy** to complete. 1. Repeat the steps to create policies for **`test`** and **`team-qa`** as well. ![Create Policy](/img/vault-policy-1.png) 1. Click the **Access** tab, and select **Enable new method**. 1. Select **Username & Password** from the **Type** drop-down menu. ![Create Policy](/img/vault-auth-method-2.png) 1. Click **Enable Method**. 1. Click the Vault CLI shell icon (**`>_`**) to open a command shell. Enter the following command to create a new user, **`bob`**: ```plaintext $ vault write auth/userpass/users/bob password="training" policies="test" ``` ![Create Policy](/img/vault-auth-method-3.png) 1. Enter the following command to create a new user, **`bsmith`**: ```plaintext $ vault write auth/userpass/users/bsmith password="training" policies="team-qa" ``` ![Create Policy](/img/vault-auth-method-4.png) 1. Click the icon (**`>_`**) again to hide the shell. 1. From the **Access** tab, select **Entities** and then **Create entity**. 1. Populate the **Name**, **Policies** and **Metadata** fields as shown below: ![Create Policy](/img/vault-entity-4.png) 1. Click **Create**. 1. Select **Add alias**. Enter **`bob`** in the **Name** field and select **`userpass/ (userpass)`** from the **Auth Backend** drop-down list. ![Create Policy](/img/vault-entity-5.png) 1. Click **Create**. 1. Return to the **Entities** list. Select **Add alias** from the **`bob-smith`** entity menu. ![Create Policy](/img/vault-entity-6.png) 1. Enter **`bsmith`** in the **Name** field and select **`userpass/ (userpass)`** from the **Auth Backend** drop-down list, and then click **Create**. ### Step 2: Test the Entity ((#step2)) To better understand how a token inherits the capabilities from the entity's policy, you are going to test it by logging in as `bob`. ### CLI Command First, login as `bob`. ```shell-session $ vault login -method=userpass username=bob password=training Key Value --- ----- token ac318416-0dc1-4311-67e4-b58381c86fde token_accessor 79cced7b-51df-9523-920f-a1579687516b token_duration 768h token_renewable true token_policies ["default" "test"] identity_policies ["base"] policies ["base" "default" "test"] token_meta_username bob ``` > Upon a successful authentication, a token will be returned. Notice that the > output displays **`token_policies`** and **`identity_policies`**. The generated > token has both `test` and `base` policies attached. The `test` policy grants CRUD operations on the `secret/test` path. Test to make sure that you can write secrets in the path. ```shell-session $ vault kv put secret/test owner="bob" Success! Data written to: secret/test ``` Although the username `bob` does not have `base` policy attached, the token inherits the capabilities granted in the base policy because `bob` is a member of the `bob-smith` entity, and the entity has base policy attached. Check to see that the bob's token inherited the capabilities. 
```shell-session $ vault token capabilities secret/training_test create, read ``` > The `base` policy grants create and read capabilities on > `secret/training_*` path; therefore, `bob` is permitted to run create and > read operations against any path starting with `secret/training_*`. What about the `secret/team-qa` path? ```shell-session $ vault token capabilities secret/team-qa deny ```  The user `bob` only inherits capability from its associating entity's policy. The user can access the `secret/team-qa` path only if he logs in with `bsmith` credentials. ~> Log back in with the token you used to configure the entity before proceed to [Step 3](#step3). #### API call using cURL First, login as `bob`. ```shell-session $ curl --request POST \ --data '{"password": "training"}' \ http://127.0.0.1:8200/v1/auth/userpass/login/bob { ... "auth": { "client_token": "b3c2ac10-9f8f-4e64-9a1c-337236ba20f6", "accessor": "92204429-6555-772e-cf51-52492d7f1686", "policies": [ "base", "default", "test" ], "token_policies": [ "default", "test" ], "identity_policies": [ "base" ], ... ``` > Upon a successful authentication, a token will be returned. Notice that the > output displays **`token_policies`** and **`identity_policies`**. The generated > token has both `test` and `base` policies attached. The `test` policy grants CRUD operations on the `secret/test` path. Test to make sure that you can write secrets in the path. ```shell-session $ curl --header "X-Vault-Token: ..." \ --request POST \ --data '{"owner": "bob"}' \ http://127.0.0.1:8200/v1/secret/test ``` Although the username `bob` does not have `base` policy attached, the token inherits the capabilities granted in the base policy because `bob` is a member of the `bob-smith` entity, and the entity has base policy attached. Check to see that the bob's token inherited the capabilities. ```shell-session $ curl --header "X-Vault-Token: ..." \ --request POST \ --data '{"paths": ["secret/training_test"]}' http://127.0.0.1:8200/v1/sys/capabilities-self | jq { "secret/training_test": [ "create", "read" ], ... ``` > The `base` policy grants create and read capabilities on > `secret/training_*` path; therefore, `bob` is permitted to run create and > read operations against any path starting with `secret/training_*`. What about the `secret/team-qa` path? ```shell-session $ curl --header "X-Vault-Token: ..." \ --request POST \ --data '{"paths": ["secret/team-qa"]}' http://127.0.0.1:8200/v1/sys/capabilities-self | jq { "secret/team-qa": [ "deny" ], ... ```  The user `bob` only inherits capability from its associating entity's policy. The user can access the `secret/team-qa` path only if he logs in with `bsmith` credentials. !> **NOTE:** Log back in with the token you used to configure the entity before proceed to [Step 3](#step3). ### Step 3: Create an Internal Group ((#step3)) Now, you are going to create an internal group named, **`engineers`**. Its member is `bob-smith` entity that you created in [Step 1](#step1). ![Entity Bob Smith](/img/vault-entity-3.png) The group policy, `team-eng` defines the following: **`team-eng.hcl`** ```plaintext path "secret/team/eng" { capabilities = [ "create", "read", "update", "delete"] } ``` #### CLI Command 1. Create a new policy named, `team-eng`: ```plaintext $ vault policy write team-eng ./team-eng.hcl ``` 1. Create an internal group named, `engineers` and add `bob-smith` entity as a group member and attach `team-eng`. 
```plaintext $ vault write identity/group name="engineers" \ policies="team-eng" \ member_entity_ids=<entity_id> \ metadata=team="Engineering" \ metadata=region="North America" ``` Where `<entity_id>` is the value you copied at [Step 1](#step1). **Example:** ```plaintext $ vault write identity/group name="engineers" \ policies="team-eng" \ member_entity_ids="631256b1-8523-9838-5501..." \ metadata=team="Engineering" \ metadata=region="North America" Key Value --- ----- id 81bdac90-284a-7b8c-6289-5fa7693bcb4a name engineers ``` Now, when you login as `bob` or `bsmith`, its generated token inherits the group-level policy, **`team-eng`**. You can perform similar tests demonstrated in [Step 2](#step2) to verify that. #### API call using cURL 1. Create a new policy named, `team-eng`: ```shell # API request payload containing stringified policy $ tee payload.json <<EOF { "policy": "path \"secret/team/eng\" {\n capabilities = [\"create\", \"read\", \"delete\", \"update\"]\n }" } EOF # Create base policy $ curl --header "X-Vault-Token: ..." \ --request PUT \ --data @payload-1.json \ http://127.0.0.1:8200/v1/sys/policy/team-eng ``` 1. Create an internal group named, `engineers` and add `bob-smith` entity as a group member and attach `team-eng`. ```shell # API request msg payload. Be sure to replace <ENTITY_ID> with correct value $ tee payload-group.json <<EOF { "name": "engineers", "policies": ["team-eng"], "member_entity_ids": ["<ENTITY_ID>"], "metadata": { "team": "Engineering", "region": "North America" } } EOF # Use identity/group endpoint $ curl --header "X-Vault-Token: ..." \ --request PUT \ --data @payload-group.json \ http://127.0.0.1:8200/v1/identity/group | jq { "request_id": "2b6eefd6-67a6-31c7-dbc3-11c1c132e2cf", "lease_id": "", "renewable": false, "lease_duration": 0, "data": { "id": "d62157aa-b5f6-b6fe-aa40-0ffc54defc41", "name": "engineers" }, ... ``` Now, when you login as `bob` or `bsmith`, its generated token inherits the group-level policy, **`team-eng`**. You can perform similar tests demonstrated in [Step 2](#step2) to verify that. #### Web UI 1. Click the **Policies** tab, and then select **Create ACL policy**. 1. Enter **`team-eng`** in the **Name** field, and paste in the [`team-eng.hcl` policy rules](#step3) in the **Policy** text editor, and then click **Create Policy**. 1. Click the **Access** tab and select **Entities**. 1. Select the **`bob-smith`** entity and copy its **ID** displayed under the **Details** tab. 1. Now, click **Groups** from the left navigation, and select **Create group**. 1. Enter the group information as shown below. ![Group](/img/vault-entity-7.png) ~> **NOTE:** Make sure to enter the `bob-smith` entity **ID** you copied in the **Member Entity IDs** field. 1. Click **Create**. Now, when you login as `bob` or `bsmith`, its generated token inherits the group-level policy, **`team-eng`**. You can perform similar tests demonstrated in [Step 3](#step3) to verify that. > **Summary:** By default, Vault creates an internal group. When you create an > internal group, you specify the **_group members_** rather than **_group > alias_**. Group _aliases_ are mapping between Vault and external identity providers > (e.g. LDAP, GitHub, etc.). Therefore, you define group aliases only when you > create **external** groups. For internal groups, you specify `member_entity_ids` > and/or `member_group_ids`. 
### Step 4: Create an External Group ((#step4)) It is common for organizations to enable auth methods such as LDAP, Okta and perhaps GitHub to handle the Vault user authentication, and individual user's group memberships are defined within those identity providers. In order to manage the group-level authorization, you can create an external group to link Vault with the external identity provider (auth provider) and attach appropriate policies to the group. #### Example Scenario Any user who belongs to **`training`** team in GitHub organization, **`example-inc`** are permitted to perform all operations against the `secret/education` path. **NOTE:** This scenario assumes that the GitHub organization, `example-inc` exists as well as `training` team within the organization. ### CLI Command ```shell # Write a new policy file # If you are running KV v2, set the path to "secret/data/education" instead $ tee education.hcl <<EOF path "secret/education" { capabilities = [ "create", "read", "update", "delete", "list" ] } EOF # Create a new policy named 'education' $ vault policy write education education.hcl # Enable GitHub auth method $ vault auth enable github # Retrieve the mount accessor for the GitHub auth method and save it in accessor.txt $ vault auth list -format=json | jq -r '.["github/"].accessor' > accessor.txt # Configure to point to your GitHub organization (e.g. hashicorp) $ vault write auth/github/config organization=example-inc # Create an external group named, "education" # Be sure to copy the generated group ID $ vault write identity/group name="education" \ policies="education" \ type="external" \ metadata=organization="Product Education" # Create a group alias where canonical_id is the group ID # 'name' is the actual GitHub team name (NOTE: Use slugified team name.) $ vault write identity/group-alias name="training" \ mount_accessor=$(cat accessor.txt) \ canonical_id="<group_ID>" ``` #### API call using cURL ```shell # API request payload containing stringfied policy # If you are running KV v2, set the path to "secret/data/education" instead $ tee payload-pol.json <<EOF { "policy": "path \"secret/education\" {\n capabilities = [\"create\", \"read\", \"delete\", \"update\", \"list\"]\n }" } EOF # Create education policy $ curl --header "X-Vault-Token: ..." \ --request PUT \ --data @payload-pol.json \ http://127.0.0.1:8200/v1/sys/policy/education # Enable GitHub Auth Method at github $ curl --header "X-Vault-Token: ..." \ --request POST \ --data '{"type": "github"}' \ http://127.0.0.1:8200/v1/sys/auth/github # Configure GitHub auth method by setting organization $ curl --header "X-Vault-Token: ..." \ --request POST \ --data '{"organization": "example-inc"}' \ http://127.0.0.1:8200/v1/auth/github/config # Get the github accessor value (**`auth_github_XXXXX`**) $ curl --header "X-Vault-Token: ..." \ http://127.0.0.1:8200/v1/sys/auth | jq { ... "userpass/": { "accessor": "auth_github_91010f60", ... }, ... } # API request msg payload to create an external group $ tee payload-edu.json <<EOF { "name": "education", "policies": ["education"], "type": "external", "metadata": { "organization": "Product Education" } } EOF # Create an external group named, "education" # Be sure to copy the group ID (id) $ curl --header "X-Vault-Token: ..." 
\ --request POST \ --data @payload-edu.json \ http://127.0.0.1:8200/v1/identity/group | jq { "request_id": "a8161086-13db-f982-4216-7d996eae3fd9", "lease_id": "", "renewable": false, "lease_duration": 0, "data": { "id": "ea18cb62-2478-d370-b726-a77d1700de80", "name": "education" }, ... # API request msg payload to create a group aliases, training $ tee payload-training.json <<EOF { "canonical_id": "<GROUP_ID>", "mount_accessor": "auth_github_XXXXX", "name": "training" } EOF # Create 'training' group alias $ curl --header "X-Vault-Token: ..." \ --request POST \ --data @payload-training.json \ http://127.0.0.1:8200/v1/identity/group-alias | jq ``` #### Web UI 1. Click the **Policies** tab, and then select **Create ACL policy**. 1. Enter **`education`** in the **Name** field, and enter the following policy in the **Policy** text editor, and then click **Create Policy**. (**NOTE:** If you are running KV v2, set the path to **`secret/data/education`** instead.) ```plaintext path "secret/education" { capabilities = [ "create", "read", "update", "delete", "list" ] } ``` 1. Click the **Access** tab and select **Auth Methods**. 1. Select **Enable new method**. 1. Select **GitHub** from the **Type** drop-down menu, and then enter **`example-inc`** in the **Organization** field. 1. Click **Enable Method**. 1. Click the **Access** tab and select **Groups**. 1. Select **Create group**. Enter the group information as shown below. ![Create Policy](/img/vault-entity-9.png) 1. Click **Create**. 1. Select **Add alias** and enter **`training`** in the **Name** field. Select **github/ (github)** from the **Auth Backend** drop-down list. ![Create Policy](/img/vault-entity-10.png) 1. Click **Create**. > **Summary:** At this point, any GitHub user who belongs to `training` > team within the `example-inc` organization can authenticate with Vault. The > generated token for the user has `education` policy attached. ## Next steps Now that you have learned about managing user identity using entities and groups, read the [AppRole Pull Authentication](/guides/identity/authentication) guide to learn how apps or machines can authenticate with Vault.
website/pages/guides/identity/identity.mdx
0
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.0006611528224311769, 0.00018233276205137372, 0.00016333258827216923, 0.00016667899035383016, 0.000060091118939453736 ]
{ "id": 3, "code_window": [ "# Multi-stage builder to avoid polluting users environment with wrong \n", "# architecture binaries. Since this binary is used in an alpine container, \n", "# we're explicitly compiling for 'linux/amd64'\n" ], "labels": [ "replace", "replace", "keep" ], "after_edit": [ "# Multi-stage builder to avoid polluting users environment with wrong\n", "# architecture binaries. Since this binary is used in an alpine container,\n" ], "file_path": "scripts/docker/Dockerfile", "type": "replace", "edit_start_line_idx": 0 }
import Controller from '@ember/controller'; import ListController from 'vault/mixins/list-controller'; export default Controller.extend(ListController, {});
ui/app/controllers/vault/cluster/access/method/item/list.js
0
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.0001675413514021784, 0.0001675413514021784, 0.0001675413514021784, 0.0001675413514021784, 0 ]
{ "id": 3, "code_window": [ "# Multi-stage builder to avoid polluting users environment with wrong \n", "# architecture binaries. Since this binary is used in an alpine container, \n", "# we're explicitly compiling for 'linux/amd64'\n" ], "labels": [ "replace", "replace", "keep" ], "after_edit": [ "# Multi-stage builder to avoid polluting users environment with wrong\n", "# architecture binaries. Since this binary is used in an alpine container,\n" ], "file_path": "scripts/docker/Dockerfile", "type": "replace", "edit_start_line_idx": 0 }
package ram //Licensed under the Apache License, Version 2.0 (the "License"); //you may not use this file except in compliance with the License. //You may obtain a copy of the License at // //http://www.apache.org/licenses/LICENSE-2.0 // //Unless required by applicable law or agreed to in writing, software //distributed under the License is distributed on an "AS IS" BASIS, //WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //See the License for the specific language governing permissions and //limitations under the License. // // Code generated by Alibaba Cloud SDK Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" ) // GetPolicyVersion invokes the ram.GetPolicyVersion API synchronously // api document: https://help.aliyun.com/api/ram/getpolicyversion.html func (client *Client) GetPolicyVersion(request *GetPolicyVersionRequest) (response *GetPolicyVersionResponse, err error) { response = CreateGetPolicyVersionResponse() err = client.DoAction(request, response) return } // GetPolicyVersionWithChan invokes the ram.GetPolicyVersion API asynchronously // api document: https://help.aliyun.com/api/ram/getpolicyversion.html // asynchronous document: https://help.aliyun.com/document_detail/66220.html func (client *Client) GetPolicyVersionWithChan(request *GetPolicyVersionRequest) (<-chan *GetPolicyVersionResponse, <-chan error) { responseChan := make(chan *GetPolicyVersionResponse, 1) errChan := make(chan error, 1) err := client.AddAsyncTask(func() { defer close(responseChan) defer close(errChan) response, err := client.GetPolicyVersion(request) if err != nil { errChan <- err } else { responseChan <- response } }) if err != nil { errChan <- err close(responseChan) close(errChan) } return responseChan, errChan } // GetPolicyVersionWithCallback invokes the ram.GetPolicyVersion API asynchronously // api document: https://help.aliyun.com/api/ram/getpolicyversion.html // asynchronous document: https://help.aliyun.com/document_detail/66220.html func (client *Client) GetPolicyVersionWithCallback(request *GetPolicyVersionRequest, callback func(response *GetPolicyVersionResponse, err error)) <-chan int { result := make(chan int, 1) err := client.AddAsyncTask(func() { var response *GetPolicyVersionResponse var err error defer close(result) response, err = client.GetPolicyVersion(request) callback(response, err) result <- 1 }) if err != nil { defer close(result) callback(nil, err) result <- 0 } return result } // GetPolicyVersionRequest is the request struct for api GetPolicyVersion type GetPolicyVersionRequest struct { *requests.RpcRequest VersionId string `position:"Query" name:"VersionId"` PolicyType string `position:"Query" name:"PolicyType"` PolicyName string `position:"Query" name:"PolicyName"` } // GetPolicyVersionResponse is the response struct for api GetPolicyVersion type GetPolicyVersionResponse struct { *responses.BaseResponse RequestId string `json:"RequestId" xml:"RequestId"` PolicyVersion PolicyVersion `json:"PolicyVersion" xml:"PolicyVersion"` } // CreateGetPolicyVersionRequest creates a request to invoke GetPolicyVersion API func CreateGetPolicyVersionRequest() (request *GetPolicyVersionRequest) { request = &GetPolicyVersionRequest{ RpcRequest: &requests.RpcRequest{}, } request.InitWithApiInfo("Ram", "2015-05-01", "GetPolicyVersion", "", "") return } // CreateGetPolicyVersionResponse creates a response 
to parse from GetPolicyVersion response func CreateGetPolicyVersionResponse() (response *GetPolicyVersionResponse) { response = &GetPolicyVersionResponse{ BaseResponse: &responses.BaseResponse{}, } return }
vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/ram/get_policy_version.go
0
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.005311202257871628, 0.0014856496127322316, 0.00016678741667419672, 0.0003383011498954147, 0.0017900525126606226 ]
{ "id": 4, "code_window": [ "# we're explicitly compiling for 'linux/amd64'\n", "ARG VERSION=1.13.10\n", "\n", "FROM golang:${VERSION} AS builder\n", "\n", "ARG CGO_ENABLED=0\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "ARG VERSION=1.14.7\n" ], "file_path": "scripts/docker/Dockerfile", "type": "replace", "edit_start_line_idx": 3 }
# Multi-stage builder to avoid polluting users environment with wrong
# architecture binaries. Since this binary is used in an alpine container,
# we're explicitly compiling for 'linux/amd64'
ARG VERSION=1.13.10

FROM golang:${VERSION} AS builder

ARG CGO_ENABLED=0
ARG BUILD_TAGS

WORKDIR /go/src/github.com/hashicorp/vault
COPY . .

RUN make bootstrap \
    && CGO_ENABLED=$CGO_ENABLED BUILD_TAGS='$BUILD_TAGS' VAULT_DEV_BUILD=1 XC_OSARCH='linux/amd64' sh -c "'./scripts/build.sh'"

# Docker Image

FROM alpine:3.10

# Create a vault user and group first so the IDs get set the same way,
# even as the rest of this may change over time.
RUN addgroup vault && \
    adduser -S -G vault vault

# Set up certificates, our base tools, and Vault.
RUN set -eux; \
    apk add --no-cache ca-certificates libcap su-exec dumb-init tzdata
COPY --from=builder /go/bin/vault /bin/vault

# /vault/logs is made available to use as a location to store audit logs, if
# desired; /vault/file is made available to use as a location with the file
# storage backend, if desired; the server will be started with /vault/config as
# the configuration directory so you can add additional config files in that
# location.
RUN mkdir -p /vault/logs && \
    mkdir -p /vault/file && \
    mkdir -p /vault/config && \
    chown -R vault:vault /vault

# Expose the logs directory as a volume since there's potentially long-running
# state in there
VOLUME /vault/logs

# Expose the file directory as a volume since there's potentially long-running
# state in there
VOLUME /vault/file

# 8200/tcp is the primary interface that applications use to interact with
# Vault.
EXPOSE 8200

# The entry point script uses dumb-init as the top-level process to reap any
# zombie processes created by Vault sub-processes.
#
# For production derivatives of this container, you should add the IPC_LOCK
# capability so that Vault can mlock memory.
COPY ./scripts/docker/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
ENTRYPOINT ["docker-entrypoint.sh"]

# By default you'll get a single-node development server that stores everything
# in RAM and bootstraps itself. Don't use this configuration for production.
CMD ["server", "-dev"]
scripts/docker/Dockerfile
1
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.778939962387085, 0.20333607494831085, 0.00016621460963506252, 0.00016934242739807814, 0.32298043370246887 ]
{ "id": 4, "code_window": [ "# we're explicitly compiling for 'linux/amd64'\n", "ARG VERSION=1.13.10\n", "\n", "FROM golang:${VERSION} AS builder\n", "\n", "ARG CGO_ENABLED=0\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "ARG VERSION=1.14.7\n" ], "file_path": "scripts/docker/Dockerfile", "type": "replace", "edit_start_line_idx": 3 }
package pgx import ( "database/sql/driver" "math" "reflect" "time" "github.com/jackc/pgx/pgio" "github.com/jackc/pgx/pgtype" ) const ( copyData = 'd' copyFail = 'f' copyDone = 'c' varHeaderSize = 4 ) type FieldDescription struct { Name string Table pgtype.OID AttributeNumber uint16 DataType pgtype.OID DataTypeSize int16 DataTypeName string Modifier int32 FormatCode int16 } func (fd FieldDescription) Length() (int64, bool) { switch fd.DataType { case pgtype.TextOID, pgtype.ByteaOID: return math.MaxInt64, true case pgtype.VarcharOID, pgtype.BPCharArrayOID: return int64(fd.Modifier - varHeaderSize), true default: return 0, false } } func (fd FieldDescription) PrecisionScale() (precision, scale int64, ok bool) { switch fd.DataType { case pgtype.NumericOID: mod := fd.Modifier - varHeaderSize precision = int64((mod >> 16) & 0xffff) scale = int64(mod & 0xffff) return precision, scale, true default: return 0, 0, false } } func (fd FieldDescription) Type() reflect.Type { switch fd.DataType { case pgtype.Float8OID: return reflect.TypeOf(float64(0)) case pgtype.Float4OID: return reflect.TypeOf(float32(0)) case pgtype.Int8OID: return reflect.TypeOf(int64(0)) case pgtype.Int4OID: return reflect.TypeOf(int32(0)) case pgtype.Int2OID: return reflect.TypeOf(int16(0)) case pgtype.VarcharOID, pgtype.BPCharArrayOID, pgtype.TextOID: return reflect.TypeOf("") case pgtype.BoolOID: return reflect.TypeOf(false) case pgtype.NumericOID: return reflect.TypeOf(float64(0)) case pgtype.DateOID, pgtype.TimestampOID, pgtype.TimestamptzOID: return reflect.TypeOf(time.Time{}) case pgtype.ByteaOID: return reflect.TypeOf([]byte(nil)) default: return reflect.TypeOf(new(interface{})).Elem() } } // PgError represents an error reported by the PostgreSQL server. See // http://www.postgresql.org/docs/9.3/static/protocol-error-fields.html for // detailed field description. type PgError struct { Severity string Code string Message string Detail string Hint string Position int32 InternalPosition int32 InternalQuery string Where string SchemaName string TableName string ColumnName string DataTypeName string ConstraintName string File string Line int32 Routine string } func (pe PgError) Error() string { return pe.Severity + ": " + pe.Message + " (SQLSTATE " + pe.Code + ")" } // Notice represents a notice response message reported by the PostgreSQL // server. Be aware that this is distinct from LISTEN/NOTIFY notification. type Notice PgError // appendParse appends a PostgreSQL wire protocol parse message to buf and returns it. func appendParse(buf []byte, name string, query string, parameterOIDs []pgtype.OID) []byte { buf = append(buf, 'P') sp := len(buf) buf = pgio.AppendInt32(buf, -1) buf = append(buf, name...) buf = append(buf, 0) buf = append(buf, query...) buf = append(buf, 0) buf = pgio.AppendInt16(buf, int16(len(parameterOIDs))) for _, oid := range parameterOIDs { buf = pgio.AppendUint32(buf, uint32(oid)) } pgio.SetInt32(buf[sp:], int32(len(buf[sp:]))) return buf } // appendDescribe appends a PostgreSQL wire protocol describe message to buf and returns it. func appendDescribe(buf []byte, objectType byte, name string) []byte { buf = append(buf, 'D') sp := len(buf) buf = pgio.AppendInt32(buf, -1) buf = append(buf, objectType) buf = append(buf, name...) buf = append(buf, 0) pgio.SetInt32(buf[sp:], int32(len(buf[sp:]))) return buf } // appendSync appends a PostgreSQL wire protocol sync message to buf and returns it. 
func appendSync(buf []byte) []byte { buf = append(buf, 'S') buf = pgio.AppendInt32(buf, 4) return buf } // appendBind appends a PostgreSQL wire protocol bind message to buf and returns it. func appendBind( buf []byte, destinationPortal, preparedStatement string, connInfo *pgtype.ConnInfo, parameterOIDs []pgtype.OID, arguments []interface{}, resultFormatCodes []int16, ) ([]byte, error) { buf = append(buf, 'B') sp := len(buf) buf = pgio.AppendInt32(buf, -1) buf = append(buf, destinationPortal...) buf = append(buf, 0) buf = append(buf, preparedStatement...) buf = append(buf, 0) var err error arguments, err = convertDriverValuers(arguments) if err != nil { return nil, err } buf = pgio.AppendInt16(buf, int16(len(parameterOIDs))) for i, oid := range parameterOIDs { buf = pgio.AppendInt16(buf, chooseParameterFormatCode(connInfo, oid, arguments[i])) } buf = pgio.AppendInt16(buf, int16(len(arguments))) for i, oid := range parameterOIDs { var err error buf, err = encodePreparedStatementArgument(connInfo, buf, oid, arguments[i]) if err != nil { return nil, err } } buf = pgio.AppendInt16(buf, int16(len(resultFormatCodes))) for _, fc := range resultFormatCodes { buf = pgio.AppendInt16(buf, fc) } pgio.SetInt32(buf[sp:], int32(len(buf[sp:]))) return buf, nil } func convertDriverValuers(args []interface{}) ([]interface{}, error) { for i, arg := range args { switch arg := arg.(type) { case pgtype.BinaryEncoder: case pgtype.TextEncoder: case driver.Valuer: v, err := callValuerValue(arg) if err != nil { return nil, err } args[i] = v } } return args, nil } // appendExecute appends a PostgreSQL wire protocol execute message to buf and returns it. func appendExecute(buf []byte, portal string, maxRows uint32) []byte { buf = append(buf, 'E') sp := len(buf) buf = pgio.AppendInt32(buf, -1) buf = append(buf, portal...) buf = append(buf, 0) buf = pgio.AppendUint32(buf, maxRows) pgio.SetInt32(buf[sp:], int32(len(buf[sp:]))) return buf } // appendQuery appends a PostgreSQL wire protocol query message to buf and returns it. func appendQuery(buf []byte, query string) []byte { buf = append(buf, 'Q') sp := len(buf) buf = pgio.AppendInt32(buf, -1) buf = append(buf, query...) buf = append(buf, 0) pgio.SetInt32(buf[sp:], int32(len(buf[sp:]))) return buf }
vendor/github.com/jackc/pgx/messages.go
0
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.00028674936038441956, 0.00017546927847433835, 0.0001643908180994913, 0.00016934872837737203, 0.000023662329112994485 ]
{ "id": 4, "code_window": [ "# we're explicitly compiling for 'linux/amd64'\n", "ARG VERSION=1.13.10\n", "\n", "FROM golang:${VERSION} AS builder\n", "\n", "ARG CGO_ENABLED=0\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "ARG VERSION=1.14.7\n" ], "file_path": "scripts/docker/Dockerfile", "type": "replace", "edit_start_line_idx": 3 }
// +build !windows

package metrics

import (
	"syscall"
)

const (
	// DefaultSignal is used with DefaultInmemSignal
	DefaultSignal = syscall.SIGUSR1
)
vendor/github.com/armon/go-metrics/const_unix.go
0
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.00029081778484396636, 0.00026351091219112277, 0.00023620403953827918, 0.00026351091219112277, 0.000027306872652843595 ]
{ "id": 4, "code_window": [ "# we're explicitly compiling for 'linux/amd64'\n", "ARG VERSION=1.13.10\n", "\n", "FROM golang:${VERSION} AS builder\n", "\n", "ARG CGO_ENABLED=0\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "ARG VERSION=1.14.7\n" ], "file_path": "scripts/docker/Dockerfile", "type": "replace", "edit_start_line_idx": 3 }
import Controller from '@ember/controller';

export default Controller.extend({
  queryParams: ['filter'],
  filter: '',
});
ui/lib/open-api-explorer/addon/controllers/index.js
0
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.00017240380111616105, 0.00017240380111616105, 0.00017240380111616105, 0.00017240380111616105, 0 ]
{ "id": 5, "code_window": [ "# architecture binaries. Since this binary is used in an alpine container,\n", "# we're explicitly compiling for 'linux/amd64'\n", "FROM debian:buster AS builder\n", "\n", "ARG VERSION=1.13.10\n", "ARG CGO_ENABLED=0\n", "ARG BUILD_TAGS\n", "ENV JOBS=2\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "ARG VERSION=1.14.7\n" ], "file_path": "scripts/docker/Dockerfile.ui", "type": "replace", "edit_start_line_idx": 5 }
# Multi-stage builder to avoid polluting users environment with wrong
# architecture binaries. Since this binary is used in an alpine container,
# we're explicitly compiling for 'linux/amd64'
ARG VERSION=1.13.10

FROM golang:${VERSION} AS builder

ARG CGO_ENABLED=0
ARG BUILD_TAGS

WORKDIR /go/src/github.com/hashicorp/vault
COPY . .

RUN make bootstrap \
    && CGO_ENABLED=$CGO_ENABLED BUILD_TAGS='$BUILD_TAGS' VAULT_DEV_BUILD=1 XC_OSARCH='linux/amd64' sh -c "'./scripts/build.sh'"

# Docker Image

FROM alpine:3.10

# Create a vault user and group first so the IDs get set the same way,
# even as the rest of this may change over time.
RUN addgroup vault && \
    adduser -S -G vault vault

# Set up certificates, our base tools, and Vault.
RUN set -eux; \
    apk add --no-cache ca-certificates libcap su-exec dumb-init tzdata
COPY --from=builder /go/bin/vault /bin/vault

# /vault/logs is made available to use as a location to store audit logs, if
# desired; /vault/file is made available to use as a location with the file
# storage backend, if desired; the server will be started with /vault/config as
# the configuration directory so you can add additional config files in that
# location.
RUN mkdir -p /vault/logs && \
    mkdir -p /vault/file && \
    mkdir -p /vault/config && \
    chown -R vault:vault /vault

# Expose the logs directory as a volume since there's potentially long-running
# state in there
VOLUME /vault/logs

# Expose the file directory as a volume since there's potentially long-running
# state in there
VOLUME /vault/file

# 8200/tcp is the primary interface that applications use to interact with
# Vault.
EXPOSE 8200

# The entry point script uses dumb-init as the top-level process to reap any
# zombie processes created by Vault sub-processes.
#
# For production derivatives of this container, you should add the IPC_LOCK
# capability so that Vault can mlock memory.
COPY ./scripts/docker/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
ENTRYPOINT ["docker-entrypoint.sh"]

# By default you'll get a single-node development server that stores everything
# in RAM and bootstraps itself. Don't use this configuration for production.
CMD ["server", "-dev"]
scripts/docker/Dockerfile
1
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.9742634296417236, 0.2547973692417145, 0.00016386968491133302, 0.0002508019970264286, 0.40473830699920654 ]
{ "id": 5, "code_window": [ "# architecture binaries. Since this binary is used in an alpine container,\n", "# we're explicitly compiling for 'linux/amd64'\n", "FROM debian:buster AS builder\n", "\n", "ARG VERSION=1.13.10\n", "ARG CGO_ENABLED=0\n", "ARG BUILD_TAGS\n", "ENV JOBS=2\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "ARG VERSION=1.14.7\n" ], "file_path": "scripts/docker/Dockerfile.ui", "type": "replace", "edit_start_line_idx": 5 }
/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "context" "fmt" "io" "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" ) // v2PickerWrapper wraps a balancer.Picker while providing the // balancer.V2Picker API. It requires a pickerWrapper to generate errors // including the latest connectionError. To be deleted when balancer.Picker is // updated to the balancer.V2Picker API. type v2PickerWrapper struct { picker balancer.Picker connErr *connErr } func (v *v2PickerWrapper) Pick(info balancer.PickInfo) (balancer.PickResult, error) { sc, done, err := v.picker.Pick(info.Ctx, info) if err != nil { if err == balancer.ErrTransientFailure { return balancer.PickResult{}, balancer.TransientFailureError(fmt.Errorf("%v, latest connection error: %v", err, v.connErr.connectionError())) } return balancer.PickResult{}, err } return balancer.PickResult{SubConn: sc, Done: done}, nil } // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. type pickerWrapper struct { mu sync.Mutex done bool blockingCh chan struct{} picker balancer.V2Picker // The latest connection error. TODO: remove when V1 picker is deprecated; // balancer should be responsible for providing the error. *connErr } type connErr struct { mu sync.Mutex err error } func (c *connErr) updateConnectionError(err error) { c.mu.Lock() c.err = err c.mu.Unlock() } func (c *connErr) connectionError() error { c.mu.Lock() err := c.err c.mu.Unlock() return err } func newPickerWrapper() *pickerWrapper { return &pickerWrapper{blockingCh: make(chan struct{}), connErr: &connErr{}} } // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.updatePickerV2(&v2PickerWrapper{picker: p, connErr: pw.connErr}) } // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (pw *pickerWrapper) updatePickerV2(p balancer.V2Picker) { pw.mu.Lock() if pw.done { pw.mu.Unlock() return } pw.picker = p // pw.blockingCh should never be nil. close(pw.blockingCh) pw.blockingCh = make(chan struct{}) pw.mu.Unlock() } func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { acw.mu.Lock() ac := acw.ac acw.mu.Unlock() ac.incrCallsStarted() return func(b balancer.DoneInfo) { if b.Err != nil && b.Err != io.EOF { ac.incrCallsFailed() } else { ac.incrCallsSucceeded() } if done != nil { done(b) } } } // pick returns the transport that will be used for the RPC. // It may block in the following cases: // - there's no picker // - the current picker returns ErrNoSubConnAvailable // - the current picker returns other errors and failfast is false. 
// - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) { var ch chan struct{} var lastPickErr error for { pw.mu.Lock() if pw.done { pw.mu.Unlock() return nil, nil, ErrClientConnClosing } if pw.picker == nil { ch = pw.blockingCh } if ch == pw.blockingCh { // This could happen when either: // - pw.picker is nil (the previous if condition), or // - has called pick on the current picker. pw.mu.Unlock() select { case <-ctx.Done(): var errStr string if lastPickErr != nil { errStr = "latest balancer error: " + lastPickErr.Error() } else if connectionErr := pw.connectionError(); connectionErr != nil { errStr = "latest connection error: " + connectionErr.Error() } else { errStr = ctx.Err().Error() } switch ctx.Err() { case context.DeadlineExceeded: return nil, nil, status.Error(codes.DeadlineExceeded, errStr) case context.Canceled: return nil, nil, status.Error(codes.Canceled, errStr) } case <-ch: } continue } ch = pw.blockingCh p := pw.picker pw.mu.Unlock() pickResult, err := p.Pick(info) if err != nil { if err == balancer.ErrNoSubConnAvailable { continue } if tfe, ok := err.(interface{ IsTransientFailure() bool }); ok && tfe.IsTransientFailure() { if !failfast { lastPickErr = err continue } return nil, nil, status.Error(codes.Unavailable, err.Error()) } if _, ok := status.FromError(err); ok { return nil, nil, err } // err is some other error. return nil, nil, status.Error(codes.Unknown, err.Error()) } acw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { grpclog.Error("subconn returned from pick is not *acBalancerWrapper") continue } if t, ok := acw.getAddrConn().getReadyTransport(); ok { if channelz.IsOn() { return t, doneChannelzWrapper(acw, pickResult.Done), nil } return t, pickResult.Done, nil } if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. // DoneInfo with default value works. pickResult.Done(balancer.DoneInfo{}) } grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") // If ok == false, ac.state is not READY. // A valid picker always returns READY subConn. This means the state of ac // just changed, and picker will be updated shortly. // continue back to the beginning of the for loop to repick. } } func (pw *pickerWrapper) close() { pw.mu.Lock() defer pw.mu.Unlock() if pw.done { return } pw.done = true close(pw.blockingCh) }
vendor/google.golang.org/grpc/picker_wrapper.go
0
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.00017404409300070256, 0.00016936264000833035, 0.00016293639782816172, 0.00016923622752074152, 0.0000024668672722327756 ]
{ "id": 5, "code_window": [ "# architecture binaries. Since this binary is used in an alpine container,\n", "# we're explicitly compiling for 'linux/amd64'\n", "FROM debian:buster AS builder\n", "\n", "ARG VERSION=1.13.10\n", "ARG CGO_ENABLED=0\n", "ARG BUILD_TAGS\n", "ENV JOBS=2\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "ARG VERSION=1.14.7\n" ], "file_path": "scripts/docker/Dockerfile.ui", "type": "replace", "edit_start_line_idx": 5 }
// +build go1.8

package ir

import "go/types"

var structTypesIdentical = types.IdenticalIgnoreTags
vendor/honnef.co/go/tools/ir/identical.go
0
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.0006547889788635075, 0.0006547889788635075, 0.0006547889788635075, 0.0006547889788635075, 0 ]
{ "id": 5, "code_window": [ "# architecture binaries. Since this binary is used in an alpine container,\n", "# we're explicitly compiling for 'linux/amd64'\n", "FROM debian:buster AS builder\n", "\n", "ARG VERSION=1.13.10\n", "ARG CGO_ENABLED=0\n", "ARG BUILD_TAGS\n", "ENV JOBS=2\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "ARG VERSION=1.14.7\n" ], "file_path": "scripts/docker/Dockerfile.ui", "type": "replace", "edit_start_line_idx": 5 }
package reflectwalk

//go:generate stringer -type=Location location.go

type Location uint

const (
	None Location = iota
	Map
	MapKey
	MapValue
	Slice
	SliceElem
	Array
	ArrayElem
	Struct
	StructField
	WalkLoc
)
vendor/github.com/mitchellh/reflectwalk/location.go
0
https://github.com/hashicorp/vault/commit/9204c47af7b04eadff6677db70909663b55ed56b
[ 0.0001712278462946415, 0.00016978662461042404, 0.00016834538837429136, 0.00016978662461042404, 0.0000014412289601750672 ]
{ "id": 0, "code_window": [ "\tttl := c.Int(\"ttl\")\n", "\n", "\tctx, cancel := contextWithTotalTimeout(c)\n", "\t_, err := ki.Set(ctx, key, \"\", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: prevExist})\n", "\tcancel()\n", "\tif err != nil {\n", "\t\thandleError(ExitServerError, err)\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tresp, err := ki.Set(ctx, key, \"\", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: prevExist})\n" ], "file_path": "etcdctl/ctlv2/command/mkdir_command.go", "type": "replace", "edit_start_line_idx": 50 }
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package command

import (
	"errors"

	"github.com/coreos/etcd/client"
	"github.com/urfave/cli"
)

// NewRemoveCommand returns the CLI command for "rm".
func NewRemoveCommand() cli.Command {
	return cli.Command{
		Name:      "rm",
		Usage:     "remove a key or a directory",
		ArgsUsage: "<key>",
		Flags: []cli.Flag{
			cli.BoolFlag{Name: "dir", Usage: "removes the key if it is an empty directory or a key-value pair"},
			cli.BoolFlag{Name: "recursive, r", Usage: "removes the key and all child keys(if it is a directory)"},
			cli.StringFlag{Name: "with-value", Value: "", Usage: "previous value"},
			cli.IntFlag{Name: "with-index", Value: 0, Usage: "previous index"},
		},
		Action: func(c *cli.Context) error {
			rmCommandFunc(c, mustNewKeyAPI(c))
			return nil
		},
	}
}

// rmCommandFunc executes the "rm" command.
func rmCommandFunc(c *cli.Context, ki client.KeysAPI) {
	if len(c.Args()) == 0 {
		handleError(ExitBadArgs, errors.New("key required"))
	}
	key := c.Args()[0]
	recursive := c.Bool("recursive")
	dir := c.Bool("dir")
	prevValue := c.String("with-value")
	prevIndex := c.Int("with-index")

	ctx, cancel := contextWithTotalTimeout(c)
	resp, err := ki.Delete(ctx, key, &client.DeleteOptions{PrevIndex: uint64(prevIndex), PrevValue: prevValue, Dir: dir, Recursive: recursive})
	cancel()
	if err != nil {
		handleError(ExitServerError, err)
	}

	if !resp.Node.Dir {
		printResponseKey(resp, c.GlobalString("output"))
	}
}
etcdctl/ctlv2/command/rm_command.go
1
https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2
[ 0.1594787836074829, 0.023169727995991707, 0.00016525220416951925, 0.00017302151536569, 0.0556509755551815 ]
{ "id": 0, "code_window": [ "\tttl := c.Int(\"ttl\")\n", "\n", "\tctx, cancel := contextWithTotalTimeout(c)\n", "\t_, err := ki.Set(ctx, key, \"\", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: prevExist})\n", "\tcancel()\n", "\tif err != nil {\n", "\t\thandleError(ExitServerError, err)\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tresp, err := ki.Set(ctx, key, \"\", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: prevExist})\n" ], "file_path": "etcdctl/ctlv2/command/mkdir_command.go", "type": "replace", "edit_start_line_idx": 50 }
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
cmd/vendor/github.com/cockroachdb/cmux/LICENSE
0
https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2
[ 0.00017922763072419912, 0.00017619652498979121, 0.00017028787988238037, 0.00017652736278250813, 0.0000017601003037270857 ]
{ "id": 0, "code_window": [ "\tttl := c.Int(\"ttl\")\n", "\n", "\tctx, cancel := contextWithTotalTimeout(c)\n", "\t_, err := ki.Set(ctx, key, \"\", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: prevExist})\n", "\tcancel()\n", "\tif err != nil {\n", "\t\thandleError(ExitServerError, err)\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tresp, err := ki.Set(ctx, key, \"\", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: prevExist})\n" ], "file_path": "etcdctl/ctlv2/command/mkdir_command.go", "type": "replace", "edit_start_line_idx": 50 }
package pflag import ( "fmt" "strconv" ) // -- float32 Value type float32Value float32 func newFloat32Value(val float32, p *float32) *float32Value { *p = val return (*float32Value)(p) } func (f *float32Value) Set(s string) error { v, err := strconv.ParseFloat(s, 32) *f = float32Value(v) return err } func (f *float32Value) Type() string { return "float32" } func (f *float32Value) String() string { return fmt.Sprintf("%v", *f) } func float32Conv(sval string) (interface{}, error) { v, err := strconv.ParseFloat(sval, 32) if err != nil { return 0, err } return float32(v), nil } // GetFloat32 return the float32 value of a flag with the given name func (f *FlagSet) GetFloat32(name string) (float32, error) { val, err := f.getFlagType(name, "float32", float32Conv) if err != nil { return 0, err } return val.(float32), nil } // Float32Var defines a float32 flag with specified name, default value, and usage string. // The argument p points to a float32 variable in which to store the value of the flag. func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) { f.VarP(newFloat32Value(value, p), name, "", usage) } // Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) { f.VarP(newFloat32Value(value, p), name, shorthand, usage) } // Float32Var defines a float32 flag with specified name, default value, and usage string. // The argument p points to a float32 variable in which to store the value of the flag. func Float32Var(p *float32, name string, value float32, usage string) { CommandLine.VarP(newFloat32Value(value, p), name, "", usage) } // Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. func Float32VarP(p *float32, name, shorthand string, value float32, usage string) { CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage) } // Float32 defines a float32 flag with specified name, default value, and usage string. // The return value is the address of a float32 variable that stores the value of the flag. func (f *FlagSet) Float32(name string, value float32, usage string) *float32 { p := new(float32) f.Float32VarP(p, name, "", value, usage) return p } // Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 { p := new(float32) f.Float32VarP(p, name, shorthand, value, usage) return p } // Float32 defines a float32 flag with specified name, default value, and usage string. // The return value is the address of a float32 variable that stores the value of the flag. func Float32(name string, value float32, usage string) *float32 { return CommandLine.Float32P(name, "", value, usage) } // Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. func Float32P(name, shorthand string, value float32, usage string) *float32 { return CommandLine.Float32P(name, shorthand, value, usage) }
cmd/vendor/github.com/spf13/pflag/float32.go
0
https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2
[ 0.0009559377213008702, 0.00034015762503258884, 0.0001642186543904245, 0.00017697903967928141, 0.0002643519837874919 ]
{ "id": 0, "code_window": [ "\tttl := c.Int(\"ttl\")\n", "\n", "\tctx, cancel := contextWithTotalTimeout(c)\n", "\t_, err := ki.Set(ctx, key, \"\", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: prevExist})\n", "\tcancel()\n", "\tif err != nil {\n", "\t\thandleError(ExitServerError, err)\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tresp, err := ki.Set(ctx, key, \"\", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: prevExist})\n" ], "file_path": "etcdctl/ctlv2/command/mkdir_command.go", "type": "replace", "edit_start_line_idx": 50 }
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo

#include "textflag.h"

//
// System call support for 386, NetBSD
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT	·Syscall(SB),NOSPLIT,$0-28
	JMP	syscall·Syscall(SB)

TEXT	·Syscall6(SB),NOSPLIT,$0-40
	JMP	syscall·Syscall6(SB)

TEXT	·Syscall9(SB),NOSPLIT,$0-52
	JMP	syscall·Syscall9(SB)

TEXT	·RawSyscall(SB),NOSPLIT,$0-28
	JMP	syscall·RawSyscall(SB)

TEXT	·RawSyscall6(SB),NOSPLIT,$0-40
	JMP	syscall·RawSyscall6(SB)
cmd/vendor/golang.org/x/sys/unix/asm_netbsd_386.s
0
https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2
[ 0.00017933275375980884, 0.0001766548230079934, 0.00017482783005107194, 0.00017580388521309942, 0.000001935054797286284 ]
{ "id": 1, "code_window": [ "\tcancel()\n", "\tif err != nil {\n", "\t\thandleError(ExitServerError, err)\n", "\t}\n", "}" ], "labels": [ "keep", "keep", "keep", "add", "keep" ], "after_edit": [ "\tif c.GlobalString(\"output\") != \"simple\" {\n", "\t\tprintResponseKey(resp, c.GlobalString(\"output\"))\n", "\t}\n" ], "file_path": "etcdctl/ctlv2/command/mkdir_command.go", "type": "add", "edit_start_line_idx": 55 }
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package command

import (
	"errors"
	"time"

	"github.com/coreos/etcd/client"
	"github.com/urfave/cli"
)

// NewUpdateDirCommand returns the CLI command for "updatedir".
func NewUpdateDirCommand() cli.Command {
	return cli.Command{
		Name:      "updatedir",
		Usage:     "update an existing directory",
		ArgsUsage: "<key> <value>",
		Flags: []cli.Flag{
			cli.IntFlag{Name: "ttl", Value: 0, Usage: "key time-to-live"},
		},
		Action: func(c *cli.Context) error {
			updatedirCommandFunc(c, mustNewKeyAPI(c))
			return nil
		},
	}
}

// updatedirCommandFunc executes the "updatedir" command.
func updatedirCommandFunc(c *cli.Context, ki client.KeysAPI) {
	if len(c.Args()) == 0 {
		handleError(ExitBadArgs, errors.New("key required"))
	}
	key := c.Args()[0]
	ttl := c.Int("ttl")

	ctx, cancel := contextWithTotalTimeout(c)
	_, err := ki.Set(ctx, key, "", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: client.PrevExist})
	cancel()
	if err != nil {
		handleError(ExitServerError, err)
	}
}
etcdctl/ctlv2/command/update_dir_command.go
1
https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2
[ 0.14485082030296326, 0.025103451684117317, 0.00016767624765634537, 0.00017758709145709872, 0.053582608699798584 ]
{ "id": 1, "code_window": [ "\tcancel()\n", "\tif err != nil {\n", "\t\thandleError(ExitServerError, err)\n", "\t}\n", "}" ], "labels": [ "keep", "keep", "keep", "add", "keep" ], "after_edit": [ "\tif c.GlobalString(\"output\") != \"simple\" {\n", "\t\tprintResponseKey(resp, c.GlobalString(\"output\"))\n", "\t}\n" ], "file_path": "etcdctl/ctlv2/command/mkdir_command.go", "type": "add", "edit_start_line_idx": 55 }
// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package transport import ( "net" "testing" "time" ) // TestNewTimeoutListener tests that NewTimeoutListener returns a // rwTimeoutListener struct with timeouts set. func TestNewTimeoutListener(t *testing.T) { l, err := NewTimeoutListener("127.0.0.1:0", "http", nil, time.Hour, time.Hour) if err != nil { t.Fatalf("unexpected NewTimeoutListener error: %v", err) } defer l.Close() tln := l.(*rwTimeoutListener) if tln.rdtimeoutd != time.Hour { t.Errorf("read timeout = %s, want %s", tln.rdtimeoutd, time.Hour) } if tln.wtimeoutd != time.Hour { t.Errorf("write timeout = %s, want %s", tln.wtimeoutd, time.Hour) } } func TestWriteReadTimeoutListener(t *testing.T) { ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatalf("unexpected listen error: %v", err) } wln := rwTimeoutListener{ Listener: ln, wtimeoutd: 10 * time.Millisecond, rdtimeoutd: 10 * time.Millisecond, } stop := make(chan struct{}) blocker := func() { conn, derr := net.Dial("tcp", ln.Addr().String()) if derr != nil { t.Fatalf("unexpected dail error: %v", derr) } defer conn.Close() // block the receiver until the writer timeout <-stop } go blocker() conn, err := wln.Accept() if err != nil { t.Fatalf("unexpected accept error: %v", err) } defer conn.Close() // fill the socket buffer data := make([]byte, 5*1024*1024) done := make(chan struct{}) go func() { _, err = conn.Write(data) done <- struct{}{} }() select { case <-done: // It waits 1s more to avoid delay in low-end system. case <-time.After(wln.wtimeoutd*10 + time.Second): t.Fatal("wait timeout") } if operr, ok := err.(*net.OpError); !ok || operr.Op != "write" || !operr.Timeout() { t.Errorf("err = %v, want write i/o timeout error", err) } stop <- struct{}{} go blocker() conn, err = wln.Accept() if err != nil { t.Fatalf("unexpected accept error: %v", err) } buf := make([]byte, 10) go func() { _, err = conn.Read(buf) done <- struct{}{} }() select { case <-done: case <-time.After(wln.rdtimeoutd * 10): t.Fatal("wait timeout") } if operr, ok := err.(*net.OpError); !ok || operr.Op != "read" || !operr.Timeout() { t.Errorf("err = %v, want write i/o timeout error", err) } stop <- struct{}{} }
pkg/transport/timeout_listener_test.go
0
https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2
[ 0.0006218733033165336, 0.00023942551342770457, 0.00016776179836597294, 0.00017722186748869717, 0.00012786878505721688 ]
{ "id": 1, "code_window": [ "\tcancel()\n", "\tif err != nil {\n", "\t\thandleError(ExitServerError, err)\n", "\t}\n", "}" ], "labels": [ "keep", "keep", "keep", "add", "keep" ], "after_edit": [ "\tif c.GlobalString(\"output\") != \"simple\" {\n", "\t\tprintResponseKey(resp, c.GlobalString(\"output\"))\n", "\t}\n" ], "file_path": "etcdctl/ctlv2/command/mkdir_command.go", "type": "add", "edit_start_line_idx": 55 }
// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package fileutil import ( "os" "path" "sort" "strings" "time" ) func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error { return purgeFile(dirname, suffix, max, interval, stop, nil) } // purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil. func purgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string) <-chan error { errC := make(chan error, 1) go func() { for { fnames, err := ReadDir(dirname) if err != nil { errC <- err return } newfnames := make([]string, 0) for _, fname := range fnames { if strings.HasSuffix(fname, suffix) { newfnames = append(newfnames, fname) } } sort.Strings(newfnames) fnames = newfnames for len(newfnames) > int(max) { f := path.Join(dirname, newfnames[0]) l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode) if err != nil { break } if err = os.Remove(f); err != nil { errC <- err return } if err = l.Close(); err != nil { plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err) errC <- err return } plog.Infof("purged file %s successfully", f) newfnames = newfnames[1:] } if purgec != nil { for i := 0; i < len(fnames)-len(newfnames); i++ { purgec <- fnames[i] } } select { case <-time.After(interval): case <-stop: return } } }() return errC }
pkg/fileutil/purge.go
0
https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2
[ 0.0004153094778303057, 0.0002052770578302443, 0.00016556844639126211, 0.00017741411284077913, 0.00007966475823195651 ]
{ "id": 1, "code_window": [ "\tcancel()\n", "\tif err != nil {\n", "\t\thandleError(ExitServerError, err)\n", "\t}\n", "}" ], "labels": [ "keep", "keep", "keep", "add", "keep" ], "after_edit": [ "\tif c.GlobalString(\"output\") != \"simple\" {\n", "\t\tprintResponseKey(resp, c.GlobalString(\"output\"))\n", "\t}\n" ], "file_path": "etcdctl/ctlv2/command/mkdir_command.go", "type": "add", "edit_start_line_idx": 55 }
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
	"testing"
	"time"
)

func TestGetTimeseries(t *testing.T) {
	sp := newSecondPoints()
	now := time.Now()
	sp.Add(now, time.Second)
	sp.Add(now.Add(5*time.Second), time.Second)
	n := sp.getTimeSeries().Len()
	if n < 3 {
		t.Fatalf("expected at 6 points of time series, got %s", sp.getTimeSeries())
	}
}
tools/benchmark/cmd/timeseries_test.go
0
https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2
[ 0.0001785976201063022, 0.00017589793424122036, 0.00016973898163996637, 0.00017762754578143358, 0.000003577966708689928 ]
{ "id": 2, "code_window": [ "\tresp, err := ki.Delete(ctx, key, &client.DeleteOptions{PrevIndex: uint64(prevIndex), PrevValue: prevValue, Dir: dir, Recursive: recursive})\n", "\tcancel()\n", "\tif err != nil {\n", "\t\thandleError(ExitServerError, err)\n", "\t}\n", "\n", "\tif !resp.Node.Dir {\n", "\t\tprintResponseKey(resp, c.GlobalString(\"output\"))\n", "\t}\n", "}" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tif !resp.Node.Dir || c.GlobalString(\"output\") != \"simple\" {\n" ], "file_path": "etcdctl/ctlv2/command/rm_command.go", "type": "replace", "edit_start_line_idx": 59 }
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package command

import (
	"errors"
	"time"

	"github.com/coreos/etcd/client"
	"github.com/urfave/cli"
)

// NewUpdateDirCommand returns the CLI command for "updatedir".
func NewUpdateDirCommand() cli.Command {
	return cli.Command{
		Name:      "updatedir",
		Usage:     "update an existing directory",
		ArgsUsage: "<key> <value>",
		Flags: []cli.Flag{
			cli.IntFlag{Name: "ttl", Value: 0, Usage: "key time-to-live"},
		},
		Action: func(c *cli.Context) error {
			updatedirCommandFunc(c, mustNewKeyAPI(c))
			return nil
		},
	}
}

// updatedirCommandFunc executes the "updatedir" command.
func updatedirCommandFunc(c *cli.Context, ki client.KeysAPI) {
	if len(c.Args()) == 0 {
		handleError(ExitBadArgs, errors.New("key required"))
	}
	key := c.Args()[0]
	ttl := c.Int("ttl")

	ctx, cancel := contextWithTotalTimeout(c)
	_, err := ki.Set(ctx, key, "", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: client.PrevExist})
	cancel()
	if err != nil {
		handleError(ExitServerError, err)
	}
}
etcdctl/ctlv2/command/update_dir_command.go
1
https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2
[ 0.003008207306265831, 0.0006645587855018675, 0.00016729217895772308, 0.00017737253801897168, 0.0010488874977454543 ]
{ "id": 2, "code_window": [ "\tresp, err := ki.Delete(ctx, key, &client.DeleteOptions{PrevIndex: uint64(prevIndex), PrevValue: prevValue, Dir: dir, Recursive: recursive})\n", "\tcancel()\n", "\tif err != nil {\n", "\t\thandleError(ExitServerError, err)\n", "\t}\n", "\n", "\tif !resp.Node.Dir {\n", "\t\tprintResponseKey(resp, c.GlobalString(\"output\"))\n", "\t}\n", "}" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tif !resp.Node.Dir || c.GlobalString(\"output\") != \"simple\" {\n" ], "file_path": "etcdctl/ctlv2/command/rm_command.go", "type": "replace", "edit_start_line_idx": 59 }
// Extensions for Protocol Buffers to create more go like structures. // // Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. // http://github.com/gogo/protobuf/gogoproto // // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto // Functions for writing the text protocol buffer format. import ( "bufio" "bytes" "encoding" "errors" "fmt" "io" "log" "math" "reflect" "sort" "strings" ) var ( newline = []byte("\n") spaces = []byte(" ") gtNewline = []byte(">\n") endBraceNewline = []byte("}\n") backslashN = []byte{'\\', 'n'} backslashR = []byte{'\\', 'r'} backslashT = []byte{'\\', 't'} backslashDQ = []byte{'\\', '"'} backslashBS = []byte{'\\', '\\'} posInf = []byte("inf") negInf = []byte("-inf") nan = []byte("nan") ) type writer interface { io.Writer WriteByte(byte) error } // textWriter is an io.Writer that tracks its indentation level. type textWriter struct { ind int complete bool // if the current position is a complete line compact bool // whether to write out as a one-liner w writer } func (w *textWriter) WriteString(s string) (n int, err error) { if !strings.Contains(s, "\n") { if !w.compact && w.complete { w.writeIndent() } w.complete = false return io.WriteString(w.w, s) } // WriteString is typically called without newlines, so this // codepath and its copy are rare. We copy to avoid // duplicating all of Write's logic here. 
return w.Write([]byte(s)) } func (w *textWriter) Write(p []byte) (n int, err error) { newlines := bytes.Count(p, newline) if newlines == 0 { if !w.compact && w.complete { w.writeIndent() } n, err = w.w.Write(p) w.complete = false return n, err } frags := bytes.SplitN(p, newline, newlines+1) if w.compact { for i, frag := range frags { if i > 0 { if err := w.w.WriteByte(' '); err != nil { return n, err } n++ } nn, err := w.w.Write(frag) n += nn if err != nil { return n, err } } return n, nil } for i, frag := range frags { if w.complete { w.writeIndent() } nn, err := w.w.Write(frag) n += nn if err != nil { return n, err } if i+1 < len(frags) { if err := w.w.WriteByte('\n'); err != nil { return n, err } n++ } } w.complete = len(frags[len(frags)-1]) == 0 return n, nil } func (w *textWriter) WriteByte(c byte) error { if w.compact && c == '\n' { c = ' ' } if !w.compact && w.complete { w.writeIndent() } err := w.w.WriteByte(c) w.complete = c == '\n' return err } func (w *textWriter) indent() { w.ind++ } func (w *textWriter) unindent() { if w.ind == 0 { log.Printf("proto: textWriter unindented too far") return } w.ind-- } func writeName(w *textWriter, props *Properties) error { if _, err := w.WriteString(props.OrigName); err != nil { return err } if props.Wire != "group" { return w.WriteByte(':') } return nil } // raw is the interface satisfied by RawMessage. type raw interface { Bytes() []byte } func writeStruct(w *textWriter, sv reflect.Value) error { st := sv.Type() sprops := GetProperties(st) for i := 0; i < sv.NumField(); i++ { fv := sv.Field(i) props := sprops.Prop[i] name := st.Field(i).Name if strings.HasPrefix(name, "XXX_") { // There are two XXX_ fields: // XXX_unrecognized []byte // XXX_extensions map[int32]proto.Extension // The first is handled here; // the second is handled at the bottom of this function. if name == "XXX_unrecognized" && !fv.IsNil() { if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { return err } } continue } if fv.Kind() == reflect.Ptr && fv.IsNil() { // Field not filled in. This could be an optional field or // a required field that wasn't filled in. Either way, there // isn't anything we can show for it. continue } if fv.Kind() == reflect.Slice && fv.IsNil() { // Repeated field that is empty, or a bytes field that is unused. continue } if props.Repeated && fv.Kind() == reflect.Slice { // Repeated field. for j := 0; j < fv.Len(); j++ { if err := writeName(w, props); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } v := fv.Index(j) if v.Kind() == reflect.Ptr && v.IsNil() { // A nil message in a repeated field is not valid, // but we can handle that more gracefully than panicking. if _, err := w.Write([]byte("<nil>\n")); err != nil { return err } continue } if len(props.Enum) > 0 { if err := writeEnum(w, v, props); err != nil { return err } } else if err := writeAny(w, v, props); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } } continue } if fv.Kind() == reflect.Map { // Map fields are rendered as a repeated struct with key/value fields. 
keys := fv.MapKeys() sort.Sort(mapKeys(keys)) for _, key := range keys { val := fv.MapIndex(key) if err := writeName(w, props); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } // open struct if err := w.WriteByte('<'); err != nil { return err } if !w.compact { if err := w.WriteByte('\n'); err != nil { return err } } w.indent() // key if _, err := w.WriteString("key:"); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } if err := writeAny(w, key, props.mkeyprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } // nil values aren't legal, but we can avoid panicking because of them. if val.Kind() != reflect.Ptr || !val.IsNil() { // value if _, err := w.WriteString("value:"); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } if err := writeAny(w, val, props.mvalprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } } // close struct w.unindent() if err := w.WriteByte('>'); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } } continue } if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { // empty bytes field continue } if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { // proto3 non-repeated scalar field; skip if zero value if isProto3Zero(fv) { continue } } if fv.Kind() == reflect.Interface { // Check if it is a oneof. if st.Field(i).Tag.Get("protobuf_oneof") != "" { // fv is nil, or holds a pointer to generated struct. // That generated struct has exactly one field, // which has a protobuf struct tag. if fv.IsNil() { continue } inner := fv.Elem().Elem() // interface -> *T -> T tag := inner.Type().Field(0).Tag.Get("protobuf") props = new(Properties) // Overwrite the outer props var, but not its pointee. props.Parse(tag) // Write the value in the oneof, not the oneof itself. fv = inner.Field(0) // Special case to cope with malformed messages gracefully: // If the value in the oneof is a nil pointer, don't panic // in writeAny. if fv.Kind() == reflect.Ptr && fv.IsNil() { // Use errors.New so writeAny won't render quotes. msg := errors.New("/* nil */") fv = reflect.ValueOf(&msg).Elem() } } } if err := writeName(w, props); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } if b, ok := fv.Interface().(raw); ok { if err := writeRaw(w, b.Bytes()); err != nil { return err } continue } if len(props.Enum) > 0 { if err := writeEnum(w, fv, props); err != nil { return err } } else if err := writeAny(w, fv, props); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } } // Extensions (the XXX_extensions field). pv := sv if pv.CanAddr() { pv = sv.Addr() } else { pv = reflect.New(sv.Type()) pv.Elem().Set(sv) } if pv.Type().Implements(extendableProtoType) { if err := writeExtensions(w, pv); err != nil { return err } } return nil } // writeRaw writes an uninterpreted raw message. func writeRaw(w *textWriter, b []byte) error { if err := w.WriteByte('<'); err != nil { return err } if !w.compact { if err := w.WriteByte('\n'); err != nil { return err } } w.indent() if err := writeUnknownStruct(w, b); err != nil { return err } w.unindent() if err := w.WriteByte('>'); err != nil { return err } return nil } // writeAny writes an arbitrary field. 
func writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) if props != nil && len(props.CustomType) > 0 { custom, ok := v.Interface().(Marshaler) if ok { data, err := custom.Marshal() if err != nil { return err } if err := writeString(w, string(data)); err != nil { return err } return nil } } // Floats have special cases. if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { x := v.Float() var b []byte switch { case math.IsInf(x, 1): b = posInf case math.IsInf(x, -1): b = negInf case math.IsNaN(x): b = nan } if b != nil { _, err := w.Write(b) return err } // Other values are handled below. } // We don't attempt to serialise every possible value type; only those // that can occur in protocol buffers. switch v.Kind() { case reflect.Slice: // Should only be a []byte; repeated fields are handled in writeStruct. if err := writeString(w, string(v.Bytes())); err != nil { return err } case reflect.String: if err := writeString(w, v.String()); err != nil { return err } case reflect.Struct: // Required/optional group/message. var bra, ket byte = '<', '>' if props != nil && props.Wire == "group" { bra, ket = '{', '}' } if err := w.WriteByte(bra); err != nil { return err } if !w.compact { if err := w.WriteByte('\n'); err != nil { return err } } w.indent() if tm, ok := v.Interface().(encoding.TextMarshaler); ok { text, err := tm.MarshalText() if err != nil { return err } if _, err = w.Write(text); err != nil { return err } } else if err := writeStruct(w, v); err != nil { return err } w.unindent() if err := w.WriteByte(ket); err != nil { return err } default: _, err := fmt.Fprint(w, v.Interface()) return err } return nil } // equivalent to C's isprint. func isprint(c byte) bool { return c >= 0x20 && c < 0x7f } // writeString writes a string in the protocol buffer text format. // It is similar to strconv.Quote except we don't use Go escape sequences, // we treat the string as a byte sequence, and we use octal escapes. // These differences are to maintain interoperability with the other // languages' implementations of the text format. func writeString(w *textWriter, s string) error { // use WriteByte here to get any needed indent if err := w.WriteByte('"'); err != nil { return err } // Loop over the bytes, not the runes. for i := 0; i < len(s); i++ { var err error // Divergence from C++: we don't escape apostrophes. // There's no need to escape them, and the C++ parser // copes with a naked apostrophe. 
switch c := s[i]; c { case '\n': _, err = w.w.Write(backslashN) case '\r': _, err = w.w.Write(backslashR) case '\t': _, err = w.w.Write(backslashT) case '"': _, err = w.w.Write(backslashDQ) case '\\': _, err = w.w.Write(backslashBS) default: if isprint(c) { err = w.w.WriteByte(c) } else { _, err = fmt.Fprintf(w.w, "\\%03o", c) } } if err != nil { return err } } return w.WriteByte('"') } func writeUnknownStruct(w *textWriter, data []byte) (err error) { if !w.compact { if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { return err } } b := NewBuffer(data) for b.index < len(b.buf) { x, err := b.DecodeVarint() if err != nil { _, ferr := fmt.Fprintf(w, "/* %v */\n", err) return ferr } wire, tag := x&7, x>>3 if wire == WireEndGroup { w.unindent() if _, werr := w.Write(endBraceNewline); werr != nil { return werr } continue } if _, ferr := fmt.Fprint(w, tag); ferr != nil { return ferr } if wire != WireStartGroup { if err = w.WriteByte(':'); err != nil { return err } } if !w.compact || wire == WireStartGroup { if err = w.WriteByte(' '); err != nil { return err } } switch wire { case WireBytes: buf, e := b.DecodeRawBytes(false) if e == nil { _, err = fmt.Fprintf(w, "%q", buf) } else { _, err = fmt.Fprintf(w, "/* %v */", e) } case WireFixed32: x, err = b.DecodeFixed32() err = writeUnknownInt(w, x, err) case WireFixed64: x, err = b.DecodeFixed64() err = writeUnknownInt(w, x, err) case WireStartGroup: err = w.WriteByte('{') w.indent() case WireVarint: x, err = b.DecodeVarint() err = writeUnknownInt(w, x, err) default: _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) } if err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } } return nil } func writeUnknownInt(w *textWriter, x uint64, err error) error { if err == nil { _, err = fmt.Fprint(w, x) } else { _, err = fmt.Fprintf(w, "/* %v */", err) } return err } type int32Slice []int32 func (s int32Slice) Len() int { return len(s) } func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // writeExtensions writes all the extensions in pv. // pv is assumed to be a pointer to a protocol message struct that is extendable. func writeExtensions(w *textWriter, pv reflect.Value) error { emap := extensionMaps[pv.Type().Elem()] ep := pv.Interface().(extendableProto) // Order the extensions by ID. // This isn't strictly necessary, but it will give us // canonical output, which will also make testing easier. var m map[int32]Extension if em, ok := ep.(extensionsMap); ok { m = em.ExtensionMap() } else if em, ok := ep.(extensionsBytes); ok { eb := em.GetExtensions() var err error m, err = BytesToExtensionsMap(*eb) if err != nil { return err } } ids := make([]int32, 0, len(m)) for id := range m { ids = append(ids, id) } sort.Sort(int32Slice(ids)) for _, extNum := range ids { ext := m[extNum] var desc *ExtensionDesc if emap != nil { desc = emap[extNum] } if desc == nil { // Unknown extension. if err := writeUnknownStruct(w, ext.enc); err != nil { return err } continue } pb, err := GetExtension(ep, desc) if err != nil { return fmt.Errorf("failed getting extension: %v", err) } // Repeated extensions will appear as a slice. 
if !desc.repeated() { if err := writeExtension(w, desc.Name, pb); err != nil { return err } } else { v := reflect.ValueOf(pb) for i := 0; i < v.Len(); i++ { if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { return err } } } } return nil } func writeExtension(w *textWriter, name string, pb interface{}) error { if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } return nil } func (w *textWriter) writeIndent() { if !w.complete { return } remain := w.ind * 2 for remain > 0 { n := remain if n > len(spaces) { n = len(spaces) } w.w.Write(spaces[:n]) remain -= n } w.complete = false } // TextMarshaler is a configurable text format marshaler. type TextMarshaler struct { Compact bool // use compact text format (one line). } // Marshal writes a given protocol buffer in text format. // The only errors returned are from w. func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error { val := reflect.ValueOf(pb) if pb == nil || val.IsNil() { w.Write([]byte("<nil>")) return nil } var bw *bufio.Writer ww, ok := w.(writer) if !ok { bw = bufio.NewWriter(w) ww = bw } aw := &textWriter{ w: ww, complete: true, compact: m.Compact, } if tm, ok := pb.(encoding.TextMarshaler); ok { text, err := tm.MarshalText() if err != nil { return err } if _, err = aw.Write(text); err != nil { return err } if bw != nil { return bw.Flush() } return nil } // Dereference the received pointer so we don't have outer < and >. v := reflect.Indirect(val) if err := writeStruct(aw, v); err != nil { return err } if bw != nil { return bw.Flush() } return nil } // Text is the same as Marshal, but returns the string directly. func (m *TextMarshaler) Text(pb Message) string { var buf bytes.Buffer m.Marshal(&buf, pb) return buf.String() } var ( defaultTextMarshaler = TextMarshaler{} compactTextMarshaler = TextMarshaler{Compact: true} ) // TODO: consider removing some of the Marshal functions below. // MarshalText writes a given protocol buffer in text format. // The only errors returned are from w. func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } // MarshalTextString is the same as MarshalText, but returns the string directly. func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } // CompactText writes a given protocol buffer in compact text format (one line). func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } // CompactTextString is the same as CompactText, but returns the string directly. func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
cmd/vendor/github.com/gogo/protobuf/proto/text.go
0
https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2
[ 0.00017907950677908957, 0.00017231453966815025, 0.00016306291217915714, 0.00017304990615230054, 0.000004084311512997374 ]
{ "id": 2, "code_window": [ "\tresp, err := ki.Delete(ctx, key, &client.DeleteOptions{PrevIndex: uint64(prevIndex), PrevValue: prevValue, Dir: dir, Recursive: recursive})\n", "\tcancel()\n", "\tif err != nil {\n", "\t\thandleError(ExitServerError, err)\n", "\t}\n", "\n", "\tif !resp.Node.Dir {\n", "\t\tprintResponseKey(resp, c.GlobalString(\"output\"))\n", "\t}\n", "}" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tif !resp.Node.Dir || c.GlobalString(\"output\") != \"simple\" {\n" ], "file_path": "etcdctl/ctlv2/command/rm_command.go", "type": "replace", "edit_start_line_idx": 59 }
// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Copyright (c) 2013, The Prometheus Authors // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file. package prometheus import ( "bytes" "compress/gzip" "errors" "fmt" "hash/fnv" "io" "net/http" "net/url" "os" "sort" "strings" "sync" "github.com/golang/protobuf/proto" "github.com/prometheus/common/expfmt" dto "github.com/prometheus/client_model/go" ) var ( defRegistry = newDefaultRegistry() errAlreadyReg = errors.New("duplicate metrics collector registration attempted") ) // Constants relevant to the HTTP interface. const ( // APIVersion is the version of the format of the exported data. This // will match this library's version, which subscribes to the Semantic // Versioning scheme. APIVersion = "0.0.4" // DelimitedTelemetryContentType is the content type set on telemetry // data responses in delimited protobuf format. DelimitedTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited` // TextTelemetryContentType is the content type set on telemetry data // responses in text format. TextTelemetryContentType = `text/plain; version=` + APIVersion // ProtoTextTelemetryContentType is the content type set on telemetry // data responses in protobuf text format. (Only used for debugging.) ProtoTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text` // ProtoCompactTextTelemetryContentType is the content type set on // telemetry data responses in protobuf compact text format. (Only used // for debugging.) ProtoCompactTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text` // Constants for object pools. numBufs = 4 numMetricFamilies = 1000 numMetrics = 10000 // Capacity for the channel to collect metrics and descriptors. capMetricChan = 1000 capDescChan = 10 contentTypeHeader = "Content-Type" contentLengthHeader = "Content-Length" contentEncodingHeader = "Content-Encoding" acceptEncodingHeader = "Accept-Encoding" acceptHeader = "Accept" ) // Handler returns the HTTP handler for the global Prometheus registry. It is // already instrumented with InstrumentHandler (using "prometheus" as handler // name). Usually the handler is used to handle the "/metrics" endpoint. func Handler() http.Handler { return InstrumentHandler("prometheus", defRegistry) } // UninstrumentedHandler works in the same way as Handler, but the returned HTTP // handler is not instrumented. This is useful if no instrumentation is desired // (for whatever reason) or if the instrumentation has to happen with a // different handler name (or with a different instrumentation approach // altogether). See the InstrumentHandler example. func UninstrumentedHandler() http.Handler { return defRegistry } // Register registers a new Collector to be included in metrics collection. 
It // returns an error if the descriptors provided by the Collector are invalid or // if they - in combination with descriptors of already registered Collectors - // do not fulfill the consistency and uniqueness criteria described in the Desc // documentation. // // Do not register the same Collector multiple times concurrently. (Registering // the same Collector twice would result in an error anyway, but on top of that, // it is not safe to do so concurrently.) func Register(m Collector) error { _, err := defRegistry.Register(m) return err } // MustRegister works like Register but panics where Register would have // returned an error. func MustRegister(m Collector) { err := Register(m) if err != nil { panic(err) } } // RegisterOrGet works like Register but does not return an error if a Collector // is registered that equals a previously registered Collector. (Two Collectors // are considered equal if their Describe method yields the same set of // descriptors.) Instead, the previously registered Collector is returned (which // is helpful if the new and previously registered Collectors are equal but not // identical, i.e. not pointers to the same object). // // As for Register, it is still not safe to call RegisterOrGet with the same // Collector multiple times concurrently. func RegisterOrGet(m Collector) (Collector, error) { return defRegistry.RegisterOrGet(m) } // MustRegisterOrGet works like Register but panics where RegisterOrGet would // have returned an error. func MustRegisterOrGet(m Collector) Collector { existing, err := RegisterOrGet(m) if err != nil { panic(err) } return existing } // Unregister unregisters the Collector that equals the Collector passed in as // an argument. (Two Collectors are considered equal if their Describe method // yields the same set of descriptors.) The function returns whether a Collector // was unregistered. func Unregister(c Collector) bool { return defRegistry.Unregister(c) } // SetMetricFamilyInjectionHook sets a function that is called whenever metrics // are collected. The hook function must be set before metrics collection begins // (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The // MetricFamily protobufs returned by the hook function are merged with the // metrics collected in the usual way. // // This is a way to directly inject MetricFamily protobufs managed and owned by // the caller. The caller has full responsibility. As no registration of the // injected metrics has happened, there is no descriptor to check against, and // there are no registration-time checks. If collect-time checks are disabled // (see function EnableCollectChecks), no sanity checks are performed on the // returned protobufs at all. If collect-checks are enabled, type and uniqueness // checks are performed, but no further consistency checks (which would require // knowledge of a metric descriptor). // // Sorting concerns: The caller is responsible for sorting the label pairs in // each metric. However, the order of metrics will be sorted by the registry as // it is required anyway after merging with the metric families collected // conventionally. // // The function must be callable at any time and concurrently. func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) { defRegistry.metricFamilyInjectionHook = hook } // PanicOnCollectError sets the behavior whether a panic is caused upon an error // while metrics are collected and served to the HTTP endpoint. 
By default, an // internal server error (status code 500) is served with an error message. func PanicOnCollectError(b bool) { defRegistry.panicOnCollectError = b } // EnableCollectChecks enables (or disables) additional consistency checks // during metrics collection. These additional checks are not enabled by default // because they inflict a performance penalty and the errors they check for can // only happen if the used Metric and Collector types have internal programming // errors. It can be helpful to enable these checks while working with custom // Collectors or Metrics whose correctness is not well established yet. func EnableCollectChecks(b bool) { defRegistry.collectChecksEnabled = b } // encoder is a function that writes a dto.MetricFamily to an io.Writer in a // certain encoding. It returns the number of bytes written and any error // encountered. Note that pbutil.WriteDelimited and pbutil.MetricFamilyToText // are encoders. type encoder func(io.Writer, *dto.MetricFamily) (int, error) type registry struct { mtx sync.RWMutex collectorsByID map[uint64]Collector // ID is a hash of the descIDs. descIDs map[uint64]struct{} dimHashesByName map[string]uint64 bufPool chan *bytes.Buffer metricFamilyPool chan *dto.MetricFamily metricPool chan *dto.Metric metricFamilyInjectionHook func() []*dto.MetricFamily panicOnCollectError, collectChecksEnabled bool } func (r *registry) Register(c Collector) (Collector, error) { descChan := make(chan *Desc, capDescChan) go func() { c.Describe(descChan) close(descChan) }() newDescIDs := map[uint64]struct{}{} newDimHashesByName := map[string]uint64{} var collectorID uint64 // Just a sum of all desc IDs. var duplicateDescErr error r.mtx.Lock() defer r.mtx.Unlock() // Coduct various tests... for desc := range descChan { // Is the descriptor valid at all? if desc.err != nil { return c, fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) } // Is the descID unique? // (In other words: Is the fqName + constLabel combination unique?) if _, exists := r.descIDs[desc.id]; exists { duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) } // If it is not a duplicate desc in this collector, add it to // the collectorID. (We allow duplicate descs within the same // collector, but their existence must be a no-op.) if _, exists := newDescIDs[desc.id]; !exists { newDescIDs[desc.id] = struct{}{} collectorID += desc.id } // Are all the label names and the help string consistent with // previous descriptors of the same name? // First check existing descriptors... if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { if dimHash != desc.dimHash { return nil, fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) } } else { // ...then check the new descriptors already seen. if dimHash, exists := newDimHashesByName[desc.fqName]; exists { if dimHash != desc.dimHash { return nil, fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) } } else { newDimHashesByName[desc.fqName] = desc.dimHash } } } // Did anything happen at all? if len(newDescIDs) == 0 { return nil, errors.New("collector has no descriptors") } if existing, exists := r.collectorsByID[collectorID]; exists { return existing, errAlreadyReg } // If the collectorID is new, but at least one of the descs existed // before, we are in trouble. 
if duplicateDescErr != nil { return nil, duplicateDescErr } // Only after all tests have passed, actually register. r.collectorsByID[collectorID] = c for hash := range newDescIDs { r.descIDs[hash] = struct{}{} } for name, dimHash := range newDimHashesByName { r.dimHashesByName[name] = dimHash } return c, nil } func (r *registry) RegisterOrGet(m Collector) (Collector, error) { existing, err := r.Register(m) if err != nil && err != errAlreadyReg { return nil, err } return existing, nil } func (r *registry) Unregister(c Collector) bool { descChan := make(chan *Desc, capDescChan) go func() { c.Describe(descChan) close(descChan) }() descIDs := map[uint64]struct{}{} var collectorID uint64 // Just a sum of the desc IDs. for desc := range descChan { if _, exists := descIDs[desc.id]; !exists { collectorID += desc.id descIDs[desc.id] = struct{}{} } } r.mtx.RLock() if _, exists := r.collectorsByID[collectorID]; !exists { r.mtx.RUnlock() return false } r.mtx.RUnlock() r.mtx.Lock() defer r.mtx.Unlock() delete(r.collectorsByID, collectorID) for id := range descIDs { delete(r.descIDs, id) } // dimHashesByName is left untouched as those must be consistent // throughout the lifetime of a program. return true } func (r *registry) Push(job, instance, pushURL, method string) error { if !strings.Contains(pushURL, "://") { pushURL = "http://" + pushURL } pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job)) if instance != "" { pushURL += "/instances/" + url.QueryEscape(instance) } buf := r.getBuf() defer r.giveBuf(buf) if err := r.writePB(expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)); err != nil { if r.panicOnCollectError { panic(err) } return err } req, err := http.NewRequest(method, pushURL, buf) if err != nil { return err } req.Header.Set(contentTypeHeader, DelimitedTelemetryContentType) resp, err := http.DefaultClient.Do(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != 202 { return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, pushURL) } return nil } func (r *registry) ServeHTTP(w http.ResponseWriter, req *http.Request) { contentType := expfmt.Negotiate(req.Header) buf := r.getBuf() defer r.giveBuf(buf) writer, encoding := decorateWriter(req, buf) if err := r.writePB(expfmt.NewEncoder(writer, contentType)); err != nil { if r.panicOnCollectError { panic(err) } http.Error(w, "An error has occurred:\n\n"+err.Error(), http.StatusInternalServerError) return } if closer, ok := writer.(io.Closer); ok { closer.Close() } header := w.Header() header.Set(contentTypeHeader, string(contentType)) header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) if encoding != "" { header.Set(contentEncodingHeader, encoding) } w.Write(buf.Bytes()) } func (r *registry) writePB(encoder expfmt.Encoder) error { var metricHashes map[uint64]struct{} if r.collectChecksEnabled { metricHashes = make(map[uint64]struct{}) } metricChan := make(chan Metric, capMetricChan) wg := sync.WaitGroup{} r.mtx.RLock() metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) // Scatter. // (Collectors could be complex and slow, so we call them all at once.) wg.Add(len(r.collectorsByID)) go func() { wg.Wait() close(metricChan) }() for _, collector := range r.collectorsByID { go func(collector Collector) { defer wg.Done() collector.Collect(metricChan) }(collector) } r.mtx.RUnlock() // Drain metricChan in case of premature return. defer func() { for _ = range metricChan { } }() // Gather. 
for metric := range metricChan { // This could be done concurrently, too, but it required locking // of metricFamiliesByName (and of metricHashes if checks are // enabled). Most likely not worth it. desc := metric.Desc() metricFamily, ok := metricFamiliesByName[desc.fqName] if !ok { metricFamily = r.getMetricFamily() defer r.giveMetricFamily(metricFamily) metricFamily.Name = proto.String(desc.fqName) metricFamily.Help = proto.String(desc.help) metricFamiliesByName[desc.fqName] = metricFamily } dtoMetric := r.getMetric() defer r.giveMetric(dtoMetric) if err := metric.Write(dtoMetric); err != nil { // TODO: Consider different means of error reporting so // that a single erroneous metric could be skipped // instead of blowing up the whole collection. return fmt.Errorf("error collecting metric %v: %s", desc, err) } switch { case metricFamily.Type != nil: // Type already set. We are good. case dtoMetric.Gauge != nil: metricFamily.Type = dto.MetricType_GAUGE.Enum() case dtoMetric.Counter != nil: metricFamily.Type = dto.MetricType_COUNTER.Enum() case dtoMetric.Summary != nil: metricFamily.Type = dto.MetricType_SUMMARY.Enum() case dtoMetric.Untyped != nil: metricFamily.Type = dto.MetricType_UNTYPED.Enum() case dtoMetric.Histogram != nil: metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() default: return fmt.Errorf("empty metric collected: %s", dtoMetric) } if r.collectChecksEnabled { if err := r.checkConsistency(metricFamily, dtoMetric, desc, metricHashes); err != nil { return err } } metricFamily.Metric = append(metricFamily.Metric, dtoMetric) } if r.metricFamilyInjectionHook != nil { for _, mf := range r.metricFamilyInjectionHook() { existingMF, exists := metricFamiliesByName[mf.GetName()] if !exists { metricFamiliesByName[mf.GetName()] = mf if r.collectChecksEnabled { for _, m := range mf.Metric { if err := r.checkConsistency(mf, m, nil, metricHashes); err != nil { return err } } } continue } for _, m := range mf.Metric { if r.collectChecksEnabled { if err := r.checkConsistency(existingMF, m, nil, metricHashes); err != nil { return err } } existingMF.Metric = append(existingMF.Metric, m) } } } // Now that MetricFamilies are all set, sort their Metrics // lexicographically by their label values. for _, mf := range metricFamiliesByName { sort.Sort(metricSorter(mf.Metric)) } // Write out MetricFamilies sorted by their name. names := make([]string, 0, len(metricFamiliesByName)) for name := range metricFamiliesByName { names = append(names, name) } sort.Strings(names) for _, name := range names { if err := encoder.Encode(metricFamiliesByName[name]); err != nil { return err } } return nil } func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, desc *Desc, metricHashes map[uint64]struct{}) error { // Type consistency with metric family. if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { return fmt.Errorf( "collected metric %s %s is not a %s", metricFamily.GetName(), dtoMetric, metricFamily.GetType(), ) } // Is the metric unique (i.e. no other metric with the same name and the same label values)? 
h := fnv.New64a() var buf bytes.Buffer buf.WriteString(metricFamily.GetName()) buf.WriteByte(separatorByte) h.Write(buf.Bytes()) // Make sure label pairs are sorted. We depend on it for the consistency // check. Label pairs must be sorted by contract. But the point of this // method is to check for contract violations. So we better do the sort // now. sort.Sort(LabelPairSorter(dtoMetric.Label)) for _, lp := range dtoMetric.Label { buf.Reset() buf.WriteString(lp.GetValue()) buf.WriteByte(separatorByte) h.Write(buf.Bytes()) } metricHash := h.Sum64() if _, exists := metricHashes[metricHash]; exists { return fmt.Errorf( "collected metric %s %s was collected before with the same name and label values", metricFamily.GetName(), dtoMetric, ) } metricHashes[metricHash] = struct{}{} if desc == nil { return nil // Nothing left to check if we have no desc. } // Desc consistency with metric family. if metricFamily.GetName() != desc.fqName { return fmt.Errorf( "collected metric %s %s has name %q but should have %q", metricFamily.GetName(), dtoMetric, metricFamily.GetName(), desc.fqName, ) } if metricFamily.GetHelp() != desc.help { return fmt.Errorf( "collected metric %s %s has help %q but should have %q", metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, ) } // Is the desc consistent with the content of the metric? lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) for _, l := range desc.variableLabels { lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ Name: proto.String(l), }) } if len(lpsFromDesc) != len(dtoMetric.Label) { return fmt.Errorf( "labels in collected metric %s %s are inconsistent with descriptor %s", metricFamily.GetName(), dtoMetric, desc, ) } sort.Sort(LabelPairSorter(lpsFromDesc)) for i, lpFromDesc := range lpsFromDesc { lpFromMetric := dtoMetric.Label[i] if lpFromDesc.GetName() != lpFromMetric.GetName() || lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { return fmt.Errorf( "labels in collected metric %s %s are inconsistent with descriptor %s", metricFamily.GetName(), dtoMetric, desc, ) } } r.mtx.RLock() // Remaining checks need the read lock. defer r.mtx.RUnlock() // Is the desc registered? 
if _, exist := r.descIDs[desc.id]; !exist { return fmt.Errorf( "collected metric %s %s with unregistered descriptor %s", metricFamily.GetName(), dtoMetric, desc, ) } return nil } func (r *registry) getBuf() *bytes.Buffer { select { case buf := <-r.bufPool: return buf default: return &bytes.Buffer{} } } func (r *registry) giveBuf(buf *bytes.Buffer) { buf.Reset() select { case r.bufPool <- buf: default: } } func (r *registry) getMetricFamily() *dto.MetricFamily { select { case mf := <-r.metricFamilyPool: return mf default: return &dto.MetricFamily{} } } func (r *registry) giveMetricFamily(mf *dto.MetricFamily) { mf.Reset() select { case r.metricFamilyPool <- mf: default: } } func (r *registry) getMetric() *dto.Metric { select { case m := <-r.metricPool: return m default: return &dto.Metric{} } } func (r *registry) giveMetric(m *dto.Metric) { m.Reset() select { case r.metricPool <- m: default: } } func newRegistry() *registry { return &registry{ collectorsByID: map[uint64]Collector{}, descIDs: map[uint64]struct{}{}, dimHashesByName: map[string]uint64{}, bufPool: make(chan *bytes.Buffer, numBufs), metricFamilyPool: make(chan *dto.MetricFamily, numMetricFamilies), metricPool: make(chan *dto.Metric, numMetrics), } } func newDefaultRegistry() *registry { r := newRegistry() r.Register(NewProcessCollector(os.Getpid(), "")) r.Register(NewGoCollector()) return r } // decorateWriter wraps a writer to handle gzip compression if requested. It // returns the decorated writer and the appropriate "Content-Encoding" header // (which is empty if no compression is enabled). func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { header := request.Header.Get(acceptEncodingHeader) parts := strings.Split(header, ",") for _, part := range parts { part := strings.TrimSpace(part) if part == "gzip" || strings.HasPrefix(part, "gzip;") { return gzip.NewWriter(writer), "gzip" } } return writer, "" } type metricSorter []*dto.Metric func (s metricSorter) Len() int { return len(s) } func (s metricSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s metricSorter) Less(i, j int) bool { if len(s[i].Label) != len(s[j].Label) { // This should not happen. The metrics are // inconsistent. However, we have to deal with the fact, as // people might use custom collectors or metric family injection // to create inconsistent metrics. So let's simply compare the // number of labels in this case. That will still yield // reproducible sorting. return len(s[i].Label) < len(s[j].Label) } for n, lp := range s[i].Label { vi := lp.GetValue() vj := s[j].Label[n].GetValue() if vi != vj { return vi < vj } } return true }
cmd/vendor/github.com/prometheus/client_golang/prometheus/registry.go
0
https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2
[ 0.003688797354698181, 0.00022186934074852616, 0.0001635136577533558, 0.00017413374735042453, 0.00040863422327674925 ]
{ "id": 2, "code_window": [ "\tresp, err := ki.Delete(ctx, key, &client.DeleteOptions{PrevIndex: uint64(prevIndex), PrevValue: prevValue, Dir: dir, Recursive: recursive})\n", "\tcancel()\n", "\tif err != nil {\n", "\t\thandleError(ExitServerError, err)\n", "\t}\n", "\n", "\tif !resp.Node.Dir {\n", "\t\tprintResponseKey(resp, c.GlobalString(\"output\"))\n", "\t}\n", "}" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tif !resp.Node.Dir || c.GlobalString(\"output\") != \"simple\" {\n" ], "file_path": "etcdctl/ctlv2/command/rm_command.go", "type": "replace", "edit_start_line_idx": 59 }
// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import "hash/fnv" // Untyped is a Metric that represents a single numerical value that can // arbitrarily go up and down. // // An Untyped metric works the same as a Gauge. The only difference is that to // no type information is implied. // // To create Untyped instances, use NewUntyped. type Untyped interface { Metric Collector // Set sets the Untyped metric to an arbitrary value. Set(float64) // Inc increments the Untyped metric by 1. Inc() // Dec decrements the Untyped metric by 1. Dec() // Add adds the given value to the Untyped metric. (The value can be // negative, resulting in a decrease.) Add(float64) // Sub subtracts the given value from the Untyped metric. (The value can // be negative, resulting in an increase.) Sub(float64) } // UntypedOpts is an alias for Opts. See there for doc comments. type UntypedOpts Opts // NewUntyped creates a new Untyped metric from the provided UntypedOpts. func NewUntyped(opts UntypedOpts) Untyped { return newValue(NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, nil, opts.ConstLabels, ), UntypedValue, 0) } // UntypedVec is a Collector that bundles a set of Untyped metrics that all // share the same Desc, but have different values for their variable // labels. This is used if you want to count the same thing partitioned by // various dimensions. Create instances with NewUntypedVec. type UntypedVec struct { MetricVec } // NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and // partitioned by the given label names. At least one label name must be // provided. func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, labelNames, opts.ConstLabels, ) return &UntypedVec{ MetricVec: MetricVec{ children: map[uint64]Metric{}, desc: desc, hash: fnv.New64a(), newMetric: func(lvs ...string) Metric { return newValue(desc, UntypedValue, 0, lvs...) }, }, } } // GetMetricWithLabelValues replaces the method of the same name in // MetricVec. The difference is that this method returns an Untyped and not a // Metric so that no type conversion is required. func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) { metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { return metric.(Untyped), err } return nil, err } // GetMetricWith replaces the method of the same name in MetricVec. The // difference is that this method returns an Untyped and not a Metric so that no // type conversion is required. func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) { metric, err := m.MetricVec.GetMetricWith(labels) if metric != nil { return metric.(Untyped), err } return nil, err } // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. 
By not returning an // error, WithLabelValues allows shortcuts like // myVec.WithLabelValues("404", "GET").Add(42) func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped { return m.MetricVec.WithLabelValues(lvs...).(Untyped) } // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. By not returning an error, With allows shortcuts like // myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) func (m *UntypedVec) With(labels Labels) Untyped { return m.MetricVec.With(labels).(Untyped) } // UntypedFunc is an Untyped whose value is determined at collect time by // calling a provided function. // // To create UntypedFunc instances, use NewUntypedFunc. type UntypedFunc interface { Metric Collector } // NewUntypedFunc creates a new UntypedFunc based on the provided // UntypedOpts. The value reported is determined by calling the given function // from within the Write method. Take into account that metric collection may // happen concurrently. If that results in concurrent calls to Write, like in // the case where an UntypedFunc is directly registered with Prometheus, the // provided function must be concurrency-safe. func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { return newValueFunc(NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, nil, opts.ConstLabels, ), UntypedValue, function) }
cmd/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
0
https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2
[ 0.00017866847338154912, 0.0001707540504867211, 0.00016395282000303268, 0.00016979061183519661, 0.000004596174676407827 ]
{ "id": 3, "code_window": [ "\tif err != nil {\n", "\t\thandleError(ExitServerError, err)\n", "\t}\n", "\n", "\tif !resp.Node.Dir {\n", "\t\tprintResponseKey(resp, c.GlobalString(\"output\"))\n", "\t}\n", "}" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tif !resp.Node.Dir || c.GlobalString(\"output\") != \"simple\" {\n" ], "file_path": "etcdctl/ctlv2/command/rmdir_command.go", "type": "replace", "edit_start_line_idx": 50 }
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package command

import (
	"errors"

	"github.com/coreos/etcd/client"
	"github.com/urfave/cli"
)

// NewRemoveDirCommand returns the CLI command for "rmdir".
func NewRemoveDirCommand() cli.Command {
	return cli.Command{
		Name:      "rmdir",
		Usage:     "removes the key if it is an empty directory or a key-value pair",
		ArgsUsage: "<key>",
		Action: func(c *cli.Context) error {
			rmdirCommandFunc(c, mustNewKeyAPI(c))
			return nil
		},
	}
}

// rmdirCommandFunc executes the "rmdir" command.
func rmdirCommandFunc(c *cli.Context, ki client.KeysAPI) {
	if len(c.Args()) == 0 {
		handleError(ExitBadArgs, errors.New("key required"))
	}

	key := c.Args()[0]

	ctx, cancel := contextWithTotalTimeout(c)
	resp, err := ki.Delete(ctx, key, &client.DeleteOptions{Dir: true})
	cancel()
	if err != nil {
		handleError(ExitServerError, err)
	}

	if !resp.Node.Dir {
		printResponseKey(resp, c.GlobalString("output"))
	}
}
etcdctl/ctlv2/command/rmdir_command.go
1
https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2
[ 0.07921238988637924, 0.014291745610535145, 0.00017672272224444896, 0.00022121405345387757, 0.02910379134118557 ]
{ "id": 3, "code_window": [ "\tif err != nil {\n", "\t\thandleError(ExitServerError, err)\n", "\t}\n", "\n", "\tif !resp.Node.Dir {\n", "\t\tprintResponseKey(resp, c.GlobalString(\"output\"))\n", "\t}\n", "}" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tif !resp.Node.Dir || c.GlobalString(\"output\") != \"simple\" {\n" ], "file_path": "etcdctl/ctlv2/command/rmdir_command.go", "type": "replace", "edit_start_line_idx": 50 }
rclone.conf
bin
etcd2-backup.tgz
*~
contrib/systemd/etcd2-backup-coreos/.gitignore
0
https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2
[ 0.0001730030489852652, 0.0001730030489852652, 0.0001730030489852652, 0.0001730030489852652, 0 ]
{ "id": 3, "code_window": [ "\tif err != nil {\n", "\t\thandleError(ExitServerError, err)\n", "\t}\n", "\n", "\tif !resp.Node.Dir {\n", "\t\tprintResponseKey(resp, c.GlobalString(\"output\"))\n", "\t}\n", "}" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tif !resp.Node.Dir || c.GlobalString(\"output\") != \"simple\" {\n" ], "file_path": "etcdctl/ctlv2/command/rmdir_command.go", "type": "replace", "edit_start_line_idx": 50 }
// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package mvcc import ( "bytes" "errors" "fmt" "github.com/google/btree" ) var ( ErrRevisionNotFound = errors.New("mvcc: revision not found") ) // keyIndex stores the revisions of a key in the backend. // Each keyIndex has at least one key generation. // Each generation might have several key versions. // Tombstone on a key appends an tombstone version at the end // of the current generation and creates a new empty generation. // Each version of a key has an index pointing to the backend. // // For example: put(1.0);put(2.0);tombstone(3.0);put(4.0);tombstone(5.0) on key "foo" // generate a keyIndex: // key: "foo" // rev: 5 // generations: // {empty} // {4.0, 5.0(t)} // {1.0, 2.0, 3.0(t)} // // Compact a keyIndex removes the versions with smaller or equal to // rev except the largest one. If the generation becomes empty // during compaction, it will be removed. if all the generations get // removed, the keyIndex should be removed. // For example: // compact(2) on the previous example // generations: // {empty} // {4.0, 5.0(t)} // {2.0, 3.0(t)} // // compact(4) // generations: // {empty} // {4.0, 5.0(t)} // // compact(5): // generations: // {empty} -> key SHOULD be removed. // // compact(6): // generations: // {empty} -> key SHOULD be removed. type keyIndex struct { key []byte modified revision // the main rev of the last modification generations []generation } // put puts a revision to the keyIndex. func (ki *keyIndex) put(main int64, sub int64) { rev := revision{main: main, sub: sub} if !rev.GreaterThan(ki.modified) { plog.Panicf("store.keyindex: put with unexpected smaller revision [%v / %v]", rev, ki.modified) } if len(ki.generations) == 0 { ki.generations = append(ki.generations, generation{}) } g := &ki.generations[len(ki.generations)-1] if len(g.revs) == 0 { // create a new key keysGauge.Inc() g.created = rev } g.revs = append(g.revs, rev) g.ver++ ki.modified = rev } func (ki *keyIndex) restore(created, modified revision, ver int64) { if len(ki.generations) != 0 { plog.Panicf("store.keyindex: cannot restore non-empty keyIndex") } ki.modified = modified g := generation{created: created, ver: ver, revs: []revision{modified}} ki.generations = append(ki.generations, g) keysGauge.Inc() } // tombstone puts a revision, pointing to a tombstone, to the keyIndex. // It also creates a new empty generation in the keyIndex. // It returns ErrRevisionNotFound when tombstone on an empty generation. func (ki *keyIndex) tombstone(main int64, sub int64) error { if ki.isEmpty() { plog.Panicf("store.keyindex: unexpected tombstone on empty keyIndex %s", string(ki.key)) } if ki.generations[len(ki.generations)-1].isEmpty() { return ErrRevisionNotFound } ki.put(main, sub) ki.generations = append(ki.generations, generation{}) keysGauge.Dec() return nil } // get gets the modified, created revision and version of the key that satisfies the given atRev. // Rev must be higher than or equal to the given atRev. 
func (ki *keyIndex) get(atRev int64) (modified, created revision, ver int64, err error) { if ki.isEmpty() { plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) } g := ki.findGeneration(atRev) if g.isEmpty() { return revision{}, revision{}, 0, ErrRevisionNotFound } n := g.walk(func(rev revision) bool { return rev.main > atRev }) if n != -1 { return g.revs[n], g.created, g.ver - int64(len(g.revs)-n-1), nil } return revision{}, revision{}, 0, ErrRevisionNotFound } // since returns revisions since the given rev. Only the revision with the // largest sub revision will be returned if multiple revisions have the same // main revision. func (ki *keyIndex) since(rev int64) []revision { if ki.isEmpty() { plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) } since := revision{rev, 0} var gi int // find the generations to start checking for gi = len(ki.generations) - 1; gi > 0; gi-- { g := ki.generations[gi] if g.isEmpty() { continue } if since.GreaterThan(g.created) { break } } var revs []revision var last int64 for ; gi < len(ki.generations); gi++ { for _, r := range ki.generations[gi].revs { if since.GreaterThan(r) { continue } if r.main == last { // replace the revision with a new one that has higher sub value, // because the original one should not be seen by external revs[len(revs)-1] = r continue } revs = append(revs, r) last = r.main } } return revs } // compact compacts a keyIndex by removing the versions with smaller or equal // revision than the given atRev except the largest one (If the largest one is // a tombstone, it will not be kept). // If a generation becomes empty during compaction, it will be removed. func (ki *keyIndex) compact(atRev int64, available map[revision]struct{}) { if ki.isEmpty() { plog.Panicf("store.keyindex: unexpected compact on empty keyIndex %s", string(ki.key)) } // walk until reaching the first revision that has an revision smaller or equal to // the atRev. // add it to the available map f := func(rev revision) bool { if rev.main <= atRev { available[rev] = struct{}{} return false } return true } i, g := 0, &ki.generations[0] // find first generation includes atRev or created after atRev for i < len(ki.generations)-1 { if tomb := g.revs[len(g.revs)-1].main; tomb > atRev { break } i++ g = &ki.generations[i] } if !g.isEmpty() { n := g.walk(f) // remove the previous contents. if n != -1 { g.revs = g.revs[n:] } // remove any tombstone if len(g.revs) == 1 && i != len(ki.generations)-1 { delete(available, g.revs[0]) i++ } } // remove the previous generations. ki.generations = ki.generations[i:] return } func (ki *keyIndex) isEmpty() bool { return len(ki.generations) == 1 && ki.generations[0].isEmpty() } // findGeneration finds out the generation of the keyIndex that the // given rev belongs to. If the given rev is at the gap of two generations, // which means that the key does not exist at the given rev, it returns nil. 
func (ki *keyIndex) findGeneration(rev int64) *generation { lastg := len(ki.generations) - 1 cg := lastg for cg >= 0 { if len(ki.generations[cg].revs) == 0 { cg-- continue } g := ki.generations[cg] if cg != lastg { if tomb := g.revs[len(g.revs)-1].main; tomb <= rev { return nil } } if g.revs[0].main <= rev { return &ki.generations[cg] } cg-- } return nil } func (a *keyIndex) Less(b btree.Item) bool { return bytes.Compare(a.key, b.(*keyIndex).key) == -1 } func (a *keyIndex) equal(b *keyIndex) bool { if !bytes.Equal(a.key, b.key) { return false } if a.modified != b.modified { return false } if len(a.generations) != len(b.generations) { return false } for i := range a.generations { ag, bg := a.generations[i], b.generations[i] if !ag.equal(bg) { return false } } return true } func (ki *keyIndex) String() string { var s string for _, g := range ki.generations { s += g.String() } return s } // generation contains multiple revisions of a key. type generation struct { ver int64 created revision // when the generation is created (put in first revision). revs []revision } func (g *generation) isEmpty() bool { return g == nil || len(g.revs) == 0 } // walk walks through the revisions in the generation in descending order. // It passes the revision to the given function. // walk returns until: 1. it finishes walking all pairs 2. the function returns false. // walk returns the position at where it stopped. If it stopped after // finishing walking, -1 will be returned. func (g *generation) walk(f func(rev revision) bool) int { l := len(g.revs) for i := range g.revs { ok := f(g.revs[l-i-1]) if !ok { return l - i - 1 } } return -1 } func (g *generation) String() string { return fmt.Sprintf("g: created[%d] ver[%d], revs %#v\n", g.created, g.ver, g.revs) } func (a generation) equal(b generation) bool { if a.ver != b.ver { return false } if len(a.revs) != len(b.revs) { return false } for i := range a.revs { ar, br := a.revs[i], b.revs[i] if ar != br { return false } } return true }
mvcc/key_index.go
0
https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2
[ 0.00022136275947559625, 0.00017432717140763998, 0.0001672181679168716, 0.00017325291992165148, 0.000008745950253796764 ]
{ "id": 3, "code_window": [ "\tif err != nil {\n", "\t\thandleError(ExitServerError, err)\n", "\t}\n", "\n", "\tif !resp.Node.Dir {\n", "\t\tprintResponseKey(resp, c.GlobalString(\"output\"))\n", "\t}\n", "}" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tif !resp.Node.Dir || c.GlobalString(\"output\") != \"simple\" {\n" ], "file_path": "etcdctl/ctlv2/command/rmdir_command.go", "type": "replace", "edit_start_line_idx": 50 }
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package osutil implements operating system-related utility functions.
package osutil

import (
	"os"
	"strings"

	"github.com/coreos/pkg/capnslog"
)

var (
	plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "osutil")
)

func Unsetenv(key string) error {
	envs := os.Environ()
	os.Clearenv()
	for _, e := range envs {
		strs := strings.SplitN(e, "=", 2)
		if strs[0] == key {
			continue
		}
		if err := os.Setenv(strs[0], strs[1]); err != nil {
			return err
		}
	}
	return nil
}
pkg/osutil/osutil.go
0
https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2
[ 0.00017926248256117105, 0.00017260669847019017, 0.0001670396886765957, 0.00017483248666394502, 0.000004819466994376853 ]