hunk dict | file stringlengths 0–11.8M | file_path stringlengths 2–234 | label int64 0–1 | commit_url stringlengths 74–103 | dependency_score sequencelengths 5–5 |
---|---|---|---|---|---|
{
"id": 12,
"code_window": [
"\tsql = \"insert into t(id, a) values(2000, 'test')\"\n",
"\tencoder.Encode(sql, tableID)\n",
"\tc.Assert(alloc.Base(), Equals, int64(2000))\n",
"}\n",
"\n",
"func (s *testKvEncoderSuite) TestSimpleKeyEncode(c *C) {\n",
"\tencoder, err := New(\"test\", nil)\n",
"\tc.Assert(err, IsNil)\n",
"\tdefer encoder.Close()\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (s *testKvEncoderSuite) TestAllocatorRebaseSmaller(c *C) {\n",
"\talloc := NewAllocator()\n",
"\talloc.Rebase(1, 10, false)\n",
"\tc.Assert(alloc.Base(), Equals, int64(10))\n",
"\talloc.Rebase(1, 100, false)\n",
"\tc.Assert(alloc.Base(), Equals, int64(100))\n",
"\talloc.Rebase(1, 1, false)\n",
"\tc.Assert(alloc.Base(), Equals, int64(100))\n",
"\talloc.Reset(1)\n",
"\tc.Assert(alloc.Base(), Equals, int64(1))\n",
"}\n",
"\n"
],
"file_path": "util/kvencoder/kv_encoder_test.go",
"type": "add",
"edit_start_line_idx": 459
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package kvenc
import (
"sync/atomic"
"github.com/pingcap/tidb/meta/autoid"
)
var _ autoid.Allocator = &allocator{}
var (
step = int64(5000)
)
// NewAllocator creates a new allocator.
func NewAllocator() autoid.Allocator {
return &allocator{}
}
type allocator struct {
base int64
}
func (alloc *allocator) Alloc(tableID int64) (int64, error) {
return atomic.AddInt64(&alloc.base, 1), nil
}
func (alloc *allocator) Rebase(tableID, newBase int64, allocIDs bool) error {
atomic.StoreInt64(&alloc.base, newBase)
return nil
}
func (alloc *allocator) Base() int64 {
return atomic.LoadInt64(&alloc.base)
}
func (alloc *allocator) End() int64 {
return alloc.Base() + step
}
func (alloc *allocator) NextGlobalAutoID(tableID int64) (int64, error) {
return alloc.End() + 1, nil
}
| util/kvencoder/allocator.go | 1 | https://github.com/pingcap/tidb/commit/c6258e3aeb79a06753e9d013b78a6f6e0b4df708 | [
0.0032545512076467276,
0.0011936606606468558,
0.00017772852152120322,
0.0008154412498697639,
0.0011400869116187096
] |
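The new test in this record's `after_edit` expects `Rebase` to ignore a smaller base (`Rebase(1, 1, false)` leaves the base at 100) and calls a `Reset` method, while the `allocator.go` listing above stores `newBase` unconditionally and defines no `Reset`. As a hedged sketch only (not the actual diff from the commit), the change could look like this, keeping the same `sync/atomic` design:

```go
// Sketch under the assumptions above; methods on the allocator struct
// from util/kvencoder/allocator.go.

// Rebase moves the base forward to newBase and ignores smaller values,
// matching the TestAllocatorRebaseSmaller expectations.
func (alloc *allocator) Rebase(tableID, newBase int64, allocIDs bool) error {
	for {
		old := atomic.LoadInt64(&alloc.base)
		if newBase <= old {
			return nil // never move the base backwards
		}
		if atomic.CompareAndSwapInt64(&alloc.base, old, newBase) {
			return nil
		}
	}
}

// Reset unconditionally sets the base; the signature is inferred from the
// test's alloc.Reset(1) call and is an assumption, not the committed API.
func (alloc *allocator) Reset(newBase int64) {
	atomic.StoreInt64(&alloc.base, newBase)
}
```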
{
"id": 12,
"code_window": [
"\tsql = \"insert into t(id, a) values(2000, 'test')\"\n",
"\tencoder.Encode(sql, tableID)\n",
"\tc.Assert(alloc.Base(), Equals, int64(2000))\n",
"}\n",
"\n",
"func (s *testKvEncoderSuite) TestSimpleKeyEncode(c *C) {\n",
"\tencoder, err := New(\"test\", nil)\n",
"\tc.Assert(err, IsNil)\n",
"\tdefer encoder.Close()\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (s *testKvEncoderSuite) TestAllocatorRebaseSmaller(c *C) {\n",
"\talloc := NewAllocator()\n",
"\talloc.Rebase(1, 10, false)\n",
"\tc.Assert(alloc.Base(), Equals, int64(10))\n",
"\talloc.Rebase(1, 100, false)\n",
"\tc.Assert(alloc.Base(), Equals, int64(100))\n",
"\talloc.Rebase(1, 1, false)\n",
"\tc.Assert(alloc.Base(), Equals, int64(100))\n",
"\talloc.Reset(1)\n",
"\tc.Assert(alloc.Base(), Equals, int64(1))\n",
"}\n",
"\n"
],
"file_path": "util/kvencoder/kv_encoder_test.go",
"type": "add",
"edit_start_line_idx": 459
} | // Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"errors"
"io"
)
var (
// ErrCorrupt reports that the input is invalid.
ErrCorrupt = errors.New("snappy: corrupt input")
// ErrTooLarge reports that the uncompressed length is too large.
ErrTooLarge = errors.New("snappy: decoded block is too large")
// ErrUnsupported reports that the input isn't supported.
ErrUnsupported = errors.New("snappy: unsupported input")
)
// DecodedLen returns the length of the decoded block.
func DecodedLen(src []byte) (int, error) {
v, _, err := decodedLen(src)
return v, err
}
// decodedLen returns the length of the decoded block and the number of bytes
// that the length header occupied.
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
v, n := binary.Uvarint(src)
if n <= 0 || v > 0xffffffff {
return 0, 0, ErrCorrupt
}
const wordSize = 32 << (^uint(0) >> 32 & 1)
if wordSize == 32 && v > 0x7fffffff {
return 0, 0, ErrTooLarge
}
return int(v), n, nil
}
// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
func Decode(dst, src []byte) ([]byte, error) {
dLen, s, err := decodedLen(src)
if err != nil {
return nil, err
}
if len(dst) < dLen {
dst = make([]byte, dLen)
}
var d, offset, length int
for s < len(src) {
switch src[s] & 0x03 {
case tagLiteral:
x := uint(src[s] >> 2)
switch {
case x < 60:
s++
case x == 60:
s += 2
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-1])
case x == 61:
s += 3
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-2]) | uint(src[s-1])<<8
case x == 62:
s += 4
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
case x == 63:
s += 5
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
}
length = int(x + 1)
if length <= 0 {
return nil, errors.New("snappy: unsupported literal length")
}
if length > len(dst)-d || length > len(src)-s {
return nil, ErrCorrupt
}
copy(dst[d:], src[s:s+length])
d += length
s += length
continue
case tagCopy1:
s += 2
if s > len(src) {
return nil, ErrCorrupt
}
length = 4 + int(src[s-2])>>2&0x7
offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
case tagCopy2:
s += 3
if s > len(src) {
return nil, ErrCorrupt
}
length = 1 + int(src[s-3])>>2
offset = int(src[s-2]) | int(src[s-1])<<8
case tagCopy4:
return nil, errors.New("snappy: unsupported COPY_4 tag")
}
end := d + length
if offset > d || end > len(dst) {
return nil, ErrCorrupt
}
for ; d < end; d++ {
dst[d] = dst[d-offset]
}
}
if d != dLen {
return nil, ErrCorrupt
}
return dst[:d], nil
}
// NewReader returns a new Reader that decompresses from r, using the framing
// format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
func NewReader(r io.Reader) *Reader {
return &Reader{
r: r,
decoded: make([]byte, maxUncompressedChunkLen),
buf: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize),
}
}
// Reader is an io.Reader that can read Snappy-compressed bytes.
type Reader struct {
r io.Reader
err error
decoded []byte
buf []byte
// decoded[i:j] contains decoded bytes that have not yet been passed on.
i, j int
readHeader bool
}
// Reset discards any buffered data, resets all state, and switches the Snappy
// reader to read from r. This permits reusing a Reader rather than allocating
// a new one.
func (r *Reader) Reset(reader io.Reader) {
r.r = reader
r.err = nil
r.i = 0
r.j = 0
r.readHeader = false
}
func (r *Reader) readFull(p []byte) (ok bool) {
if _, r.err = io.ReadFull(r.r, p); r.err != nil {
if r.err == io.ErrUnexpectedEOF {
r.err = ErrCorrupt
}
return false
}
return true
}
// Read satisfies the io.Reader interface.
func (r *Reader) Read(p []byte) (int, error) {
if r.err != nil {
return 0, r.err
}
for {
if r.i < r.j {
n := copy(p, r.decoded[r.i:r.j])
r.i += n
return n, nil
}
if !r.readFull(r.buf[:4]) {
return 0, r.err
}
chunkType := r.buf[0]
if !r.readHeader {
if chunkType != chunkTypeStreamIdentifier {
r.err = ErrCorrupt
return 0, r.err
}
r.readHeader = true
}
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
if chunkLen > len(r.buf) {
r.err = ErrUnsupported
return 0, r.err
}
// The chunk types are specified at
// https://github.com/google/snappy/blob/master/framing_format.txt
switch chunkType {
case chunkTypeCompressedData:
// Section 4.2. Compressed data (chunk type 0x00).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
buf := r.buf[:chunkLen]
if !r.readFull(buf) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
buf = buf[checksumSize:]
n, err := DecodedLen(buf)
if err != nil {
r.err = err
return 0, r.err
}
if n > len(r.decoded) {
r.err = ErrCorrupt
return 0, r.err
}
if _, err := Decode(r.decoded, buf); err != nil {
r.err = err
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return 0, r.err
}
r.i, r.j = 0, n
continue
case chunkTypeUncompressedData:
// Section 4.3. Uncompressed data (chunk type 0x01).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
buf := r.buf[:checksumSize]
if !r.readFull(buf) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
// Read directly into r.decoded instead of via r.buf.
n := chunkLen - checksumSize
if !r.readFull(r.decoded[:n]) {
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return 0, r.err
}
r.i, r.j = 0, n
continue
case chunkTypeStreamIdentifier:
// Section 4.1. Stream identifier (chunk type 0xff).
if chunkLen != len(magicBody) {
r.err = ErrCorrupt
return 0, r.err
}
if !r.readFull(r.buf[:len(magicBody)]) {
return 0, r.err
}
for i := 0; i < len(magicBody); i++ {
if r.buf[i] != magicBody[i] {
r.err = ErrCorrupt
return 0, r.err
}
}
continue
}
if chunkType <= 0x7f {
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
r.err = ErrUnsupported
return 0, r.err
}
// Section 4.4 Padding (chunk type 0xfe).
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
if !r.readFull(r.buf[:chunkLen]) {
return 0, r.err
}
}
}
| vendor/github.com/golang/snappy/decode.go | 0 | https://github.com/pingcap/tidb/commit/c6258e3aeb79a06753e9d013b78a6f6e0b4df708 | [
0.3686995804309845,
0.017955878749489784,
0.0001644194999244064,
0.00017465901328250766,
0.07028884440660477
] |
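For orientation, a minimal usage sketch of the two block-format entry points in the decode.go listing above; `snappy.Encode` comes from the package's encode side, which is not shown in this record:

```go
package main

import (
	"fmt"

	"github.com/golang/snappy" // vendored copy shown above
)

func main() {
	// Block format: Encode produces a block whose uvarint length header
	// DecodedLen and Decode both parse.
	compressed := snappy.Encode(nil, []byte("hello, snappy"))

	n, err := snappy.DecodedLen(compressed)
	if err != nil {
		panic(err) // ErrCorrupt on a bad length header
	}

	// A dst of at least DecodedLen bytes is reused; otherwise Decode
	// allocates a new slice.
	decoded, err := snappy.Decode(make([]byte, n), compressed)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes: %s\n", n, decoded)
}
```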
{
"id": 12,
"code_window": [
"\tsql = \"insert into t(id, a) values(2000, 'test')\"\n",
"\tencoder.Encode(sql, tableID)\n",
"\tc.Assert(alloc.Base(), Equals, int64(2000))\n",
"}\n",
"\n",
"func (s *testKvEncoderSuite) TestSimpleKeyEncode(c *C) {\n",
"\tencoder, err := New(\"test\", nil)\n",
"\tc.Assert(err, IsNil)\n",
"\tdefer encoder.Close()\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (s *testKvEncoderSuite) TestAllocatorRebaseSmaller(c *C) {\n",
"\talloc := NewAllocator()\n",
"\talloc.Rebase(1, 10, false)\n",
"\tc.Assert(alloc.Base(), Equals, int64(10))\n",
"\talloc.Rebase(1, 100, false)\n",
"\tc.Assert(alloc.Base(), Equals, int64(100))\n",
"\talloc.Rebase(1, 1, false)\n",
"\tc.Assert(alloc.Base(), Equals, int64(100))\n",
"\talloc.Reset(1)\n",
"\tc.Assert(alloc.Base(), Equals, int64(1))\n",
"}\n",
"\n"
],
"file_path": "util/kvencoder/kv_encoder_test.go",
"type": "add",
"edit_start_line_idx": 459
} | // Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sync2
// What's in a name? Channels have all you need to emulate a counting
// semaphore with a boatload of extra functionality. However, in some
// cases, you just want a familiar API.
import (
"time"
)
// Semaphore is a counting semaphore with the option to
// specify a timeout.
type Semaphore struct {
slots chan struct{}
timeout time.Duration
}
// NewSemaphore creates a Semaphore. The count parameter must be a positive
// number. A timeout of zero means that there is no timeout.
func NewSemaphore(count int, timeout time.Duration) *Semaphore {
sem := &Semaphore{
slots: make(chan struct{}, count),
timeout: timeout,
}
for i := 0; i < count; i++ {
sem.slots <- struct{}{}
}
return sem
}
// Acquire returns true on successful acquisition, and
// false on a timeout.
func (sem *Semaphore) Acquire() bool {
if sem.timeout == 0 {
<-sem.slots
return true
}
select {
case <-sem.slots:
return true
case <-time.After(sem.timeout):
return false
}
}
// Release releases the acquired semaphore. You must
// not release more than the number of semaphores you've
// acquired.
func (sem *Semaphore) Release() {
sem.slots <- struct{}{}
}
| vendor/github.com/ngaut/sync2/semaphore.go | 0 | https://github.com/pingcap/tidb/commit/c6258e3aeb79a06753e9d013b78a6f6e0b4df708 | [
0.00017661390302237123,
0.00017331691924482584,
0.0001703819289105013,
0.00017272477271035314,
0.000002117275926138973
] |
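A short usage sketch for the Semaphore above (import path per this vendored copy): two slots bound concurrency, and a timed-out Acquire is treated as a skipped unit of work:

```go
package main

import (
	"fmt"
	"sync"
	"time"

	"github.com/ngaut/sync2" // vendored copy shown above
)

func main() {
	// Two slots; Acquire gives up after 50ms instead of blocking forever.
	sem := sync2.NewSemaphore(2, 50*time.Millisecond)

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			if !sem.Acquire() {
				fmt.Printf("worker %d: timed out\n", id)
				return
			}
			defer sem.Release() // release only what was acquired
			time.Sleep(10 * time.Millisecond)
			fmt.Printf("worker %d: done\n", id)
		}(i)
	}
	wg.Wait()
}
```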
{
"id": 12,
"code_window": [
"\tsql = \"insert into t(id, a) values(2000, 'test')\"\n",
"\tencoder.Encode(sql, tableID)\n",
"\tc.Assert(alloc.Base(), Equals, int64(2000))\n",
"}\n",
"\n",
"func (s *testKvEncoderSuite) TestSimpleKeyEncode(c *C) {\n",
"\tencoder, err := New(\"test\", nil)\n",
"\tc.Assert(err, IsNil)\n",
"\tdefer encoder.Close()\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (s *testKvEncoderSuite) TestAllocatorRebaseSmaller(c *C) {\n",
"\talloc := NewAllocator()\n",
"\talloc.Rebase(1, 10, false)\n",
"\tc.Assert(alloc.Base(), Equals, int64(10))\n",
"\talloc.Rebase(1, 100, false)\n",
"\tc.Assert(alloc.Base(), Equals, int64(100))\n",
"\talloc.Rebase(1, 1, false)\n",
"\tc.Assert(alloc.Base(), Equals, int64(100))\n",
"\talloc.Reset(1)\n",
"\tc.Assert(alloc.Base(), Equals, int64(1))\n",
"}\n",
"\n"
],
"file_path": "util/kvencoder/kv_encoder_test.go",
"type": "add",
"edit_start_line_idx": 459
} | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package util
// This is a copy of Go's std bytes.Buffer with some modifications
// and some features stripped.
import (
"bytes"
"io"
)
// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
// The zero value for Buffer is an empty buffer ready to use.
type Buffer struct {
buf []byte // contents are the bytes buf[off : len(buf)]
off int // read at &buf[off], write at &buf[len(buf)]
bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation.
}
// Bytes returns a slice of the contents of the unread portion of the buffer;
// len(b.Bytes()) == b.Len(). If the caller changes the contents of the
// returned slice, the contents of the buffer will change provided there
// are no intervening method calls on the Buffer.
func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
// String returns the contents of the unread portion of the buffer
// as a string. If the Buffer is a nil pointer, it returns "<nil>".
func (b *Buffer) String() string {
if b == nil {
// Special case, useful in debugging.
return "<nil>"
}
return string(b.buf[b.off:])
}
// Len returns the number of bytes of the unread portion of the buffer;
// b.Len() == len(b.Bytes()).
func (b *Buffer) Len() int { return len(b.buf) - b.off }
// Truncate discards all but the first n unread bytes from the buffer.
// It panics if n is negative or greater than the length of the buffer.
func (b *Buffer) Truncate(n int) {
switch {
case n < 0 || n > b.Len():
panic("leveldb/util.Buffer: truncation out of range")
case n == 0:
// Reuse buffer space.
b.off = 0
}
b.buf = b.buf[0 : b.off+n]
}
// Reset resets the buffer so it has no content.
// b.Reset() is the same as b.Truncate(0).
func (b *Buffer) Reset() { b.Truncate(0) }
// grow grows the buffer to guarantee space for n more bytes.
// It returns the index where bytes should be written.
// If the buffer can't grow it will panic with bytes.ErrTooLarge.
func (b *Buffer) grow(n int) int {
m := b.Len()
// If buffer is empty, reset to recover space.
if m == 0 && b.off != 0 {
b.Truncate(0)
}
if len(b.buf)+n > cap(b.buf) {
var buf []byte
if b.buf == nil && n <= len(b.bootstrap) {
buf = b.bootstrap[0:]
} else if m+n <= cap(b.buf)/2 {
// We can slide things down instead of allocating a new
// slice. We only need m+n <= cap(b.buf) to slide, but
// we instead let capacity get twice as large so we
// don't spend all our time copying.
copy(b.buf[:], b.buf[b.off:])
buf = b.buf[:m]
} else {
// not enough space anywhere
buf = makeSlice(2*cap(b.buf) + n)
copy(buf, b.buf[b.off:])
}
b.buf = buf
b.off = 0
}
b.buf = b.buf[0 : b.off+m+n]
return b.off + m
}
// Alloc allocs n bytes of slice from the buffer, growing the buffer as
// needed. If n is negative, Alloc will panic.
// If the buffer can't grow it will panic with bytes.ErrTooLarge.
func (b *Buffer) Alloc(n int) []byte {
if n < 0 {
panic("leveldb/util.Buffer.Alloc: negative count")
}
m := b.grow(n)
return b.buf[m:]
}
// Grow grows the buffer's capacity, if necessary, to guarantee space for
// another n bytes. After Grow(n), at least n bytes can be written to the
// buffer without another allocation.
// If n is negative, Grow will panic.
// If the buffer can't grow it will panic with bytes.ErrTooLarge.
func (b *Buffer) Grow(n int) {
if n < 0 {
panic("leveldb/util.Buffer.Grow: negative count")
}
m := b.grow(n)
b.buf = b.buf[0:m]
}
// Write appends the contents of p to the buffer, growing the buffer as
// needed. The return value n is the length of p; err is always nil. If the
// buffer becomes too large, Write will panic with bytes.ErrTooLarge.
func (b *Buffer) Write(p []byte) (n int, err error) {
m := b.grow(len(p))
return copy(b.buf[m:], p), nil
}
// MinRead is the minimum slice size passed to a Read call by
// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
// what is required to hold the contents of r, ReadFrom will not grow the
// underlying buffer.
const MinRead = 512
// ReadFrom reads data from r until EOF and appends it to the buffer, growing
// the buffer as needed. The return value n is the number of bytes read. Any
// error except io.EOF encountered during the read is also returned. If the
// buffer becomes too large, ReadFrom will panic with bytes.ErrTooLarge.
func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
// If buffer is empty, reset to recover space.
if b.off >= len(b.buf) {
b.Truncate(0)
}
for {
if free := cap(b.buf) - len(b.buf); free < MinRead {
// not enough space at end
newBuf := b.buf
if b.off+free < MinRead {
// not enough space using beginning of buffer;
// double buffer capacity
newBuf = makeSlice(2*cap(b.buf) + MinRead)
}
copy(newBuf, b.buf[b.off:])
b.buf = newBuf[:len(b.buf)-b.off]
b.off = 0
}
m, e := r.Read(b.buf[len(b.buf):cap(b.buf)])
b.buf = b.buf[0 : len(b.buf)+m]
n += int64(m)
if e == io.EOF {
break
}
if e != nil {
return n, e
}
}
return n, nil // err is EOF, so return nil explicitly
}
// makeSlice allocates a slice of size n. If the allocation fails, it panics
// with bytes.ErrTooLarge.
func makeSlice(n int) []byte {
// If the make fails, give a known error.
defer func() {
if recover() != nil {
panic(bytes.ErrTooLarge)
}
}()
return make([]byte, n)
}
// WriteTo writes data to w until the buffer is drained or an error occurs.
// The return value n is the number of bytes written; it always fits into an
// int, but it is int64 to match the io.WriterTo interface. Any error
// encountered during the write is also returned.
func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
if b.off < len(b.buf) {
nBytes := b.Len()
m, e := w.Write(b.buf[b.off:])
if m > nBytes {
panic("leveldb/util.Buffer.WriteTo: invalid Write count")
}
b.off += m
n = int64(m)
if e != nil {
return n, e
}
// all bytes should have been written, by definition of
// Write method in io.Writer
if m != nBytes {
return n, io.ErrShortWrite
}
}
// Buffer is now empty; reset.
b.Truncate(0)
return
}
// WriteByte appends the byte c to the buffer, growing the buffer as needed.
// The returned error is always nil, but is included to match bufio.Writer's
// WriteByte. If the buffer becomes too large, WriteByte will panic with
// bytes.ErrTooLarge.
func (b *Buffer) WriteByte(c byte) error {
m := b.grow(1)
b.buf[m] = c
return nil
}
// Read reads the next len(p) bytes from the buffer or until the buffer
// is drained. The return value n is the number of bytes read. If the
// buffer has no data to return, err is io.EOF (unless len(p) is zero);
// otherwise it is nil.
func (b *Buffer) Read(p []byte) (n int, err error) {
if b.off >= len(b.buf) {
// Buffer is empty, reset to recover space.
b.Truncate(0)
if len(p) == 0 {
return
}
return 0, io.EOF
}
n = copy(p, b.buf[b.off:])
b.off += n
return
}
// Next returns a slice containing the next n bytes from the buffer,
// advancing the buffer as if the bytes had been returned by Read.
// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
// The slice is only valid until the next call to a read or write method.
func (b *Buffer) Next(n int) []byte {
m := b.Len()
if n > m {
n = m
}
data := b.buf[b.off : b.off+n]
b.off += n
return data
}
// ReadByte reads and returns the next byte from the buffer.
// If no byte is available, it returns error io.EOF.
func (b *Buffer) ReadByte() (c byte, err error) {
if b.off >= len(b.buf) {
// Buffer is empty, reset to recover space.
b.Truncate(0)
return 0, io.EOF
}
c = b.buf[b.off]
b.off++
return c, nil
}
// ReadBytes reads until the first occurrence of delim in the input,
// returning a slice containing the data up to and including the delimiter.
// If ReadBytes encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often io.EOF).
// ReadBytes returns err != nil if and only if the returned data does not end in
// delim.
func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
slice, err := b.readSlice(delim)
// return a copy of slice. The buffer's backing array may
// be overwritten by later calls.
line = append(line, slice...)
return
}
// readSlice is like ReadBytes but returns a reference to internal buffer data.
func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
i := bytes.IndexByte(b.buf[b.off:], delim)
end := b.off + i + 1
if i < 0 {
end = len(b.buf)
err = io.EOF
}
line = b.buf[b.off:end]
b.off = end
return line, err
}
// NewBuffer creates and initializes a new Buffer using buf as its initial
// contents. It is intended to prepare a Buffer to read existing data. It
// can also be used to size the internal buffer for writing. To do that,
// buf should have the desired capacity but a length of zero.
//
// In most cases, new(Buffer) (or just declaring a Buffer variable) is
// sufficient to initialize a Buffer.
func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
| vendor/github.com/pingcap/goleveldb/leveldb/util/buffer.go | 0 | https://github.com/pingcap/tidb/commit/c6258e3aeb79a06753e9d013b78a6f6e0b4df708 | [
0.0007760896114632487,
0.0002071602939395234,
0.0001604139106348157,
0.00017291473341174424,
0.0001155105564976111
] |
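And a small sketch of the Buffer's write/read cycle, using only methods defined in the listing above:

```go
package main

import (
	"fmt"

	"github.com/pingcap/goleveldb/leveldb/util" // vendored copy shown above
)

func main() {
	var b util.Buffer // the zero value is an empty buffer ready to use

	b.Write([]byte("key=value\n"))
	b.WriteByte('!')

	// ReadBytes copies out everything up to and including the delimiter.
	line, err := b.ReadBytes('\n')
	fmt.Printf("line=%q err=%v\n", line, err) // line="key=value\n" err=<nil>

	// Alloc returns a writable slice appended to the buffer.
	copy(b.Alloc(3), "abc")
	fmt.Printf("rest=%q\n", b.String()) // rest="!abc"
}
```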
{
"id": 0,
"code_window": [
"\t\tCommit: false,\n",
"\t\t// Resolved intents should maintain an abort span entry to prevent\n",
"\t\t// concurrent requests from failing to notice the transaction was aborted.\n",
"\t\tPoison: true,\n",
"\t})\n",
"\n",
"\tconst taskName = \"txnHeartbeater: aborting txn\"\n",
"\tlog.VEventf(ctx, 2, \"async abort for txn: %s\", txn)\n",
"\tif err := h.stopper.RunAsyncTask(h.AnnotateCtx(context.Background()), taskName,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// NB: Setting `Source: kvpb.AdmissionHeader_OTHER` means this request will\n",
"\t// bypass AC.\n",
"\tba.AdmissionHeader = kvpb.AdmissionHeader{\n",
"\t\tPriority: txn.AdmissionPriority,\n",
"\t\tCreateTime: timeutil.Now().UnixNano(),\n",
"\t\tSource: kvpb.AdmissionHeader_OTHER,\n",
"\t}\n"
],
"file_path": "pkg/kv/kvclient/kvcoord/txn_interceptor_heartbeater.go",
"type": "add",
"edit_start_line_idx": 527
} | // Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// This file contains replica methods related to range leases.
//
// Here be dragons: The lease system (especially for epoch-based
// leases) relies on multiple interlocking conditional puts (here and
// in NodeLiveness). Reads (to get expected values) and conditional
// puts have to happen in a certain order, leading to surprising
// dependencies at a distance (for example, there's a LeaseStatus
// object that gets plumbed most of the way through this file.
// LeaseStatus bundles the results of multiple checks with the time at
// which they were performed, so that timestamp must be used for later
// operations). The current arrangement is not perfect, and some
// opportunities for improvement appear, but any changes must be made
// very carefully.
//
// NOTE(bdarnell): The biggest problem with the current code is that
// with epoch-based leases, we may do two separate slow operations
// (IncrementEpoch/Heartbeat and RequestLease/AdminTransferLease). In
// the organization that was inherited from expiration-based leases,
// we prepare the arguments we're going to use for the lease
// operations before performing the liveness operations, and by the
// time the liveness operations complete those may be stale.
//
// Therefore, my suggested refactoring would be to move the liveness
// operations earlier in the process, soon after the initial
// leaseStatus call. If a liveness operation is required, do it and
// start over, with a fresh leaseStatus.
//
// This could also allow the liveness operations to be coalesced per
// node instead of having each range separately queue up redundant
// liveness operations. (The InitOrJoin model predates the
// singleflight package; could we simplify things by using it?)
package kvserver
import (
"context"
"fmt"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/constraint"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/raftutil"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/growstack"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/quotapool"
"github.com/cockroachdb/cockroach/pkg/util/retry"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/logtags"
"github.com/cockroachdb/redact"
)
var TransferExpirationLeasesFirstEnabled = settings.RegisterBoolSetting(
settings.SystemOnly,
"kv.transfer_expiration_leases_first.enabled",
"controls whether we transfer expiration-based leases that are later upgraded to epoch-based ones",
true,
)
var ExpirationLeasesOnly = settings.RegisterBoolSetting(
settings.SystemOnly,
"kv.expiration_leases_only.enabled",
"only use expiration-based leases, never epoch-based ones (experimental, affects performance)",
// false by default. Metamorphically enabled in tests, but not in deadlock
// builds because TestClusters are usually so slow that they're unable
// to maintain leases/leadership/liveness.
!syncutil.DeadlockEnabled &&
util.ConstantWithMetamorphicTestBool("kv.expiration_leases_only.enabled", false),
)
// DisableExpirationLeasesOnly is an escape hatch for ExpirationLeasesOnly,
// which can be used to hard-disable expiration-based leases e.g. if clusters
// are unable to start back up due to the lease extension load.
var DisableExpirationLeasesOnly = envutil.EnvOrDefaultBool(
"COCKROACH_DISABLE_EXPIRATION_LEASES_ONLY", false)
// EagerLeaseAcquisitionConcurrency is the number of concurrent, eager lease
// acquisitions made during Raft ticks, across all stores. Note that this does
// not include expiration lease extensions, which are unbounded.
var EagerLeaseAcquisitionConcurrency = settings.RegisterIntSetting(
settings.SystemOnly,
"kv.lease.eager_acquisition_concurrency",
"the maximum number of concurrent eager lease acquisitions (0 disables eager acquisition)",
256,
settings.NonNegativeInt,
)
// LeaseCheckPreferencesOnAcquisitionEnabled controls whether lease preferences
// are checked upon acquiring a new lease. If the new lease violates the
// configured preferences, it is enqueued in the replicate queue for
// processing.
//
// TODO(kvoli): Remove this cluster setting in 24.1, once we wish to enable
// this by default or is subsumed by another mechanism.
var LeaseCheckPreferencesOnAcquisitionEnabled = settings.RegisterBoolSetting(
settings.SystemOnly,
"kv.lease.check_preferences_on_acquisition.enabled",
"controls whether lease preferences are checked on lease acquisition, "+
"if the new lease violates preferences, it is queued for processing",
true,
)
var leaseStatusLogLimiter = func() *log.EveryN {
e := log.Every(15 * time.Second)
e.ShouldLog() // waste the first shot
return &e
}()
// leaseRequestHandle is a handle to an asynchronous lease request.
type leaseRequestHandle struct {
p *pendingLeaseRequest
c chan *kvpb.Error
}
// C returns the channel where the lease request's result will be sent on.
func (h *leaseRequestHandle) C() <-chan *kvpb.Error {
if h.c == nil {
panic("handle already canceled")
}
return h.c
}
// Cancel cancels the request handle. The asynchronous lease request will
// continue until it completes, to ensure leases can be acquired even if the
// client goes away (in particular in the face of IO delays which may trigger
// client timeouts).
func (h *leaseRequestHandle) Cancel() {
h.p.repl.mu.Lock()
defer h.p.repl.mu.Unlock()
if len(h.c) == 0 {
// Our lease request is ongoing...
// Unregister handle.
delete(h.p.llHandles, h)
}
// Mark handle as canceled.
h.c = nil
}
// resolve notifies the handle of the request's result.
//
// Requires repl.mu is exclusively locked.
func (h *leaseRequestHandle) resolve(pErr *kvpb.Error) { h.c <- pErr }
// pendingLeaseRequest coalesces RequestLease requests and lets
// callers join an in-progress lease request and wait for the result.
// The actual execution of the RequestLease Raft request is delegated
// to a replica.
//
// There are two types of leases: expiration-based and epoch-based.
// Expiration-based leases are considered valid as long as the wall
// time is less than the lease expiration timestamp minus the maximum
// clock offset. Epoch-based leases do not expire, but rely on the
// leaseholder maintaining its node liveness record (also a lease, but
// at the node level). All ranges up to and including the node
// liveness table must use expiration-based leases to avoid any
// circular dependencies.
//
// Methods are not thread-safe; a pendingLeaseRequest is logically part
// of the replica it references, so replica.mu should be used to
// synchronize all calls.
type pendingLeaseRequest struct {
// The replica that the pendingLeaseRequest is a part of.
repl *Replica
// Set of request handles attached to the lease acquisition.
// All accesses require repl.mu to be exclusively locked.
llHandles map[*leaseRequestHandle]struct{}
// nextLease is the pending RequestLease request, if any. It can be used to
// figure out if we're in the process of extending our own lease, or
// transferring it to another replica.
nextLease roachpb.Lease
}
func makePendingLeaseRequest(repl *Replica) pendingLeaseRequest {
return pendingLeaseRequest{
repl: repl,
llHandles: make(map[*leaseRequestHandle]struct{}),
}
}
// RequestPending returns the pending Lease, if one is in progress.
// The second return val is true if a lease request is pending.
//
// Requires repl.mu is read locked.
func (p *pendingLeaseRequest) RequestPending() (roachpb.Lease, bool) {
return p.nextLease, p.nextLease != roachpb.Lease{}
}
// InitOrJoinRequest executes a RequestLease command asynchronously and returns a
// handle on which the result will be posted. If there's already a request in
// progress, we join in waiting for the results of that request.
// It is an error to call InitOrJoinRequest() while a request is in progress
// naming another replica as lease holder.
//
// replica is used to schedule and execute async work (proposing a RequestLease
// command). replica.mu is locked when delivering results, so calls from the
// replica happen either before or after a result for a pending request has
// happened.
//
// The new lease will be a successor to the one in the status
// argument, and its fields will be used to fill in the expected
// values for liveness and lease operations.
//
// transfer needs to be set if the request represents a lease transfer (as
// opposed to an extension, or acquiring the lease when none is held).
//
// Requires repl.mu is exclusively locked.
func (p *pendingLeaseRequest) InitOrJoinRequest(
ctx context.Context,
nextLeaseHolder roachpb.ReplicaDescriptor,
status kvserverpb.LeaseStatus,
startKey roachpb.Key,
transfer bool,
bypassSafetyChecks bool,
limiter *quotapool.IntPool,
) *leaseRequestHandle {
if nextLease, ok := p.RequestPending(); ok {
if nextLease.Replica.ReplicaID == nextLeaseHolder.ReplicaID {
// Join a pending request asking for the same replica to become lease
// holder.
return p.JoinRequest()
}
// We can't join the request in progress.
// TODO(nvanbenschoten): should this return a LeaseRejectedError? Should
// it cancel and replace the request in progress? Reconsider.
return p.newResolvedHandle(kvpb.NewErrorf(
"request for different replica in progress (requesting: %+v, in progress: %+v)",
nextLeaseHolder.ReplicaID, nextLease.Replica.ReplicaID))
}
acquisition := !status.Lease.OwnedBy(p.repl.store.StoreID())
extension := !transfer && !acquisition
_ = extension // not used, just documentation
if acquisition {
// If this is a non-cooperative lease change (i.e. an acquisition), it
// is up to us to ensure that Lease.Start is greater than the end time
// of the previous lease. This means that if status refers to an expired
// epoch lease, we must increment the liveness epoch of the previous
// leaseholder *using status.Liveness*, which we know to be expired *at
// status.Timestamp*, before we can propose this lease. If this
// increment fails, we cannot propose this new lease (see handling of
// ErrEpochAlreadyIncremented in requestLeaseAsync).
//
// Note that the request evaluation may decrease our proposed start time
// if it decides that it is safe to do so (for example, this happens
// when renewing an expiration-based lease), but it will never increase
// it (and a start timestamp that is too low is unsafe because it
// results in incorrect initialization of the timestamp cache on the new
// leaseholder). For expiration-based leases, we have a safeguard during
// evaluation - we simply check that the new lease starts after the old
// lease ends and throw an error if not. But for epoch-based leases, we
// don't have the benefit of such a safeguard during evaluation because
// the expiration is indirectly stored in the referenced liveness record
// and not in the lease itself. So for epoch-based leases, enforcing
// this safety condition is truly up to us.
if status.State != kvserverpb.LeaseState_EXPIRED {
log.Fatalf(ctx, "cannot acquire lease from another node before it has expired: %v", status)
}
}
// No request in progress. Let's propose a Lease command asynchronously.
llHandle := p.newHandle()
reqHeader := kvpb.RequestHeader{
Key: startKey,
}
reqLease := roachpb.Lease{
Start: status.Now,
Replica: nextLeaseHolder,
ProposedTS: &status.Now,
}
if p.repl.shouldUseExpirationLeaseRLocked() ||
(transfer &&
TransferExpirationLeasesFirstEnabled.Get(&p.repl.store.ClusterSettings().SV)) {
// In addition to ranges that should be using expiration-based leases
// (typically the meta and liveness ranges), we also use them during lease
// transfers for all other ranges. After acquiring these expiration based
// leases, the leaseholders are expected to upgrade them to the more
// efficient epoch-based ones. But by transferring an expiration-based
// lease, we can limit the effect of an ill-advised lease transfer since the
// incoming leaseholder needs to recognize itself as such within a few
// seconds; if it doesn't (we accidentally sent the lease to a replica in
// need of a snapshot or far behind on its log), the lease is up for grabs.
// If we simply transferred epoch based leases, it's possible for the new
// leaseholder that's delayed in applying the lease transfer to maintain its
// lease (assuming the node it's on is able to heartbeat its liveness
// record).
reqLease.Expiration = &hlc.Timestamp{}
*reqLease.Expiration = status.Now.ToTimestamp().Add(int64(p.repl.store.cfg.RangeLeaseDuration), 0)
} else {
// Get the liveness for the next lease holder and set the epoch in the lease request.
l, ok := p.repl.store.cfg.NodeLiveness.GetLiveness(nextLeaseHolder.NodeID)
if !ok || l.Epoch == 0 {
llHandle.resolve(kvpb.NewError(&kvpb.LeaseRejectedError{
Existing: status.Lease,
Requested: reqLease,
Message: fmt.Sprintf("couldn't request lease for %+v: %v", nextLeaseHolder, liveness.ErrRecordCacheMiss),
}))
return llHandle
}
reqLease.Epoch = l.Epoch
}
var leaseReq kvpb.Request
if transfer {
leaseReq = &kvpb.TransferLeaseRequest{
RequestHeader: reqHeader,
Lease: reqLease,
PrevLease: status.Lease,
BypassSafetyChecks: bypassSafetyChecks,
}
} else {
if bypassSafetyChecks {
// TODO(nvanbenschoten): we could support a similar bypassSafetyChecks
// flag for RequestLeaseRequest, which would disable the protection in
// propBuf.maybeRejectUnsafeProposalLocked. For now, we use a testing
// knob.
log.Fatal(ctx, "bypassSafetyChecks not supported for RequestLeaseRequest")
}
minProposedTS := p.repl.mu.minLeaseProposedTS
leaseReq = &kvpb.RequestLeaseRequest{
RequestHeader: reqHeader,
Lease: reqLease,
// PrevLease must match for our lease to be accepted. If another
// lease is applied between our previous call to leaseStatus and
// our lease request applying, it will be rejected.
PrevLease: status.Lease,
MinProposedTS: &minProposedTS,
}
}
err := p.requestLeaseAsync(ctx, nextLeaseHolder, status, leaseReq, limiter)
if err != nil {
if errors.Is(err, stop.ErrThrottled) {
llHandle.resolve(kvpb.NewError(err))
} else {
// We failed to start the asynchronous task. Send a blank NotLeaseHolderError
// back to indicate that we have no idea who the range lease holder might
// be; we've withdrawn from active duty.
llHandle.resolve(kvpb.NewError(
kvpb.NewNotLeaseHolderError(roachpb.Lease{}, p.repl.store.StoreID(), p.repl.mu.state.Desc,
"lease acquisition task couldn't be started; node is shutting down")))
}
return llHandle
}
// InitOrJoinRequest requires that repl.mu is exclusively locked. requestLeaseAsync
// also requires this lock to send results on all waiter channels. This means that
// no results will be sent until we've released the lock, so there's no race between
// adding our new channel to p.llHandles below and requestLeaseAsync sending results
// on all channels in p.llHandles. The same logic applies to p.nextLease.
p.llHandles[llHandle] = struct{}{}
p.nextLease = reqLease
return llHandle
}
// requestLeaseAsync sends a transfer lease or lease request to the specified
// replica. The request is sent in an async task. If limiter is non-nil, it is
// used to bound the number of goroutines spawned, returning ErrThrottled when
// exceeded.
//
// The status argument is used as the expected value for liveness operations.
// leaseReq must be consistent with the LeaseStatus.
func (p *pendingLeaseRequest) requestLeaseAsync(
parentCtx context.Context,
nextLeaseHolder roachpb.ReplicaDescriptor,
status kvserverpb.LeaseStatus,
leaseReq kvpb.Request,
limiter *quotapool.IntPool,
) error {
// Create a new context. We run the request to completion even if all callers
// go away, to ensure leases can be acquired e.g. in the face of IO delays
// which may trigger client timeouts.
ctx := p.repl.AnnotateCtx(context.Background())
// Attach the parent's tracing span to the lease request, if any. It might
// outlive the parent in case the parent's ctx is canceled, so we use
// FollowsFrom. We can't include the trace for any other requests that join
// this one, but let's try to include it where we can.
var sp *tracing.Span
if parentSp := tracing.SpanFromContext(parentCtx); parentSp != nil {
ctx, sp = p.repl.AmbientContext.Tracer.StartSpanCtx(ctx, "request range lease",
tracing.WithParent(parentSp), tracing.WithFollowsFrom())
}
err := p.repl.store.Stopper().RunAsyncTaskEx(
ctx,
stop.TaskOpts{
TaskName: "pendingLeaseRequest: requesting lease",
SpanOpt: stop.ChildSpan,
// If a limiter is passed, use it to bound the number of spawned
// goroutines. When exceeded, return an error.
Sem: limiter,
},
func(ctx context.Context) {
defer sp.Finish()
// Grow the goroutine stack, to avoid having to re-grow it during request
// processing. This is normally done when processing batch requests via
// RPC, but here we submit the request directly to the local replica.
growstack.Grow()
err := p.requestLease(ctx, nextLeaseHolder, status, leaseReq)
// Error will be handled below.
// We reset our state below regardless of whether we've gotten an error or
// not, but note that an error is ambiguous - there's no guarantee that the
// transfer will not still apply. That's OK, however, as the "in transfer"
// state maintained by the pendingLeaseRequest is not relied on for
// correctness (see repl.mu.minLeaseProposedTS), and resetting the state
// is beneficial as it'll allow the replica to attempt to transfer again or
// extend the existing lease in the future.
p.repl.mu.Lock()
defer p.repl.mu.Unlock()
// Send result of lease to all waiter channels and cleanup request.
for llHandle := range p.llHandles {
// Don't send the same transaction object twice; this can lead to races.
if err != nil {
pErr := kvpb.NewError(err)
// TODO(tbg): why?
pErr.SetTxn(pErr.GetTxn())
llHandle.resolve(pErr)
} else {
llHandle.resolve(nil)
}
delete(p.llHandles, llHandle)
}
p.nextLease = roachpb.Lease{}
})
if err != nil {
p.nextLease = roachpb.Lease{}
sp.Finish()
return err
}
return nil
}
var logFailedHeartbeatOwnLiveness = log.Every(10 * time.Second)
// requestLease sends a synchronous transfer lease or lease request to the
// specified replica. It is only meant to be called from requestLeaseAsync,
// since it does not coordinate with other in-flight lease requests.
func (p *pendingLeaseRequest) requestLease(
ctx context.Context,
nextLeaseHolder roachpb.ReplicaDescriptor,
status kvserverpb.LeaseStatus,
leaseReq kvpb.Request,
) error {
started := timeutil.Now()
defer func() {
p.repl.store.metrics.LeaseRequestLatency.RecordValue(timeutil.Since(started).Nanoseconds())
}()
// If we're replacing an expired epoch-based lease, we must increment the
// epoch of the prior owner to invalidate its leases. If we were the owner,
// then we instead heartbeat to become live.
if status.Lease.Type() == roachpb.LeaseEpoch && status.State == kvserverpb.LeaseState_EXPIRED {
var err error
// If this replica is previous & next lease holder, manually heartbeat to become live.
if status.OwnedBy(nextLeaseHolder.StoreID) && p.repl.store.StoreID() == nextLeaseHolder.StoreID {
if err = p.repl.store.cfg.NodeLiveness.Heartbeat(ctx, status.Liveness); err != nil && logFailedHeartbeatOwnLiveness.ShouldLog() {
log.Errorf(ctx, "failed to heartbeat own liveness record: %s", err)
}
} else if status.Liveness.Epoch == status.Lease.Epoch {
// If not owner, increment epoch if necessary to invalidate lease.
// However, we only do so in the event that the next leaseholder is
// considered live at this time. If not, there's no sense in
// incrementing the expired leaseholder's epoch.
if !p.repl.store.cfg.NodeLiveness.GetNodeVitalityFromCache(nextLeaseHolder.NodeID).IsLive(livenesspb.EpochLease) {
err = errors.Errorf("not incrementing epoch on n%d because next leaseholder (n%d) not live",
status.Liveness.NodeID, nextLeaseHolder.NodeID)
log.VEventf(ctx, 1, "%v", err)
} else if err = p.repl.store.cfg.NodeLiveness.IncrementEpoch(ctx, status.Liveness); err != nil {
// If we get ErrEpochAlreadyIncremented, someone else beat
// us to it. This proves that the target node is truly
// dead *now*, but it doesn't prove that it was dead at
// status.Timestamp (which we've encoded into our lease
// request). It's possible that the node was temporarily
// considered dead but revived without having its epoch
// incremented, i.e. that it was in fact live at
// status.Timestamp.
//
// It would be incorrect to simply proceed to sending our
// lease request since our lease.Start may precede the
// effective end timestamp of the predecessor lease (the
// expiration of the last successful heartbeat before the
// epoch increment), and so under this lease this node's
// timestamp cache would not necessarily reflect all reads
// served by the prior leaseholder.
//
// It would be correct to bump the timestamp in the lease
// request and proceed, but that just sets up another race
// between this node and the one that already incremented
// the epoch. They're probably going to beat us this time
// too, so just return the NotLeaseHolderError here
// instead of trying to fix up the timestamps and submit
// the lease request.
//
// ErrEpochAlreadyIncremented is not an unusual situation,
// so we don't log it as an error.
//
// https://github.com/cockroachdb/cockroach/issues/35986
if errors.Is(err, liveness.ErrEpochAlreadyIncremented) {
// ignore
} else if errors.HasType(err, &liveness.ErrEpochCondFailed{}) {
// ErrEpochCondFailed indicates that someone else changed the liveness
// record while we were incrementing it. The node could still be
// alive, or someone else updated it. Don't log this as an error.
log.Infof(ctx, "failed to increment leaseholder's epoch: %s", err)
} else {
log.Errorf(ctx, "failed to increment leaseholder's epoch: %s", err)
}
}
}
if err != nil {
// Return an NLHE with an empty lease, since we know the previous lease
// isn't valid. In particular, if it was ours but we failed to reacquire
// it (e.g. because our heartbeat failed due to a stalled disk) then we
// don't want DistSender to retry us.
return kvpb.NewNotLeaseHolderError(roachpb.Lease{}, p.repl.store.StoreID(), p.repl.Desc(),
fmt.Sprintf("failed to manipulate liveness record: %s", err))
}
}
// Send the RequestLeaseRequest or TransferLeaseRequest and wait for the new
// lease to be applied.
//
// The Replica circuit breakers together with round-tripping a ProbeRequest
// here before asking for the lease could provide an alternative, simpler
// solution to the below issue:
//
// https://github.com/cockroachdb/cockroach/issues/37906
ba := &kvpb.BatchRequest{}
ba.Timestamp = p.repl.store.Clock().Now()
ba.RangeID = p.repl.RangeID
// NB:
// RequestLease always bypasses the circuit breaker (i.e. will prefer to
// get stuck on an unavailable range rather than failing fast; see
// `(*RequestLeaseRequest).flags()`). This enables the caller to choose
// between either behavior for themselves: if they too want to bypass
// the circuit breaker, they simply don't check for the circuit breaker
// while waiting for their lease handle. If they want to fail-fast, they
// do. If the lease instead adopted the caller's preference, we'd have
// to handle the case of multiple preferences joining onto one lease
// request, which is more difficult.
//
// TransferLease will observe the circuit breaker, as transferring a
// lease when the range is unavailable results in, essentially, giving
// up on the lease and thus worsening the situation.
ba.Add(leaseReq)
_, pErr := p.repl.Send(ctx, ba)
return pErr.GoError()
}
// JoinRequest adds one more waiter to the currently pending request.
// It is the caller's responsibility to ensure that there is a pending request,
// and that the request is compatible with whatever the caller is currently
// wanting to do (i.e. the request is naming the intended node as the next
// lease holder).
//
// Requires repl.mu is exclusively locked.
func (p *pendingLeaseRequest) JoinRequest() *leaseRequestHandle {
llHandle := p.newHandle()
if _, ok := p.RequestPending(); !ok {
llHandle.resolve(kvpb.NewErrorf("no request in progress"))
return llHandle
}
p.llHandles[llHandle] = struct{}{}
return llHandle
}
// TransferInProgress returns whether the replica is in the process of
// transferring away its range lease. Note that the return values are
// best-effort and shouldn't be relied upon for correctness: if a previous
// transfer has returned an error, TransferInProgress will return `false`, but
// that doesn't necessarily mean that the transfer cannot still apply (see
// replica.mu.minLeaseProposedTS).
//
// It is assumed that the replica owning this pendingLeaseRequest owns the
// LeaderLease.
//
// replicaID is the ID of the parent replica.
//
// Requires repl.mu is read locked.
func (p *pendingLeaseRequest) TransferInProgress(replicaID roachpb.ReplicaID) bool {
if nextLease, ok := p.RequestPending(); ok {
// Is the lease being transferred? (as opposed to just extended)
return replicaID != nextLease.Replica.ReplicaID
}
return false
}
// newHandle creates a new leaseRequestHandle referencing the pending lease
// request.
func (p *pendingLeaseRequest) newHandle() *leaseRequestHandle {
return &leaseRequestHandle{
p: p,
c: make(chan *kvpb.Error, 1),
}
}
// newResolvedHandle creates a new leaseRequestHandle referencing the pending
// lease request. It then resolves the handle with the provided error.
func (p *pendingLeaseRequest) newResolvedHandle(pErr *kvpb.Error) *leaseRequestHandle {
h := p.newHandle()
h.resolve(pErr)
return h
}
// leaseStatus returns a lease status. The lease status is linked to the desire
// to serve a request at a specific timestamp (which may be a future timestamp)
// under the lease, as well as a notion of the current hlc time (now).
//
// # Explanation
//
// A status of ERROR indicates a failure to determine the correct lease status,
// and should not occur under normal operations. The caller's only recourse is
// to give up or to retry.
//
// If the lease is expired according to the now timestamp (and, in the case of
// epoch-based leases, the liveness epoch), a status of EXPIRED is returned.
// Note that this ignores the timestamp of the request, which may well
// technically be eligible to be served under the lease. The key feature of an
// EXPIRED status is that it reflects that a new lease with a start timestamp
// greater than or equal to now can be acquired non-cooperatively.
//
// If the lease is not EXPIRED, the lease's start timestamp is checked against
// the minProposedTimestamp. This timestamp indicates the oldest timestamp that
// a lease can have as its start time and still be used by the node. It is set
// both in cooperative lease transfers and to prevent reuse of leases across
// node restarts (which would result in latching violations). Leases with start
// times preceding this timestamp are assigned a status of PROSCRIBED and can
// not be used. Instead, a new lease should be acquired by callers.
//
// If the lease is not EXPIRED or PROSCRIBED, the request timestamp is taken
// into account. The expiration timestamp is adjusted for clock offset; if the
// request timestamp falls into the so-called "stasis period" at the end of the
// lifetime of the lease, or if the request timestamp is beyond the end of the
// lifetime of the lease, the status is UNUSABLE. Callers typically want to
// react to an UNUSABLE lease status by extending the lease, if they are in a
// position to do so.
//
// Finally, for requests timestamps falling before the stasis period of a lease
// that is not EXPIRED and also not PROSCRIBED, the status is VALID.
//
// # Implementation Note
//
// On the surface, it might seem like we could easily abandon the lease stasis
// concept in favor of consulting a request's uncertainty interval. We would
// then define a request's timestamp as the maximum of its read_timestamp and
// its global_uncertainty_limit, and simply check whether this timestamp falls
// below a lease's expiration. This could allow certain transactional requests
// to operate more closely to a lease's expiration. But not all requests that
// expect linearizability use an uncertainty interval (e.g. non-transactional
// requests), and so the lease stasis period serves as a kind of catch-all
// uncertainty interval for non-transactional and admin requests.
//
// Without that stasis period, the following linearizability violation could
// occur for two non-transactional requests operating on a single register
// during a lease change:
//
// - a range lease gets committed on the new lease holder (but not the old).
// - client proposes and commits a write on new lease holder (with a timestamp
// just greater than the expiration of the old lease).
// - client tries to read what it wrote, but hits a slow coordinator (which
// assigns a timestamp covered by the old lease).
// - the read is served by the old lease holder (which has not processed the
// change in lease holdership).
// - the client fails to read their own write.
func (r *Replica) leaseStatus(
ctx context.Context,
lease roachpb.Lease,
now hlc.ClockTimestamp,
minProposedTS hlc.ClockTimestamp,
minValidObservedTS hlc.ClockTimestamp,
reqTS hlc.Timestamp,
) kvserverpb.LeaseStatus {
status := kvserverpb.LeaseStatus{
Lease: lease,
// NOTE: it would not be correct to accept either only the request time
// or only the current time in this method, we need both. We need the
// request time to determine whether the current lease can serve a given
// request, even if that request has a timestamp in the future of
// present time. We need the current time to distinguish between an
// EXPIRED lease and an UNUSABLE lease. Only an EXPIRED lease can change
// hands through a lease acquisition.
Now: now,
RequestTime: reqTS,
MinValidObservedTimestamp: minValidObservedTS,
}
var expiration hlc.Timestamp
if lease.Type() == roachpb.LeaseExpiration {
expiration = lease.GetExpiration()
} else {
l, ok := r.store.cfg.NodeLiveness.GetLiveness(lease.Replica.NodeID)
status.Liveness = l.Liveness
if !ok || status.Liveness.Epoch < lease.Epoch {
// If lease validity can't be determined (e.g. gossip is down
// and liveness info isn't available for owner), we can neither
// use the lease nor do we want to attempt to acquire it.
var msg redact.StringBuilder
if !ok {
msg.Printf("can't determine lease status of %s due to node liveness error: %v",
lease.Replica, liveness.ErrRecordCacheMiss)
} else {
msg.Printf("can't determine lease status of %s because node liveness info for n%d is stale. lease: %s, liveness: %s",
lease.Replica, lease.Replica.NodeID, lease, l.Liveness)
}
if leaseStatusLogLimiter.ShouldLog() {
log.Infof(ctx, "%s", msg)
}
status.State = kvserverpb.LeaseState_ERROR
status.ErrInfo = msg.String()
return status
}
if status.Liveness.Epoch > lease.Epoch {
status.State = kvserverpb.LeaseState_EXPIRED
return status
}
expiration = status.Liveness.Expiration.ToTimestamp()
}
maxOffset := r.store.Clock().MaxOffset()
stasis := expiration.Add(-int64(maxOffset), 0)
ownedLocally := lease.OwnedBy(r.store.StoreID())
// NB: the order of these checks is important, and goes from stronger to
// weaker reasons why the lease may be considered invalid. For example,
// EXPIRED or PROSCRIBED must take precedence over UNUSABLE, because some
// callers consider UNUSABLE as valid. For an example issue that this ordering
// may cause, see https://github.com/cockroachdb/cockroach/issues/100101.
if expiration.LessEq(now.ToTimestamp()) {
status.State = kvserverpb.LeaseState_EXPIRED
} else if ownedLocally && lease.ProposedTS != nil && lease.ProposedTS.Less(minProposedTS) {
// If the replica owns the lease, additionally verify that the lease's
// proposed timestamp is not earlier than the min proposed timestamp.
status.State = kvserverpb.LeaseState_PROSCRIBED
} else if stasis.LessEq(reqTS) {
status.State = kvserverpb.LeaseState_UNUSABLE
} else {
status.State = kvserverpb.LeaseState_VALID
}
return status
}
// CurrentLeaseStatus returns the status of the current lease for the
// current time.
//
// Common operations to perform on the resulting status are to check if
// it is valid using the IsValid method and to check whether the lease
// is held locally using the OwnedBy method.
//
// Note that this method does not check to see if a transfer is pending,
// but returns the status of the current lease and ownership at the
// specified point in time.
func (r *Replica) CurrentLeaseStatus(ctx context.Context) kvserverpb.LeaseStatus {
return r.LeaseStatusAt(ctx, r.Clock().NowAsClockTimestamp())
}
// LeaseStatusAt is like CurrentLeaseStatus, but accepts a now timestamp.
func (r *Replica) LeaseStatusAt(
ctx context.Context, now hlc.ClockTimestamp,
) kvserverpb.LeaseStatus {
r.mu.RLock()
defer r.mu.RUnlock()
return r.leaseStatusAtRLocked(ctx, now)
}
func (r *Replica) leaseStatusAtRLocked(
ctx context.Context, now hlc.ClockTimestamp,
) kvserverpb.LeaseStatus {
return r.leaseStatusForRequestRLocked(ctx, now, hlc.Timestamp{})
}
func (r *Replica) leaseStatusForRequestRLocked(
ctx context.Context, now hlc.ClockTimestamp, reqTS hlc.Timestamp,
) kvserverpb.LeaseStatus {
if reqTS.IsEmpty() {
// If the request timestamp is empty, return the status that
// would be given to a request with a timestamp of now.
reqTS = now.ToTimestamp()
}
return r.leaseStatus(ctx, *r.mu.state.Lease, now, r.mu.minLeaseProposedTS,
r.mu.minValidObservedTimestamp, reqTS)
}
// OwnsValidLease returns whether this replica is the current valid
// leaseholder.
//
// Note that this method does not check to see if a transfer is pending,
// but returns the status of the current lease and ownership at the
// specified point in time.
func (r *Replica) OwnsValidLease(ctx context.Context, now hlc.ClockTimestamp) bool {
r.mu.RLock()
defer r.mu.RUnlock()
return r.ownsValidLeaseRLocked(ctx, now)
}
func (r *Replica) ownsValidLeaseRLocked(ctx context.Context, now hlc.ClockTimestamp) bool {
st := r.leaseStatusAtRLocked(ctx, now)
return st.IsValid() && st.OwnedBy(r.store.StoreID())
}
// requiresExpirationLeaseRLocked returns whether this range unconditionally
// uses an expiration-based lease. Ranges located before or including the node
// liveness table must always use expiration leases to avoid circular
// dependencies on the node liveness table. All other ranges typically use
// epoch-based leases, but may temporarily use expiration-based leases during
// lease transfers.
//
// TODO(erikgrinaker): It isn't always clear when to use this and when to use
// shouldUseExpirationLeaseRLocked. We can merge these once there are no more
// callers: when expiration leases don't quiesce and are always eagerly renewed.
func (r *Replica) requiresExpirationLeaseRLocked() bool {
return r.store.cfg.NodeLiveness == nil ||
r.mu.state.Desc.StartKey.Less(roachpb.RKey(keys.NodeLivenessKeyMax))
}
// shouldUseExpirationLeaseRLocked returns true if this range should be using an
// expiration-based lease, either because it requires one or because
// kv.expiration_leases_only.enabled is enabled.
func (r *Replica) shouldUseExpirationLeaseRLocked() bool {
return (ExpirationLeasesOnly.Get(&r.ClusterSettings().SV) && !DisableExpirationLeasesOnly) ||
r.requiresExpirationLeaseRLocked()
}
// requestLeaseLocked executes a request to obtain or extend a lease
// asynchronously and returns a channel on which the result will be posted. If
// there's already a request in progress, we join in waiting for the results of
// that request. Unless an error is returned, the obtained lease will be valid
// for a time interval containing the requested timestamp.
//
// A limiter can be passed to bound the number of new lease requests spawned.
// The function is responsible for acquiring quota and releasing it. If there is
// no quota, it resolves the returned handle with an error. Joining onto an
// existing lease request does not count towards the limit.
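// Editor's note (not in the upstream comment): the call sites in this file
// pass a nil limiter (e.g. requestLeaseLocked(ctx, status, nil)), which
// disables the bound; a caller wanting to cap concurrent new acquisitions
// supplies a *quotapool.IntPool instead.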
func (r *Replica) requestLeaseLocked(
ctx context.Context, status kvserverpb.LeaseStatus, limiter *quotapool.IntPool,
) *leaseRequestHandle {
if r.store.TestingKnobs().LeaseRequestEvent != nil {
if err := r.store.TestingKnobs().LeaseRequestEvent(status.Now.ToTimestamp(), r.StoreID(), r.GetRangeID()); err != nil {
return r.mu.pendingLeaseRequest.newResolvedHandle(err)
}
}
if pErr := r.store.TestingKnobs().PinnedLeases.rejectLeaseIfPinnedElsewhere(r); pErr != nil {
return r.mu.pendingLeaseRequest.newResolvedHandle(pErr)
}
// Propose a Raft command to get a lease for this replica.
repDesc, err := r.getReplicaDescriptorRLocked()
if err != nil {
return r.mu.pendingLeaseRequest.newResolvedHandle(kvpb.NewError(err))
}
return r.mu.pendingLeaseRequest.InitOrJoinRequest(
ctx, repDesc, status, r.mu.state.Desc.StartKey.AsRawKey(),
false /* transfer */, false /* bypassSafetyChecks */, limiter)
}
// AdminTransferLease transfers the LeaderLease to another replica. Only the
// current holder of the LeaderLease can do a transfer, because it needs to stop
// serving reads and proposing Raft commands (CPut is a read) while evaluating
// and proposing the TransferLease request. This synchronization with all other
// requests on the leaseholder is enforced through latching. The TransferLease
// request grabs a write latch over all keys in the range.
//
// If the leaseholder did not respect latching and did not stop serving reads
// during the lease transfer, it would potentially serve reads with timestamps
// greater than the start timestamp of the new (transferred) lease, which is
// determined during the evaluation of the TransferLease request. More subtly,
// the replica can't even serve reads or propose commands with timestamps lower
// than the start of the new lease because it could lead to read your own write
// violations (see comments on the stasis period on leaseStatus). We could, in
// principle, serve reads more than the maximum clock offset in the past.
//
// The method waits for any in-progress lease extension to be done, and it also
// blocks until the transfer is done. If a transfer is already in progress, this
// method joins in waiting for it to complete if it's transferring to the same
// replica. Otherwise, a NotLeaseHolderError is returned.
//
// AdminTransferLease implements the ReplicaLeaseMover interface.
func (r *Replica) AdminTransferLease(
ctx context.Context, target roachpb.StoreID, bypassSafetyChecks bool,
) error {
if r.store.cfg.TestingKnobs.DisableLeaderFollowsLeaseholder {
// Ensure lease transfers still work when we don't colocate leaders and leases.
bypassSafetyChecks = true
}
// initTransferHelper inits a transfer if no extension is in progress.
// It returns a channel for waiting for the result of a pending
// extension (if any is in progress) and a channel for waiting for the
// transfer (if it was successfully initiated).
var nextLeaseHolder roachpb.ReplicaDescriptor
initTransferHelper := func() (extension, transfer *leaseRequestHandle, err error) {
r.mu.Lock()
defer r.mu.Unlock()
now := r.store.Clock().NowAsClockTimestamp()
status := r.leaseStatusAtRLocked(ctx, now)
if status.Lease.OwnedBy(target) {
// The target is already the lease holder. Nothing to do.
return nil, nil, nil
}
desc := r.mu.state.Desc
if !status.Lease.OwnedBy(r.store.StoreID()) {
return nil, nil, kvpb.NewNotLeaseHolderError(status.Lease, r.store.StoreID(), desc,
"can't transfer the lease because this store doesn't own it")
}
// Verify the target is a replica of the range.
var ok bool
if nextLeaseHolder, ok = desc.GetReplicaDescriptor(target); !ok {
return nil, nil, roachpb.ErrReplicaNotFound
}
if nextLease, ok := r.mu.pendingLeaseRequest.RequestPending(); ok &&
nextLease.Replica != nextLeaseHolder {
repDesc, err := r.getReplicaDescriptorRLocked()
if err != nil {
return nil, nil, err
}
if nextLease.Replica == repDesc {
// There's an extension in progress. Let's wait for it to succeed and
// try again.
return r.mu.pendingLeaseRequest.JoinRequest(), nil, nil
}
// Another transfer is in progress, and it's not transferring to the
// same replica we'd like.
return nil, nil, kvpb.NewNotLeaseHolderError(nextLease, r.store.StoreID(), desc,
"another transfer to a different store is in progress")
}
// Verify that the lease transfer would be safe. This check is best-effort
// in that it can race with Raft leadership changes and log truncation. See
// propBuf.maybeRejectUnsafeProposalLocked for a non-racy version of this
// check, along with a full explanation of why it is important. We include
// both because rejecting a lease transfer in the propBuf after we have
// revoked our current lease is more disruptive than doing so here, before
// we have revoked our current lease.
raftStatus := r.raftStatusRLocked()
raftFirstIndex := r.raftFirstIndexRLocked()
snapStatus := raftutil.ReplicaMayNeedSnapshot(raftStatus, raftFirstIndex, nextLeaseHolder.ReplicaID)
if snapStatus != raftutil.NoSnapshotNeeded && !bypassSafetyChecks && !r.store.cfg.TestingKnobs.DisableAboveRaftLeaseTransferSafetyChecks {
r.store.metrics.LeaseTransferErrorCount.Inc(1)
log.VEventf(ctx, 2, "not initiating lease transfer because the target %s may "+
"need a snapshot: %s", nextLeaseHolder, snapStatus)
err := NewLeaseTransferRejectedBecauseTargetMayNeedSnapshotError(nextLeaseHolder, snapStatus)
return nil, nil, err
}
transfer = r.mu.pendingLeaseRequest.InitOrJoinRequest(ctx, nextLeaseHolder, status,
desc.StartKey.AsRawKey(), true /* transfer */, bypassSafetyChecks, nil /* limiter */)
return nil, transfer, nil
}
// Before transferring a lease, we ensure that the lease transfer is safe. If
// the leaseholder cannot guarantee this, we reject the lease transfer. To
// make such a claim, the leaseholder needs to become the Raft leader and
// probe the lease target's log. Doing so may take time, so we use a small
// exponential backoff loop with a maximum retry count before returning the
// rejection to the client. As configured, this retry loop should back off
// for about 6 seconds before returning an error.
retryOpts := retry.Options{
InitialBackoff: 50 * time.Millisecond,
MaxBackoff: 1 * time.Second,
Multiplier: 2,
MaxRetries: 10,
}
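	// Editor's note (illustrative arithmetic, ignoring randomization): the
	// ten retries back off for roughly 50+100+200+400+800ms plus five more
	// intervals at the 1s cap, i.e. ~6.5s in total, matching the "about 6
	// seconds" estimate above.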
if count := r.store.TestingKnobs().LeaseTransferRejectedRetryLoopCount; count != 0 {
retryOpts.MaxRetries = count
}
transferRejectedRetry := retry.StartWithCtx(ctx, retryOpts)
transferRejectedRetry.Next() // The first call to Next does not block.
// Loop while there's an extension in progress.
for {
// See if there's an extension in progress that we have to wait for.
// If there isn't, request a transfer.
extension, transfer, err := initTransferHelper()
if err != nil {
if IsLeaseTransferRejectedBecauseTargetMayNeedSnapshotError(err) && transferRejectedRetry.Next() {
// If the lease transfer was rejected because the target may need a
// snapshot, try again. After the backoff, we may have become the Raft
// leader (through maybeTransferRaftLeadershipToLeaseholderLocked) or
// may have learned more about the state of the lease target's log.
log.VEventf(ctx, 2, "retrying lease transfer to store %d after rejection", target)
continue
}
return err
}
if extension == nil {
if transfer == nil {
// The target is us and we're the lease holder.
return nil
}
select {
case pErr := <-transfer.C():
return pErr.GoError()
case <-ctx.Done():
transfer.Cancel()
return ctx.Err()
}
}
// Wait for the in-progress extension without holding the mutex.
if r.store.TestingKnobs().LeaseTransferBlockedOnExtensionEvent != nil {
r.store.TestingKnobs().LeaseTransferBlockedOnExtensionEvent(nextLeaseHolder)
}
select {
case <-extension.C():
continue
case <-ctx.Done():
extension.Cancel()
return ctx.Err()
}
}
}
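// Editor's sketch (illustrative; repl and the target store ID are
// assumptions, not taken from this file): a typical invocation is
//
//	err := repl.AdminTransferLease(ctx, roachpb.StoreID(2), false /* bypassSafetyChecks */)
//
// A rejection due to a lagging target surfaces as the "target may need a
// Raft snapshot" error constructed below.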
// GetLease returns the lease and, if available, the proposed next lease.
func (r *Replica) GetLease() (roachpb.Lease, roachpb.Lease) {
r.mu.RLock()
defer r.mu.RUnlock()
return r.getLeaseRLocked()
}
func (r *Replica) getLeaseRLocked() (roachpb.Lease, roachpb.Lease) {
if nextLease, ok := r.mu.pendingLeaseRequest.RequestPending(); ok {
return *r.mu.state.Lease, nextLease
}
return *r.mu.state.Lease, roachpb.Lease{}
}
// RevokeLease stops the replica from using its current lease, if that lease
// matches the provided lease sequence. All future calls to leaseStatus on this
// node with the current lease will now return a PROSCRIBED status.
func (r *Replica) RevokeLease(ctx context.Context, seq roachpb.LeaseSequence) {
r.mu.Lock()
defer r.mu.Unlock()
if r.mu.state.Lease.Sequence == seq {
r.mu.minLeaseProposedTS = r.Clock().NowAsClockTimestamp()
}
}
// NewLeaseTransferRejectedBecauseTargetMayNeedSnapshotError return an error
// indicating that a lease transfer failed because the current leaseholder could
// not prove that the lease transfer target did not need a Raft snapshot.
func NewLeaseTransferRejectedBecauseTargetMayNeedSnapshotError(
target roachpb.ReplicaDescriptor, snapStatus raftutil.ReplicaNeedsSnapshotStatus,
) error {
err := errors.Errorf("refusing to transfer lease to %d because target may need a Raft snapshot: %s",
target, snapStatus)
return errors.Mark(err, errMarkLeaseTransferRejectedBecauseTargetMayNeedSnapshot)
}
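// Editor's sketch (hypothetical helper name): because the constructor above
// tags the error with errors.Mark, callers can detect it by marker rather
// than by string matching:
func exampleIsTransferRejectedForSnapshot(err error) bool {
	return errors.Is(err, errMarkLeaseTransferRejectedBecauseTargetMayNeedSnapshot)
}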
// checkRequestTimeRLocked checks that the provided request timestamp is not
// too far in the future. We define "too far" as a time that would require a
// lease extension even if we were perfectly proactive about extending our
// lease asynchronously to always ensure at least a "leaseRenewal" duration
// worth of runway. Doing so ensures that we detect client behavior that
// will inevitably run into frequent synchronous lease extensions.
//
// This serves as a stricter version of a check that if we were to perform
// a lease extension at now, the request would be contained within the new
// lease's expiration (and stasis period).
func (r *Replica) checkRequestTimeRLocked(now hlc.ClockTimestamp, reqTS hlc.Timestamp) error {
var leaseRenewal time.Duration
if r.shouldUseExpirationLeaseRLocked() {
_, leaseRenewal = r.store.cfg.RangeLeaseDurations()
} else {
_, leaseRenewal = r.store.cfg.NodeLivenessDurations()
}
leaseRenewalMinusStasis := leaseRenewal - r.store.Clock().MaxOffset()
if leaseRenewalMinusStasis < 0 {
// If maxOffset > leaseRenewal, such that present time operations risk
// ending up in the stasis period, allow requests up to clock.Now(). Can
// happen in tests.
leaseRenewalMinusStasis = 0
}
maxReqTS := now.ToTimestamp().Add(leaseRenewalMinusStasis.Nanoseconds(), 0)
if maxReqTS.Less(reqTS) {
return errors.Errorf("request timestamp %s too far in future (> %s)", reqTS, maxReqTS)
}
return nil
}
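// Editor's worked example (assumed numbers, illustration only): with a lease
// renewal interval of 3s and a max clock offset of 500ms, the check above
// admits request timestamps up to now+2.5s; anything later would inevitably
// force synchronous lease extensions and is rejected.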
// leaseGoodToGoRLocked verifies that the replica has a lease that is
// valid, owned by the current replica, and usable to serve requests at
// the specified timestamp. The method will return the lease status if
// these conditions are satisfied or an error if they are unsatisfied.
// The lease status is either empty or fully populated.
//
// Latches must be acquired on the range before calling this method.
// This ensures that callers are properly sequenced with TransferLease
// requests, which declare a conflict with all other commands.
//
// The method has four possible outcomes:
//
// (1) the request timestamp is too far in the future. In this case, a
//     nonstructured error is returned. This shouldn't happen.
// (2) the lease is invalid or otherwise unable to serve a request at the
//     specified timestamp. In this case, an InvalidLeaseError is returned,
//     which is caught in executeBatchWithConcurrencyRetries and used to
//     trigger a lease acquisition/extension.
// (3) the lease is valid but held by a different replica. In this case, a
//     NotLeaseHolderError is returned, which is propagated back up to the
//     DistSender and triggers a redirection of the request.
// (4) the lease is valid, held locally, and capable of serving the given
//     request. In this case, no error is returned.
func (r *Replica) leaseGoodToGoRLocked(
ctx context.Context, now hlc.ClockTimestamp, reqTS hlc.Timestamp,
) (kvserverpb.LeaseStatus, error) {
st := r.leaseStatusForRequestRLocked(ctx, now, reqTS)
err := r.leaseGoodToGoForStatusRLocked(ctx, now, reqTS, st)
if err != nil {
return kvserverpb.LeaseStatus{}, err
}
return st, err
}
func (r *Replica) leaseGoodToGoForStatusRLocked(
ctx context.Context, now hlc.ClockTimestamp, reqTS hlc.Timestamp, st kvserverpb.LeaseStatus,
) error {
if err := r.checkRequestTimeRLocked(now, reqTS); err != nil {
// Case (1): invalid request.
return err
}
if !st.IsValid() {
// Case (2): invalid lease.
return &kvpb.InvalidLeaseError{}
}
if !st.Lease.OwnedBy(r.store.StoreID()) {
// Case (3): not leaseholder.
_, stillMember := r.mu.state.Desc.GetReplicaDescriptor(st.Lease.Replica.StoreID)
if !stillMember {
// This would be the situation in which the lease holder gets removed when
// holding the lease, or in which a lease request erroneously gets accepted
// for a replica that is not in the replica set. Neither of the two can
// happen in normal usage since appropriate mechanisms have been added:
//
// 1. Only the lease holder (at the time) schedules removal of a replica,
// but the lease can change hands and so the situation in which a follower
// coordinates a replica removal of the (new) lease holder is possible (if
// unlikely) in practice. In this situation, the new lease holder would at
// some point be asked to propose the replica change's EndTxn to Raft. A
// check has been added that prevents proposals that amount to the removal
// of the proposer's (and hence lease holder's) Replica, preventing this
// scenario.
//
// 2. A lease is accepted for a Replica that has been removed. Without
// precautions, this could happen because lease requests are special in
// that they are the only command that is proposed on a follower (other
// commands may be proposed from followers, but not successfully so). For
// all proposals, processRaftCommand checks that their ProposalLease is
// compatible with the active lease for the log position. For commands
// proposed on the lease holder, the spanlatch manager then serializes
// everything. But lease requests get created on followers based on their
// local state and thus without being sequenced through latching. Thus
// a recently removed follower (unaware of its own removal) could submit
// a proposal for the lease (correctly using as a ProposerLease the last
// active lease), and would receive it given the up-to-date ProposerLease.
// Hence, an extra check is in order: processRaftCommand makes sure that
// lease requests for a replica not in the descriptor are bounced.
//
// However, this is possible if the `cockroach debug recover` command has
// been used, so this is just a logged error instead of a fatal assertion.
log.Errorf(ctx, "lease %s owned by replica %+v that no longer exists",
st.Lease, st.Lease.Replica)
}
// Otherwise, if the lease is currently held by another replica, redirect
// to the holder.
return kvpb.NewNotLeaseHolderError(
st.Lease, r.store.StoreID(), r.descRLocked(), "lease held by different store",
)
}
// Case (4): all good.
return nil
}
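// Editor's sketch (hypothetical helper, action strings illustrative): mapping
// the four outcomes documented on leaseGoodToGoRLocked onto caller actions.
func exampleClassifyLeaseGoodToGoError(err error) string {
	switch {
	case err == nil:
		return "serve locally" // case (4)
	case errors.HasType(err, (*kvpb.InvalidLeaseError)(nil)):
		return "acquire or extend the lease" // case (2)
	case errors.HasType(err, (*kvpb.NotLeaseHolderError)(nil)):
		return "redirect to the leaseholder" // case (3)
	default:
		return "reject the request" // case (1)
	}
}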
// leaseGoodToGo is like leaseGoodToGoRLocked, but will acquire the replica read
// lock.
func (r *Replica) leaseGoodToGo(
ctx context.Context, now hlc.ClockTimestamp, reqTS hlc.Timestamp,
) (kvserverpb.LeaseStatus, error) {
r.mu.RLock()
defer r.mu.RUnlock()
return r.leaseGoodToGoRLocked(ctx, now, reqTS)
}
// redirectOnOrAcquireLease checks whether this replica has the lease at
// the current timestamp. If it does, returns the lease and its status.
// If another replica currently holds the lease, redirects by returning
// NotLeaseHolderError and an empty lease status.
//
// If the lease is expired, a renewal is synchronously requested.
// Expiration-based leases are eagerly renewed when a request with a
// timestamp within RangeLeaseRenewalDuration of the lease expiration is
// served.
//
// TODO(spencer): for write commands, don't wait while requesting the range
// lease. If the lease acquisition fails, the write cmd will fail as well. If
// it succeeds, as is likely, then the write will not incur latency waiting
// for the command to complete. Reads, however, must wait.
func (r *Replica) redirectOnOrAcquireLease(
ctx context.Context,
) (kvserverpb.LeaseStatus, *kvpb.Error) {
return r.redirectOnOrAcquireLeaseForRequest(ctx, hlc.Timestamp{}, r.breaker.Signal())
}
// TestingAcquireLease is redirectOnOrAcquireLease exposed for tests.
func (r *Replica) TestingAcquireLease(ctx context.Context) (kvserverpb.LeaseStatus, error) {
ctx = r.AnnotateCtx(ctx)
ctx = logtags.AddTag(ctx, "lease-acq", nil)
l, pErr := r.redirectOnOrAcquireLease(ctx)
return l, pErr.GoError()
}
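// Editor's sketch (test-side usage; repl, store, t, and the require
// assertions are assumptions, not taken from this file):
//
//	st, err := repl.TestingAcquireLease(ctx)
//	require.NoError(t, err)
//	require.True(t, st.OwnedBy(store.StoreID()))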
func (s *Store) rangeLeaseAcquireTimeout() time.Duration {
if d := s.cfg.TestingKnobs.RangeLeaseAcquireTimeoutOverride; d != 0 {
return d
}
return s.cfg.RangeLeaseAcquireTimeout()
}
// redirectOnOrAcquireLeaseForRequest is like redirectOnOrAcquireLease,
// but it accepts a specific request timestamp instead of assuming that
// the request is operating at the current time.
func (r *Replica) redirectOnOrAcquireLeaseForRequest(
ctx context.Context, reqTS hlc.Timestamp, brSig signaller,
) (status kvserverpb.LeaseStatus, pErr *kvpb.Error) {
// Does not use RunWithTimeout(), because we do not want to mask the
// NotLeaseHolderError on context cancellation.
ctx, cancel := context.WithTimeout(ctx, r.store.rangeLeaseAcquireTimeout()) // nolint:context
defer cancel()
// Try fast-path.
now := r.store.Clock().NowAsClockTimestamp()
{
status, err := r.leaseGoodToGo(ctx, now, reqTS)
if err == nil {
return status, nil
} else if !errors.HasType(err, (*kvpb.InvalidLeaseError)(nil)) {
return kvserverpb.LeaseStatus{}, kvpb.NewError(err)
}
}
if err := brSig.Err(); err != nil {
return kvserverpb.LeaseStatus{}, kvpb.NewError(err)
}
// Loop until the lease is held or the replica ascertains the actual lease
// holder. Returns also on context.Done() (timeout or cancellation).
for attempt := 1; ; attempt++ {
now = r.store.Clock().NowAsClockTimestamp()
llHandle, status, transfer, pErr := func() (*leaseRequestHandle, kvserverpb.LeaseStatus, bool, *kvpb.Error) {
r.mu.Lock()
defer r.mu.Unlock()
// Check that we're not in the process of transferring the lease
// away. If we are doing so, we can't serve reads or propose Raft
// commands - see comments on AdminTransferLease and TransferLease.
// So wait on the lease transfer to complete either successfully or
// unsuccessfully before redirecting or retrying.
repDesc, err := r.getReplicaDescriptorRLocked()
if err != nil {
return nil, kvserverpb.LeaseStatus{}, false, kvpb.NewError(err)
}
if ok := r.mu.pendingLeaseRequest.TransferInProgress(repDesc.ReplicaID); ok {
return r.mu.pendingLeaseRequest.JoinRequest(), kvserverpb.LeaseStatus{}, true /* transfer */, nil
}
status := r.leaseStatusForRequestRLocked(ctx, now, reqTS)
switch status.State {
case kvserverpb.LeaseState_ERROR:
// Lease state couldn't be determined.
msg := status.ErrInfo
if msg == "" {
msg = "lease state could not be determined"
}
log.VEventf(ctx, 2, "%s", msg)
return nil, kvserverpb.LeaseStatus{}, false, kvpb.NewError(
kvpb.NewNotLeaseHolderError(roachpb.Lease{}, r.store.StoreID(), r.mu.state.Desc, msg))
case kvserverpb.LeaseState_VALID, kvserverpb.LeaseState_UNUSABLE:
if !status.Lease.OwnedBy(r.store.StoreID()) {
_, stillMember := r.mu.state.Desc.GetReplicaDescriptor(status.Lease.Replica.StoreID)
if !stillMember {
// See corresponding comment in leaseGoodToGoRLocked.
log.Errorf(ctx, "lease %s owned by replica %+v that no longer exists",
status.Lease, status.Lease.Replica)
}
// Otherwise, if the lease is currently held by another replica, redirect
// to the holder.
return nil, kvserverpb.LeaseStatus{}, false, kvpb.NewError(
kvpb.NewNotLeaseHolderError(status.Lease, r.store.StoreID(), r.mu.state.Desc,
"lease held by different store"))
}
// If the lease is in stasis, we can't serve requests until we've
// renewed the lease, so we return the handle to block on renewal.
if status.State == kvserverpb.LeaseState_UNUSABLE {
return r.requestLeaseLocked(ctx, status, nil), kvserverpb.LeaseStatus{}, false, nil
}
// Return a nil handle and status to signal that we have a valid lease.
return nil, status, false, nil
case kvserverpb.LeaseState_EXPIRED:
// No active lease: Request renewal if a renewal is not already pending.
log.VEventf(ctx, 2, "request range lease (attempt #%d)", attempt)
return r.requestLeaseLocked(ctx, status, nil), kvserverpb.LeaseStatus{}, false, nil
case kvserverpb.LeaseState_PROSCRIBED:
// Lease proposed timestamp is earlier than the min proposed
// timestamp limit this replica must observe. If this store
// owns the lease, re-request. Otherwise, redirect.
if status.Lease.OwnedBy(r.store.StoreID()) {
log.VEventf(ctx, 2, "request range lease (attempt #%d)", attempt)
return r.requestLeaseLocked(ctx, status, nil), kvserverpb.LeaseStatus{}, false, nil
}
// If lease is currently held by another, redirect to holder.
return nil, kvserverpb.LeaseStatus{}, false, kvpb.NewError(
kvpb.NewNotLeaseHolderError(status.Lease, r.store.StoreID(), r.mu.state.Desc, "lease proscribed"))
default:
return nil, kvserverpb.LeaseStatus{}, false, kvpb.NewErrorf("unknown lease status state %v", status)
}
}()
if pErr != nil {
return kvserverpb.LeaseStatus{}, pErr
}
if llHandle == nil {
// We own a valid lease.
log.Eventf(ctx, "valid lease %+v", status)
return status, nil
}
// Wait for the range lease acquisition/transfer to finish, or the
// context to expire.
//
// Note that even if the operation completes successfully, we can't
// assume that we have the lease. This is clearly not the case when
// waiting on a lease transfer and also not the case if our request
// timestamp is not covered by the new lease (though we try to protect
// against this in checkRequestTimeRLocked). So instead of assuming
// anything, we iterate and check again.
pErr = func() (pErr *kvpb.Error) {
slowTimer := timeutil.NewTimer()
defer slowTimer.Stop()
slowTimer.Reset(base.SlowRequestThreshold)
tBegin := timeutil.Now()
for {
select {
case pErr = <-llHandle.C():
if transfer {
// We were waiting on a transfer to finish. Ignore its
// result and try again.
return nil
}
if pErr != nil {
goErr := pErr.GoError()
switch {
case errors.HasType(goErr, (*kvpb.AmbiguousResultError)(nil)):
// This can happen if the RequestLease command we sent has been
// applied locally through a snapshot: the RequestLeaseRequest
// cannot be reproposed so we get this ambiguity.
// We'll just loop around.
return nil
case errors.HasType(goErr, (*kvpb.LeaseRejectedError)(nil)):
var tErr *kvpb.LeaseRejectedError
errors.As(goErr, &tErr)
if tErr.Existing.OwnedBy(r.store.StoreID()) {
// The RequestLease command we sent was rejected because another
// lease was applied in the meantime, but we own that other
// lease. So, loop until the current node becomes aware that
// it's the leaseholder.
return nil
}
// Getting a LeaseRejectedError back means someone else got there
// first, or the lease request was somehow invalid due to a concurrent
// change. That concurrent change could have been that this replica was
// removed (see processRaftCommand), so check for that case before
// falling back to a NotLeaseHolderError.
var err error
if _, descErr := r.GetReplicaDescriptor(); descErr != nil {
err = descErr
} else if st := r.CurrentLeaseStatus(ctx); !st.IsValid() {
err = kvpb.NewNotLeaseHolderError(roachpb.Lease{}, r.store.StoreID(), r.Desc(),
"lease acquisition attempt lost to another lease, which has expired in the meantime")
} else {
err = kvpb.NewNotLeaseHolderError(st.Lease, r.store.StoreID(), r.Desc(),
"lease acquisition attempt lost to another lease")
}
pErr = kvpb.NewError(err)
}
return pErr
}
log.VEventf(ctx, 2, "lease acquisition succeeded: %+v", status.Lease)
return nil
case <-brSig.C():
llHandle.Cancel()
err := brSig.Err()
log.VErrEventf(ctx, 2, "lease acquisition failed: %s", err)
return kvpb.NewError(err)
case <-slowTimer.C:
slowTimer.Read = true
log.Warningf(ctx, "have been waiting %s attempting to acquire lease (%d attempts)",
base.SlowRequestThreshold, attempt)
r.store.metrics.SlowLeaseRequests.Inc(1)
defer func(attempt int) {
r.store.metrics.SlowLeaseRequests.Dec(1)
log.Infof(ctx, "slow lease acquisition finished after %s with error %v after %d attempts", timeutil.Since(tBegin), pErr, attempt)
}(attempt)
case <-ctx.Done():
llHandle.Cancel()
log.VErrEventf(ctx, 2, "lease acquisition failed: %s", ctx.Err())
return kvpb.NewError(kvpb.NewNotLeaseHolderError(roachpb.Lease{}, r.store.StoreID(), r.Desc(),
"lease acquisition canceled because context canceled"))
case <-r.store.Stopper().ShouldQuiesce():
llHandle.Cancel()
return kvpb.NewError(kvpb.NewNotLeaseHolderError(roachpb.Lease{}, r.store.StoreID(), r.Desc(),
"lease acquisition canceled because node is stopping"))
}
}
}()
if pErr != nil {
return kvserverpb.LeaseStatus{}, pErr
}
// Retry...
}
}
// shouldRequestLeaseRLocked determines whether the replica should request a new
// lease. It also returns whether this is a lease extension. This covers the
// following cases:
//
// - The lease has expired, so the Raft leader should attempt to acquire it.
// - The lease is expiration-based, ours, and in need of extension.
// - The node has restarted, and should reacquire its former leases.
// - The lease is ours but has an incorrect type (epoch/expiration).
func (r *Replica) shouldRequestLeaseRLocked(
st kvserverpb.LeaseStatus,
) (shouldRequest bool, isExtension bool) {
switch st.State {
case kvserverpb.LeaseState_EXPIRED:
// Attempt to acquire an expired lease, but only if we're the Raft leader.
// We want the lease and leader to be colocated, and a non-leader lease
// proposal would be rejected by the Raft proposal buffer anyway. This also
// reduces aggregate work across ranges, since only 1 replica will attempt
// to acquire the lease, and only if there is a leader.
return r.isRaftLeaderRLocked(), false
case kvserverpb.LeaseState_PROSCRIBED:
// Reacquire leases after a restart, if they're still ours. We could also
// have revoked our lease as part of a lease transfer, but the transferred
// lease would typically take effect before we get here, and if not then the
// lease compare-and-swap would fail anyway.
return st.OwnedBy(r.StoreID()), false
case kvserverpb.LeaseState_VALID, kvserverpb.LeaseState_UNUSABLE:
// If someone else has the lease, leave it alone.
if !st.OwnedBy(r.StoreID()) {
return false, false
}
// Extend expiration leases if they're due.
if st.Lease.Type() == roachpb.LeaseExpiration {
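			// Editor's note (illustrative numbers): with a 6s expiration
			// lease and a 3s renewal duration, the lease becomes due for
			// extension once at most 3s of validity remain.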
renewal := st.Lease.Expiration.Add(-r.store.cfg.RangeLeaseRenewalDuration().Nanoseconds(), 0)
if renewal.LessEq(st.Now.ToTimestamp()) {
return true, true
}
}
// Switch the lease type if it's incorrect.
if !r.hasCorrectLeaseTypeRLocked(st.Lease) {
return true, false
}
return false, false
case kvserverpb.LeaseState_ERROR:
return false, false
default:
log.Fatalf(context.Background(), "invalid lease state %s", st.State)
return false, false
}
}
// maybeSwitchLeaseType will synchronously renew a lease using the appropriate
// type if it is (or was) owned by this replica and has an incorrect type. This
// typically happens when changing kv.expiration_leases_only.enabled.
func (r *Replica) maybeSwitchLeaseType(ctx context.Context, st kvserverpb.LeaseStatus) *kvpb.Error {
if !st.OwnedBy(r.store.StoreID()) {
return nil
}
var llHandle *leaseRequestHandle
r.mu.Lock()
if !r.hasCorrectLeaseTypeRLocked(st.Lease) {
llHandle = r.requestLeaseLocked(ctx, st, nil /* limiter */)
}
r.mu.Unlock()
if llHandle != nil {
select {
case pErr := <-llHandle.C():
return pErr
case <-ctx.Done():
return kvpb.NewError(ctx.Err())
}
}
return nil
}
// HasCorrectLeaseType returns true if the lease type is correct for this replica.
func (r *Replica) HasCorrectLeaseType(lease roachpb.Lease) bool {
r.mu.RLock()
defer r.mu.RUnlock()
return r.hasCorrectLeaseTypeRLocked(lease)
}
func (r *Replica) hasCorrectLeaseTypeRLocked(lease roachpb.Lease) bool {
hasExpirationLease := lease.Type() == roachpb.LeaseExpiration
return hasExpirationLease == r.shouldUseExpirationLeaseRLocked()
}
// LeasePreferencesStatus represents the state of satisfying lease preferences.
type LeasePreferencesStatus int
const (
_ LeasePreferencesStatus = iota
// LeasePreferencesViolating indicates the checked store does not satisfy any
// lease preference applied.
LeasePreferencesViolating
// LeasePreferencesLessPreferred indicates the checked store satisfies _some_
// preference, however not the most preferred.
LeasePreferencesLessPreferred
// LeasePreferencesOK indicates the checked store satisfies the first
// preference, or no lease preferences are applied.
LeasePreferencesOK
)
// LeaseViolatesPreferences checks if this replica owns the lease and if it
// violates the lease preferences defined in the span config. If no preferences
// are defined then it will return false and consider it to be in conformance.
func (r *Replica) LeaseViolatesPreferences(ctx context.Context, conf *roachpb.SpanConfig) bool {
storeID := r.store.StoreID()
preferences := conf.LeasePreferences
leaseStatus := r.CurrentLeaseStatus(ctx)
if !leaseStatus.IsValid() || !leaseStatus.Lease.OwnedBy(storeID) {
// We can't determine if the lease preferences are being conformed to or
// not, as the store either doesn't own the lease, or doesn't own a valid
// lease.
return false
}
storeAttrs := r.store.Attrs()
nodeAttrs := r.store.nodeDesc.Attrs
nodeLocality := r.store.nodeDesc.Locality
preferenceStatus := CheckStoreAgainstLeasePreferences(
storeID, storeAttrs, nodeAttrs, nodeLocality, preferences)
return preferenceStatus == LeasePreferencesViolating
}
// CheckStoreAgainstLeasePreferences returns whether the given store, as a
// leaseholder, would violate, be less preferred under, or satisfy the lease
// preferences.
func CheckStoreAgainstLeasePreferences(
storeID roachpb.StoreID,
storeAttrs, nodeAttrs roachpb.Attributes,
nodeLocality roachpb.Locality,
preferences []roachpb.LeasePreference,
) LeasePreferencesStatus {
if len(preferences) == 0 {
return LeasePreferencesOK
}
for i, preference := range preferences {
if constraint.CheckConjunction(storeAttrs, nodeAttrs, nodeLocality, preference.Constraints) {
if i > 0 {
return LeasePreferencesLessPreferred
}
return LeasePreferencesOK
}
}
return LeasePreferencesViolating
}
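// Editor's worked example (constraint values are assumptions): given the
// preference list [+region=us-east], [+region=us-west], a us-east store gets
// LeasePreferencesOK (the first preference matches), a us-west store gets
// LeasePreferencesLessPreferred (only index 1 matches), and a eu-west store
// gets LeasePreferencesViolating (nothing matches).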
| pkg/kv/kvserver/replica_range_lease.go | 1 | https://github.com/cockroachdb/cockroach/commit/dbe5954ec7032390cc5713bad5ed8fcdaf3b2d05 | [
0.5283776521682739,
0.0035163613501936197,
0.00015874742530286312,
0.0001690666249487549,
0.04136689752340317
] |
{
"id": 0,
"code_window": [
"\t\tCommit: false,\n",
"\t\t// Resolved intents should maintain an abort span entry to prevent\n",
"\t\t// concurrent requests from failing to notice the transaction was aborted.\n",
"\t\tPoison: true,\n",
"\t})\n",
"\n",
"\tconst taskName = \"txnHeartbeater: aborting txn\"\n",
"\tlog.VEventf(ctx, 2, \"async abort for txn: %s\", txn)\n",
"\tif err := h.stopper.RunAsyncTask(h.AnnotateCtx(context.Background()), taskName,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// NB: Setting `Source: kvpb.AdmissionHeader_OTHER` means this request will\n",
"\t// bypass AC.\n",
"\tba.AdmissionHeader = kvpb.AdmissionHeader{\n",
"\t\tPriority: txn.AdmissionPriority,\n",
"\t\tCreateTime: timeutil.Now().UnixNano(),\n",
"\t\tSource: kvpb.AdmissionHeader_OTHER,\n",
"\t}\n"
],
"file_path": "pkg/kv/kvclient/kvcoord/txn_interceptor_heartbeater.go",
"type": "add",
"edit_start_line_idx": 527
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"os"
"path/filepath"
"sort"
"strings"
"text/template"
"honnef.co/go/tools/analysis/lint"
"honnef.co/go/tools/simple"
"honnef.co/go/tools/staticcheck"
"honnef.co/go/tools/stylecheck"
)
const (
readmeContent = `All of the code in this directory is generated by generate-staticcheck for use in Bazel.
`
rootBuildBazelContent = `exports_files(["def.bzl"])
`
analysisFileTemplate = `// Code generated by generate-staticcheck; DO NOT EDIT.
//go:build bazel
// +build bazel
package {{ .Package }}
import (
util "github.com/cockroachdb/cockroach/pkg/testutils/lint/passes/staticcheck"
"golang.org/x/tools/go/analysis"
"honnef.co/go/tools/{{ .CheckType }}"
)
var Analyzer *analysis.Analyzer
func init() {
for _, analyzer := range {{ .CheckType }}.Analyzers {
if analyzer.Analyzer.Name == "{{ .Check }}" {
Analyzer = analyzer.Analyzer
break
}
}
util.MungeAnalyzer(Analyzer)
}
`
buildBzlFileTemplate = `# Code generated by generate-staticcheck; DO NOT EDIT.
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "{{ .Package }}",
srcs = ["analyzer.go"],
importpath = "github.com/cockroachdb/cockroach/build/bazelutil/staticcheckanalyzers/{{ .Package }}",
visibility = ["//visibility:public"],
deps = [
"//pkg/testutils/lint/passes/staticcheck",
"@co_honnef_go_tools//{{ .CheckType }}",
"@org_golang_x_tools//go/analysis",
],
)
`
defBzlFileTemplate = `# Code generated by generate-staticcheck; DO NOT EDIT.
STATICCHECK_CHECKS = [
{{range $i,$a := .AllAnalyzers}} "{{.}}",
{{end}}]
`
)
func main() {
fileTpl := template.Must(template.New("source").Parse(analysisFileTemplate))
buildTpl := template.Must(template.New("buildbazel").Parse(buildBzlFileTemplate))
rootDir := "build/bazelutil/staticcheckanalyzers"
err := os.RemoveAll(rootDir)
if err != nil {
panic(err)
}
err = os.MkdirAll(rootDir, 0755)
if err != nil {
panic(err)
}
err = os.WriteFile(filepath.Join(rootDir, "README.md"), []byte(readmeContent), 0644)
if err != nil {
panic(err)
}
err = os.WriteFile(filepath.Join(rootDir, "BUILD.bazel"), []byte(rootBuildBazelContent), 0644)
if err != nil {
panic(err)
}
// All of these analyzers will be written to def.bzl.
var allAnalyzers []string
for _, check := range []struct {
Analyzers []*lint.Analyzer
CheckType string
}{
// TODO: Consider adding quickfix checks.
{Analyzers: staticcheck.Analyzers, CheckType: "staticcheck"},
{Analyzers: stylecheck.Analyzers, CheckType: "stylecheck"},
{Analyzers: simple.Analyzers, CheckType: "simple"},
} {
for _, v := range check.Analyzers {
analyzer := v.Analyzer
pkgname := strings.ToLower(analyzer.Name)
dirname := filepath.Join(rootDir, pkgname)
err := os.MkdirAll(dirname, 0755)
if err != nil {
panic(err)
}
outFile, err := os.Create(filepath.Join(dirname, "analyzer.go"))
if err != nil {
panic(err)
}
vars := struct {
Package string
Check string
CheckType string
}{Package: pkgname, Check: analyzer.Name, CheckType: check.CheckType}
err = fileTpl.Execute(outFile, vars)
if err != nil {
panic(err)
}
err = outFile.Close()
if err != nil {
panic(err)
}
outBuild, err := os.Create(filepath.Join(dirname, "BUILD.bazel"))
if err != nil {
panic(err)
}
err = buildTpl.Execute(outBuild, vars)
if err != nil {
panic(err)
}
err = outBuild.Close()
if err != nil {
panic(err)
}
allAnalyzers = append(allAnalyzers, "//"+dirname)
}
}
sort.Strings(allAnalyzers)
fileTpl = template.Must(template.New("defbzl").Parse(defBzlFileTemplate))
defBzlFile, err := os.Create(filepath.Join(rootDir, "def.bzl"))
if err != nil {
panic(err)
}
err = fileTpl.Execute(defBzlFile, struct{ AllAnalyzers []string }{AllAnalyzers: allAnalyzers})
if err != nil {
panic(err)
}
}
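// Editor's note (illustrative template output): substituting Package=sa1000,
// Check=SA1000, CheckType=staticcheck into analysisFileTemplate produces
// build/bazelutil/staticcheckanalyzers/sa1000/analyzer.go, whose init picks
// the SA1000 analyzer out of staticcheck.Analyzers and runs it through
// util.MungeAnalyzer, exactly as the template above spells out.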
| pkg/cmd/generate-staticcheck/main.go | 0 | https://github.com/cockroachdb/cockroach/commit/dbe5954ec7032390cc5713bad5ed8fcdaf3b2d05 | [
0.00017886346904560924,
0.00017233658581972122,
0.00016732470248825848,
0.0001724426110740751,
0.0000034939857869176194
] |
{
"id": 0,
"code_window": [
"\t\tCommit: false,\n",
"\t\t// Resolved intents should maintain an abort span entry to prevent\n",
"\t\t// concurrent requests from failing to notice the transaction was aborted.\n",
"\t\tPoison: true,\n",
"\t})\n",
"\n",
"\tconst taskName = \"txnHeartbeater: aborting txn\"\n",
"\tlog.VEventf(ctx, 2, \"async abort for txn: %s\", txn)\n",
"\tif err := h.stopper.RunAsyncTask(h.AnnotateCtx(context.Background()), taskName,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// NB: Setting `Source: kvpb.AdmissionHeader_OTHER` means this request will\n",
"\t// bypass AC.\n",
"\tba.AdmissionHeader = kvpb.AdmissionHeader{\n",
"\t\tPriority: txn.AdmissionPriority,\n",
"\t\tCreateTime: timeutil.Now().UnixNano(),\n",
"\t\tSource: kvpb.AdmissionHeader_OTHER,\n",
"\t}\n"
],
"file_path": "pkg/kv/kvclient/kvcoord/txn_interceptor_heartbeater.go",
"type": "add",
"edit_start_line_idx": 527
} | // Copyright 2022 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// Code generated by sctestgen, DO NOT EDIT.
package schemachanger
import (
"testing"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/sctest"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
)
func TestEndToEndSideEffects_add_column(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_add_column_default_seq(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_default_seq"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_add_column_default_unique(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_default_unique"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_add_column_no_default(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_no_default"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_add_column_with_stored(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_with_stored"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_add_column_with_stored_family(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_with_stored_family"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_alter_table_add_check_udf(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_udf"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_alter_table_add_check_unvalidated(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_unvalidated"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_alter_table_add_check_vanilla(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_vanilla"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_alter_table_add_check_with_seq_and_udt(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_with_seq_and_udt"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_alter_table_add_foreign_key(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_foreign_key"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_alter_table_add_primary_key_drop_rowid(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_primary_key_drop_rowid"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_alter_table_add_primary_key_drop_rowid_with_secondary_idx(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_primary_key_drop_rowid_with_secondary_idx"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_alter_table_add_unique_without_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_unique_without_index"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_alter_table_alter_column_set_not_null(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_set_not_null"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_alter_table_alter_primary_key_drop_rowid(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_drop_rowid"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_alter_table_alter_primary_key_using_hash(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_using_hash"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_alter_table_alter_primary_key_vanilla(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_vanilla"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_alter_table_drop_constraint_check(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_drop_constraint_check"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_alter_table_drop_constraint_fk(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_drop_constraint_fk"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_alter_table_drop_constraint_uwi(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_drop_constraint_uwi"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_alter_table_multiple_commands(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_multiple_commands"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_alter_table_validate_constraint(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_validate_constraint"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_create_function(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_function"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_create_function_in_txn(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_function_in_txn"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_create_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_index"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_create_index_create_schema_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_index_create_schema_separate_statements"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_create_schema(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_schema"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_create_schema_drop_schema_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_schema_drop_schema_separate_statements"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_create_sequence(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_sequence"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_drop_column_basic(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_basic"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_drop_column_computed_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_computed_index"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_drop_column_create_index_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_create_index_separate_statements"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_drop_column_unique_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_unique_index"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_drop_column_with_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_with_index"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_drop_column_with_partial_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_with_partial_index"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_drop_column_with_udf_default(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_with_udf_default"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_drop_function(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_function"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_drop_index_hash_sharded_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_hash_sharded_index"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_drop_index_partial_expression_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_partial_expression_index"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_drop_index_vanilla_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_vanilla_index"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_drop_index_with_fks(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_with_fks"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_drop_index_with_materialized_view_dep(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_with_materialized_view_dep"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_drop_multiple_columns_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_multiple_columns_separate_statements"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_drop_schema(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_schema"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_drop_table(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_table"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestEndToEndSideEffects_drop_table_udf_default(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_table_udf_default"
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_add_column(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_add_column_default_seq(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_default_seq"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_add_column_default_unique(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_default_unique"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_add_column_no_default(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_no_default"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_add_column_with_stored(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_with_stored"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_add_column_with_stored_family(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_with_stored_family"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_alter_table_add_check_udf(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_udf"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_alter_table_add_check_unvalidated(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_unvalidated"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_alter_table_add_check_vanilla(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_vanilla"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_alter_table_add_check_with_seq_and_udt(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_with_seq_and_udt"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_alter_table_add_foreign_key(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_foreign_key"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_alter_table_add_primary_key_drop_rowid(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_primary_key_drop_rowid"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_alter_table_add_primary_key_drop_rowid_with_secondary_idx(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_primary_key_drop_rowid_with_secondary_idx"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_alter_table_add_unique_without_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_unique_without_index"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_alter_table_alter_column_set_not_null(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_set_not_null"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_alter_table_alter_primary_key_drop_rowid(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_drop_rowid"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_alter_table_alter_primary_key_using_hash(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_using_hash"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_alter_table_alter_primary_key_vanilla(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_vanilla"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_alter_table_drop_constraint_check(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_drop_constraint_check"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_alter_table_drop_constraint_fk(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_drop_constraint_fk"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_alter_table_drop_constraint_uwi(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_drop_constraint_uwi"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_alter_table_multiple_commands(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_multiple_commands"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_alter_table_validate_constraint(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_validate_constraint"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_create_function(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_function"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_create_function_in_txn(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_function_in_txn"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_create_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_index"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_create_index_create_schema_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_index_create_schema_separate_statements"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_create_schema(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_schema"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_create_schema_drop_schema_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_schema_drop_schema_separate_statements"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_create_sequence(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_sequence"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_drop_column_basic(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_basic"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_drop_column_computed_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_computed_index"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_drop_column_create_index_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_create_index_separate_statements"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_drop_column_unique_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_unique_index"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_drop_column_with_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_with_index"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_drop_column_with_partial_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_with_partial_index"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_drop_column_with_udf_default(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_with_udf_default"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_drop_function(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_function"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_drop_index_hash_sharded_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_hash_sharded_index"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_drop_index_partial_expression_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_partial_expression_index"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_drop_index_vanilla_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_vanilla_index"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_drop_index_with_fks(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_with_fks"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_drop_index_with_materialized_view_dep(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_with_materialized_view_dep"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_drop_multiple_columns_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_multiple_columns_separate_statements"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_drop_schema(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_schema"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_drop_table(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_table"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestExecuteWithDMLInjection_drop_table_udf_default(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_table_udf_default"
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
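
// The TestGenerateSchemaChangeCorpus_* tests below feed each end_to_end test
// case to sctest.GenerateSchemaChangeCorpus, which emits declarative
// schema-changer corpus data for these schema changes.
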
func TestGenerateSchemaChangeCorpus_add_column(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_add_column_default_seq(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_default_seq"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_add_column_default_unique(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_default_unique"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_add_column_no_default(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_no_default"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_add_column_with_stored(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_with_stored"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_add_column_with_stored_family(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_with_stored_family"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_alter_table_add_check_udf(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_udf"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_alter_table_add_check_unvalidated(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_unvalidated"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_alter_table_add_check_vanilla(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_vanilla"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_alter_table_add_check_with_seq_and_udt(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_with_seq_and_udt"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_alter_table_add_foreign_key(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_foreign_key"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_alter_table_add_primary_key_drop_rowid(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_primary_key_drop_rowid"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_alter_table_add_primary_key_drop_rowid_with_secondary_idx(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_primary_key_drop_rowid_with_secondary_idx"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_alter_table_add_unique_without_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_unique_without_index"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_alter_table_alter_column_set_not_null(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_set_not_null"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_alter_table_alter_primary_key_drop_rowid(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_drop_rowid"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_alter_table_alter_primary_key_using_hash(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_using_hash"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_alter_table_alter_primary_key_vanilla(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_vanilla"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_alter_table_drop_constraint_check(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_drop_constraint_check"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_alter_table_drop_constraint_fk(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_drop_constraint_fk"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_alter_table_drop_constraint_uwi(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_drop_constraint_uwi"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_alter_table_multiple_commands(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_multiple_commands"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_alter_table_validate_constraint(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_validate_constraint"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_create_function(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_function"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_create_function_in_txn(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_function_in_txn"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_create_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_index"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_create_index_create_schema_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_index_create_schema_separate_statements"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_create_schema(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_schema"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_create_schema_drop_schema_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_schema_drop_schema_separate_statements"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_create_sequence(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_sequence"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_drop_column_basic(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_basic"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_drop_column_computed_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_computed_index"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_drop_column_create_index_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_create_index_separate_statements"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_drop_column_unique_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_unique_index"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_drop_column_with_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_with_index"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_drop_column_with_partial_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_with_partial_index"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_drop_column_with_udf_default(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_with_udf_default"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_drop_function(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_function"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_drop_index_hash_sharded_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_hash_sharded_index"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_drop_index_partial_expression_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_partial_expression_index"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_drop_index_vanilla_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_vanilla_index"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_drop_index_with_fks(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_with_fks"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_drop_index_with_materialized_view_dep(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_with_materialized_view_dep"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_drop_multiple_columns_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_multiple_columns_separate_statements"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_drop_schema(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_schema"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_drop_table(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_table"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestGenerateSchemaChangeCorpus_drop_table_udf_default(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_table_udf_default"
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
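
// The TestPause_* tests below run each end_to_end test case via sctest.Pause,
// which exercises pausing (and resuming) the schema-change job mid-change.
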
func TestPause_add_column(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_add_column_default_seq(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_default_seq"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_add_column_default_unique(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_default_unique"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_add_column_no_default(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_no_default"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_add_column_with_stored(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_with_stored"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_add_column_with_stored_family(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_with_stored_family"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_alter_table_add_check_udf(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_udf"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_alter_table_add_check_unvalidated(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_unvalidated"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_alter_table_add_check_vanilla(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_vanilla"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_alter_table_add_check_with_seq_and_udt(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_with_seq_and_udt"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_alter_table_add_foreign_key(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_foreign_key"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_alter_table_add_primary_key_drop_rowid(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_primary_key_drop_rowid"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_alter_table_add_primary_key_drop_rowid_with_secondary_idx(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_primary_key_drop_rowid_with_secondary_idx"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_alter_table_add_unique_without_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_unique_without_index"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_alter_table_alter_column_set_not_null(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_set_not_null"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_alter_table_alter_primary_key_drop_rowid(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_drop_rowid"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_alter_table_alter_primary_key_using_hash(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_using_hash"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_alter_table_alter_primary_key_vanilla(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_vanilla"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_alter_table_drop_constraint_check(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_drop_constraint_check"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_alter_table_drop_constraint_fk(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_drop_constraint_fk"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_alter_table_drop_constraint_uwi(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_drop_constraint_uwi"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_alter_table_multiple_commands(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_multiple_commands"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_alter_table_validate_constraint(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_validate_constraint"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_create_function(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_function"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_create_function_in_txn(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_function_in_txn"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_create_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_index"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_create_index_create_schema_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_index_create_schema_separate_statements"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_create_schema(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_schema"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_create_schema_drop_schema_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_schema_drop_schema_separate_statements"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_create_sequence(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_sequence"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_drop_column_basic(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_basic"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_drop_column_computed_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_computed_index"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_drop_column_create_index_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_create_index_separate_statements"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_drop_column_unique_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_unique_index"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_drop_column_with_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_with_index"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_drop_column_with_partial_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_with_partial_index"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_drop_column_with_udf_default(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_with_udf_default"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_drop_function(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_function"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_drop_index_hash_sharded_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_hash_sharded_index"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_drop_index_partial_expression_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_partial_expression_index"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_drop_index_vanilla_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_vanilla_index"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_drop_index_with_fks(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_with_fks"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_drop_index_with_materialized_view_dep(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_with_materialized_view_dep"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_drop_multiple_columns_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_multiple_columns_separate_statements"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_drop_schema(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_schema"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_drop_table(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_table"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPause_drop_table_udf_default(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_table_udf_default"
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
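
// The TestPauseMixedVersion_* tests below repeat the pause coverage via
// sctest.PauseMixedVersion, exercising the same scenarios in a mixed-version
// cluster configuration.
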
func TestPauseMixedVersion_add_column(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_add_column_default_seq(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_default_seq"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_add_column_default_unique(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_default_unique"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_add_column_no_default(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_no_default"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_add_column_with_stored(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_with_stored"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_add_column_with_stored_family(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_with_stored_family"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_alter_table_add_check_udf(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_udf"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_alter_table_add_check_unvalidated(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_unvalidated"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_alter_table_add_check_vanilla(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_vanilla"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_alter_table_add_check_with_seq_and_udt(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_with_seq_and_udt"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_alter_table_add_foreign_key(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_foreign_key"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_alter_table_add_primary_key_drop_rowid(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_primary_key_drop_rowid"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_alter_table_add_primary_key_drop_rowid_with_secondary_idx(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_primary_key_drop_rowid_with_secondary_idx"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_alter_table_add_unique_without_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_unique_without_index"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_alter_table_alter_column_set_not_null(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_set_not_null"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_alter_table_alter_primary_key_drop_rowid(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_drop_rowid"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_alter_table_alter_primary_key_using_hash(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_using_hash"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_alter_table_alter_primary_key_vanilla(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_vanilla"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_alter_table_drop_constraint_check(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_drop_constraint_check"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_alter_table_drop_constraint_fk(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_drop_constraint_fk"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_alter_table_drop_constraint_uwi(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_drop_constraint_uwi"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_alter_table_multiple_commands(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_multiple_commands"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_alter_table_validate_constraint(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_validate_constraint"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_create_function(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_function"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_create_function_in_txn(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_function_in_txn"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_create_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_index"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_create_index_create_schema_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_index_create_schema_separate_statements"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_create_schema(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_schema"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_create_schema_drop_schema_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_schema_drop_schema_separate_statements"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_create_sequence(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_sequence"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_drop_column_basic(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_basic"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_drop_column_computed_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_computed_index"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_drop_column_create_index_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_create_index_separate_statements"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_drop_column_unique_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_unique_index"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_drop_column_with_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_with_index"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_drop_column_with_partial_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_with_partial_index"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_drop_column_with_udf_default(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_with_udf_default"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_drop_function(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_function"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_drop_index_hash_sharded_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_hash_sharded_index"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_drop_index_partial_expression_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_partial_expression_index"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_drop_index_vanilla_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_vanilla_index"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_drop_index_with_fks(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_with_fks"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_drop_index_with_materialized_view_dep(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_with_materialized_view_dep"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_drop_multiple_columns_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_multiple_columns_separate_statements"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_drop_schema(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_schema"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_drop_table(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_table"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestPauseMixedVersion_drop_table_udf_default(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_table_udf_default"
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
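
// The TestRollback_* tests below run each end_to_end test case via
// sctest.Rollback, which forces the schema change to fail so that its
// rollback path is exercised.
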
func TestRollback_add_column(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_add_column_default_seq(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_default_seq"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_add_column_default_unique(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_default_unique"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_add_column_no_default(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_no_default"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_add_column_with_stored(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_with_stored"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_add_column_with_stored_family(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_with_stored_family"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_alter_table_add_check_udf(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_udf"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_alter_table_add_check_unvalidated(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_unvalidated"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_alter_table_add_check_vanilla(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_vanilla"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_alter_table_add_check_with_seq_and_udt(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_check_with_seq_and_udt"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_alter_table_add_foreign_key(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_foreign_key"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_alter_table_add_primary_key_drop_rowid(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_primary_key_drop_rowid"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_alter_table_add_primary_key_drop_rowid_with_secondary_idx(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_primary_key_drop_rowid_with_secondary_idx"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_alter_table_add_unique_without_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_unique_without_index"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_alter_table_alter_column_set_not_null(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_set_not_null"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_alter_table_alter_primary_key_drop_rowid(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_drop_rowid"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_alter_table_alter_primary_key_using_hash(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_using_hash"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_alter_table_alter_primary_key_vanilla(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_vanilla"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_alter_table_drop_constraint_check(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_drop_constraint_check"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_alter_table_drop_constraint_fk(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_drop_constraint_fk"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_alter_table_drop_constraint_uwi(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_drop_constraint_uwi"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_alter_table_multiple_commands(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_multiple_commands"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_alter_table_validate_constraint(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_validate_constraint"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_create_function(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_function"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_create_function_in_txn(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_function_in_txn"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_create_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_index"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_create_index_create_schema_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_index_create_schema_separate_statements"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_create_schema(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_schema"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_create_schema_drop_schema_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_schema_drop_schema_separate_statements"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_create_sequence(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/create_sequence"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_drop_column_basic(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_basic"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_drop_column_computed_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_computed_index"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_drop_column_create_index_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_create_index_separate_statements"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_drop_column_unique_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_unique_index"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_drop_column_with_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_with_index"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_drop_column_with_partial_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_with_partial_index"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_drop_column_with_udf_default(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_column_with_udf_default"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_drop_function(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_function"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_drop_index_hash_sharded_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_hash_sharded_index"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_drop_index_partial_expression_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_partial_expression_index"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_drop_index_vanilla_index(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_vanilla_index"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_drop_index_with_fks(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_with_fks"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_drop_index_with_materialized_view_dep(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_index_with_materialized_view_dep"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_drop_multiple_columns_separate_statements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_multiple_columns_separate_statements"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_drop_schema(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_schema"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_drop_table(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_table"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
func TestRollback_drop_table_udf_default(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const path = "pkg/sql/schemachanger/testdata/end_to_end/drop_table_udf_default"
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
| pkg/sql/schemachanger/sctest_generated_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/dbe5954ec7032390cc5713bad5ed8fcdaf3b2d05 | [
0.0002166584599763155,
0.000170959610841237,
0.00016520557983312756,
0.00017107557505369186,
0.000004043457920488436
] |
{
"id": 0,
"code_window": [
"\t\tCommit: false,\n",
"\t\t// Resolved intents should maintain an abort span entry to prevent\n",
"\t\t// concurrent requests from failing to notice the transaction was aborted.\n",
"\t\tPoison: true,\n",
"\t})\n",
"\n",
"\tconst taskName = \"txnHeartbeater: aborting txn\"\n",
"\tlog.VEventf(ctx, 2, \"async abort for txn: %s\", txn)\n",
"\tif err := h.stopper.RunAsyncTask(h.AnnotateCtx(context.Background()), taskName,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// NB: Setting `Source: kvpb.AdmissionHeader_OTHER` means this request will\n",
"\t// bypass AC.\n",
"\tba.AdmissionHeader = kvpb.AdmissionHeader{\n",
"\t\tPriority: txn.AdmissionPriority,\n",
"\t\tCreateTime: timeutil.Now().UnixNano(),\n",
"\t\tSource: kvpb.AdmissionHeader_OTHER,\n",
"\t}\n"
],
"file_path": "pkg/kv/kvclient/kvcoord/txn_interceptor_heartbeater.go",
"type": "add",
"edit_start_line_idx": 527
} | select '[1, "a", true, null, {"c": "blah", "d": []}]':::JSON, '{"\n": ["\""], "\"": "\\"}'::JSONB | pkg/sql/sem/tree/testdata/pretty/json.sql | 0 | https://github.com/cockroachdb/cockroach/commit/dbe5954ec7032390cc5713bad5ed8fcdaf3b2d05 | [
0.0001726736081764102,
0.0001726736081764102,
0.0001726736081764102,
0.0001726736081764102,
0
] |
{
"id": 1,
"code_window": [
"\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/settings\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/envutil\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/growstack\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/hlc\"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb\"\n"
],
"file_path": "pkg/kv/kvserver/replica_range_lease.go",
"type": "add",
"edit_start_line_idx": 60
} | // Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// This file contains replica methods related to range leases.
//
// Here be dragons: The lease system (especially for epoch-based
// leases) relies on multiple interlocking conditional puts (here and
// in NodeLiveness). Reads (to get expected values) and conditional
// puts have to happen in a certain order, leading to surprising
// dependencies at a distance (for example, there's a LeaseStatus
// object that gets plumbed most of the way through this file.
// LeaseStatus bundles the results of multiple checks with the time at
// which they were performed, so that timestamp must be used for later
// operations). The current arrangement is not perfect, and some
// opportunities for improvement appear, but any changes must be made
// very carefully.
//
// NOTE(bdarnell): The biggest problem with the current code is that
// with epoch-based leases, we may do two separate slow operations
// (IncrementEpoch/Heartbeat and RequestLease/AdminTransferLease). In
// the organization that was inherited from expiration-based leases,
// we prepare the arguments we're going to use for the lease
// operations before performing the liveness operations, and by the
// time the liveness operations complete those may be stale.
//
// Therefore, my suggested refactoring would be to move the liveness
// operations earlier in the process, soon after the initial
// leaseStatus call. If a liveness operation is required, do it and
// start over, with a fresh leaseStatus.
//
// This could also allow the liveness operations to be coalesced per
// node instead of having each range separately queue up redundant
// liveness operations. (The InitOrJoin model predates the
// singleflight package; could we simplify things by using it?)
package kvserver
import (
"context"
"fmt"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/constraint"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/raftutil"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/growstack"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/quotapool"
"github.com/cockroachdb/cockroach/pkg/util/retry"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/logtags"
"github.com/cockroachdb/redact"
)
var TransferExpirationLeasesFirstEnabled = settings.RegisterBoolSetting(
settings.SystemOnly,
"kv.transfer_expiration_leases_first.enabled",
"controls whether we transfer expiration-based leases that are later upgraded to epoch-based ones",
true,
)
var ExpirationLeasesOnly = settings.RegisterBoolSetting(
settings.SystemOnly,
"kv.expiration_leases_only.enabled",
"only use expiration-based leases, never epoch-based ones (experimental, affects performance)",
// false by default. Metamorphically enabled in tests, but not in deadlock
// builds because TestClusters are usually so slow that they're unable
// to maintain leases/leadership/liveness.
!syncutil.DeadlockEnabled &&
util.ConstantWithMetamorphicTestBool("kv.expiration_leases_only.enabled", false),
)
// DisableExpirationLeasesOnly is an escape hatch for ExpirationLeasesOnly,
// which can be used to hard-disable expiration-based leases e.g. if clusters
// are unable to start back up due to the lease extension load.
var DisableExpirationLeasesOnly = envutil.EnvOrDefaultBool(
"COCKROACH_DISABLE_EXPIRATION_LEASES_ONLY", false)
// EagerLeaseAcquisitionConcurrency is the number of concurrent, eager lease
// acquisitions made during Raft ticks, across all stores. Note that this does
// not include expiration lease extensions, which are unbounded.
var EagerLeaseAcquisitionConcurrency = settings.RegisterIntSetting(
settings.SystemOnly,
"kv.lease.eager_acquisition_concurrency",
"the maximum number of concurrent eager lease acquisitions (0 disables eager acquisition)",
256,
settings.NonNegativeInt,
)
// LeaseCheckPreferencesOnAcquisitionEnabled controls whether lease preferences
// are checked upon acquiring a new lease. If the new lease violates the
// configured preferences, it is enqueued in the replicate queue for
// processing.
//
// TODO(kvoli): Remove this cluster setting in 24.1, once we wish to enable
// this by default or is subsumed by another mechanism.
var LeaseCheckPreferencesOnAcquisitionEnabled = settings.RegisterBoolSetting(
settings.SystemOnly,
"kv.lease.check_preferences_on_acquisition.enabled",
"controls whether lease preferences are checked on lease acquisition, "+
"if the new lease violates preferences, it is queued for processing",
true,
)
var leaseStatusLogLimiter = func() *log.EveryN {
e := log.Every(15 * time.Second)
e.ShouldLog() // waste the first shot
return &e
}()
// leaseRequestHandle is a handle to an asynchronous lease request.
type leaseRequestHandle struct {
p *pendingLeaseRequest
c chan *kvpb.Error
}
// C returns the channel where the lease request's result will be sent on.
func (h *leaseRequestHandle) C() <-chan *kvpb.Error {
if h.c == nil {
panic("handle already canceled")
}
return h.c
}
// Cancel cancels the request handle. The asynchronous lease request will
// continue until it completes, to ensure leases can be acquired even if the
// client goes away (in particular in the face of IO delays which may trigger
// client timeouts).
func (h *leaseRequestHandle) Cancel() {
h.p.repl.mu.Lock()
defer h.p.repl.mu.Unlock()
if len(h.c) == 0 {
// Our lease request is ongoing...
// Unregister handle.
delete(h.p.llHandles, h)
}
// Mark handle as canceled.
h.c = nil
}
// resolve notifies the handle of the request's result.
//
// Requires repl.mu is exclusively locked.
func (h *leaseRequestHandle) resolve(pErr *kvpb.Error) { h.c <- pErr }
// pendingLeaseRequest coalesces RequestLease requests and lets
// callers join an in-progress lease request and wait for the result.
// The actual execution of the RequestLease Raft request is delegated
// to a replica.
//
// There are two types of leases: expiration-based and epoch-based.
// Expiration-based leases are considered valid as long as the wall
// time is less than the lease expiration timestamp minus the maximum
// clock offset. Epoch-based leases do not expire, but rely on the
// leaseholder maintaining its node liveness record (also a lease, but
// at the node level). All ranges up to and including the node
// liveness table must use expiration-based leases to avoid any
// circular dependencies.
//
// Methods are not thread-safe; a pendingLeaseRequest is logically part
// of the replica it references, so replica.mu should be used to
// synchronize all calls.
type pendingLeaseRequest struct {
// The replica that the pendingLeaseRequest is a part of.
repl *Replica
// Set of request handles attached to the lease acquisition.
// All accesses require repl.mu to be exclusively locked.
llHandles map[*leaseRequestHandle]struct{}
// nextLease is the pending RequestLease request, if any. It can be used to
// figure out if we're in the process of extending our own lease, or
// transferring it to another replica.
nextLease roachpb.Lease
}
func makePendingLeaseRequest(repl *Replica) pendingLeaseRequest {
return pendingLeaseRequest{
repl: repl,
llHandles: make(map[*leaseRequestHandle]struct{}),
}
}
// RequestPending returns the pending Lease, if one is in progress.
// The second return val is true if a lease request is pending.
//
// Requires repl.mu is read locked.
func (p *pendingLeaseRequest) RequestPending() (roachpb.Lease, bool) {
return p.nextLease, p.nextLease != roachpb.Lease{}
}
// InitOrJoinRequest executes a RequestLease command asynchronously and returns a
// handle on which the result will be posted. If there's already a request in
// progress, we join in waiting for the results of that request.
// It is an error to call InitOrJoinRequest() while a request is in progress
// naming another replica as lease holder.
//
// replica is used to schedule and execute async work (proposing a RequestLease
// command). replica.mu is locked when delivering results, so calls from the
// replica happen either before or after a result for a pending request has
// happened.
//
// The new lease will be a successor to the one in the status
// argument, and its fields will be used to fill in the expected
// values for liveness and lease operations.
//
// transfer needs to be set if the request represents a lease transfer (as
// opposed to an extension, or acquiring the lease when none is held).
//
// Requires repl.mu is exclusively locked.
func (p *pendingLeaseRequest) InitOrJoinRequest(
ctx context.Context,
nextLeaseHolder roachpb.ReplicaDescriptor,
status kvserverpb.LeaseStatus,
startKey roachpb.Key,
transfer bool,
bypassSafetyChecks bool,
limiter *quotapool.IntPool,
) *leaseRequestHandle {
if nextLease, ok := p.RequestPending(); ok {
if nextLease.Replica.ReplicaID == nextLeaseHolder.ReplicaID {
// Join a pending request asking for the same replica to become lease
// holder.
return p.JoinRequest()
}
// We can't join the request in progress.
// TODO(nvanbenschoten): should this return a LeaseRejectedError? Should
// it cancel and replace the request in progress? Reconsider.
return p.newResolvedHandle(kvpb.NewErrorf(
"request for different replica in progress (requesting: %+v, in progress: %+v)",
nextLeaseHolder.ReplicaID, nextLease.Replica.ReplicaID))
}
acquisition := !status.Lease.OwnedBy(p.repl.store.StoreID())
extension := !transfer && !acquisition
_ = extension // not used, just documentation
if acquisition {
// If this is a non-cooperative lease change (i.e. an acquisition), it
// is up to us to ensure that Lease.Start is greater than the end time
// of the previous lease. This means that if status refers to an expired
// epoch lease, we must increment the liveness epoch of the previous
// leaseholder *using status.Liveness*, which we know to be expired *at
// status.Timestamp*, before we can propose this lease. If this
// increment fails, we cannot propose this new lease (see handling of
// ErrEpochAlreadyIncremented in requestLeaseAsync).
//
// Note that the request evaluation may decrease our proposed start time
// if it decides that it is safe to do so (for example, this happens
// when renewing an expiration-based lease), but it will never increase
// it (and a start timestamp that is too low is unsafe because it
// results in incorrect initialization of the timestamp cache on the new
// leaseholder). For expiration-based leases, we have a safeguard during
// evaluation - we simply check that the new lease starts after the old
		// lease ends and throw an error if not. But for epoch-based leases, we
// don't have the benefit of such a safeguard during evaluation because
// the expiration is indirectly stored in the referenced liveness record
// and not in the lease itself. So for epoch-based leases, enforcing
// this safety condition is truly up to us.
if status.State != kvserverpb.LeaseState_EXPIRED {
log.Fatalf(ctx, "cannot acquire lease from another node before it has expired: %v", status)
}
}
// No request in progress. Let's propose a Lease command asynchronously.
llHandle := p.newHandle()
reqHeader := kvpb.RequestHeader{
Key: startKey,
}
reqLease := roachpb.Lease{
Start: status.Now,
Replica: nextLeaseHolder,
ProposedTS: &status.Now,
}
if p.repl.shouldUseExpirationLeaseRLocked() ||
(transfer &&
TransferExpirationLeasesFirstEnabled.Get(&p.repl.store.ClusterSettings().SV)) {
// In addition to ranges that should be using expiration-based leases
// (typically the meta and liveness ranges), we also use them during lease
// transfers for all other ranges. After acquiring these expiration based
// leases, the leaseholders are expected to upgrade them to the more
// efficient epoch-based ones. But by transferring an expiration-based
// lease, we can limit the effect of an ill-advised lease transfer since the
// incoming leaseholder needs to recognize itself as such within a few
// seconds; if it doesn't (we accidentally sent the lease to a replica in
// need of a snapshot or far behind on its log), the lease is up for grabs.
// If we simply transferred epoch based leases, it's possible for the new
// leaseholder that's delayed in applying the lease transfer to maintain its
// lease (assuming the node it's on is able to heartbeat its liveness
// record).
reqLease.Expiration = &hlc.Timestamp{}
*reqLease.Expiration = status.Now.ToTimestamp().Add(int64(p.repl.store.cfg.RangeLeaseDuration), 0)
} else {
// Get the liveness for the next lease holder and set the epoch in the lease request.
l, ok := p.repl.store.cfg.NodeLiveness.GetLiveness(nextLeaseHolder.NodeID)
if !ok || l.Epoch == 0 {
llHandle.resolve(kvpb.NewError(&kvpb.LeaseRejectedError{
Existing: status.Lease,
Requested: reqLease,
Message: fmt.Sprintf("couldn't request lease for %+v: %v", nextLeaseHolder, liveness.ErrRecordCacheMiss),
}))
return llHandle
}
reqLease.Epoch = l.Epoch
}
var leaseReq kvpb.Request
if transfer {
leaseReq = &kvpb.TransferLeaseRequest{
RequestHeader: reqHeader,
Lease: reqLease,
PrevLease: status.Lease,
BypassSafetyChecks: bypassSafetyChecks,
}
} else {
if bypassSafetyChecks {
// TODO(nvanbenschoten): we could support a similar bypassSafetyChecks
// flag for RequestLeaseRequest, which would disable the protection in
// propBuf.maybeRejectUnsafeProposalLocked. For now, we use a testing
// knob.
log.Fatal(ctx, "bypassSafetyChecks not supported for RequestLeaseRequest")
}
minProposedTS := p.repl.mu.minLeaseProposedTS
leaseReq = &kvpb.RequestLeaseRequest{
RequestHeader: reqHeader,
Lease: reqLease,
// PrevLease must match for our lease to be accepted. If another
// lease is applied between our previous call to leaseStatus and
// our lease request applying, it will be rejected.
PrevLease: status.Lease,
MinProposedTS: &minProposedTS,
}
}
err := p.requestLeaseAsync(ctx, nextLeaseHolder, status, leaseReq, limiter)
if err != nil {
if errors.Is(err, stop.ErrThrottled) {
llHandle.resolve(kvpb.NewError(err))
} else {
// We failed to start the asynchronous task. Send a blank NotLeaseHolderError
// back to indicate that we have no idea who the range lease holder might
// be; we've withdrawn from active duty.
llHandle.resolve(kvpb.NewError(
kvpb.NewNotLeaseHolderError(roachpb.Lease{}, p.repl.store.StoreID(), p.repl.mu.state.Desc,
"lease acquisition task couldn't be started; node is shutting down")))
}
return llHandle
}
// InitOrJoinRequest requires that repl.mu is exclusively locked. requestLeaseAsync
// also requires this lock to send results on all waiter channels. This means that
	// no results will be sent until we've released the lock, so there's no race between
// adding our new channel to p.llHandles below and requestLeaseAsync sending results
// on all channels in p.llHandles. The same logic applies to p.nextLease.
p.llHandles[llHandle] = struct{}{}
p.nextLease = reqLease
return llHandle
}
// requestLeaseAsync sends a transfer lease or lease request to the specified
// replica. The request is sent in an async task. If limiter is non-nil, it is
// used to bound the number of goroutines spawned, returning ErrThrottled when
// exceeded.
//
// The status argument is used as the expected value for liveness operations.
// leaseReq must be consistent with the LeaseStatus.
func (p *pendingLeaseRequest) requestLeaseAsync(
parentCtx context.Context,
nextLeaseHolder roachpb.ReplicaDescriptor,
status kvserverpb.LeaseStatus,
leaseReq kvpb.Request,
limiter *quotapool.IntPool,
) error {
// Create a new context. We run the request to completion even if all callers
// go away, to ensure leases can be acquired e.g. in the face of IO delays
// which may trigger client timeouts).
ctx := p.repl.AnnotateCtx(context.Background())
// Attach the parent's tracing span to the lease request, if any. It might
// outlive the parent in case the parent's ctx is canceled, so we use
// FollowsFrom. We can't include the trace for any other requests that join
// this one, but let's try to include it where we can.
var sp *tracing.Span
if parentSp := tracing.SpanFromContext(parentCtx); parentSp != nil {
ctx, sp = p.repl.AmbientContext.Tracer.StartSpanCtx(ctx, "request range lease",
tracing.WithParent(parentSp), tracing.WithFollowsFrom())
}
err := p.repl.store.Stopper().RunAsyncTaskEx(
ctx,
stop.TaskOpts{
TaskName: "pendingLeaseRequest: requesting lease",
SpanOpt: stop.ChildSpan,
// If a limiter is passed, use it to bound the number of spawned
// goroutines. When exceeded, return an error.
Sem: limiter,
},
func(ctx context.Context) {
defer sp.Finish()
// Grow the goroutine stack, to avoid having to re-grow it during request
// processing. This is normally done when processing batch requests via
// RPC, but here we submit the request directly to the local replica.
growstack.Grow()
err := p.requestLease(ctx, nextLeaseHolder, status, leaseReq)
// Error will be handled below.
// We reset our state below regardless of whether we've gotten an error or
// not, but note that an error is ambiguous - there's no guarantee that the
// transfer will not still apply. That's OK, however, as the "in transfer"
// state maintained by the pendingLeaseRequest is not relied on for
// correctness (see repl.mu.minLeaseProposedTS), and resetting the state
// is beneficial as it'll allow the replica to attempt to transfer again or
// extend the existing lease in the future.
p.repl.mu.Lock()
defer p.repl.mu.Unlock()
// Send result of lease to all waiter channels and cleanup request.
for llHandle := range p.llHandles {
// Don't send the same transaction object twice; this can lead to races.
if err != nil {
pErr := kvpb.NewError(err)
// TODO(tbg): why?
pErr.SetTxn(pErr.GetTxn())
llHandle.resolve(pErr)
} else {
llHandle.resolve(nil)
}
delete(p.llHandles, llHandle)
}
p.nextLease = roachpb.Lease{}
})
if err != nil {
p.nextLease = roachpb.Lease{}
sp.Finish()
return err
}
return nil
}
var logFailedHeartbeatOwnLiveness = log.Every(10 * time.Second)
// requestLease sends a synchronous transfer lease or lease request to the
// specified replica. It is only meant to be called from requestLeaseAsync,
// since it does not coordinate with other in-flight lease requests.
func (p *pendingLeaseRequest) requestLease(
ctx context.Context,
nextLeaseHolder roachpb.ReplicaDescriptor,
status kvserverpb.LeaseStatus,
leaseReq kvpb.Request,
) error {
started := timeutil.Now()
defer func() {
p.repl.store.metrics.LeaseRequestLatency.RecordValue(timeutil.Since(started).Nanoseconds())
}()
// If we're replacing an expired epoch-based lease, we must increment the
// epoch of the prior owner to invalidate its leases. If we were the owner,
// then we instead heartbeat to become live.
if status.Lease.Type() == roachpb.LeaseEpoch && status.State == kvserverpb.LeaseState_EXPIRED {
var err error
// If this replica is previous & next lease holder, manually heartbeat to become live.
if status.OwnedBy(nextLeaseHolder.StoreID) && p.repl.store.StoreID() == nextLeaseHolder.StoreID {
if err = p.repl.store.cfg.NodeLiveness.Heartbeat(ctx, status.Liveness); err != nil && logFailedHeartbeatOwnLiveness.ShouldLog() {
log.Errorf(ctx, "failed to heartbeat own liveness record: %s", err)
}
} else if status.Liveness.Epoch == status.Lease.Epoch {
// If not owner, increment epoch if necessary to invalidate lease.
// However, we only do so in the event that the next leaseholder is
// considered live at this time. If not, there's no sense in
// incrementing the expired leaseholder's epoch.
if !p.repl.store.cfg.NodeLiveness.GetNodeVitalityFromCache(nextLeaseHolder.NodeID).IsLive(livenesspb.EpochLease) {
err = errors.Errorf("not incrementing epoch on n%d because next leaseholder (n%d) not live",
status.Liveness.NodeID, nextLeaseHolder.NodeID)
log.VEventf(ctx, 1, "%v", err)
} else if err = p.repl.store.cfg.NodeLiveness.IncrementEpoch(ctx, status.Liveness); err != nil {
// If we get ErrEpochAlreadyIncremented, someone else beat
// us to it. This proves that the target node is truly
// dead *now*, but it doesn't prove that it was dead at
// status.Timestamp (which we've encoded into our lease
// request). It's possible that the node was temporarily
// considered dead but revived without having its epoch
// incremented, i.e. that it was in fact live at
// status.Timestamp.
//
// It would be incorrect to simply proceed to sending our
// lease request since our lease.Start may precede the
// effective end timestamp of the predecessor lease (the
// expiration of the last successful heartbeat before the
// epoch increment), and so under this lease this node's
// timestamp cache would not necessarily reflect all reads
// served by the prior leaseholder.
//
// It would be correct to bump the timestamp in the lease
// request and proceed, but that just sets up another race
// between this node and the one that already incremented
// the epoch. They're probably going to beat us this time
// too, so just return the NotLeaseHolderError here
// instead of trying to fix up the timestamps and submit
// the lease request.
//
// ErrEpochAlreadyIncremented is not an unusual situation,
// so we don't log it as an error.
//
// https://github.com/cockroachdb/cockroach/issues/35986
if errors.Is(err, liveness.ErrEpochAlreadyIncremented) {
// ignore
} else if errors.HasType(err, &liveness.ErrEpochCondFailed{}) {
// ErrEpochCondFailed indicates that someone else changed the liveness
// record while we were incrementing it. The node could still be
// alive, or someone else updated it. Don't log this as an error.
log.Infof(ctx, "failed to increment leaseholder's epoch: %s", err)
} else {
log.Errorf(ctx, "failed to increment leaseholder's epoch: %s", err)
}
}
}
if err != nil {
// Return an NLHE with an empty lease, since we know the previous lease
// isn't valid. In particular, if it was ours but we failed to reacquire
// it (e.g. because our heartbeat failed due to a stalled disk) then we
// don't want DistSender to retry us.
return kvpb.NewNotLeaseHolderError(roachpb.Lease{}, p.repl.store.StoreID(), p.repl.Desc(),
fmt.Sprintf("failed to manipulate liveness record: %s", err))
}
}
// Send the RequestLeaseRequest or TransferLeaseRequest and wait for the new
// lease to be applied.
//
// The Replica circuit breakers together with round-tripping a ProbeRequest
// here before asking for the lease could provide an alternative, simpler
// solution to the below issue:
//
// https://github.com/cockroachdb/cockroach/issues/37906
ba := &kvpb.BatchRequest{}
ba.Timestamp = p.repl.store.Clock().Now()
ba.RangeID = p.repl.RangeID
// NB:
// RequestLease always bypasses the circuit breaker (i.e. will prefer to
// get stuck on an unavailable range rather than failing fast; see
	// `(*RequestLeaseRequest).flags()`). This enables the caller to choose
	// either behavior for themselves: if they too want to bypass
// the circuit breaker, they simply don't check for the circuit breaker
// while waiting for their lease handle. If they want to fail-fast, they
// do. If the lease instead adopted the caller's preference, we'd have
// to handle the case of multiple preferences joining onto one lease
// request, which is more difficult.
//
// TransferLease will observe the circuit breaker, as transferring a
// lease when the range is unavailable results in, essentially, giving
// up on the lease and thus worsening the situation.
ba.Add(leaseReq)
_, pErr := p.repl.Send(ctx, ba)
return pErr.GoError()
}
// JoinRequest adds one more waiter to the currently pending request.
// It is the caller's responsibility to ensure that there is a pending request,
// and that the request is compatible with whatever the caller is currently
// wanting to do (i.e. the request is naming the intended node as the next
// lease holder).
//
// Requires repl.mu is exclusively locked.
func (p *pendingLeaseRequest) JoinRequest() *leaseRequestHandle {
llHandle := p.newHandle()
if _, ok := p.RequestPending(); !ok {
llHandle.resolve(kvpb.NewErrorf("no request in progress"))
return llHandle
}
p.llHandles[llHandle] = struct{}{}
return llHandle
}
// TransferInProgress returns whether the replica is in the process of
// transferring away its range lease. Note that the return values are
// best-effort and shouldn't be relied upon for correctness: if a previous
// transfer has returned an error, TransferInProgress will return `false`, but
// that doesn't necessarily mean that the transfer cannot still apply (see
// replica.mu.minLeaseProposedTS).
//
// It is assumed that the replica owning this pendingLeaseRequest owns the
// LeaderLease.
//
// replicaID is the ID of the parent replica.
//
// Requires repl.mu is read locked.
func (p *pendingLeaseRequest) TransferInProgress(replicaID roachpb.ReplicaID) bool {
if nextLease, ok := p.RequestPending(); ok {
// Is the lease being transferred? (as opposed to just extended)
return replicaID != nextLease.Replica.ReplicaID
}
return false
}
// newHandle creates a new leaseRequestHandle referencing the pending lease
// request.
func (p *pendingLeaseRequest) newHandle() *leaseRequestHandle {
return &leaseRequestHandle{
p: p,
c: make(chan *kvpb.Error, 1),
}
}
// newResolvedHandle creates a new leaseRequestHandle referencing the pending
// lease request. It then resolves the handle with the provided error.
func (p *pendingLeaseRequest) newResolvedHandle(pErr *kvpb.Error) *leaseRequestHandle {
h := p.newHandle()
h.resolve(pErr)
return h
}
// leaseStatus returns a lease status. The lease status is linked to the desire
// to serve a request at a specific timestamp (which may be a future timestamp)
// under the lease, as well as a notion of the current hlc time (now).
//
// # Explanation
//
// A status of ERROR indicates a failure to determine the correct lease status,
// and should not occur under normal operations. The caller's only recourse is
// to give up or to retry.
//
// If the lease is expired according to the now timestamp (and, in the case of
// epoch-based leases, the liveness epoch), a status of EXPIRED is returned.
// Note that this ignores the timestamp of the request, which may well
// technically be eligible to be served under the lease. The key feature of an
// EXPIRED status is that it reflects that a new lease with a start timestamp
// greater than or equal to now can be acquired non-cooperatively.
//
// If the lease is not EXPIRED, the lease's start timestamp is checked against
// the minProposedTimestamp. This timestamp indicates the oldest timestamp that
// a lease can have as its start time and still be used by the node. It is set
// both in cooperative lease transfers and to prevent reuse of leases across
// node restarts (which would result in latching violations). Leases with start
// times preceding this timestamp are assigned a status of PROSCRIBED and can
// not be used. Instead, a new lease should be acquired by callers.
//
// If the lease is not EXPIRED or PROSCRIBED, the request timestamp is taken
// into account. The expiration timestamp is adjusted for clock offset; if the
// request timestamp falls into the so-called "stasis period" at the end of the
// lifetime of the lease, or if the request timestamp is beyond the end of the
// lifetime of the lease, the status is UNUSABLE. Callers typically want to
// react to an UNUSABLE lease status by extending the lease, if they are in a
// position to do so.
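//
// For example: take a lease that expires at time t=100 on a node whose
// maximum clock offset is 500ms. The stasis period is then [99.5, 100).
// A request with timestamp 99.7 sees an UNUSABLE lease (and typically
// triggers an extension), while once now reaches 100 the lease is EXPIRED.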
//
// Finally, for request timestamps falling before the stasis period of a lease
// that is not EXPIRED and also not PROSCRIBED, the status is VALID.
//
// # Implementation Note
//
// On the surface, it might seem like we could easily abandon the lease stasis
// concept in favor of consulting a request's uncertainty interval. We would
// then define a request's timestamp as the maximum of its read_timestamp and
// its global_uncertainty_limit, and simply check whether this timestamp falls
// below a lease's expiration. This could allow certain transactional requests
// to operate more closely to a lease's expiration. But not all requests that
// expect linearizability use an uncertainty interval (e.g. non-transactional
// requests), and so the lease stasis period serves as a kind of catch-all
// uncertainty interval for non-transactional and admin requests.
//
// Without that stasis period, the following linearizability violation could
// occur for two non-transactional requests operating on a single register
// during a lease change:
//
// - a range lease gets committed on the new lease holder (but not the old).
// - client proposes and commits a write on new lease holder (with a timestamp
// just greater than the expiration of the old lease).
// - client tries to read what it wrote, but hits a slow coordinator (which
// assigns a timestamp covered by the old lease).
// - the read is served by the old lease holder (which has not processed the
// change in lease holdership).
// - the client fails to read their own write.
func (r *Replica) leaseStatus(
ctx context.Context,
lease roachpb.Lease,
now hlc.ClockTimestamp,
minProposedTS hlc.ClockTimestamp,
minValidObservedTS hlc.ClockTimestamp,
reqTS hlc.Timestamp,
) kvserverpb.LeaseStatus {
status := kvserverpb.LeaseStatus{
Lease: lease,
// NOTE: it would not be correct to accept either only the request time
// or only the current time in this method, we need both. We need the
// request time to determine whether the current lease can serve a given
// request, even if that request has a timestamp in the future of
// present time. We need the current time to distinguish between an
// EXPIRED lease and an UNUSABLE lease. Only an EXPIRED lease can change
// hands through a lease acquisition.
Now: now,
RequestTime: reqTS,
MinValidObservedTimestamp: minValidObservedTS,
}
var expiration hlc.Timestamp
if lease.Type() == roachpb.LeaseExpiration {
expiration = lease.GetExpiration()
} else {
l, ok := r.store.cfg.NodeLiveness.GetLiveness(lease.Replica.NodeID)
status.Liveness = l.Liveness
if !ok || status.Liveness.Epoch < lease.Epoch {
// If lease validity can't be determined (e.g. gossip is down
// and liveness info isn't available for owner), we can neither
// use the lease nor do we want to attempt to acquire it.
var msg redact.StringBuilder
if !ok {
msg.Printf("can't determine lease status of %s due to node liveness error: %v",
lease.Replica, liveness.ErrRecordCacheMiss)
} else {
msg.Printf("can't determine lease status of %s because node liveness info for n%d is stale. lease: %s, liveness: %s",
lease.Replica, lease.Replica.NodeID, lease, l.Liveness)
}
if leaseStatusLogLimiter.ShouldLog() {
log.Infof(ctx, "%s", msg)
}
status.State = kvserverpb.LeaseState_ERROR
status.ErrInfo = msg.String()
return status
}
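		// A liveness epoch newer than the lease's epoch means the lease was
		// invalidated by an epoch increment; such a lease can never become
		// valid again, so it is reported as EXPIRED.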
if status.Liveness.Epoch > lease.Epoch {
status.State = kvserverpb.LeaseState_EXPIRED
return status
}
expiration = status.Liveness.Expiration.ToTimestamp()
}
maxOffset := r.store.Clock().MaxOffset()
stasis := expiration.Add(-int64(maxOffset), 0)
ownedLocally := lease.OwnedBy(r.store.StoreID())
// NB: the order of these checks is important, and goes from stronger to
// weaker reasons why the lease may be considered invalid. For example,
// EXPIRED or PROSCRIBED must take precedence over UNUSABLE, because some
// callers consider UNUSABLE as valid. For an example issue that this ordering
// may cause, see https://github.com/cockroachdb/cockroach/issues/100101.
if expiration.LessEq(now.ToTimestamp()) {
status.State = kvserverpb.LeaseState_EXPIRED
} else if ownedLocally && lease.ProposedTS != nil && lease.ProposedTS.Less(minProposedTS) {
		// If the replica owns the lease, additionally verify that the lease's
// proposed timestamp is not earlier than the min proposed timestamp.
status.State = kvserverpb.LeaseState_PROSCRIBED
} else if stasis.LessEq(reqTS) {
status.State = kvserverpb.LeaseState_UNUSABLE
} else {
status.State = kvserverpb.LeaseState_VALID
}
return status
}
// CurrentLeaseStatus returns the status of the current lease for the
// current time.
//
// Common operations to perform on the resulting status are to check if
// it is valid using the IsValid method and to check whether the lease
// is held locally using the OwnedBy method.
//
// Note that this method does not check to see if a transfer is pending,
// but returns the status of the current lease and ownership at the
// specified point in time.
func (r *Replica) CurrentLeaseStatus(ctx context.Context) kvserverpb.LeaseStatus {
return r.LeaseStatusAt(ctx, r.Clock().NowAsClockTimestamp())
}
// LeaseStatusAt is like CurrentLeaseStatus, but accepts a now timestamp.
func (r *Replica) LeaseStatusAt(
ctx context.Context, now hlc.ClockTimestamp,
) kvserverpb.LeaseStatus {
r.mu.RLock()
defer r.mu.RUnlock()
return r.leaseStatusAtRLocked(ctx, now)
}
func (r *Replica) leaseStatusAtRLocked(
ctx context.Context, now hlc.ClockTimestamp,
) kvserverpb.LeaseStatus {
return r.leaseStatusForRequestRLocked(ctx, now, hlc.Timestamp{})
}
func (r *Replica) leaseStatusForRequestRLocked(
ctx context.Context, now hlc.ClockTimestamp, reqTS hlc.Timestamp,
) kvserverpb.LeaseStatus {
if reqTS.IsEmpty() {
// If the request timestamp is empty, return the status that
// would be given to a request with a timestamp of now.
reqTS = now.ToTimestamp()
}
return r.leaseStatus(ctx, *r.mu.state.Lease, now, r.mu.minLeaseProposedTS,
r.mu.minValidObservedTimestamp, reqTS)
}
// OwnsValidLease returns whether this replica is the current valid
// leaseholder.
//
// Note that this method does not check to see if a transfer is pending,
// but returns the status of the current lease and ownership at the
// specified point in time.
func (r *Replica) OwnsValidLease(ctx context.Context, now hlc.ClockTimestamp) bool {
r.mu.RLock()
defer r.mu.RUnlock()
return r.ownsValidLeaseRLocked(ctx, now)
}
func (r *Replica) ownsValidLeaseRLocked(ctx context.Context, now hlc.ClockTimestamp) bool {
st := r.leaseStatusAtRLocked(ctx, now)
return st.IsValid() && st.OwnedBy(r.store.StoreID())
}
// requiresExpirationLeaseRLocked returns whether this range unconditionally
// uses an expiration-based lease. Ranges located before or including the node
// liveness table must always use expiration leases to avoid circular
// dependencies on the node liveness table. All other ranges typically use
// epoch-based leases, but may temporarily use expiration based leases during
// lease transfers.
//
// TODO(erikgrinaker): It isn't always clear when to use this and when to use
// shouldUseExpirationLeaseRLocked. We can merge these once there are no more
// callers: when expiration leases don't quiesce and are always eagerly renewed.
func (r *Replica) requiresExpirationLeaseRLocked() bool {
return r.store.cfg.NodeLiveness == nil ||
r.mu.state.Desc.StartKey.Less(roachpb.RKey(keys.NodeLivenessKeyMax))
}
// shouldUseExpirationLeaseRLocked returns true if this range should be using an
// expiration-based lease, either because it requires one or because
// kv.expiration_leases_only.enabled is enabled.
func (r *Replica) shouldUseExpirationLeaseRLocked() bool {
return (ExpirationLeasesOnly.Get(&r.ClusterSettings().SV) && !DisableExpirationLeasesOnly) ||
r.requiresExpirationLeaseRLocked()
}
// requestLeaseLocked executes a request to obtain or extend a lease
// asynchronously and returns a channel on which the result will be posted. If
// there's already a request in progress, we join in waiting for the results of
// that request. Unless an error is returned, the obtained lease will be valid
// for a time interval containing the requested timestamp.
//
// A limiter can be passed to bound the number of new lease requests spawned.
// The function is responsible for acquiring quota and releasing it. If there is
// no quota, it resolves the returned handle with an error. Joining onto an
// existing lease request does not count towards the limit.
func (r *Replica) requestLeaseLocked(
ctx context.Context, status kvserverpb.LeaseStatus, limiter *quotapool.IntPool,
) *leaseRequestHandle {
if r.store.TestingKnobs().LeaseRequestEvent != nil {
if err := r.store.TestingKnobs().LeaseRequestEvent(status.Now.ToTimestamp(), r.StoreID(), r.GetRangeID()); err != nil {
return r.mu.pendingLeaseRequest.newResolvedHandle(err)
}
}
if pErr := r.store.TestingKnobs().PinnedLeases.rejectLeaseIfPinnedElsewhere(r); pErr != nil {
return r.mu.pendingLeaseRequest.newResolvedHandle(pErr)
}
// Propose a Raft command to get a lease for this replica.
repDesc, err := r.getReplicaDescriptorRLocked()
if err != nil {
return r.mu.pendingLeaseRequest.newResolvedHandle(kvpb.NewError(err))
}
return r.mu.pendingLeaseRequest.InitOrJoinRequest(
ctx, repDesc, status, r.mu.state.Desc.StartKey.AsRawKey(),
false /* transfer */, false /* bypassSafetyChecks */, limiter)
}
// AdminTransferLease transfers the LeaderLease to another replica. Only the
// current holder of the LeaderLease can do a transfer, because it needs to stop
// serving reads and proposing Raft commands (CPut is a read) while evaluating
// and proposing the TransferLease request. This synchronization with all other
// requests on the leaseholder is enforced through latching. The TransferLease
// request grabs a write latch over all keys in the range.
//
// If the leaseholder did not respect latching and did not stop serving reads
// during the lease transfer, it would potentially serve reads with timestamps
// greater than the start timestamp of the new (transferred) lease, which is
// determined during the evaluation of the TransferLease request. More subtly,
// the replica can't even serve reads or propose commands with timestamps lower
// than the start of the new lease because it could lead to read your own write
// violations (see comments on the stasis period on leaseStatus). We could, in
// principle, serve reads more than the maximum clock offset in the past.
//
// The method waits for any in-progress lease extension to be done, and it also
// blocks until the transfer is done. If a transfer is already in progress, this
// method joins in waiting for it to complete if it's transferring to the same
// replica. Otherwise, a NotLeaseHolderError is returned.
//
// AdminTransferLease implements the ReplicaLeaseMover interface.
func (r *Replica) AdminTransferLease(
ctx context.Context, target roachpb.StoreID, bypassSafetyChecks bool,
) error {
if r.store.cfg.TestingKnobs.DisableLeaderFollowsLeaseholder {
// Ensure lease transfers still work when we don't colocate leaders and leases.
bypassSafetyChecks = true
}
// initTransferHelper inits a transfer if no extension is in progress.
// It returns a channel for waiting for the result of a pending
// extension (if any is in progress) and a channel for waiting for the
// transfer (if it was successfully initiated).
var nextLeaseHolder roachpb.ReplicaDescriptor
initTransferHelper := func() (extension, transfer *leaseRequestHandle, err error) {
r.mu.Lock()
defer r.mu.Unlock()
now := r.store.Clock().NowAsClockTimestamp()
status := r.leaseStatusAtRLocked(ctx, now)
if status.Lease.OwnedBy(target) {
// The target is already the lease holder. Nothing to do.
return nil, nil, nil
}
desc := r.mu.state.Desc
if !status.Lease.OwnedBy(r.store.StoreID()) {
return nil, nil, kvpb.NewNotLeaseHolderError(status.Lease, r.store.StoreID(), desc,
"can't transfer the lease because this store doesn't own it")
}
// Verify the target is a replica of the range.
var ok bool
if nextLeaseHolder, ok = desc.GetReplicaDescriptor(target); !ok {
return nil, nil, roachpb.ErrReplicaNotFound
}
if nextLease, ok := r.mu.pendingLeaseRequest.RequestPending(); ok &&
nextLease.Replica != nextLeaseHolder {
repDesc, err := r.getReplicaDescriptorRLocked()
if err != nil {
return nil, nil, err
}
if nextLease.Replica == repDesc {
// There's an extension in progress. Let's wait for it to succeed and
// try again.
return r.mu.pendingLeaseRequest.JoinRequest(), nil, nil
}
// Another transfer is in progress, and it's not transferring to the
// same replica we'd like.
return nil, nil, kvpb.NewNotLeaseHolderError(nextLease, r.store.StoreID(), desc,
"another transfer to a different store is in progress")
}
// Verify that the lease transfer would be safe. This check is best-effort
// in that it can race with Raft leadership changes and log truncation. See
// propBuf.maybeRejectUnsafeProposalLocked for a non-racy version of this
// check, along with a full explanation of why it is important. We include
// both because rejecting a lease transfer in the propBuf after we have
// revoked our current lease is more disruptive than doing so here, before
// we have revoked our current lease.
raftStatus := r.raftStatusRLocked()
raftFirstIndex := r.raftFirstIndexRLocked()
snapStatus := raftutil.ReplicaMayNeedSnapshot(raftStatus, raftFirstIndex, nextLeaseHolder.ReplicaID)
if snapStatus != raftutil.NoSnapshotNeeded && !bypassSafetyChecks && !r.store.cfg.TestingKnobs.DisableAboveRaftLeaseTransferSafetyChecks {
r.store.metrics.LeaseTransferErrorCount.Inc(1)
log.VEventf(ctx, 2, "not initiating lease transfer because the target %s may "+
"need a snapshot: %s", nextLeaseHolder, snapStatus)
err := NewLeaseTransferRejectedBecauseTargetMayNeedSnapshotError(nextLeaseHolder, snapStatus)
return nil, nil, err
}
transfer = r.mu.pendingLeaseRequest.InitOrJoinRequest(ctx, nextLeaseHolder, status,
desc.StartKey.AsRawKey(), true /* transfer */, bypassSafetyChecks, nil /* limiter */)
return nil, transfer, nil
}
// Before transferring a lease, we ensure that the lease transfer is safe. If
// the leaseholder cannot guarantee this, we reject the lease transfer. To
// make such a claim, the leaseholder needs to become the Raft leader and
// probe the lease target's log. Doing so may take time, so we use a small
// exponential backoff loop with a maximum retry count before returning the
// rejection to the client. As configured, this retry loop should back off
// for about 6 seconds before returning an error.
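	// (Roughly: 50ms + 100ms + 200ms + 400ms + 800ms + 5*1s of capped backoff
	// across the 10 retries comes to about 6.5s of cumulative backoff.)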
retryOpts := retry.Options{
InitialBackoff: 50 * time.Millisecond,
MaxBackoff: 1 * time.Second,
Multiplier: 2,
MaxRetries: 10,
}
if count := r.store.TestingKnobs().LeaseTransferRejectedRetryLoopCount; count != 0 {
retryOpts.MaxRetries = count
}
transferRejectedRetry := retry.StartWithCtx(ctx, retryOpts)
transferRejectedRetry.Next() // The first call to Next does not block.
// Loop while there's an extension in progress.
for {
// See if there's an extension in progress that we have to wait for.
// If there isn't, request a transfer.
extension, transfer, err := initTransferHelper()
if err != nil {
if IsLeaseTransferRejectedBecauseTargetMayNeedSnapshotError(err) && transferRejectedRetry.Next() {
// If the lease transfer was rejected because the target may need a
// snapshot, try again. After the backoff, we may have become the Raft
// leader (through maybeTransferRaftLeadershipToLeaseholderLocked) or
// may have learned more about the state of the lease target's log.
log.VEventf(ctx, 2, "retrying lease transfer to store %d after rejection", target)
continue
}
return err
}
if extension == nil {
if transfer == nil {
// The target is us and we're the lease holder.
return nil
}
select {
case pErr := <-transfer.C():
return pErr.GoError()
case <-ctx.Done():
transfer.Cancel()
return ctx.Err()
}
}
// Wait for the in-progress extension without holding the mutex.
if r.store.TestingKnobs().LeaseTransferBlockedOnExtensionEvent != nil {
r.store.TestingKnobs().LeaseTransferBlockedOnExtensionEvent(nextLeaseHolder)
}
select {
case <-extension.C():
continue
case <-ctx.Done():
extension.Cancel()
return ctx.Err()
}
}
}
// GetLease returns the lease and, if available, the proposed next lease.
func (r *Replica) GetLease() (roachpb.Lease, roachpb.Lease) {
r.mu.RLock()
defer r.mu.RUnlock()
return r.getLeaseRLocked()
}
func (r *Replica) getLeaseRLocked() (roachpb.Lease, roachpb.Lease) {
if nextLease, ok := r.mu.pendingLeaseRequest.RequestPending(); ok {
return *r.mu.state.Lease, nextLease
}
return *r.mu.state.Lease, roachpb.Lease{}
}
// RevokeLease stops the replica from using its current lease, if that lease
// matches the provided lease sequence. All future calls to leaseStatus on this
// node with the current lease will now return a PROSCRIBED status.
func (r *Replica) RevokeLease(ctx context.Context, seq roachpb.LeaseSequence) {
r.mu.Lock()
defer r.mu.Unlock()
if r.mu.state.Lease.Sequence == seq {
r.mu.minLeaseProposedTS = r.Clock().NowAsClockTimestamp()
}
}
// NewLeaseTransferRejectedBecauseTargetMayNeedSnapshotError return an error
// indicating that a lease transfer failed because the current leaseholder could
// not prove that the lease transfer target did not need a Raft snapshot.
func NewLeaseTransferRejectedBecauseTargetMayNeedSnapshotError(
target roachpb.ReplicaDescriptor, snapStatus raftutil.ReplicaNeedsSnapshotStatus,
) error {
err := errors.Errorf("refusing to transfer lease to %d because target may need a Raft snapshot: %s",
target, snapStatus)
return errors.Mark(err, errMarkLeaseTransferRejectedBecauseTargetMayNeedSnapshot)
}
// checkRequestTimeRLocked checks that the provided request timestamp is not
// too far in the future. We define "too far" as a time that would require a
// lease extension even if we were perfectly proactive about extending our
// lease asynchronously to always ensure at least a "leaseRenewal" duration
// worth of runway. Doing so ensures that we detect client behavior that
// will inevitably run into frequent synchronous lease extensions.
//
// This serves as a stricter version of a check that if we were to perform
// a lease extension at now, the request would be contained within the new
// lease's expiration (and stasis period).
func (r *Replica) checkRequestTimeRLocked(now hlc.ClockTimestamp, reqTS hlc.Timestamp) error {
var leaseRenewal time.Duration
if r.shouldUseExpirationLeaseRLocked() {
_, leaseRenewal = r.store.cfg.RangeLeaseDurations()
} else {
_, leaseRenewal = r.store.cfg.NodeLivenessDurations()
}
leaseRenewalMinusStasis := leaseRenewal - r.store.Clock().MaxOffset()
if leaseRenewalMinusStasis < 0 {
// If maxOffset > leaseRenewal, such that present time operations risk
// ending up in the stasis period, allow requests up to clock.Now(). Can
// happen in tests.
leaseRenewalMinusStasis = 0
}
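// For example (hypothetical numbers): with leaseRenewal = 6s and a 500ms
// max clock offset, requests up to now + 5.5s are admitted.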
maxReqTS := now.ToTimestamp().Add(leaseRenewalMinusStasis.Nanoseconds(), 0)
if maxReqTS.Less(reqTS) {
return errors.Errorf("request timestamp %s too far in future (> %s)", reqTS, maxReqTS)
}
return nil
}
// leaseGoodToGoRLocked verifies that the replica has a lease that is
// valid, owned by the current replica, and usable to serve requests at
// the specified timestamp. The method will return the lease status if
// these conditions are satisfied or an error if they are unsatisfied.
// The lease status is either empty or fully populated.
//
// Latches must be acquired on the range before calling this method.
// This ensures that callers are properly sequenced with TransferLease
// requests, which declare a conflict with all other commands.
//
// The method has four possible outcomes:
//
// (1) the request timestamp is too far in the future. In this case, a
// nonstructured error is returned. This shouldn't happen.
//
// (2) the lease is invalid or otherwise unable to serve a request at the
// specified timestamp. In this case, an InvalidLeaseError is returned,
// which is caught in executeBatchWithConcurrencyRetries and used to
// trigger a lease acquisition/extension.
//
// (3) the lease is valid but held by a different replica. In this case, a
// NotLeaseHolderError is returned, which is propagated back up to the
// DistSender and triggers a redirection of the request.
//
// (4) the lease is valid, held locally, and capable of serving the given
// request. In this case, no error is returned.
func (r *Replica) leaseGoodToGoRLocked(
ctx context.Context, now hlc.ClockTimestamp, reqTS hlc.Timestamp,
) (kvserverpb.LeaseStatus, error) {
st := r.leaseStatusForRequestRLocked(ctx, now, reqTS)
err := r.leaseGoodToGoForStatusRLocked(ctx, now, reqTS, st)
if err != nil {
return kvserverpb.LeaseStatus{}, err
}
return st, err
}
func (r *Replica) leaseGoodToGoForStatusRLocked(
ctx context.Context, now hlc.ClockTimestamp, reqTS hlc.Timestamp, st kvserverpb.LeaseStatus,
) error {
if err := r.checkRequestTimeRLocked(now, reqTS); err != nil {
// Case (1): invalid request.
return err
}
if !st.IsValid() {
// Case (2): invalid lease.
return &kvpb.InvalidLeaseError{}
}
if !st.Lease.OwnedBy(r.store.StoreID()) {
// Case (3): not leaseholder.
_, stillMember := r.mu.state.Desc.GetReplicaDescriptor(st.Lease.Replica.StoreID)
if !stillMember {
// This would be the situation in which the lease holder gets removed when
// holding the lease, or in which a lease request erroneously gets accepted
// for a replica that is not in the replica set. Neither of the two can
// happen in normal usage since appropriate mechanisms have been added:
//
// 1. Only the lease holder (at the time) schedules removal of a replica,
// but the lease can change hands and so the situation in which a follower
// coordinates a replica removal of the (new) lease holder is possible (if
// unlikely) in practice. In this situation, the new lease holder would at
// some point be asked to propose the replica change's EndTxn to Raft. A
// check has been added that prevents proposals that amount to the removal
// of the proposer's (and hence lease holder's) Replica, preventing this
// scenario.
//
// 2. A lease is accepted for a Replica that has been removed. Without
// precautions, this could happen because lease requests are special in
// that they are the only command that is proposed on a follower (other
// commands may be proposed from followers, but not successfully so). For
// all proposals, processRaftCommand checks that their ProposerLease is
// compatible with the active lease for the log position. For commands
// proposed on the lease holder, the spanlatch manager then serializes
// everything. But lease requests get created on followers based on their
// local state and thus without being sequenced through latching. Thus
// a recently removed follower (unaware of its own removal) could submit
// a proposal for the lease (correctly using as a ProposerLease the last
// active lease), and would receive it given the up-to-date ProposerLease.
// Hence, an extra check is in order: processRaftCommand makes sure that
// lease requests for a replica not in the descriptor are bounced.
//
// However, this is possible if the `cockroach debug recover` command has
// been used, so this is just a logged error instead of a fatal assertion.
log.Errorf(ctx, "lease %s owned by replica %+v that no longer exists",
st.Lease, st.Lease.Replica)
}
// Otherwise, if the lease is currently held by another replica, redirect
// to the holder.
return kvpb.NewNotLeaseHolderError(
st.Lease, r.store.StoreID(), r.descRLocked(), "lease held by different store",
)
}
// Case (4): all good.
return nil
}
// leaseGoodToGo is like leaseGoodToGoRLocked, but will acquire the replica read
// lock.
func (r *Replica) leaseGoodToGo(
ctx context.Context, now hlc.ClockTimestamp, reqTS hlc.Timestamp,
) (kvserverpb.LeaseStatus, error) {
r.mu.RLock()
defer r.mu.RUnlock()
return r.leaseGoodToGoRLocked(ctx, now, reqTS)
}
// redirectOnOrAcquireLease checks whether this replica has the lease at
// the current timestamp. If it does, returns the lease and its status.
// If another replica currently holds the lease, redirects by returning
// NotLeaseHolderError and an empty lease status.
//
// If the lease is expired, a renewal is synchronously requested.
// Expiration-based leases are eagerly renewed when a request with a
// timestamp within RangeLeaseRenewalDuration of the lease expiration is
// served.
//
// TODO(spencer): for write commands, don't wait while requesting the
// range lease. If the lease acquisition fails, the write cmd will fail
// as well. If it succeeds, as is likely, then the write will not incur
// latency waiting for the command to complete. Reads, however, must wait.
func (r *Replica) redirectOnOrAcquireLease(
ctx context.Context,
) (kvserverpb.LeaseStatus, *kvpb.Error) {
return r.redirectOnOrAcquireLeaseForRequest(ctx, hlc.Timestamp{}, r.breaker.Signal())
}
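// A typical caller pattern (sketch only; error handling elided):
//
//	st, pErr := r.redirectOnOrAcquireLease(ctx)
//	if pErr != nil {
//		return pErr // e.g. a NotLeaseHolderError used by DistSender to redirect
//	}
//	_ = st // serve the request under the now-valid lease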
// TestingAcquireLease is redirectOnOrAcquireLease exposed for tests.
func (r *Replica) TestingAcquireLease(ctx context.Context) (kvserverpb.LeaseStatus, error) {
ctx = r.AnnotateCtx(ctx)
ctx = logtags.AddTag(ctx, "lease-acq", nil)
l, pErr := r.redirectOnOrAcquireLease(ctx)
return l, pErr.GoError()
}
func (s *Store) rangeLeaseAcquireTimeout() time.Duration {
if d := s.cfg.TestingKnobs.RangeLeaseAcquireTimeoutOverride; d != 0 {
return d
}
return s.cfg.RangeLeaseAcquireTimeout()
}
// redirectOnOrAcquireLeaseForRequest is like redirectOnOrAcquireLease,
// but it accepts a specific request timestamp instead of assuming that
// the request is operating at the current time.
func (r *Replica) redirectOnOrAcquireLeaseForRequest(
ctx context.Context, reqTS hlc.Timestamp, brSig signaller,
) (status kvserverpb.LeaseStatus, pErr *kvpb.Error) {
// Does not use RunWithTimeout(), because we do not want to mask the
// NotLeaseHolderError on context cancellation.
ctx, cancel := context.WithTimeout(ctx, r.store.rangeLeaseAcquireTimeout()) // nolint:context
defer cancel()
// Try fast-path.
now := r.store.Clock().NowAsClockTimestamp()
{
status, err := r.leaseGoodToGo(ctx, now, reqTS)
if err == nil {
return status, nil
} else if !errors.HasType(err, (*kvpb.InvalidLeaseError)(nil)) {
return kvserverpb.LeaseStatus{}, kvpb.NewError(err)
}
}
if err := brSig.Err(); err != nil {
return kvserverpb.LeaseStatus{}, kvpb.NewError(err)
}
// Loop until the lease is held or the replica ascertains the actual lease
// holder. Returns also on context.Done() (timeout or cancellation).
for attempt := 1; ; attempt++ {
now = r.store.Clock().NowAsClockTimestamp()
llHandle, status, transfer, pErr := func() (*leaseRequestHandle, kvserverpb.LeaseStatus, bool, *kvpb.Error) {
r.mu.Lock()
defer r.mu.Unlock()
// Check that we're not in the process of transferring the lease
// away. If we are doing so, we can't serve reads or propose Raft
// commands - see comments on AdminTransferLease and TransferLease.
// So wait on the lease transfer to complete either successfully or
// unsuccessfully before redirecting or retrying.
repDesc, err := r.getReplicaDescriptorRLocked()
if err != nil {
return nil, kvserverpb.LeaseStatus{}, false, kvpb.NewError(err)
}
if ok := r.mu.pendingLeaseRequest.TransferInProgress(repDesc.ReplicaID); ok {
return r.mu.pendingLeaseRequest.JoinRequest(), kvserverpb.LeaseStatus{}, true /* transfer */, nil
}
status := r.leaseStatusForRequestRLocked(ctx, now, reqTS)
switch status.State {
case kvserverpb.LeaseState_ERROR:
// Lease state couldn't be determined.
msg := status.ErrInfo
if msg == "" {
msg = "lease state could not be determined"
}
log.VEventf(ctx, 2, "%s", msg)
return nil, kvserverpb.LeaseStatus{}, false, kvpb.NewError(
kvpb.NewNotLeaseHolderError(roachpb.Lease{}, r.store.StoreID(), r.mu.state.Desc, msg))
case kvserverpb.LeaseState_VALID, kvserverpb.LeaseState_UNUSABLE:
if !status.Lease.OwnedBy(r.store.StoreID()) {
_, stillMember := r.mu.state.Desc.GetReplicaDescriptor(status.Lease.Replica.StoreID)
if !stillMember {
// See corresponding comment in leaseGoodToGoRLocked.
log.Errorf(ctx, "lease %s owned by replica %+v that no longer exists",
status.Lease, status.Lease.Replica)
}
// Otherwise, if the lease is currently held by another replica, redirect
// to the holder.
return nil, kvserverpb.LeaseStatus{}, false, kvpb.NewError(
kvpb.NewNotLeaseHolderError(status.Lease, r.store.StoreID(), r.mu.state.Desc,
"lease held by different store"))
}
// If the lease is in stasis, we can't serve requests until we've
// renewed the lease, so we return the handle to block on renewal.
if status.State == kvserverpb.LeaseState_UNUSABLE {
return r.requestLeaseLocked(ctx, status, nil), kvserverpb.LeaseStatus{}, false, nil
}
// Return a nil handle and status to signal that we have a valid lease.
return nil, status, false, nil
case kvserverpb.LeaseState_EXPIRED:
// No active lease: Request renewal if a renewal is not already pending.
log.VEventf(ctx, 2, "request range lease (attempt #%d)", attempt)
return r.requestLeaseLocked(ctx, status, nil), kvserverpb.LeaseStatus{}, false, nil
case kvserverpb.LeaseState_PROSCRIBED:
// Lease proposed timestamp is earlier than the min proposed
// timestamp limit this replica must observe. If this store
// owns the lease, re-request. Otherwise, redirect.
if status.Lease.OwnedBy(r.store.StoreID()) {
log.VEventf(ctx, 2, "request range lease (attempt #%d)", attempt)
return r.requestLeaseLocked(ctx, status, nil), kvserverpb.LeaseStatus{}, false, nil
}
// If lease is currently held by another, redirect to holder.
return nil, kvserverpb.LeaseStatus{}, false, kvpb.NewError(
kvpb.NewNotLeaseHolderError(status.Lease, r.store.StoreID(), r.mu.state.Desc, "lease proscribed"))
default:
return nil, kvserverpb.LeaseStatus{}, false, kvpb.NewErrorf("unknown lease status state %v", status)
}
}()
if pErr != nil {
return kvserverpb.LeaseStatus{}, pErr
}
if llHandle == nil {
// We own a valid lease.
log.Eventf(ctx, "valid lease %+v", status)
return status, nil
}
// Wait for the range lease acquisition/transfer to finish, or the
// context to expire.
//
// Note that even if the operation completes successfully, we can't
// assume that we have the lease. This is clearly not the case when
// waiting on a lease transfer and also not the case if our request
// timestamp is not covered by the new lease (though we try to protect
// against this in checkRequestTimeRLocked). So instead of assuming
// anything, we iterate and check again.
pErr = func() (pErr *kvpb.Error) {
slowTimer := timeutil.NewTimer()
defer slowTimer.Stop()
slowTimer.Reset(base.SlowRequestThreshold)
tBegin := timeutil.Now()
for {
select {
case pErr = <-llHandle.C():
if transfer {
// We were waiting on a transfer to finish. Ignore its
// result and try again.
return nil
}
if pErr != nil {
goErr := pErr.GoError()
switch {
case errors.HasType(goErr, (*kvpb.AmbiguousResultError)(nil)):
// This can happen if the RequestLease command we sent has been
// applied locally through a snapshot: the RequestLeaseRequest
// cannot be reproposed so we get this ambiguity.
// We'll just loop around.
return nil
case errors.HasType(goErr, (*kvpb.LeaseRejectedError)(nil)):
var tErr *kvpb.LeaseRejectedError
errors.As(goErr, &tErr)
if tErr.Existing.OwnedBy(r.store.StoreID()) {
// The RequestLease command we sent was rejected because another
// lease was applied in the meantime, but we own that other
// lease. So, loop until the current node becomes aware that
// it's the leaseholder.
return nil
}
// Getting a LeaseRejectedError back means someone else got there
// first, or the lease request was somehow invalid due to a concurrent
// change. That concurrent change could have been that this replica was
// removed (see processRaftCommand), so check for that case before
// falling back to a NotLeaseHolderError.
var err error
if _, descErr := r.GetReplicaDescriptor(); descErr != nil {
err = descErr
} else if st := r.CurrentLeaseStatus(ctx); !st.IsValid() {
err = kvpb.NewNotLeaseHolderError(roachpb.Lease{}, r.store.StoreID(), r.Desc(),
"lease acquisition attempt lost to another lease, which has expired in the meantime")
} else {
err = kvpb.NewNotLeaseHolderError(st.Lease, r.store.StoreID(), r.Desc(),
"lease acquisition attempt lost to another lease")
}
pErr = kvpb.NewError(err)
}
return pErr
}
log.VEventf(ctx, 2, "lease acquisition succeeded: %+v", status.Lease)
return nil
case <-brSig.C():
llHandle.Cancel()
err := brSig.Err()
log.VErrEventf(ctx, 2, "lease acquisition failed: %s", err)
return kvpb.NewError(err)
case <-slowTimer.C:
slowTimer.Read = true
log.Warningf(ctx, "have been waiting %s attempting to acquire lease (%d attempts)",
base.SlowRequestThreshold, attempt)
r.store.metrics.SlowLeaseRequests.Inc(1)
defer func(attempt int) {
r.store.metrics.SlowLeaseRequests.Dec(1)
log.Infof(ctx, "slow lease acquisition finished after %s with error %v after %d attempts", timeutil.Since(tBegin), pErr, attempt)
}(attempt)
case <-ctx.Done():
llHandle.Cancel()
log.VErrEventf(ctx, 2, "lease acquisition failed: %s", ctx.Err())
return kvpb.NewError(kvpb.NewNotLeaseHolderError(roachpb.Lease{}, r.store.StoreID(), r.Desc(),
"lease acquisition canceled because context canceled"))
case <-r.store.Stopper().ShouldQuiesce():
llHandle.Cancel()
return kvpb.NewError(kvpb.NewNotLeaseHolderError(roachpb.Lease{}, r.store.StoreID(), r.Desc(),
"lease acquisition canceled because node is stopping"))
}
}
}()
if pErr != nil {
return kvserverpb.LeaseStatus{}, pErr
}
// Retry...
}
}
// shouldRequestLeaseRLocked determines whether the replica should request a new
// lease. It also returns whether this is a lease extension. This covers the
// following cases:
//
// - The lease has expired, so the Raft leader should attempt to acquire it.
// - The lease is expiration-based, ours, and in need of extension.
// - The node has restarted, and should reacquire its former leases.
// - The lease is ours but has an incorrect type (epoch/expiration).
func (r *Replica) shouldRequestLeaseRLocked(
st kvserverpb.LeaseStatus,
) (shouldRequest bool, isExtension bool) {
switch st.State {
case kvserverpb.LeaseState_EXPIRED:
// Attempt to acquire an expired lease, but only if we're the Raft leader.
// We want the lease and leader to be colocated, and a non-leader lease
// proposal would be rejected by the Raft proposal buffer anyway. This also
// reduces aggregate work across ranges, since only 1 replica will attempt
// to acquire the lease, and only if there is a leader.
return r.isRaftLeaderRLocked(), false
case kvserverpb.LeaseState_PROSCRIBED:
// Reacquire leases after a restart, if they're still ours. We could also
// have revoked our lease as part of a lease transfer, but the transferred
// lease would typically take effect before we get here, and if not then the
// lease compare-and-swap would fail anyway.
return st.OwnedBy(r.StoreID()), false
case kvserverpb.LeaseState_VALID, kvserverpb.LeaseState_UNUSABLE:
// If someone else has the lease, leave it alone.
if !st.OwnedBy(r.StoreID()) {
return false, false
}
// Extend expiration leases if they're due.
if st.Lease.Type() == roachpb.LeaseExpiration {
renewal := st.Lease.Expiration.Add(-r.store.cfg.RangeLeaseRenewalDuration().Nanoseconds(), 0)
if renewal.LessEq(st.Now.ToTimestamp()) {
return true, true
}
}
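// E.g. (hypothetical numbers): an expiration lease expiring at t=10s with
// a 3s renewal duration becomes due for extension once now >= t=7s.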
// Switch the lease type if it's incorrect.
if !r.hasCorrectLeaseTypeRLocked(st.Lease) {
return true, false
}
return false, false
case kvserverpb.LeaseState_ERROR:
return false, false
default:
log.Fatalf(context.Background(), "invalid lease state %s", st.State)
return false, false
}
}
// maybeSwitchLeaseType will synchronously renew a lease using the appropriate
// type if it is (or was) owned by this replica and has an incorrect type. This
// typically happens when changing kv.expiration_leases_only.enabled.
func (r *Replica) maybeSwitchLeaseType(ctx context.Context, st kvserverpb.LeaseStatus) *kvpb.Error {
if !st.OwnedBy(r.store.StoreID()) {
return nil
}
var llHandle *leaseRequestHandle
r.mu.Lock()
if !r.hasCorrectLeaseTypeRLocked(st.Lease) {
llHandle = r.requestLeaseLocked(ctx, st, nil /* limiter */)
}
r.mu.Unlock()
if llHandle != nil {
select {
case pErr := <-llHandle.C():
return pErr
case <-ctx.Done():
return kvpb.NewError(ctx.Err())
}
}
return nil
}
// HasCorrectLeaseType returns true if the lease type is correct for this replica.
func (r *Replica) HasCorrectLeaseType(lease roachpb.Lease) bool {
r.mu.RLock()
defer r.mu.RUnlock()
return r.hasCorrectLeaseTypeRLocked(lease)
}
func (r *Replica) hasCorrectLeaseTypeRLocked(lease roachpb.Lease) bool {
hasExpirationLease := lease.Type() == roachpb.LeaseExpiration
return hasExpirationLease == r.shouldUseExpirationLeaseRLocked()
}
// LeasePreferencesStatus represents the state of satisfying lease preferences.
type LeasePreferencesStatus int
const (
_ LeasePreferencesStatus = iota
// LeasePreferencesViolating indicates the checked store does not satisfy any
// lease preference applied.
LeasePreferencesViolating
// LeasePreferencesLessPreferred indicates the checked store satisfies _some_
// preference, however not the most preferred.
LeasePreferencesLessPreferred
// LeasePreferencesOK indicates the checked store satisfies the first
// preference, or no lease preferences are applied.
LeasePreferencesOK
)
// LeaseViolatesPreferences checks if this replica owns the lease and if it
// violates the lease preferences defined in the span config. If no preferences
// are defined then it will return false and consider it to be in conformance.
func (r *Replica) LeaseViolatesPreferences(ctx context.Context, conf *roachpb.SpanConfig) bool {
storeID := r.store.StoreID()
preferences := conf.LeasePreferences
leaseStatus := r.CurrentLeaseStatus(ctx)
if !leaseStatus.IsValid() || !leaseStatus.Lease.OwnedBy(storeID) {
// We can't determine if the lease preferences are being conformed to or
// not, as the store either doesn't own the lease, or doesn't own a valid
// lease.
return false
}
storeAttrs := r.store.Attrs()
nodeAttrs := r.store.nodeDesc.Attrs
nodeLocality := r.store.nodeDesc.Locality
preferenceStatus := CheckStoreAgainstLeasePreferences(
storeID, storeAttrs, nodeAttrs, nodeLocality, preferences)
return preferenceStatus == LeasePreferencesViolating
}
// CheckStoreAgainstLeasePreferences returns whether the given store would
// violate, be less preferred, or be OK as the leaseholder, according to the
// lease preferences.
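// For example (illustrative): given preferences [+region=us-east,
// +region=us-west], a us-west store is LeasePreferencesLessPreferred since
// it matches only the second preference, while a store in eu-west matches
// none and is LeasePreferencesViolating.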
func CheckStoreAgainstLeasePreferences(
storeID roachpb.StoreID,
storeAttrs, nodeAttrs roachpb.Attributes,
nodeLocality roachpb.Locality,
preferences []roachpb.LeasePreference,
) LeasePreferencesStatus {
if len(preferences) == 0 {
return LeasePreferencesOK
}
for i, preference := range preferences {
if constraint.CheckConjunction(storeAttrs, nodeAttrs, nodeLocality, preference.Constraints) {
if i > 0 {
return LeasePreferencesLessPreferred
}
return LeasePreferencesOK
}
}
return LeasePreferencesViolating
}
| pkg/kv/kvserver/replica_range_lease.go | 1 | https://github.com/cockroachdb/cockroach/commit/dbe5954ec7032390cc5713bad5ed8fcdaf3b2d05 | [
0.12184959650039673,
0.0015071425586938858,
0.00016291666543111205,
0.00017011156887747347,
0.011725514195859432
] |
{
"id": 1,
"code_window": [
"\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/settings\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/envutil\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/growstack\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/hlc\"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb\"\n"
],
"file_path": "pkg/kv/kvserver/replica_range_lease.go",
"type": "add",
"edit_start_line_idx": 60
} | repartition from=../partition/single_col_range_partitioning_maxvalue to=../partition/unpartitioned
----
| pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/3node/repartition/single_col_range_partitioning_maxvalue_to_unpartitioned | 0 | https://github.com/cockroachdb/cockroach/commit/dbe5954ec7032390cc5713bad5ed8fcdaf3b2d05 | [
0.0001688830234343186,
0.0001688830234343186,
0.0001688830234343186,
0.0001688830234343186,
0
] |
{
"id": 1,
"code_window": [
"\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/settings\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/envutil\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/growstack\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/hlc\"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb\"\n"
],
"file_path": "pkg/kv/kvserver/replica_range_lease.go",
"type": "add",
"edit_start_line_idx": 60
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package lang
import (
"fmt"
"io"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/testutils/datapathutils"
"github.com/cockroachdb/datadriven"
)
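// The test files follow the datadriven format (sketch): a "parse" command
// line with optional extra arguments, the .opt source as the input block, a
// "----" separator, and then the expected result: either the parsed AST
// (root.String()) or the accumulated parse errors.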
func TestParser(t *testing.T) {
datadriven.RunTest(t, datapathutils.TestDataPath(t, "parser"), func(t *testing.T, d *datadriven.TestData) string {
// Only parse command supported.
if d.Cmd != "parse" {
t.FailNow()
}
args := []string{"test.opt"}
for _, cmdArg := range d.CmdArgs {
// Add additional args.
args = append(args, cmdArg.String())
}
p := NewParser(args...)
p.SetFileResolver(func(name string) (io.Reader, error) {
if name == "test.opt" {
return strings.NewReader(d.Input), nil
}
return nil, fmt.Errorf("unknown file '%s'", name)
})
var actual string
root := p.Parse()
if root != nil {
actual = root.String() + "\n"
} else {
// Concatenate errors.
for _, err := range p.Errors() {
actual = fmt.Sprintf("%s%s\n", actual, err.Error())
}
}
return actual
})
}
| pkg/sql/opt/optgen/lang/parser_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/dbe5954ec7032390cc5713bad5ed8fcdaf3b2d05 | [
0.0028300215490162373,
0.0006147320964373648,
0.00016944773960858583,
0.00017231132369488478,
0.0009907082421705127
] |
{
"id": 1,
"code_window": [
"\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/settings\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/envutil\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/growstack\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/hlc\"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb\"\n"
],
"file_path": "pkg/kv/kvserver/replica_range_lease.go",
"type": "add",
"edit_start_line_idx": 60
} | exec-ddl
CREATE TABLE a (k INT PRIMARY KEY, i INT, s STRING, d DECIMAL NOT NULL)
----
exec-ddl
CREATE TABLE b (x INT, z INT NOT NULL)
----
opt
SELECT k, i FROM a UNION SELECT * FROM b
----
union
├── columns: k:12 i:13
├── left columns: a.k:1 a.i:2
├── right columns: x:7 z:8
├── stats: [rows=2000, distinct(12,13)=2000, null(12,13)=0]
├── cost: 2207.76501
├── key: (12,13)
├── scan a
│ ├── columns: a.k:1!null a.i:2
│ ├── stats: [rows=1000, distinct(1,2)=1000, null(1,2)=0]
│ ├── cost: 1088.62
│ ├── key: (1)
│ └── fd: (1)-->(2)
└── scan b
├── columns: x:7 z:8!null
├── stats: [rows=1000, distinct(7,8)=1000, null(7,8)=0]
└── cost: 1078.52
opt
SELECT k, i FROM a UNION ALL SELECT * FROM b
----
union-all
├── columns: k:12 i:13
├── left columns: a.k:1 a.i:2
├── right columns: x:7 z:8
├── stats: [rows=2000]
├── cost: 2187.16
├── scan a
│ ├── columns: a.k:1!null a.i:2
│ ├── stats: [rows=1000]
│ ├── cost: 1088.62
│ ├── key: (1)
│ └── fd: (1)-->(2)
└── scan b
├── columns: x:7 z:8!null
├── stats: [rows=1000]
└── cost: 1078.52
opt
SELECT k, i FROM a INTERSECT SELECT * FROM b
----
intersect-all
├── columns: k:1 i:2
├── left columns: k:1 i:2
├── right columns: x:7 z:8
├── stats: [rows=1000]
├── cost: 2197.30625
├── key: (1)
├── fd: (1)-->(2)
├── scan a
│ ├── columns: k:1!null i:2
│ ├── stats: [rows=1000]
│ ├── cost: 1088.62
│ ├── key: (1)
│ └── fd: (1)-->(2)
└── scan b
├── columns: x:7 z:8!null
├── stats: [rows=1000]
└── cost: 1078.52
opt
SELECT k, i FROM a INTERSECT ALL SELECT * FROM b
----
intersect-all
├── columns: k:1 i:2
├── left columns: k:1 i:2
├── right columns: x:7 z:8
├── stats: [rows=1000]
├── cost: 2197.30625
├── key: (1)
├── fd: (1)-->(2)
├── scan a
│ ├── columns: k:1!null i:2
│ ├── stats: [rows=1000]
│ ├── cost: 1088.62
│ ├── key: (1)
│ └── fd: (1)-->(2)
└── scan b
├── columns: x:7 z:8!null
├── stats: [rows=1000]
└── cost: 1078.52
opt
SELECT k, i FROM a EXCEPT SELECT * FROM b
----
except-all
├── columns: k:1 i:2
├── left columns: k:1 i:2
├── right columns: x:7 z:8
├── stats: [rows=1000]
├── cost: 2197.30625
├── key: (1)
├── fd: (1)-->(2)
├── scan a
│ ├── columns: k:1!null i:2
│ ├── stats: [rows=1000]
│ ├── cost: 1088.62
│ ├── key: (1)
│ └── fd: (1)-->(2)
└── scan b
├── columns: x:7 z:8!null
├── stats: [rows=1000]
└── cost: 1078.52
opt
SELECT k, i FROM a EXCEPT ALL SELECT * FROM b
----
except-all
├── columns: k:1 i:2
├── left columns: k:1 i:2
├── right columns: x:7 z:8
├── stats: [rows=1000]
├── cost: 2197.30625
├── key: (1)
├── fd: (1)-->(2)
├── scan a
│ ├── columns: k:1!null i:2
│ ├── stats: [rows=1000]
│ ├── cost: 1088.62
│ ├── key: (1)
│ └── fd: (1)-->(2)
└── scan b
├── columns: x:7 z:8!null
├── stats: [rows=1000]
└── cost: 1078.52
| pkg/sql/opt/xform/testdata/coster/set | 0 | https://github.com/cockroachdb/cockroach/commit/dbe5954ec7032390cc5713bad5ed8fcdaf3b2d05 | [
0.00017784176452551037,
0.00017585685418453068,
0.00017041928367689252,
0.0001760628365445882,
0.000001662975250837917
] |
{
"id": 2,
"code_window": [
"\t// TransferLease will observe the circuit breaker, as transferring a\n",
"\t// lease when the range is unavailable results in, essentially, giving\n",
"\t// up on the lease and thus worsening the situation.\n",
"\tba.Add(leaseReq)\n",
"\t_, pErr := p.repl.Send(ctx, ba)\n",
"\treturn pErr.GoError()\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// NB: Setting `Source: kvpb.AdmissionHeader_OTHER` means this request will\n",
"\t// bypass AC.\n",
"\tba.AdmissionHeader = kvpb.AdmissionHeader{\n",
"\t\tPriority: int32(admissionpb.NormalPri),\n",
"\t\tCreateTime: timeutil.Now().UnixNano(),\n",
"\t\tSource: kvpb.AdmissionHeader_OTHER,\n",
"\t}\n"
],
"file_path": "pkg/kv/kvserver/replica_range_lease.go",
"type": "add",
"edit_start_line_idx": 573
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package kvcoord
import (
"context"
"sync"
"time"
"github.com/cockroachdb/cockroach/pkg/kv/kvbase"
"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/txnwait"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
)
// abortTxnAsyncTimeout is the context timeout for abortTxnAsyncLocked()
// rollbacks. If the intent resolver has spare async task capacity, this timeout
// only needs to be long enough for the EndTxn request to make it through Raft,
// but if the cleanup task is synchronous (to backpressure clients) then cleanup
// will be abandoned when the timeout expires. We generally want to clean up if
// possible, but not at any cost, so we set it high at 1 minute.
const abortTxnAsyncTimeout = time.Minute
// heartbeatTxnBufferPeriod is a buffer period used to determine when to start
// the heartbeat loop for the transaction. If the first locking operation
// occurs within this buffer period of expiration of the transaction, the
// transaction heartbeat should happen immediately, otherwise the heartbeat
// loop should start (at the latest) by this buffer period prior to the
// expiration. The buffer period should ensure that it is not possible
// for intents to be written prior to the transaction being considered expired.
// This attempts to avoid a transaction being considered expired (due to
// lacking a transaction record) by another pushing transaction that encounters
// its intents, as this will result in the transaction being aborted.
const heartbeatTxnBufferPeriod = 200 * time.Millisecond
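// For example (illustrative numbers): with a 1s heartbeat loop interval, a
// transaction that would already be considered expired 200ms from now is
// heartbeated synchronously before the loop is spawned, while one with,
// say, 1.1s until expiration schedules its first heartbeat after ~0.9s
// rather than waiting the full 1s interval.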
// txnHeartbeater is a txnInterceptor in charge of a transaction's heartbeat
// loop. Transaction coordinators heartbeat their transaction record
// periodically to indicate the liveness of their transaction. Other actors like
// concurrent transactions and GC processes observe a transaction record's last
// heartbeat time to learn about its disposition and to determine whether it
// should be considered abandoned. When a transaction is considered abandoned,
// other actors are free to abort it at will. As such, it is important for a
// transaction coordinator to heartbeat its transaction record with a
// periodicity well below the abandonment threshold.
//
// Transaction coordinators only need to perform heartbeats for transactions
// that risk running for longer than the abandonment duration. For transactions
// that finish well beneath this time, a heartbeat will never be sent and the
// EndTxn request will create and immediately finalize the transaction. However,
// for transactions that live long enough that they risk running into issues
// with other's perceiving them as abandoned, the first HeartbeatTxn request
// they send will create the transaction record in the PENDING state. Future
// heartbeats will update the transaction record to indicate progressively
// larger heartbeat timestamps.
//
// NOTE: there are other mechanisms by which concurrent actors could determine
// the liveness of transactions. One proposal is to have concurrent actors
// communicate directly with transaction coordinators themselves. This would
// avoid the need for transaction heartbeats and the PENDING transaction state
// entirely. Another proposal is to detect abandoned transactions and failed
// coordinators at an entirely different level - by maintaining a node health
// plane. This would function under the idea that if the node a transaction's
// coordinator is running on is alive then that transaction is still in-progress
// unless it specifies otherwise. These are both approaches we could consider in
// the future.
type txnHeartbeater struct {
log.AmbientContext
stopper *stop.Stopper
clock *hlc.Clock
metrics *TxnMetrics
loopInterval time.Duration
// wrapped is the next sender in the interceptor stack.
wrapped lockedSender
// gatekeeper is the sender to which heartbeat requests need to be sent. It is
// set to the gatekeeper interceptor, so sending directly to it will bypass
// all the other interceptors; heartbeats don't need them and they can only
// hurt - we don't want heartbeats to get sequence numbers or to check any
// intents. Note that the async rollbacks that this interceptor sometimes
// sends go through `wrapped`, not directly through `gatekeeper`.
gatekeeper lockedSender
// mu contains state protected by the TxnCoordSender's mutex.
mu struct {
sync.Locker
// txn is a reference to the TxnCoordSender's proto.
txn *roachpb.Transaction
// loopStarted indicates whether the heartbeat loop has been launched
// for the transaction or not. It remains true once the loop terminates.
loopStarted bool
// loopCancel is a function to cancel the context of the heartbeat loop.
// Non-nil if the heartbeat loop is currently running.
loopCancel func()
// finalObservedStatus is the finalized status that the heartbeat loop
// observed while heartbeating the transaction's record. As soon as the
// heartbeat loop observes a finalized status, it shuts down.
//
// If the status here is COMMITTED then the transaction definitely
// committed. However, if the status here is ABORTED then the
// transaction may or may not have been aborted. Instead, it's possible
// that the transaction was committed by an EndTxn request and then its
// record was garbage collected before the heartbeat request reached the
// record. The only way to distinguish this situation from a truly
// aborted transaction is to consider whether or not the transaction
// coordinator sent an EndTxn request and, if so, consider whether it
// succeeded or not.
//
// Because of this ambiguity, the status is not used to immediately
// update txn in case the heartbeat loop raced with an EndTxn request.
// Instead, it is used by the transaction coordinator to reject any
// future requests sent though it (which indicates that the heartbeat
// loop did not race with an EndTxn request).
finalObservedStatus roachpb.TransactionStatus
// ifReqs tracks the number of in-flight requests. This is expected to
// be either 0 or 1, but we let the txnLockGatekeeper enforce that.
//
// This is used to make sure we don't send EndTxn(commit=false) from
// abortTxnAsyncLocked() concurrently with another in-flight request.
// The TxnCoordSender assumes synchronous operation; in particular,
// the txnPipeliner must update its lock spans with pending responses
// before attaching the final lock spans to the EndTxn request.
ifReqs uint8
// abortTxnAsyncPending, if true, signals that an abortTxnAsyncLocked()
// call is waiting for in-flight requests to complete. Once the last
// request returns (setting ifReqs=0), it calls abortTxnAsyncLocked().
abortTxnAsyncPending bool
// abortTxnAsyncResultC is non-nil when an abortTxnAsyncLocked()
// rollback is in-flight. If a client rollback arrives concurrently, it
// will wait for the result on this channel, collapsing the requests to
// prevent concurrent rollbacks. Only EndTxn(commit=false) requests can
// arrive during rollback, the TxnCoordSender blocks any others due to
// finalObservedStatus.
abortTxnAsyncResultC chan abortTxnAsyncResult
}
}
type abortTxnAsyncResult struct {
br *kvpb.BatchResponse
pErr *kvpb.Error
}
// init initializes the txnHeartbeater. This method exists instead of a
// constructor because txnHeartbeaters live in a pool in the TxnCoordSender.
func (h *txnHeartbeater) init(
ac log.AmbientContext,
stopper *stop.Stopper,
clock *hlc.Clock,
metrics *TxnMetrics,
loopInterval time.Duration,
gatekeeper lockedSender,
mu sync.Locker,
txn *roachpb.Transaction,
) {
h.AmbientContext = ac
h.stopper = stopper
h.clock = clock
h.metrics = metrics
h.loopInterval = loopInterval
h.gatekeeper = gatekeeper
h.mu.Locker = mu
h.mu.txn = txn
}
// SendLocked is part of the txnInterceptor interface.
func (h *txnHeartbeater) SendLocked(
ctx context.Context, ba *kvpb.BatchRequest,
) (*kvpb.BatchResponse, *kvpb.Error) {
etArg, hasET := ba.GetArg(kvpb.EndTxn)
firstLockingIndex, pErr := firstLockingIndex(ba)
if pErr != nil {
return nil, pErr
}
if firstLockingIndex != -1 {
// Set txn key based on the key of the first transactional write if not
// already set. If it is already set, make sure we keep the anchor key
// the same.
if len(h.mu.txn.Key) == 0 {
anchor := ba.Requests[firstLockingIndex].GetInner().Header().Key
h.mu.txn.Key = anchor
// Put the anchor also in the ba's copy of the txn, since this batch
// was prepared before we had an anchor.
ba.Txn.Key = anchor
}
// Start the heartbeat loop if it has not already started.
if !h.mu.loopStarted {
h.startHeartbeatLoopLocked(ctx)
}
}
if hasET {
et := etArg.(*kvpb.EndTxnRequest)
// Preemptively stop the heartbeat loop in case of transaction abort.
// In case of transaction commit we don't want to do this because commit
// could fail with retryable error and transaction would be restarted
// with the next epoch.
if !et.Commit {
h.cancelHeartbeatLoopLocked()
// If an abortTxnAsyncLocked() rollback is in flight, we'll wait for
// its result here to avoid sending a concurrent rollback.
// Otherwise, txnLockGatekeeper would error since it does not allow
// concurrent requests (to enforce a synchronous client protocol).
if resultC := h.mu.abortTxnAsyncResultC; resultC != nil {
// We have to unlock the mutex while waiting, to allow the
// txnLockGatekeeper to acquire the mutex when receiving the
// async abort response. Once we receive our copy of the
// response, we re-acquire the lock to return it to the client.
h.mu.Unlock()
defer h.mu.Lock()
select {
case res := <-resultC:
return res.br, res.pErr
case <-ctx.Done():
return nil, kvpb.NewError(ctx.Err())
}
}
}
}
// Forward the batch through the wrapped lockedSender, recording the
// in-flight request to coordinate with abortTxnAsyncLocked(). Recall that
// the mutex is unlocked for the duration of the SendLocked() call.
h.mu.ifReqs++
br, pErr := h.wrapped.SendLocked(ctx, ba)
h.mu.ifReqs--
// If an abortTxnAsyncLocked() call is waiting for this in-flight
// request to complete, call it. At this point, finalObservedStatus has
// already been set, so we don't have to worry about additional incoming
// requests (except rollbacks) -- the TxnCoordSender will block them.
if h.mu.abortTxnAsyncPending && h.mu.ifReqs == 0 {
h.abortTxnAsyncLocked(ctx)
h.mu.abortTxnAsyncPending = false
}
return br, pErr
}
// setWrapped is part of the txnInterceptor interface.
func (h *txnHeartbeater) setWrapped(wrapped lockedSender) {
h.wrapped = wrapped
}
// populateLeafInputState is part of the txnInterceptor interface.
func (*txnHeartbeater) populateLeafInputState(*roachpb.LeafTxnInputState) {}
// populateLeafFinalState is part of the txnInterceptor interface.
func (*txnHeartbeater) populateLeafFinalState(*roachpb.LeafTxnFinalState) {}
// importLeafFinalState is part of the txnInterceptor interface.
func (*txnHeartbeater) importLeafFinalState(context.Context, *roachpb.LeafTxnFinalState) error {
return nil
}
// epochBumpedLocked is part of the txnInterceptor interface.
func (h *txnHeartbeater) epochBumpedLocked() {}
// createSavepointLocked is part of the txnInterceptor interface.
func (*txnHeartbeater) createSavepointLocked(context.Context, *savepoint) {}
// rollbackToSavepointLocked is part of the txnInterceptor interface.
func (*txnHeartbeater) rollbackToSavepointLocked(context.Context, savepoint) {}
// closeLocked is part of the txnInterceptor interface.
func (h *txnHeartbeater) closeLocked() {
h.cancelHeartbeatLoopLocked()
}
// startHeartbeatLoopLocked starts a heartbeat loop in a different goroutine.
func (h *txnHeartbeater) startHeartbeatLoopLocked(ctx context.Context) {
if h.loopInterval < 0 {
log.Infof(ctx, "coordinator heartbeat loop disabled")
return
}
if h.mu.loopStarted {
log.Fatal(ctx, "attempting to start a second heartbeat loop")
}
log.VEventf(ctx, 2, kvbase.SpawningHeartbeatLoopMsg)
h.mu.loopStarted = true
// NB: we can't do this in init() because the txn isn't populated yet then
// (it's zero).
h.AmbientContext.AddLogTag("txn-hb", h.mu.txn.Short())
// Create a new context so that the heartbeat loop doesn't inherit the
// caller's cancelation or span.
hbCtx, hbCancel := context.WithCancel(h.AnnotateCtx(context.Background()))
// If, by the time heartbeatTxnBufferPeriod has passed, this transaction would
// be considered expired, then synchronously attempt to heartbeat immediately
// before spawning the loop.
heartbeatLoopDelay := h.loopInterval
now := h.clock.Now()
if txnwait.IsExpired(
now.Add(heartbeatTxnBufferPeriod.Nanoseconds(), 0 /* logical */),
h.mu.txn,
) {
log.VEventf(ctx, 2, "heartbeating immediately to avoid expiration")
h.heartbeatLocked(ctx)
} else {
timeUntilExpiry := txnwait.TxnExpiration(h.mu.txn).GoTime().Sub(now.GoTime())
if (timeUntilExpiry - heartbeatTxnBufferPeriod) < heartbeatLoopDelay {
log.VEventf(ctx, 2, "scheduling heartbeat early to avoid expiration")
heartbeatLoopDelay = timeUntilExpiry - heartbeatTxnBufferPeriod
}
}
// Delay spawning the loop goroutine until the first loopInterval passes or
// until a defined buffer period prior to expiration (whichever is first) to
// avoid the associated cost for small write transactions. In benchmarks,
// this gave a 3% throughput increase for point writes at high concurrency.
timer := time.AfterFunc(heartbeatLoopDelay, func() {
const taskName = "kv.TxnCoordSender: heartbeat loop"
var span *tracing.Span
hbCtx, span = h.AmbientContext.Tracer.StartSpanCtx(hbCtx, taskName)
defer span.Finish()
// Only errors on quiesce, which is safe to ignore.
_ = h.stopper.RunTask(hbCtx, taskName, h.heartbeatLoop)
})
h.mu.loopCancel = func() {
timer.Stop()
hbCancel()
}
}
func (h *txnHeartbeater) cancelHeartbeatLoopLocked() {
// If the heartbeat loop has already started, cancel it.
if h.heartbeatLoopRunningLocked() {
h.mu.loopCancel()
h.mu.loopCancel = nil
}
}
func (h *txnHeartbeater) heartbeatLoopRunningLocked() bool {
return h.mu.loopCancel != nil
}
// heartbeatLoop periodically sends a HeartbeatTxn request to the transaction
// record, stopping in the event the transaction is aborted or committed after
// attempting to resolve the intents.
func (h *txnHeartbeater) heartbeatLoop(ctx context.Context) {
defer func() {
h.mu.Lock()
h.cancelHeartbeatLoopLocked()
h.mu.Unlock()
}()
var tickChan <-chan time.Time
{
ticker := time.NewTicker(h.loopInterval)
tickChan = ticker.C
defer ticker.Stop()
}
// Loop is only spawned after loopInterval, so heartbeat immediately.
if !h.heartbeat(ctx) {
return
}
// Loop with ticker for periodic heartbeats.
for {
select {
case <-tickChan:
if !h.heartbeat(ctx) {
// The heartbeat noticed a finalized transaction,
// so shut down the heartbeat loop.
return
}
case <-ctx.Done():
// Transaction finished normally.
return
case <-h.stopper.ShouldQuiesce():
return
}
}
}
// heartbeat is a convenience method to be called by the heartbeat loop, acquiring
// the mutex and issuing a request using heartbeatLocked before releasing it.
// See comment on heartbeatLocked for more explanation.
func (h *txnHeartbeater) heartbeat(ctx context.Context) bool {
// Like with the TxnCoordSender, the locking here is peculiar. The lock is not
// held continuously throughout the heartbeatLocked method: we acquire the
// lock here and then, inside the wrapped.Send() call, the interceptor at the
// bottom of the stack will unlock until it receives a response.
h.mu.Lock()
defer h.mu.Unlock()
// The heartbeat loop might have raced with the cancellation of the heartbeat.
if ctx.Err() != nil {
return false
}
return h.heartbeatLocked(ctx)
}
// heartbeatLocked sends a HeartbeatTxnRequest to the txn record.
// Returns true if heartbeating should continue, false if the transaction is no
// longer Pending and so there's no point in heartbeating further.
func (h *txnHeartbeater) heartbeatLocked(ctx context.Context) bool {
if h.mu.txn.Status != roachpb.PENDING {
if h.mu.txn.Status == roachpb.COMMITTED {
log.Fatalf(ctx, "txn committed but heartbeat loop hasn't been signaled to stop: %s", h.mu.txn)
}
// If the transaction is aborted, there's no point in heartbeating. The
// client needs to send a rollback.
return false
}
// Clone the txn in order to put it in the heartbeat request.
txn := h.mu.txn.Clone()
if txn.Key == nil {
log.Fatalf(ctx, "attempting to heartbeat txn without anchor key: %v", txn)
}
ba := &kvpb.BatchRequest{}
ba.Txn = txn
ba.Add(&kvpb.HeartbeatTxnRequest{
RequestHeader: kvpb.RequestHeader{
Key: txn.Key,
},
Now: h.clock.Now(),
})
// Send the heartbeat request directly through the gatekeeper interceptor.
// See comment on h.gatekeeper for a discussion of why.
log.VEvent(ctx, 2, "heartbeat")
br, pErr := h.gatekeeper.SendLocked(ctx, ba)
// If the txn is no longer pending, ignore the result of the heartbeat
// and tear down the heartbeat loop.
if h.mu.txn.Status != roachpb.PENDING {
return false
}
var respTxn *roachpb.Transaction
if pErr != nil {
log.VEventf(ctx, 2, "heartbeat failed for %s: %s", h.mu.txn, pErr)
// We need to be prepared here to handle the case of a
// TransactionAbortedError with no transaction proto in it.
//
// TODO(nvanbenschoten): Make this the only case where we get back an
// Aborted txn.
if _, ok := pErr.GetDetail().(*kvpb.TransactionAbortedError); ok {
// Note that it's possible that the txn actually committed but its
// record got GC'ed. In that case, aborting won't hurt anyone though,
// since all intents have already been resolved.
// The only thing we must ascertain is that we don't tell the client
// about this error - it will get either a definitive result of
// its commit or an ambiguous one and we have nothing to offer that
// provides more clarity. We do however prevent it from running more
// requests in case it isn't aware that the transaction is over.
log.VEventf(ctx, 1, "Heartbeat detected aborted txn, cleaning up for %s", h.mu.txn)
h.abortTxnAsyncLocked(ctx)
h.mu.finalObservedStatus = roachpb.ABORTED
return false
}
respTxn = pErr.GetTxn()
} else {
respTxn = br.Txn
}
// Tear down the heartbeat loop if the response transaction is finalized.
if respTxn != nil && respTxn.Status.IsFinalized() {
switch respTxn.Status {
case roachpb.COMMITTED:
// Shut down the heartbeat loop without doing anything else.
// We must have raced with an EndTxn(commit=true).
case roachpb.ABORTED:
// Roll back the transaction record to clean up intents and
// then shut down the heartbeat loop.
log.VEventf(ctx, 1, "Heartbeat detected aborted txn, cleaning up for %s", h.mu.txn)
h.abortTxnAsyncLocked(ctx)
}
h.mu.finalObservedStatus = respTxn.Status
return false
}
return true
}
// abortTxnAsyncLocked sends an EndTxn(commit=false) asynchronously.
// The purpose of the async cleanup is to resolve transaction intents as soon
// as possible when a transaction coordinator observes an ABORTED transaction.
func (h *txnHeartbeater) abortTxnAsyncLocked(ctx context.Context) {
// If a request is in flight, we must wait for it to complete first such
// that txnPipeliner can record its lock spans and attach them to the EndTxn
// request we'll send.
if h.mu.ifReqs > 0 {
h.mu.abortTxnAsyncPending = true
log.VEventf(ctx, 2, "async abort waiting for in-flight request for txn %s", h.mu.txn)
return
}
// Construct a batch with an EndTxn request.
txn := h.mu.txn.Clone()
ba := &kvpb.BatchRequest{}
ba.Header = kvpb.Header{Txn: txn}
ba.Add(&kvpb.EndTxnRequest{
Commit: false,
// Resolved intents should maintain an abort span entry to prevent
// concurrent requests from failing to notice the transaction was aborted.
Poison: true,
})
const taskName = "txnHeartbeater: aborting txn"
log.VEventf(ctx, 2, "async abort for txn: %s", txn)
if err := h.stopper.RunAsyncTask(h.AnnotateCtx(context.Background()), taskName,
func(ctx context.Context) {
if err := timeutil.RunWithTimeout(ctx, taskName, abortTxnAsyncTimeout,
func(ctx context.Context) error {
h.mu.Lock()
defer h.mu.Unlock()
// If we find an abortTxnAsyncResultC, that means an async
// rollback request is already in flight, so there's no
// point in us running another. This can happen because the
// TxnCoordSender also calls abortTxnAsyncLocked()
// independently of the heartbeat loop.
if h.mu.abortTxnAsyncResultC != nil {
log.VEventf(ctx, 2,
"skipping async abort due to concurrent async abort for %s", txn)
return nil
}
// TxnCoordSender allows EndTxn(commit=false) through even
// after we set finalObservedStatus, and that request can
// race with us for the mutex. Thus, if we find an in-flight
// request here, after checking ifReqs=0 before being spawned,
// we deduce that it must have been a rollback and there's no
// point in sending another rollback.
if h.mu.ifReqs > 0 {
log.VEventf(ctx, 2,
"skipping async abort due to client rollback for %s", txn)
return nil
}
// Set up a result channel to signal to an incoming client
// rollback that an async rollback is already in progress,
// and pass it the result. The buffer allows storing the
// result even when no client rollback arrives. Recall that
// the SendLocked() call below releases the mutex while
// running, allowing concurrent incoming requests.
h.mu.abortTxnAsyncResultC = make(chan abortTxnAsyncResult, 1)
// Send the abort request through the interceptor stack. This is
// important because we need the txnPipeliner to append lock spans
// to the EndTxn request.
br, pErr := h.wrapped.SendLocked(ctx, ba)
if pErr != nil {
log.VErrEventf(ctx, 1, "async abort failed for %s: %s ", txn, pErr)
h.metrics.AsyncRollbacksFailed.Inc(1)
}
// Pass the result to a waiting client rollback, if any, and
// remove the channel since we're no longer in flight.
h.mu.abortTxnAsyncResultC <- abortTxnAsyncResult{br: br, pErr: pErr}
h.mu.abortTxnAsyncResultC = nil
return nil
},
); err != nil {
log.VEventf(ctx, 1, "async abort failed for %s: %s", txn, err)
}
},
); err != nil {
log.Warningf(ctx, "%v", err)
h.metrics.AsyncRollbacksFailed.Inc(1)
}
}
// firstLockingIndex returns the index of the first request that acquires locks
// in the BatchRequest. Returns -1 if the batch has no intention to acquire
// locks. It also verifies that if an EndTxnRequest is included, then it is the
// last request in the batch.
func firstLockingIndex(ba *kvpb.BatchRequest) (int, *kvpb.Error) {
for i, ru := range ba.Requests {
args := ru.GetInner()
if i < len(ba.Requests)-1 /* if not last */ {
if _, ok := args.(*kvpb.EndTxnRequest); ok {
return -1, kvpb.NewErrorf("%s sent as non-terminal call", args.Method())
}
}
if kvpb.IsLocking(args) {
return i, nil
}
}
return -1, nil
}
| pkg/kv/kvclient/kvcoord/txn_interceptor_heartbeater.go | 1 | https://github.com/cockroachdb/cockroach/commit/dbe5954ec7032390cc5713bad5ed8fcdaf3b2d05 | [
0.9966540336608887,
0.11530564725399017,
0.0001638974790694192,
0.0001696006511338055,
0.3019724190235138
] |
{
"id": 2,
"code_window": [
"\t// TransferLease will observe the circuit breaker, as transferring a\n",
"\t// lease when the range is unavailable results in, essentially, giving\n",
"\t// up on the lease and thus worsening the situation.\n",
"\tba.Add(leaseReq)\n",
"\t_, pErr := p.repl.Send(ctx, ba)\n",
"\treturn pErr.GoError()\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// NB: Setting `Source: kvpb.AdmissionHeader_OTHER` means this request will\n",
"\t// bypass AC.\n",
"\tba.AdmissionHeader = kvpb.AdmissionHeader{\n",
"\t\tPriority: int32(admissionpb.NormalPri),\n",
"\t\tCreateTime: timeutil.Now().UnixNano(),\n",
"\t\tSource: kvpb.AdmissionHeader_OTHER,\n",
"\t}\n"
],
"file_path": "pkg/kv/kvserver/replica_range_lease.go",
"type": "add",
"edit_start_line_idx": 573
} | # =============================================================================
# decorrelate.opt contains normalization patterns that try to eliminate
# correlated subqueries. A correlated subquery is a subquery with one or more
# outer columns. For example:
#
# SELECT * FROM a WHERE (SELECT b.y FROM b WHERE a.x=b.x) < 5
#
# A correlated join has outer columns in its right input that refer to columns
# in its left input. For example:
#
# SELECT * FROM a INNER JOIN LATERAL (SELECT * FROM b WHERE a.x=b.x)
#
# Normalization rules "hoist" or "pull up" subqueries so that they are directly
# joined with the outer relation to which they are bound. Other patterns try
# to "push down" correlated joins (apply) until they disappear or can no
# longer be pushed further. An apply join can be rewritten as a non-apply join
# once there are no outer columns in the right side of the join that are bound
# by the left side of the join (i.e. the inputs are not "correlated").
#
# Together, these pattens tend to eliminate unnecessary correlation, which has
# the desirable effect of eliminating patterns that can only be executed using
# often expensive nested loops, and instead open up other physical plan
# possibilities.
#
# Citations: [3]
# =============================================================================
# DecorrelateJoin maps an apply join into the corresponding join without an
# apply if the right side of the join is not correlated with the left side.
# This allows the optimizer to consider additional physical join operators that
# are unable to handle correlated inputs.
#
# NOTE: Keep this before other decorrelation patterns; if the correlated
# join can be removed first, we avoid unnecessarily matching other
# patterns that only exist to get to this pattern.
#
# Citations: [3]
[DecorrelateJoin, Normalize]
(JoinApply
$left:*
$right:* & ^(IsCorrelated $right (OutputCols $left))
$on:*
$private:*
)
=>
(ConstructNonApplyJoin (OpName) $left $right $on $private)
# DecorrelateProjectSet pulls an input relation outside of a ProjectSet if the
# input is not correlated with any of the functions in the ProjectSet. The
# input is then cross-joined with a new ProjectSet, which contains the same
# functions but has an empty input (a unary VALUES node).
#
# The advantage of this transformation is that each of the functions in the
# ProjectSet only needs to be executed once in total, instead of once for each
# input row.
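#
# For example (a sketch; names are illustrative):
#
#   SELECT x, generate_series(1, 3) FROM xy
#
# Since generate_series(1, 3) does not reference xy, it can be evaluated
# once against a single no-column row and the result cross-joined with xy,
# rather than being re-evaluated for every input row.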
[DecorrelateProjectSet, Normalize]
(ProjectSet
$input:^(Values)
$zip:* & ^(IsZipCorrelated $zip (OutputCols $input))
)
=>
(InnerJoin
$input
(ProjectSet (ConstructNoColsRow) $zip)
[]
(EmptyJoinPrivate)
)
# TryRemapJoinOuterColsRight attempts to replace outer column references in the
# right input of a join with equivalent non-outer columns using the ON filters.
# It is valid to do this whenever it is possible to push the equality filter(s)
# down the tree until it holds true for the outer-column reference. Using the
# following query as an example:
#
# SELECT * FROM xy INNER JOIN LATERAL (SELECT * FROM ab WHERE a = x) ON b = x
#
# It is possible to push the 'b = x' filter down into the correlated right input
# of the join, which would allow replacing the 'a = x' filter with 'a = b', thus
# decorrelating the join. The match condition is similar to that for
# PushFilterIntoJoinRight because TryRemapJoinOuterColsRight simulates filter
# push-down when it makes the replacement.
#
# It is desirable to attempt to fire TryRemapJoinOuterColsRight before other
# decorrelation rules because it does not perform any transformations beyond the
# variable replacement. This prevents situations where decorrelation rules make
# the plan worse in their attempts to decorrelate the query. For example,
# decorrelation can pull filters up the operator tree or hoist subqueries into
# joins. This can cause plan changes which are difficult for the optimizer to
# reverse, and it won't even attempt to do so if the query isn't successfully
# decorrelated.
[TryRemapJoinOuterColsRight, Normalize]
(InnerJoin | InnerJoinApply | LeftJoin | LeftJoinApply | SemiJoin
| SemiJoinApply | AntiJoin | AntiJoinApply
$left:*
$right:* & (HasOuterCols $right)
$on:* &
(CanMaybeRemapOuterCols $right $on) &
(Let ($remapped $ok):(TryRemapOuterCols $right $on) $ok)
$private:*
)
=>
((OpName) $left $remapped $on $private)
# TryRemapJoinOuterColsLeft is similar to TryRemapJoinOuterColsRight, but it
# applies to the left input of a join.
[TryRemapJoinOuterColsLeft, Normalize]
(InnerJoin | InnerJoinApply | SemiJoin | SemiJoinApply
$left:* & (HasOuterCols $left)
$right:*
$on:* &
(CanMaybeRemapOuterCols $left $on) &
(Let ($remapped $ok):(TryRemapOuterCols $left $on) $ok)
$private:*
)
=>
((OpName) $remapped $right $on $private)
# TryRemapSelectOuterCols is similar to TryRemapJoinOuterColsRight, but it
# applies to the input of a Select.
[TryRemapSelectOuterCols, Normalize]
(Select
$input:* & (HasOuterCols $input)
$on:* &
(CanMaybeRemapOuterCols $input $on) &
(Let ($remapped $ok):(TryRemapOuterCols $input $on) $ok)
)
=>
(Select $remapped $on)
# TryDecorrelateSelect "pushes down" the join apply into the select operator,
# in order to eliminate any correlation between the select filter list and the
# left side of the join, and also to keep "digging" down to find and eliminate
# other unnecessary correlation. Eventually, the hope is to trigger the
# DecorrelateJoin pattern to turn JoinApply operators into non-apply Join
# operators.
#
# Note that citation [3] doesn't directly contain this identity, since it
# assumes that the Select will be hoisted above the Join rather than becoming
# part of its On condition. PushFilterIntoJoinRight allows the condition to be
# pushed down, so this rule can correctly pull it up.
#
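# As a rough sketch in the pattern language used below:
#
#   (InnerJoinApply $left (Select $input $filters) $on)
#   =>
#   (InnerJoinApply $left $input (ConcatFilters $on $filters))
#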
# Citations: [3] (see identity #3)
[TryDecorrelateSelect, Normalize]
(InnerJoin | InnerJoinApply | LeftJoin | LeftJoinApply | SemiJoin
| SemiJoinApply | AntiJoin | AntiJoinApply
$left:*
$right:* &
(HasOuterCols $right) &
(Select $input:* $filters:*)
$on:*
$private:*
)
=>
((OpName) $left $input (ConcatFilters $on $filters) $private)
# TryDecorrelateProject "pushes down" a Join into a Project operator, in an
# attempt to eliminate any correlation between the projection list and the left
# side of the join, and also to keep "digging" down to find and eliminate other
# unnecessary correlation. The eventual hope is to trigger the DecorrelateJoin
# rule to turn a JoinApply operator into a non-apply Join operator.
#
# Citations: [3] (see identity #4)
[TryDecorrelateProject, Normalize]
(InnerJoin | InnerJoinApply
$left:*
$right:* &
(HasOuterCols $right) &
(Project $input:* $projections:* $passthrough:*)
$on:*
$private:*
)
=>
(Select
(Project
((OpName) $left $input [] $private)
$projections
(UnionCols (OutputCols $left) $passthrough)
)
$on
)
# TryDecorrelateProjectSelect tries to decorrelate by hoisting a Select operator
# that sits below a LeftJoin/Project operator combo. The Project operator itself
# can't be reordered above the LeftJoin like it can in the InnerJoin case.
# However, the Select filter can be merged with the LeftJoin filter, which is
# enough to decorrelate in several useful cases.
[TryDecorrelateProjectSelect, Normalize]
(LeftJoinApply
$left:*
$right:(Project
(Select
$selectInput:*
$filters:* &
^(FiltersBoundBy
$filters
(OutputCols $selectInput)
)
)
$projections:*
$passthrough:*
)
$on:*
$private:*
)
=>
(Project
((OpName)
$left
(Project
$selectInput
$projections
(UnionCols $passthrough (OutputCols $selectInput))
)
(ConcatFilters $on $filters)
$private
)
[]
(OutputCols2 $left $right)
)
# TryDecorrelateProjectInnerJoin tries to decorrelate by hoisting the filter of
# an InnerJoin operator that sits below a LeftJoin/Project operator combo. The
# Project operator itself can't be reordered above the LeftJoin like it can in
# the InnerJoin case. However, the InnerJoin filter can be merged with the
# LeftJoin filter, which is enough to decorrelate in several useful cases. This
# rule works similarly to TryDecorrelateProjectSelect.
[TryDecorrelateProjectInnerJoin, Normalize, HighPriority]
(LeftJoinApply
$left:*
$right:(Project
$join:(InnerJoin | InnerJoinApply
$innerLeft:*
$innerRight:*
$innerOn:* &
^(FiltersBoundBy
$innerOn
(OutputCols2 $innerLeft $innerRight)
)
$innerPrivate:*
)
$projections:*
$passthrough:*
)
$on:*
$private:*
)
=>
(Project
(LeftJoinApply
$left
(Project
((OpName $join)
$innerLeft
$innerRight
[]
$innerPrivate
)
$projections
(UnionCols $passthrough (OutputCols $join))
)
(ConcatFilters $on $innerOn)
$private
)
[]
(OutputCols2 $left $right)
)
# TryDecorrelateInnerJoin tries to decorrelate an InnerJoin operator nested
# beneath another Join operator by pulling up its join condition to the outer
# join. This may be enough to decorrelate the outer join, or it may allow any
# outer column references to continue to journey upwards.
#
# TODO(andyk): Consider adding case for outer cols in $left.
[TryDecorrelateInnerJoin, Normalize]
(InnerJoin | InnerJoinApply | LeftJoin | LeftJoinApply | SemiJoin
| SemiJoinApply | AntiJoin | AntiJoinApply
$left:*
$right:* &
(HasOuterCols $right) &
(InnerJoin | InnerJoinApply
$innerLeft:*
$innerRight:*
$innerOn:* &
^(FiltersBoundBy
$innerOn
(OutputCols2 $innerLeft $innerRight)
)
$innerPrivate:*
)
$on:*
$private:*
)
=>
((OpName)
$left
((OpName $right) $innerLeft $innerRight [] $innerPrivate)
(ConcatFilters $on $innerOn)
$private
)
# TryDecorrelateInnerLeftJoin tries to decorrelate a LeftJoin operator nested
# beneath an InnerJoin operator by using the associative identity to pull up the
# left join to become the outer join. This may be enough to decorrelate the
# outer join, or it may allow any outer column references to continue to journey
# upwards.
#
# Citations: [1] (see identity #6)
[TryDecorrelateInnerLeftJoin, Normalize]
(InnerJoin | InnerJoinApply
$left:*
$right:* &
(HasOuterCols $right) &
(LeftJoin
$innerLeft:*
$innerRight:*
$innerOn:*
$innerPrivate:*
)
$on:* & (FiltersBoundBy $on (OutputCols2 $left $innerLeft))
$private:*
)
=>
(LeftJoinApply
((OpName) $left $innerLeft $on $innerPrivate)
$innerRight
$innerOn
$private
)
# TryDecorrelateGroupBy "pushes down" a Join into a GroupBy operator, in an
# attempt to keep "digging" down to find and eliminate unnecessary correlation.
# The eventual hope is to trigger the DecorrelateJoin rule to turn a JoinApply
# operator into a non-apply Join operator.
#
# Example:
#
# SELECT left.x, left.y, input.*
# FROM left
# INNER JOIN LATERAL
# (
# SELECT COUNT(*) FROM input WHERE input.x = left.x GROUP BY c
# ) AS input
# ON left.y = 10
# =>
# SELECT CONST_AGG(left.x), CONST_AGG(left.y), COUNT(*)
# FROM left WITH ORDINALITY
# INNER JOIN LATERAL
# (
# SELECT * FROM input WHERE input.x = left.x
# ) AS input
# ON True
# GROUP BY input.c, left.ordinality
# HAVING left.y = 10
#
# In other cases, we can use an existing non-null column as a canary; that
# column would not be constant necessarily, hence the use of ANY_NOT_NULL
# instead of CONST_AGG.
#
# An ordinality column only needs to be synthesized if "left" does not already
# have a strict key. We wrap the output in a Project operator to ensure that
# the original output columns are preserved and the ordinality column is not
# inadvertently added as a new output column.
#
# CONST_AGG is an internal aggregation function used when all rows in the
# grouping set have the same value on the column.
#
# Citations: [3] (see identity #8)
[TryDecorrelateGroupBy, Normalize]
(InnerJoin | InnerJoinApply
$left:*
$right:* &
(HasOuterCols $right) &
(GroupBy | DistinctOn
$input:*
$aggregations:*
$groupingPrivate:*
) &
(IsUnorderedGrouping $groupingPrivate)
$on:*
$private:*
)
=>
(Project
# Needed to project away any columns added by EnsureKey.
(Select
((OpName $right)
(InnerJoinApply
$newLeft:(EnsureKey $left)
$input
[]
$private
)
(AppendAggCols
$aggregations
ConstAgg
(NonKeyCols $newLeft)
)
(AddColsToGrouping
$groupingPrivate
(KeyCols $newLeft)
)
)
$on
)
[]
(OutputCols2 $left $right)
)
# TryDecorrelateScalarGroupBy "pushes down" a Join into a ScalarGroupBy
# operator, in an attempt to keep "digging" down to find and eliminate
# unnecessary correlation. The eventual hope is to trigger the DecorrelateJoin
# rule to turn a JoinApply operator into a non-apply Join operator. This rule
# has several requirements:
#
# 1. The left input must have a strict key. If not already present, a key can
# be synthesized by using the RowNumber operator to uniquely number the
# rows.
# 2. All aggregate functions must ignore null values, so that they will
# ignore the null values generated by the left join. We can remap the ones
# that do not ignore null values:
# - CountRows is mapped into a Count aggregate that operates over a
# not-null column from the right input (one is synthesized if
# necessary).
# - ConstAgg is mapped into the less restrictive ConstNotNullAgg.
# - Any other operator which doesn't ignore NULLs can be replaced with a
# projection taking into account a non-null column (one is synthesized
# if necessary) to distinguish NULLs which were present in the right
# input from those that arose from the left join:
#
# CASE
# WHEN notnull IS NOT NULL THEN aggregated_value
# ELSE NULL
# END
#
# This works because for every group there is just one left row
# (because we group by its key), and if there are right rows we can
# take the aggregation verbatim, but if there were no matches on the
# right we need to return the appropriate "0 rows" value for that
# aggregate (which for now is assumed to be NULL).
#
# Example:
#
# SELECT left.x, left.y, input.*
# FROM left
# INNER JOIN LATERAL
# (
# SELECT COUNT(*), SUM(c) FROM input WHERE input.x = left.x
# ) AS input
# ON left.y = 10
# =>
# SELECT CONST_AGG(left.x), CONST_AGG(left.y), COUNT(input.t), SUM(input.c)
# FROM left WITH ORDINALITY
# LEFT JOIN LATERAL
# (
# SELECT c, True t FROM input WHERE input.x = left.x
# ) AS input
# ON True
# GROUP BY left.ordinality
# HAVING left.y = 10
#
# Non-null ignoring example:
#
# SELECT left.x, input.*
# FROM left
# INNER JOIN LATERAL
# (
#     SELECT ARRAY_AGG(c) FROM input WHERE input.x = left.x
# ) AS input
# ON left.y = 10
# =>
# SELECT
# CONST_AGG(left.x),
# CASE
# WHEN ANY_NOT_NULL(notnull) IS NOT NULL THEN aggregated_value
# ELSE NULL
# END
# FROM left WITH ORDINALITY
# LEFT JOIN LATERAL
# (
# SELECT c, True notnull FROM input WHERE input.x = left.x
# ) AS input
# GROUP BY left.ordinality
# HAVING left.y = 10
#
# In this example, the "notnull" canary is needed to determine if the value of
# the ARRAY_AGG aggregation should be NULL or {NULL}.
#
# An ordinality column only needs to be synthesized if "left" does not already
# have a key. The "true" column only needs to be added if "input" does not
# already have a not-null column (and COUNT(*) is used).
#
# CONST_AGG is an internal aggregation function used when all rows in the
# grouping set have the same value on the column.
#
# Citations: [3] (see identity #9)
[TryDecorrelateScalarGroupBy, Normalize]
(InnerJoin | InnerJoinApply
$left:*
$right:* &
(HasOuterCols $right) &
(ScalarGroupBy
$input:*
$aggregations:*
$groupingPrivate:*
) &
(AggsCanBeDecorrelated $aggregations)
$on:*
$private:*
)
=>
(Select
(Project
# Needed to project away any columns added by EnsureKey.
# TranslateNonIgnoreAggs is where the actual discriminating CASE
# expressions are introduced.
(TranslateNonIgnoreAggs
(GroupBy
(LeftJoinApply
$leftWithKey:(EnsureKey $left)
# canaryCol might be 0 if no canary is necessary, in which case
# this function does nothing.
$rightWithCanary:(EnsureCanary
$input
$canaryCol:(EnsureCanaryCol
$input
$aggregations
)
)
[]
$private
)
(AppendAggCols2
$translatedAggs:(EnsureAggsCanIgnoreNulls
$rightWithCanary
$aggregations
)
ConstAgg
(NonKeyCols $leftWithKey)
AnyNotNullAgg
(CanaryColSet $canaryCol)
)
(MakeGrouping
(KeyCols $leftWithKey)
(ExtractGroupingOrdering $groupingPrivate)
)
)
$translatedAggs
$rightWithCanary
$aggregations
$canaryCol
)
[]
(OutputCols2 $left $right)
)
$on
)
# TryDecorrelateSemiJoin maps a SemiJoin to an equivalent GroupBy/InnerJoin
# complex in hopes of triggering further rules that will ultimately decorrelate
# the query. Once this rule fires, a corresponding InnerJoin decorrelation rule
# will match (i.e. TryDecorrelateGroupBy or TryDecorrelateProject).
#
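# Informally, the rewrite sketched by the pattern below is:
#
#   (SemiJoinApply left right on)
#   =>
#   (Project (GroupBy (InnerJoinApply left right on) aggs keyOfLeft))
#
#   Grouping on left's key ensures each left row is emitted at most once,
#   which preserves semi-join semantics.
#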
# Citations: [5]
[TryDecorrelateSemiJoin, Normalize]
(SemiJoin | SemiJoinApply
$left:*
$right:* &
(HasOuterCols $right) &
(CanHaveZeroRows $right) &
# Let EliminateExistsGroupBy match instead.
(GroupBy | DistinctOn | Project | ProjectSet | Window)
$on:*
$private:*
)
=>
(Project
# Needed to project away any columns added by EnsureKey.
(GroupBy
(InnerJoinApply
$newLeft:(EnsureKey $left)
$right
$on
$private
)
(MakeAggCols ConstAgg (NonKeyCols $newLeft))
(MakeGrouping (KeyCols $newLeft) (EmptyOrdering))
)
[]
(OutputCols $left)
)
# TryDecorrelateLimitOne "pushes down" a Join into a Limit 1 operator, in an
# attempt to keep "digging" down to find and eliminate unnecessary correlation.
# The eventual hope is to trigger the DecorrelateJoin rule to turn a JoinApply
# operator into a non-apply Join operator.
#
# Like the TryDecorrelateGroupBy and TryDecorrelateScalarGroupBy rules, this
# rule rewrites the expression to perform the join first, followed by a grouping
# that eliminates any extra rows introduced by the join. The DistinctOn operator
# uses First aggregates to select values from the first row in each group. Non-
# key columns from the left join input become Const aggregates, since they are
# functionally dependent on the grouped key columns (and are therefore constant
# in each group).
#
# TODO(andyk): Add other join types.
[TryDecorrelateLimitOne, Normalize]
(InnerJoin | InnerJoinApply | LeftJoin | LeftJoinApply
$left:*
$right:* &
(HasOuterCols $right) &
(Limit $input:* (Const 1) $ordering:*)
$on:*
$private:*
)
=>
(Project
# Needed to project away any columns added by EnsureKey.
(DistinctOn
((OpName) $newLeft:(EnsureKey $left) $input $on $private)
(MakeAggCols2
ConstAgg
(NonKeyCols $newLeft)
FirstAgg
(OutputCols $input)
)
(MakeGrouping (KeyCols $newLeft) $ordering)
)
[]
(OutputCols2 $left $right)
)
# TryDecorrelateLimit "pushes down" a Join into a Limit operator with a limit
# greater than one, in an attempt to keep "digging" down to find and eliminate
# unnecessary correlation. The eventual hope is to trigger the DecorrelateJoin
# rule to turn a JoinApply operator into a non-apply Join operator.
#
# The limit is replaced with a row_number window function on the right input and
# a filter on top of the apply-join that removes all rows for which row_number()
# is greater than the limit.
[TryDecorrelateLimit, Normalize]
(InnerJoin | InnerJoinApply
$left:*
$right:(Limit $input:* (Const $limit:*) $ordering:*) &
(HasOuterCols $right) &
(IsGreaterThan $limit 1)
$on:*
$private:*
)
=>
(Select
((OpName)
$left
(Window
$input
(Let
($rowNum $rowNumCol):(MakeRowNumberWindowFunc)
$rowNum
)
(MakeWindowPrivate (MakeEmptyColSet) $ordering)
)
$on
$private
)
(LimitToRowNumberFilter $limit $rowNumCol)
)
# TryDecorrelateProjectSet "pushes down" an InnerJoinApply operator into a
# ProjectSet operator, in hopes of eliminating any correlation between the
# ProjectSet operator and the InnerJoinApply operator. Eventually, the
# hope is to trigger the DecorrelateJoin pattern to turn JoinApply operators
# into non-apply Join operators.
[TryDecorrelateProjectSet, Normalize]
(InnerJoinApply
$left:*
(ProjectSet $input:* $zip:*)
$on:*
$private:*
)
=>
(Select
(ProjectSet (InnerJoinApply $left $input [] $private) $zip)
$on
)
# TryDecorrelateWindow "pushes down" a Join into a Window operator, in an
# attempt to keep "digging" down to find and eliminate unnecessary correlation.
# The eventual hope is to trigger the DecorrelateJoin rule to turn a JoinApply
# operator into a non-apply Join operator. This rule is very similar to
# TryDecorrelateGroupBy.
#
# This rule adds the output columns of the left side of the join to the Window
# operator's partition cols. This effectively means that each row of the left
# side of the join is windowed independently, assuming the left side has a key
# (and if it doesn't, we can give it one via EnsureKey).
#
# SELECT
# left.k, left.x, right.x, rank
# FROM
# left
# INNER JOIN LATERAL (
# SELECT rank() OVER () AS rank, right.x FROM (SELECT * FROM right WHERE left.k = right.k)
# )
# =>
# SELECT
# left.k, left.x, right.x, rank() OVER (PARTITION BY left.k) AS rank
# FROM
# left INNER JOIN right ON left.k = right.k
#
# Sketch of why this rule works (assume A has a key):
#
# Recall from [3] that the definition of Apply (for cross joins) is:
#
# (InnerJoinApply A E true) = (Union_{r ∈ A} {r} × E(r))
#
# Where E is a relational expression mapping rows r ∈ A to relational result
# sets.
#
# Starting with (InnerJoinApply A (Window B partcols) on), where partcols is
# the set of partition columns and on is the join predicate.
#
# = (Select (InnerJoinApply A (Window B partcols) true) on)
#
# By the inverse of MergeSelectInnerJoin.
#
# = (Select
# (Union_{r ∈ A} {r} × (Window B partcols)(r))
# on
# )
#
# By the definition of Apply.
#
# = (Select
# (Union_{r ∈ A} {r} × (Window B(r) partcols))
# on
# )
#
# By the fact that by construction, window functions only refer to
# variable references in their input.
#
# = (Select
# (Union_{r ∈ A} (Window {r} × B(r) partcols))
# on
# )
#
# Because the Window only looks at columns from B(r).
#
# = (Select
# (Window
# (Union_{r ∈ A} {r} × B(r))
# (Union partcols (KeyCols A))
# )
# on
# )
#
# Roughly, since A has a key, partitioning (Union_{r ∈ A} r × B(r)) by the key
# of A results in exactly one partition for each row in A, and so partitioning
# higher up has the same effect as performing the window function for each row.
#
# = (Select
# (Window
# (InnerJoinApply A B true)
# (Union partcols (OutputCols A))
# )
# on
# )
#
# Again by the definition of Apply.
[TryDecorrelateWindow, Normalize]
(InnerJoinApply | InnerJoin
$left:*
$right:(Window $input:* $windows:* $private:*) &
(HasOuterCols $right)
$on:*
$joinPrivate:*
)
=>
(Project
# Needed to project away any columns added by EnsureKey.
(Select
(Window
((OpName)
$newLeft:(EnsureKey $left)
$input
[]
$joinPrivate
)
$windows
(AddColsToPartition $private (KeyCols $newLeft))
)
$on
)
[]
(OutputCols2 $left $right)
)
# TryDecorrelateMax1Row "pushes down" a Join into a Max1Row operator, in an
# attempt to keep "digging" down to find and eliminate unnecessary correlation.
# The eventual hope is to trigger the DecorrelateJoin rule to turn a JoinApply
# operator into a non-apply Join operator.
#
# The Max1Row operator is mapped into an EnsureDistinctOn operator that wraps
# the join and raises an error if it detects duplicates in the column(s) that
# made up the key of the join's left input. A duplicate value in those key
# column(s) indicates that more than one row from the right input matched that
# value. Or in other words, it indicates that the Max1Row's subquery input would
# have returned more than one row corresponding to that value. Therefore, the
# two formulations are equivalent.
#
# TryDecorrelateMax1Row only matches when the join's "on" condition is true.
# This is because pushing a non-true filter through the EnsureDistinctOn would
# result in different error behavior. Since there are currently no situations
# where the join's "on" condition is anything other than true, and since these
# cases therefore cannot be tested, TryDecorrelateMax1Row only matches when the
# "on" condition is true. If this changes, TryDecorrelateMax1Row should hoist
# the non-true "on" conditions above the EnsureDistinctOn operator.
[TryDecorrelateMax1Row, Normalize]
(InnerJoin | InnerJoinApply | LeftJoin | LeftJoinApply
$left:*
$right:* &
(HasOuterCols $right) &
(Max1Row $input:* $errorText:*)
[]
$private:*
)
=>
(Project
(EnsureDistinctOn
((OpName) $newLeft:(EnsureKey $left) $input [] $private)
(MakeAggCols
ConstAgg
(UnionCols (NonKeyCols $newLeft) (OutputCols $input))
)
(MakeErrorOnDupGrouping
(KeyCols $newLeft)
(EmptyOrdering)
$errorText
)
)
[]
(OutputCols2 $left $right)
)
# HoistSelectExists extracts existential subqueries from Select filters,
# turning them into semi-joins. This eliminates the subquery, which is often
# expensive to execute and restricts the optimizer's plan choices.
#
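# For illustration only (hypothetical tables):
#
#   SELECT * FROM a WHERE EXISTS (SELECT * FROM b WHERE b.x = a.x)
#
#   The Exists item is removed from the filters and its subquery becomes
#   the right input of a SemiJoinApply over the Select's input.
#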
# This rule is marked as low priority so that it runs after other rules like
# filter pushdown. Hoisting a correlated subquery is an expensive operation that
# can't be undone, so do it only once all other work is complete. For example,
# filter pushdown rules might be able to move the subquery nearer to the input
# to which it's correlated before it's hoisted, making it easier to decorrelate.
[HoistSelectExists, Normalize, LowPriority]
(Select
$input:*
$filters:[
...
$item:* &
(HasHoistableSubquery $item) &
(FiltersItem (Exists $subquery:*))
...
]
)
=>
(Select
(SemiJoinApply $input $subquery [] (EmptyJoinPrivate))
(RemoveFiltersItem $filters $item)
)
# HoistSelectNotExists extracts non-existential subqueries from Select filters,
# turning them into anti-joins. This eliminates the subquery, which is often
# expensive to execute and restricts the optimizer's plan choices.
#
# This rule is marked as low priority for the same reason as HoistSelectExists.
[HoistSelectNotExists, Normalize, LowPriority]
(Select
$input:*
$filters:[
...
$item:* &
(HasHoistableSubquery $item) &
(FiltersItem (Not (Exists $subquery:*)))
...
]
)
=>
(Select
(AntiJoinApply $input $subquery [] (EmptyJoinPrivate))
(RemoveFiltersItem $filters $item)
)
# HoistSelectSubquery extracts subqueries from a Select filter and joins them
# with the Select input. This and other subquery hoisting patterns create a
# single, top-level relational query with no nesting.
#
# NOTE: Keep this ordered after the HoistSelectExists and HoistSelectNotExists
# rules. This rule will hoist any existential subqueries using
# LeftJoinApply, which is equivalent to, but not as efficient as, using
# SemiJoinApply and AntiJoinApply.
#
# This rule is marked as low priority for the same reason as HoistSelectExists.
#
# Citations: [4]
[HoistSelectSubquery, Normalize, LowPriority]
(Select
$input:*
$filters:[ ... $item:* & (HasHoistableSubquery $item) ... ]
)
=>
(HoistSelectSubquery $input $filters)
# HoistProjectSubquery extracts subqueries from a projections list and joins
# them with the Project input. This and other subquery hoisting patterns create
# a single, top-level relational query with no nesting.
#
# This rule is marked as low priority for the same reason as HoistSelectExists.
[HoistProjectSubquery, Normalize, LowPriority]
(Project
$input:*
$projections:[
...
$item:* & (HasHoistableSubquery $item)
...
]
$passthrough:*
)
=>
(HoistProjectSubquery $input $projections $passthrough)
# HoistJoinSubquery extracts subqueries from a join filter and joins them with
# the join's right input. This and other subquery hoisting patterns create a
# single, top-level relational query with no nesting. This rule only applies to
# join types which have a legal apply variant.
#
# This rule is marked as low priority for the same reason as HoistSelectExists.
[HoistJoinSubquery, Normalize, LowPriority]
(InnerJoin | LeftJoin | SemiJoin | AntiJoin
$left:*
$right:*
$on:[ ... $item:* & (HasHoistableSubquery $item) ... ]
$private:*
)
=>
(HoistJoinSubquery (OpName) $left $right $on $private)
# HoistValuesSubquery extracts subqueries from row tuples and joins them with
# the Values operator. This and other subquery hoisting patterns create a
# single, top-level relational query with no nesting.
#
# This rule is marked as low priority for the same reason as HoistSelectExists.
[HoistValuesSubquery, Normalize, LowPriority]
(Values
$rows:[ ... $item:* & (HasHoistableSubquery $item) ... ]
$private:*
)
=>
(HoistValuesSubquery $rows $private)
# HoistProjectSetSubquery extracts subqueries from zipped functions and joins
# them with the ProjectSet operator's input. This and other subquery hoisting
# patterns create a single, top-level relational query with no nesting.
#
# This rule is marked as low priority for the same reason as HoistSelectExists.
[HoistProjectSetSubquery, Normalize, LowPriority]
(ProjectSet
$input:*
$zip:[ ... $item:* & (HasHoistableSubquery $item) ... ]
)
=>
(HoistProjectSetSubquery $input $zip)
# NormalizeSelectAnyFilter rewrites an Any expression that is a top-level
# conjunct in Select filters, turning it into an Exists expression. Any can be
# rewritten as Exists in this context because a NULL return value is treated as
# False by the filter.
#
# Exists is more efficient than Any, since its null handling is much simpler. In
# addition, the Exists can be transformed into a semi-join.
#
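# For illustration only (hypothetical tables): as a filter,
#
#   SELECT * FROM a WHERE a.x = ANY (SELECT b.y FROM b)
#
#   behaves like
#
#   SELECT * FROM a WHERE EXISTS (SELECT * FROM b WHERE a.x = b.y)
#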
# Citations: [5] (section 3.5)
[NormalizeSelectAnyFilter, Normalize]
(Select
$input:*
$filters:[
...
$item:(FiltersItem
(Any $anyInput:* $scalar:* $anyPrivate:*)
)
...
]
)
=>
(Select
$input
(ReplaceFiltersItem
$filters
$item
(Exists
(Select
$anyInput
[
(FiltersItem
(ConstructAnyCondition
$anyInput
$scalar
$anyPrivate
)
)
]
)
$anyPrivate
)
)
)
# NormalizeJoinAnyFilter is similar to NormalizeSelectAnyFilter, except that it
# operates on Any expressions within Join filters rather than Select filters.
[NormalizeJoinAnyFilter, Normalize]
(Join
$left:*
$right:*
$on:[
...
$item:(FiltersItem
(Any $anyInput:* $scalar:* $anyPrivate:*)
)
...
]
$private:*
)
=>
((OpName)
$left
$right
(ReplaceFiltersItem
$on
$item
(Exists
(Select
$anyInput
[
(FiltersItem
(ConstructAnyCondition
$anyInput
$scalar
$anyPrivate
)
)
]
)
$anyPrivate
)
)
$private
)
# NormalizeSelectNotAnyFilter rewrites a Not Any expression that is a top-level
# conjunct in Select filters, turning it into a Not Exists expression. Not Any
# can be rewritten as Not Exists in this context because a NULL return value is
# treated as False by the filter.
#
# Not Exists is more efficient than Not Any, since its null handling is much
# simpler. In addition, the Not Exists can be transformed into an anti-join.
#
# Citations: [5] (section 3.5)
[NormalizeSelectNotAnyFilter, Normalize]
(Select
$input:*
$filters:[
...
$item:(FiltersItem
(Not (Any $anyInput:* $scalar:* $anyPrivate:*))
)
...
]
)
=>
(Select
$input
(ReplaceFiltersItem
$filters
$item
(Not
(Exists
(Select
$anyInput
[
(FiltersItem
(IsNot
(ConstructAnyCondition
$anyInput
$scalar
$anyPrivate
)
(False)
)
)
]
)
$anyPrivate
)
)
)
)
# NormalizeJoinNotAnyFilter is similar to NormalizeSelectNotAnyFilter, except
# that it operates on Not Any expressions within Join filters rather than Select
# filters.
[NormalizeJoinNotAnyFilter, Normalize]
(Join
$left:*
$right:*
$on:[
...
$item:(FiltersItem
(Not (Any $anyInput:* $scalar:* $anyPrivate:*))
)
...
]
$private:*
)
=>
((OpName)
$left
$right
(ReplaceFiltersItem
$on
$item
(Not
(Exists
(Select
$anyInput
[
(FiltersItem
(IsNot
(ConstructAnyCondition
$anyInput
$scalar
$anyPrivate
)
(False)
)
)
]
)
$anyPrivate
)
)
)
$private
)
| pkg/sql/opt/norm/rules/decorrelate.opt | 0 | https://github.com/cockroachdb/cockroach/commit/dbe5954ec7032390cc5713bad5ed8fcdaf3b2d05 | [
0.00017872113676276058,
0.00017200957518070936,
0.0001604821445653215,
0.00017290443065576255,
0.000003855243448924739
] |
{
"id": 2,
"code_window": [
"\t// TransferLease will observe the circuit breaker, as transferring a\n",
"\t// lease when the range is unavailable results in, essentially, giving\n",
"\t// up on the lease and thus worsening the situation.\n",
"\tba.Add(leaseReq)\n",
"\t_, pErr := p.repl.Send(ctx, ba)\n",
"\treturn pErr.GoError()\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// NB: Setting `Source: kvpb.AdmissionHeader_OTHER` means this request will\n",
"\t// bypass AC.\n",
"\tba.AdmissionHeader = kvpb.AdmissionHeader{\n",
"\t\tPriority: int32(admissionpb.NormalPri),\n",
"\t\tCreateTime: timeutil.Now().UnixNano(),\n",
"\t\tSource: kvpb.AdmissionHeader_OTHER,\n",
"\t}\n"
],
"file_path": "pkg/kv/kvserver/replica_range_lease.go",
"type": "add",
"edit_start_line_idx": 573
} |
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------
SOFTWARE DISTRIBUTED WITH THRIFT:
The Apache Thrift software includes a number of subcomponents with
separate copyright notices and license terms. Your use of the source
code for the these subcomponents is subject to the terms and
conditions of the following licenses.
--------------------------------------------------
Portions of the following files are licensed under the MIT License:
lib/erl/src/Makefile.am
Please see doc/otp-base-license.txt for the full terms of this license.
--------------------------------------------------
For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components:
# Copyright (c) 2007 Thomas Porschberg <[email protected]>
#
# Copying and distribution of this file, with or without
# modification, are permitted in any medium without royalty provided
# the copyright notice and this notice are preserved.
--------------------------------------------------
For the lib/nodejs/lib/thrift/json_parse.js:
/*
json_parse.js
2015-05-02
Public Domain.
NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
*/
(By Douglas Crockford <[email protected]>)
--------------------------------------------------
For lib/cpp/src/thrift/windows/SocketPair.cpp
/* socketpair.c
* Copyright 2007 by Nathan C. Myers <[email protected]>; some rights reserved.
* This code is Free Software. It may be copied freely, in original or
* modified form, subject only to the restrictions that (1) the author is
* relieved from all responsibilities for any use for any purpose, and (2)
* this copyright notice must be retained, unchanged, in its entirety. If
* for any reason the author might be held responsible for any consequences
* of copying or use, license is withheld.
*/
--------------------------------------------------
For lib/py/compat/win32/stdint.h
// ISO C9x compliant stdint.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006-2008 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. The name of the author may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
--------------------------------------------------
Codegen template in t_html_generator.h
* Bootstrap v2.0.3
*
* Copyright 2012 Twitter, Inc
* Licensed under the Apache License v2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Designed and built with all the love in the world @twitter by @mdo and @fat.
---------------------------------------------------
For t_cl_generator.cc
* Copyright (c) 2008- Patrick Collison <[email protected]>
* Copyright (c) 2006- Facebook
---------------------------------------------------
| licenses/BSD3-apache.thrift.lib.go.thrift.txt | 0 | https://github.com/cockroachdb/cockroach/commit/dbe5954ec7032390cc5713bad5ed8fcdaf3b2d05 | [
0.00017909285088535398,
0.00017598856356926262,
0.00017105617735069245,
0.0001762822939781472,
0.000002034728140642983
] |
{
"id": 2,
"code_window": [
"\t// TransferLease will observe the circuit breaker, as transferring a\n",
"\t// lease when the range is unavailable results in, essentially, giving\n",
"\t// up on the lease and thus worsening the situation.\n",
"\tba.Add(leaseReq)\n",
"\t_, pErr := p.repl.Send(ctx, ba)\n",
"\treturn pErr.GoError()\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// NB: Setting `Source: kvpb.AdmissionHeader_OTHER` means this request will\n",
"\t// bypass AC.\n",
"\tba.AdmissionHeader = kvpb.AdmissionHeader{\n",
"\t\tPriority: int32(admissionpb.NormalPri),\n",
"\t\tCreateTime: timeutil.Now().UnixNano(),\n",
"\t\tSource: kvpb.AdmissionHeader_OTHER,\n",
"\t}\n"
],
"file_path": "pkg/kv/kvserver/replica_range_lease.go",
"type": "add",
"edit_start_line_idx": 573
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package cli
import (
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/testutils/datapathutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/datadriven"
)
// This test doctoring a secure cluster.
func TestDoctorCluster(t *testing.T) {
defer leaktest.AfterTest(t)()
c := NewCLITest(TestCLIParams{T: t})
defer c.Cleanup()
// Introduce a corruption in the descriptor table by adding a table and
// removing its parent.
c.RunWithArgs([]string{"sql", "-e", strings.Join([]string{
"CREATE TABLE to_drop (id INT)",
"DROP TABLE to_drop",
"CREATE TABLE foo (id INT)",
"INSERT INTO system.users VALUES ('node', NULL, true, 3)",
"GRANT node TO root",
"DELETE FROM system.namespace WHERE name = 'foo'",
"SELECT pg_catalog.pg_sleep(1)",
}, ";\n"),
})
t.Run("examine", func(t *testing.T) {
out, err := c.RunWithCapture("debug doctor examine cluster")
if err != nil {
t.Fatal(err)
}
// Using datadriven allows TESTFLAGS=-rewrite.
datadriven.RunTest(t, datapathutils.TestDataPath(t, "doctor", "test_examine_cluster"), func(t *testing.T, td *datadriven.TestData) string {
return out
})
})
}
// This test the operation of zip over secure clusters.
func TestDoctorZipDir(t *testing.T) {
defer leaktest.AfterTest(t)()
c := NewCLITest(TestCLIParams{T: t, NoServer: true})
defer c.Cleanup()
t.Run("examine", func(t *testing.T) {
out, err := c.RunWithCapture("debug doctor examine zipdir testdata/doctor/debugzip 21.1-52")
if err != nil {
t.Fatal(err)
}
// Using datadriven allows TESTFLAGS=-rewrite.
datadriven.RunTest(t, datapathutils.TestDataPath(t, "doctor", "test_examine_zipdir"), func(t *testing.T, td *datadriven.TestData) string {
return out
})
})
t.Run("examine", func(t *testing.T) {
out, err := c.RunWithCapture("debug doctor examine zipdir testdata/doctor/debugzip-with-quotes")
if err != nil {
t.Fatal(err)
}
// Using datadriven allows TESTFLAGS=-rewrite.
datadriven.RunTest(t, datapathutils.TestDataPath(t, "doctor", "test_examine_zipdir_with_quotes"), func(t *testing.T, td *datadriven.TestData) string {
return out
})
})
t.Run("recreate", func(t *testing.T) {
out, err := c.RunWithCapture("debug doctor recreate zipdir testdata/doctor/debugzip")
if err != nil {
t.Fatal(err)
}
// Using datadriven allows TESTFLAGS=-rewrite.
datadriven.RunTest(t, datapathutils.TestDataPath(t, "doctor", "test_recreate_zipdir"), func(t *testing.T, td *datadriven.TestData) string {
return out
})
})
t.Run("recreate-json", func(t *testing.T) {
out, err := c.RunWithCapture("debug doctor recreate zipdir testdata/doctor/debugzip-json")
if err != nil {
t.Fatal(err)
}
// Using datadriven allows TESTFLAGS=-rewrite.
datadriven.RunTest(t, datapathutils.TestDataPath(t, "doctor", "test_recreate_zipdir-json"), func(t *testing.T, td *datadriven.TestData) string {
return out
})
})
t.Run("deprecated doctor zipdir with verbose", func(t *testing.T) {
out, err := c.RunWithCapture("debug doctor zipdir testdata/doctor/debugzip 21.11-52 --verbose")
if err != nil {
t.Fatal(err)
}
// Using datadriven allows TESTFLAGS=-rewrite.
datadriven.RunTest(t, datapathutils.TestDataPath(t, "doctor", "test_examine_zipdir_verbose"), func(t *testing.T, td *datadriven.TestData) string {
return out
})
})
}
| pkg/cli/doctor_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/dbe5954ec7032390cc5713bad5ed8fcdaf3b2d05 | [
0.00017955091607291251,
0.0001744298351695761,
0.00016817163850646466,
0.0001749871880747378,
0.0000028359665975585813
] |
{
"id": 0,
"code_window": [
"\n",
"\tapiEndpointStr := strings.Join(apiEndpoints, \" \")\n",
"\t// Colorize the message and print.\n",
"\tlogger.StartupMessage(colorBlue(\"\\nEndpoint: \") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))\n",
"\tlogger.StartupMessage(colorBlue(\"AccessKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.AccessKey)))\n",
"\tlogger.StartupMessage(colorBlue(\"SecretKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.SecretKey)))\n",
"\n",
"\tif globalIsBrowserEnabled {\n",
"\t\tlogger.StartupMessage(colorBlue(\"\\nBrowser Access:\"))\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif isTerminal() {\n",
"\t\tlogger.StartupMessage(colorBlue(\"AccessKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.AccessKey)))\n",
"\t\tlogger.StartupMessage(colorBlue(\"SecretKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.SecretKey)))\n",
"\t}\n"
],
"file_path": "cmd/gateway-startup-msg.go",
"type": "replace",
"edit_start_line_idx": 59
} | /*
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"crypto/x509"
"fmt"
"net/url"
"runtime"
"strings"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/cmd/logger"
)
// Documentation links, these are part of message printing code.
const (
mcQuickStartGuide = "https://docs.minio.io/docs/minio-client-quickstart-guide"
goQuickStartGuide = "https://docs.minio.io/docs/golang-client-quickstart-guide"
jsQuickStartGuide = "https://docs.minio.io/docs/javascript-client-quickstart-guide"
javaQuickStartGuide = "https://docs.minio.io/docs/java-client-quickstart-guide"
pyQuickStartGuide = "https://docs.minio.io/docs/python-client-quickstart-guide"
dotnetQuickStartGuide = "https://docs.minio.io/docs/dotnet-client-quickstart-guide"
)
// generates format string depending on the string length and padding.
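// For example, getFormatStr(5, 1) returns "%6s".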
func getFormatStr(strLen int, padding int) string {
formatStr := fmt.Sprintf("%ds", strLen+padding)
return "%" + formatStr
}
// Prints the formatted startup message.
func printStartupMessage(apiEndPoints []string) {
strippedAPIEndpoints := stripStandardPorts(apiEndPoints)
// If cache layer is enabled, print cache capacity.
cacheObjectAPI := newCacheObjectsFn()
if cacheObjectAPI != nil {
printCacheStorageInfo(cacheObjectAPI.StorageInfo(context.Background()))
}
// Object layer is initialized then print StorageInfo.
objAPI := newObjectLayerFn()
if objAPI != nil {
printStorageInfo(objAPI.StorageInfo(context.Background()))
}
// Prints credential, region and browser access.
printServerCommonMsg(strippedAPIEndpoints)
// Prints `mc` cli configuration message chooses
// first endpoint as default.
printCLIAccessMsg(strippedAPIEndpoints[0], "myminio")
// Prints documentation message.
printObjectAPIMsg()
// SSL is configured reads certification chain, prints
// authority and expiry.
if globalIsSSL {
printCertificateMsg(globalPublicCerts)
}
}
// strip api endpoints list with standard ports such as
// port "80" and "443" before displaying on the startup
// banner. Returns a new list of API endpoints.
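// For example, "http://minio.example.com:80" becomes "http://minio.example.com".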
func stripStandardPorts(apiEndpoints []string) (newAPIEndpoints []string) {
newAPIEndpoints = make([]string, len(apiEndpoints))
// Check all API endpoints for standard ports and strip them.
for i, apiEndpoint := range apiEndpoints {
url, err := url.Parse(apiEndpoint)
if err != nil {
newAPIEndpoints[i] = apiEndpoint
continue
}
host, port := mustSplitHostPort(url.Host)
// For standard HTTP(s) ports such as "80" and "443"
// apiEndpoints should only be host without port.
switch {
case url.Scheme == "http" && port == "80":
fallthrough
case url.Scheme == "https" && port == "443":
url.Host = host
newAPIEndpoints[i] = url.String()
default:
newAPIEndpoints[i] = apiEndpoint
}
}
return newAPIEndpoints
}
// Prints common server startup message. Prints credential, region and browser access.
func printServerCommonMsg(apiEndpoints []string) {
// Get saved credentials.
cred := globalServerConfig.GetCredential()
// Get saved region.
region := globalServerConfig.GetRegion()
apiEndpointStr := strings.Join(apiEndpoints, " ")
// Colorize the message and print.
logger.StartupMessage(colorBlue("Endpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))
logger.StartupMessage(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKey)))
logger.StartupMessage(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretKey)))
if region != "" {
logger.StartupMessage(colorBlue("Region: ") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region)))
}
printEventNotifiers()
if globalIsBrowserEnabled {
logger.StartupMessage(colorBlue("\nBrowser Access:"))
logger.StartupMessage(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 3), apiEndpointStr))
}
}
// Prints bucket notification configurations.
func printEventNotifiers() {
arns := globalNotificationSys.GetARNList()
if len(arns) == 0 {
return
}
arnMsg := colorBlue("SQS ARNs: ")
for _, arn := range arns {
arnMsg += colorBold(fmt.Sprintf(getFormatStr(len(arn), 1), arn))
}
logger.StartupMessage(arnMsg)
}
// Prints startup message for command line access. Prints link to our documentation
// and custom platform specific message.
func printCLIAccessMsg(endPoint string, alias string) {
// Get saved credentials.
cred := globalServerConfig.GetCredential()
// Configure 'mc', following block prints platform specific information for minio client.
logger.StartupMessage(colorBlue("\nCommand-line Access: ") + mcQuickStartGuide)
if runtime.GOOS == globalWindowsOSName {
mcMessage := fmt.Sprintf("$ mc.exe config host add %s %s %s %s", alias, endPoint, cred.AccessKey, cred.SecretKey)
logger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
} else {
mcMessage := fmt.Sprintf("$ mc config host add %s %s %s %s", alias, endPoint, cred.AccessKey, cred.SecretKey)
logger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
}
}
// Prints startup message for Object API acces, prints link to our SDK documentation.
func printObjectAPIMsg() {
logger.StartupMessage(colorBlue("\nObject API (Amazon S3 compatible):"))
logger.StartupMessage(colorBlue(" Go: ") + fmt.Sprintf(getFormatStr(len(goQuickStartGuide), 8), goQuickStartGuide))
logger.StartupMessage(colorBlue(" Java: ") + fmt.Sprintf(getFormatStr(len(javaQuickStartGuide), 6), javaQuickStartGuide))
logger.StartupMessage(colorBlue(" Python: ") + fmt.Sprintf(getFormatStr(len(pyQuickStartGuide), 4), pyQuickStartGuide))
logger.StartupMessage(colorBlue(" JavaScript: ") + jsQuickStartGuide)
logger.StartupMessage(colorBlue(" .NET: ") + fmt.Sprintf(getFormatStr(len(dotnetQuickStartGuide), 6), dotnetQuickStartGuide))
}
// Get formatted disk/storage info message.
func getStorageInfoMsg(storageInfo StorageInfo) string {
var msg string
if storageInfo.Backend.Type == BackendErasure {
diskInfo := fmt.Sprintf(" %d Online, %d Offline. ", storageInfo.Backend.OnlineDisks, storageInfo.Backend.OfflineDisks)
msg += colorBlue("Status:") + fmt.Sprintf(getFormatStr(len(diskInfo), 8), diskInfo)
}
return msg
}
// Prints startup message of storage capacity and erasure information.
func printStorageInfo(storageInfo StorageInfo) {
if msg := getStorageInfoMsg(storageInfo); msg != "" {
logger.StartupMessage(msg)
}
}
func printCacheStorageInfo(storageInfo CacheStorageInfo) {
msg := fmt.Sprintf("%s %s Free, %s Total", colorBlue("Cache Capacity:"),
humanize.IBytes(uint64(storageInfo.Free)),
humanize.IBytes(uint64(storageInfo.Total)))
logger.StartupMessage(msg)
}
// Prints certificate expiry date warning
func getCertificateChainMsg(certs []*x509.Certificate) string {
msg := colorBlue("\nCertificate expiry info:\n")
totalCerts := len(certs)
var expiringCerts int
for i := totalCerts - 1; i >= 0; i-- {
cert := certs[i]
if cert.NotAfter.Before(UTCNow().Add(globalMinioCertExpireWarnDays)) {
expiringCerts++
msg += fmt.Sprintf(colorBold("#%d %s will expire on %s\n"), expiringCerts, cert.Subject.CommonName, cert.NotAfter)
}
}
if expiringCerts > 0 {
return msg
}
return ""
}
// Prints the certificate expiry message.
func printCertificateMsg(certs []*x509.Certificate) {
logger.StartupMessage(getCertificateChainMsg(certs))
}
| cmd/server-startup-msg.go | 1 | https://github.com/minio/minio/commit/21c8693d9cca8f3ff1cedf2aee9f3bde89387880 | [
0.9981784820556641,
0.09023909270763397,
0.0001638282701605931,
0.0003873045789077878,
0.2810373604297638
] |
{
"id": 0,
"code_window": [
"\n",
"\tapiEndpointStr := strings.Join(apiEndpoints, \" \")\n",
"\t// Colorize the message and print.\n",
"\tlogger.StartupMessage(colorBlue(\"\\nEndpoint: \") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))\n",
"\tlogger.StartupMessage(colorBlue(\"AccessKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.AccessKey)))\n",
"\tlogger.StartupMessage(colorBlue(\"SecretKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.SecretKey)))\n",
"\n",
"\tif globalIsBrowserEnabled {\n",
"\t\tlogger.StartupMessage(colorBlue(\"\\nBrowser Access:\"))\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif isTerminal() {\n",
"\t\tlogger.StartupMessage(colorBlue(\"AccessKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.AccessKey)))\n",
"\t\tlogger.StartupMessage(colorBlue(\"SecretKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.SecretKey)))\n",
"\t}\n"
],
"file_path": "cmd/gateway-startup-msg.go",
"type": "replace",
"edit_start_line_idx": 59
} | # Deploy Minio with Kubernetes [](https://slack.minio.io) [](https://goreportcard.com/report/minio/minio) [](https://hub.docker.com/r/minio/minio/) [](https://codecov.io/gh/minio/minio)
Kubernetes Deployments and StatefulSets provide the perfect platform to deploy Minio server in standalone, distributed, or shared mode. There are multiple options to deploy Minio on Kubernetes; you can choose the one that fits you best.
- The Minio [Helm](https://helm.sh) chart offers a customizable yet simple Minio deployment with a single command. More information on deploying Minio with Helm is available [here](#prerequisites).
- You can also explore the Kubernetes [Minio examples](https://github.com/minio/minio/blob/master/docs/orchestration/kubernetes-yaml/README.md) to deploy Minio with `.yaml` files.
- If you want to get started with Minio on Kubernetes without creating a real container cluster, you can also use Minikube to [deploy Minio locally](https://raw.githubusercontent.com/minio/minio/master/docs/orchestration/minikube/README.md).
<a name="prerequisites"></a>
## 1. Prerequisites
* Kubernetes 1.4+ with Beta APIs enabled for the default standalone mode.
* Kubernetes 1.5+ with Beta APIs enabled for [distributed mode](#distributed-minio).
* A PV provisioner supported by the underlying infrastructure.
* The Helm package manager [installed](https://github.com/kubernetes/helm#install) on your Kubernetes cluster.
## 2. Deploy Minio using the Helm chart
Install the Minio chart
```bash
$ helm install stable/minio
```
The command above deploys Minio on the Kubernetes cluster with the default configuration. The section below lists all the configurable parameters of the Minio chart and their default values.
### Configuration
| Parameter | Description | Default |
|----------------------------|-------------------------------------|---------------------------------------------------------|
| `image` | Minio image name | `minio/minio` |
| `imageTag` | Minio image tag. Possible values are listed [here](https://hub.docker.com/r/minio/minio/tags/).| `RELEASE.2017-08-05T00-00-53Z`|
| `imagePullPolicy` | Image pull policy | `Always` |
| `mode` | Minio server mode (`standalone`, `shared` or `distributed`)| `standalone` |
| `numberOfNodes` | Number of nodes (applies only to distributed mode). Valid values: 4 <= x <= 16 | `4` |
| `accessKey` | Default access key | `AKIAIOSFODNN7EXAMPLE` |
| `secretKey` | Default secret key | `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY` |
| `configPath` | Default config file location | `~/.minio` |
| `mountPath` | Default mount path | `/export` |
| `serviceType` | Kubernetes service type | `LoadBalancer` |
| `servicePort` | Kubernetes service port | `9000` |
| `persistence.enabled` | Use a persistent volume to store data | `true` |
| `persistence.size` | Size of the persistent volume | `10Gi` |
| `persistence.storageClass` | Storage class of the persistent volume | `generic` |
| `persistence.accessMode` | ReadWriteOnce or ReadOnly | `ReadWriteOnce` |
| `resources` | CPU/memory resource requests/limits | Memory: `256Mi`, CPU: `100m` |
You can specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```bash
$ helm install --name my-release \
--set persistence.size=100Gi \
stable/minio
```
The command above deploys a Minio server backed by a 100Gi persistent volume.
Alternatively, you can provide a YAML file that specifies the parameter values while installing the chart. For example,
```bash
$ helm install --name my-release -f values.yaml stable/minio
```
### Distributed Minio
By default, this chart provisions a Minio server in standalone mode. To provision Minio servers in [distributed mode](https://docs.minio.io/cn/distributed-minio-quickstart-guide), set the `mode` field to `distributed`,
```bash
$ helm install --set mode=distributed stable/minio
```
The command above deploys a distributed Minio server with 4 nodes. To change the number of nodes in the distributed Minio deployment, set the `numberOfNodes` field.
```bash
$ helm install --set mode=distributed,numberOfNodes=8 stable/minio
```
The command above deploys a distributed Minio server with 8 nodes. Note that the valid range of `numberOfNodes` is [4,16].
#### StatefulSet [limitations](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/#limitations) applicable to distributed Minio
* StatefulSets need persistent storage, so the `persistence.enabled` parameter has no effect when `mode` is set to `distributed`.
* When uninstalling a distributed Minio release, you need to manually delete the volumes associated with the StatefulSet.
### Shared Minio
To provision Minio servers in [shared mode](https://github.com/minio/minio/blob/master/docs/shared-backend/README.md), set the `mode` field to `shared`,
```bash
$ helm install --set mode=shared stable/minio
```
The command above provisions 4 Minio server nodes backed by a single shared storage. To change the number of nodes in the shared Minio deployment, set the `numberOfNodes` field,
```bash
$ helm install --set mode=shared,numberOfNodes=8 stable/minio
```
The command above provisions a Minio service with 8 nodes in shared mode.
### Persistence
This chart provisions a PersistentVolumeClaim and mounts the corresponding persistent volume at the default location `/export`. You will need physical storage available in the Kubernetes cluster for this to work. If you would rather use `emptyDir`, disable the PersistentVolumeClaim as follows:
```bash
$ helm install --set persistence.enabled=false stable/minio
```
> *"当Pod分配给节点时,首先创建一个emptyDir卷,只要该节点上的Pod正在运行,它就会存在。 当某个Pod由于任何原因从节点中删除时,emptyDir中的数据将永久删除。"*
## 3. 使用Helm更新Minio版本
您可以更新现有的Minio Helm Release以使用较新的Minio Docker镜像。 为此,请使用`helm upgrade`命令:
```bash
$ helm upgrade --set imageTag=<replace-with-minio-docker-image-tag> <helm-release-name> stable/minio
```
If the update is successful, you should see the output below:
```bash
Release "your-helm-release" has been upgraded. Happy Helming!
```
## 4. Uninstall the chart
Assuming your release is named `my-release`, delete it with the command below:
```bash
$ helm delete my-release
```
This command removes all the Kubernetes components associated with the chart and deletes the release.
### Notes
* An instance of a chart running in a Kubernetes cluster is called a release. Helm automatically assigns a unique release name after installing the chart. You can also set your preferred name with:
```bash
$ helm install --name my-release stable/minio
```
* To override the default keys, pass the access key and secret key as arguments to helm install.
```bash
$ helm install --set accessKey=myaccesskey,secretKey=mysecretkey \
stable/minio
```
### Explore Further
- [Minio Erasure Code QuickStart Guide](https://docs.minio.io/cn/minio-erasure-code-quickstart-guide)
- [Kubernetes Documentation](https://kubernetes.io/docs/home/)
- [Helm package manager for kubernetes](https://helm.sh/)
| docs/zh_CN/orchestration/kubernetes/README.md | 0 | https://github.com/minio/minio/commit/21c8693d9cca8f3ff1cedf2aee9f3bde89387880 | [
0.00017367771943099797,
0.00016919072368182242,
0.00016411987598985434,
0.00016920320922508836,
0.0000023325237634708174
] |
{
"id": 0,
"code_window": [
"\n",
"\tapiEndpointStr := strings.Join(apiEndpoints, \" \")\n",
"\t// Colorize the message and print.\n",
"\tlogger.StartupMessage(colorBlue(\"\\nEndpoint: \") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))\n",
"\tlogger.StartupMessage(colorBlue(\"AccessKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.AccessKey)))\n",
"\tlogger.StartupMessage(colorBlue(\"SecretKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.SecretKey)))\n",
"\n",
"\tif globalIsBrowserEnabled {\n",
"\t\tlogger.StartupMessage(colorBlue(\"\\nBrowser Access:\"))\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif isTerminal() {\n",
"\t\tlogger.StartupMessage(colorBlue(\"AccessKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.AccessKey)))\n",
"\t\tlogger.StartupMessage(colorBlue(\"SecretKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.SecretKey)))\n",
"\t}\n"
],
"file_path": "cmd/gateway-startup-msg.go",
"type": "replace",
"edit_start_line_idx": 59
} | foo = [
"1",
"2", # comment
]
| vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/array_comment.hcl | 0 | https://github.com/minio/minio/commit/21c8693d9cca8f3ff1cedf2aee9f3bde89387880 | [
0.0001637534733163193,
0.0001637534733163193,
0.0001637534733163193,
0.0001637534733163193,
0
] |
{
"id": 0,
"code_window": [
"\n",
"\tapiEndpointStr := strings.Join(apiEndpoints, \" \")\n",
"\t// Colorize the message and print.\n",
"\tlogger.StartupMessage(colorBlue(\"\\nEndpoint: \") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))\n",
"\tlogger.StartupMessage(colorBlue(\"AccessKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.AccessKey)))\n",
"\tlogger.StartupMessage(colorBlue(\"SecretKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.SecretKey)))\n",
"\n",
"\tif globalIsBrowserEnabled {\n",
"\t\tlogger.StartupMessage(colorBlue(\"\\nBrowser Access:\"))\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif isTerminal() {\n",
"\t\tlogger.StartupMessage(colorBlue(\"AccessKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.AccessKey)))\n",
"\t\tlogger.StartupMessage(colorBlue(\"SecretKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.SecretKey)))\n",
"\t}\n"
],
"file_path": "cmd/gateway-startup-msg.go",
"type": "replace",
"edit_start_line_idx": 59
} | /*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sqlparser
import (
"bytes"
"fmt"
)
// NodeFormatter defines the signature of a custom node formatter
// function that can be given to TrackedBuffer for code generation.
type NodeFormatter func(buf *TrackedBuffer, node SQLNode)
// TrackedBuffer is used to rebuild a query from the ast.
// bindLocations keeps track of locations in the buffer that
// use bind variables for efficient future substitutions.
// nodeFormatter is the formatting function the buffer will
// use to format a node. By default(nil), it's FormatNode.
// But you can supply a different formatting function if you
// want to generate a query that's different from the default.
type TrackedBuffer struct {
*bytes.Buffer
bindLocations []bindLocation
nodeFormatter NodeFormatter
}
// NewTrackedBuffer creates a new TrackedBuffer.
func NewTrackedBuffer(nodeFormatter NodeFormatter) *TrackedBuffer {
return &TrackedBuffer{
Buffer: new(bytes.Buffer),
nodeFormatter: nodeFormatter,
}
}
// WriteNode function, initiates the writing of a single SQLNode tree by passing
// through to Myprintf with a default format string
func (buf *TrackedBuffer) WriteNode(node SQLNode) *TrackedBuffer {
buf.Myprintf("%v", node)
return buf
}
// Myprintf mimics fmt.Fprintf(buf, ...), but limited to Node(%v),
// Node.Value(%s) and string(%s). It also allows a %a for a value argument, in
// which case it adds tracking info for future substitutions.
//
// The name must be something other than the usual Printf() to avoid "go vet"
// warnings due to our custom format specifiers.
func (buf *TrackedBuffer) Myprintf(format string, values ...interface{}) {
end := len(format)
fieldnum := 0
for i := 0; i < end; {
lasti := i
for i < end && format[i] != '%' {
i++
}
if i > lasti {
buf.WriteString(format[lasti:i])
}
if i >= end {
break
}
i++ // '%'
switch format[i] {
case 'c':
switch v := values[fieldnum].(type) {
case byte:
buf.WriteByte(v)
case rune:
buf.WriteRune(v)
default:
panic(fmt.Sprintf("unexpected TrackedBuffer type %T", v))
}
case 's':
switch v := values[fieldnum].(type) {
case []byte:
buf.Write(v)
case string:
buf.WriteString(v)
default:
panic(fmt.Sprintf("unexpected TrackedBuffer type %T", v))
}
case 'v':
node := values[fieldnum].(SQLNode)
if buf.nodeFormatter == nil {
node.Format(buf)
} else {
buf.nodeFormatter(buf, node)
}
case 'a':
buf.WriteArg(values[fieldnum].(string))
default:
panic("unexpected")
}
fieldnum++
i++
}
}
// WriteArg writes a value argument into the buffer along with
// tracking information for future substitutions. arg must contain
// the ":" or "::" prefix.
func (buf *TrackedBuffer) WriteArg(arg string) {
buf.bindLocations = append(buf.bindLocations, bindLocation{
offset: buf.Len(),
length: len(arg),
})
buf.WriteString(arg)
}
// ParsedQuery returns a ParsedQuery that contains bind
// locations for easy substitution.
func (buf *TrackedBuffer) ParsedQuery() *ParsedQuery {
return &ParsedQuery{Query: buf.String(), bindLocations: buf.bindLocations}
}
// HasBindVars returns true if the parsed query uses bind vars.
func (buf *TrackedBuffer) HasBindVars() bool {
return len(buf.bindLocations) != 0
}
// BuildParsedQuery builds a ParsedQuery from the input.
func BuildParsedQuery(in string, vars ...interface{}) *ParsedQuery {
buf := NewTrackedBuffer(nil)
buf.Myprintf(in, vars...)
return buf.ParsedQuery()
}
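// Usage sketch (illustrative; the query and names below are example values):
// %s writes a plain string, while %a records a bind-variable location so the
// query can be substituted later without re-parsing.
func exampleTrackedBuffer() *ParsedQuery {
	buf := NewTrackedBuffer(nil)
	buf.Myprintf("select * from %s where id = %a", "users", ":id")
	// buf.HasBindVars() is now true; the ParsedQuery remembers where ":id" sits.
	return buf.ParsedQuery()
}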
| vendor/github.com/xwb1989/sqlparser/tracked_buffer.go | 0 | https://github.com/minio/minio/commit/21c8693d9cca8f3ff1cedf2aee9f3bde89387880 | [
0.0002933082287199795,
0.00017892620235215873,
0.0001612796913832426,
0.00017066695727407932,
0.000031728519388707355
] |
{
"id": 1,
"code_window": [
"\n",
"\tapiEndpointStr := strings.Join(apiEndpoints, \" \")\n",
"\n",
"\t// Colorize the message and print.\n",
"\tlogger.StartupMessage(colorBlue(\"Endpoint: \") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))\n",
"\tlogger.StartupMessage(colorBlue(\"AccessKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.AccessKey)))\n",
"\tlogger.StartupMessage(colorBlue(\"SecretKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.SecretKey)))\n",
"\tif region != \"\" {\n",
"\t\tlogger.StartupMessage(colorBlue(\"Region: \") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region)))\n",
"\t}\n",
"\tprintEventNotifiers()\n",
"\n",
"\tif globalIsBrowserEnabled {\n",
"\t\tlogger.StartupMessage(colorBlue(\"\\nBrowser Access:\"))\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif isTerminal() {\n",
"\t\tlogger.StartupMessage(colorBlue(\"AccessKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.AccessKey)))\n",
"\t\tlogger.StartupMessage(colorBlue(\"SecretKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.SecretKey)))\n",
"\t\tif region != \"\" {\n",
"\t\t\tlogger.StartupMessage(colorBlue(\"Region: \") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region)))\n",
"\t\t}\n"
],
"file_path": "cmd/server-startup-msg.go",
"type": "replace",
"edit_start_line_idx": 118
} | /*
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"crypto/x509"
"fmt"
"net/url"
"runtime"
"strings"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/cmd/logger"
)
// Documentation links, these are part of message printing code.
const (
mcQuickStartGuide = "https://docs.minio.io/docs/minio-client-quickstart-guide"
goQuickStartGuide = "https://docs.minio.io/docs/golang-client-quickstart-guide"
jsQuickStartGuide = "https://docs.minio.io/docs/javascript-client-quickstart-guide"
javaQuickStartGuide = "https://docs.minio.io/docs/java-client-quickstart-guide"
pyQuickStartGuide = "https://docs.minio.io/docs/python-client-quickstart-guide"
dotnetQuickStartGuide = "https://docs.minio.io/docs/dotnet-client-quickstart-guide"
)
// generates format string depending on the string length and padding.
func getFormatStr(strLen int, padding int) string {
formatStr := fmt.Sprintf("%ds", strLen+padding)
return "%" + formatStr
}
// Prints the formatted startup message.
func printStartupMessage(apiEndPoints []string) {
strippedAPIEndpoints := stripStandardPorts(apiEndPoints)
// If cache layer is enabled, print cache capacity.
cacheObjectAPI := newCacheObjectsFn()
if cacheObjectAPI != nil {
printCacheStorageInfo(cacheObjectAPI.StorageInfo(context.Background()))
}
// Object layer is initialized then print StorageInfo.
objAPI := newObjectLayerFn()
if objAPI != nil {
printStorageInfo(objAPI.StorageInfo(context.Background()))
}
// Prints credential, region and browser access.
printServerCommonMsg(strippedAPIEndpoints)
// Prints `mc` cli configuration message chooses
// first endpoint as default.
printCLIAccessMsg(strippedAPIEndpoints[0], "myminio")
// Prints documentation message.
printObjectAPIMsg()
// SSL is configured reads certification chain, prints
// authority and expiry.
if globalIsSSL {
printCertificateMsg(globalPublicCerts)
}
}
// Strips standard ports such as "80" and "443" from the API
// endpoint list before displaying it on the startup banner.
// Returns a new list of API endpoints.
func stripStandardPorts(apiEndpoints []string) (newAPIEndpoints []string) {
newAPIEndpoints = make([]string, len(apiEndpoints))
// Check all API endpoints for standard ports and strip them.
for i, apiEndpoint := range apiEndpoints {
url, err := url.Parse(apiEndpoint)
if err != nil {
newAPIEndpoints[i] = apiEndpoint
continue
}
host, port := mustSplitHostPort(url.Host)
		// For standard HTTP(s) ports such as "80" and "443",
		// the endpoint should contain only the host, without the port.
switch {
case url.Scheme == "http" && port == "80":
fallthrough
case url.Scheme == "https" && port == "443":
url.Host = host
newAPIEndpoints[i] = url.String()
default:
newAPIEndpoints[i] = apiEndpoint
}
}
return newAPIEndpoints
}
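// Worked example (illustrative, not part of the original file): the standard
// port ":80" is stripped, while the non-standard ":9000" is preserved.
func exampleStripPorts() []string {
	return stripStandardPorts([]string{
		"http://192.168.1.10:80",         // becomes "http://192.168.1.10"
		"https://minio.example.com:9000", // kept as-is
	})
}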
// Prints common server startup message. Prints credential, region and browser access.
func printServerCommonMsg(apiEndpoints []string) {
// Get saved credentials.
cred := globalServerConfig.GetCredential()
// Get saved region.
region := globalServerConfig.GetRegion()
apiEndpointStr := strings.Join(apiEndpoints, " ")
// Colorize the message and print.
logger.StartupMessage(colorBlue("Endpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))
logger.StartupMessage(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKey)))
logger.StartupMessage(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretKey)))
if region != "" {
logger.StartupMessage(colorBlue("Region: ") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region)))
}
printEventNotifiers()
if globalIsBrowserEnabled {
logger.StartupMessage(colorBlue("\nBrowser Access:"))
logger.StartupMessage(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 3), apiEndpointStr))
}
}
// Prints bucket notification configurations.
func printEventNotifiers() {
arns := globalNotificationSys.GetARNList()
if len(arns) == 0 {
return
}
arnMsg := colorBlue("SQS ARNs: ")
for _, arn := range arns {
arnMsg += colorBold(fmt.Sprintf(getFormatStr(len(arn), 1), arn))
}
logger.StartupMessage(arnMsg)
}
// Prints the startup message for command-line access. Prints a link to our
// documentation and a custom platform-specific message.
func printCLIAccessMsg(endPoint string, alias string) {
// Get saved credentials.
cred := globalServerConfig.GetCredential()
	// Configure 'mc'; the following block prints platform-specific information for the Minio client.
logger.StartupMessage(colorBlue("\nCommand-line Access: ") + mcQuickStartGuide)
if runtime.GOOS == globalWindowsOSName {
mcMessage := fmt.Sprintf("$ mc.exe config host add %s %s %s %s", alias, endPoint, cred.AccessKey, cred.SecretKey)
logger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
} else {
mcMessage := fmt.Sprintf("$ mc config host add %s %s %s %s", alias, endPoint, cred.AccessKey, cred.SecretKey)
logger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
}
}
// Prints startup message for Object API access, prints link to our SDK documentation.
func printObjectAPIMsg() {
logger.StartupMessage(colorBlue("\nObject API (Amazon S3 compatible):"))
logger.StartupMessage(colorBlue(" Go: ") + fmt.Sprintf(getFormatStr(len(goQuickStartGuide), 8), goQuickStartGuide))
logger.StartupMessage(colorBlue(" Java: ") + fmt.Sprintf(getFormatStr(len(javaQuickStartGuide), 6), javaQuickStartGuide))
logger.StartupMessage(colorBlue(" Python: ") + fmt.Sprintf(getFormatStr(len(pyQuickStartGuide), 4), pyQuickStartGuide))
logger.StartupMessage(colorBlue(" JavaScript: ") + jsQuickStartGuide)
logger.StartupMessage(colorBlue(" .NET: ") + fmt.Sprintf(getFormatStr(len(dotnetQuickStartGuide), 6), dotnetQuickStartGuide))
}
// Get formatted disk/storage info message.
func getStorageInfoMsg(storageInfo StorageInfo) string {
var msg string
if storageInfo.Backend.Type == BackendErasure {
diskInfo := fmt.Sprintf(" %d Online, %d Offline. ", storageInfo.Backend.OnlineDisks, storageInfo.Backend.OfflineDisks)
msg += colorBlue("Status:") + fmt.Sprintf(getFormatStr(len(diskInfo), 8), diskInfo)
}
return msg
}
// Prints startup message of storage capacity and erasure information.
func printStorageInfo(storageInfo StorageInfo) {
if msg := getStorageInfoMsg(storageInfo); msg != "" {
logger.StartupMessage(msg)
}
}
func printCacheStorageInfo(storageInfo CacheStorageInfo) {
msg := fmt.Sprintf("%s %s Free, %s Total", colorBlue("Cache Capacity:"),
humanize.IBytes(uint64(storageInfo.Free)),
humanize.IBytes(uint64(storageInfo.Total)))
logger.StartupMessage(msg)
}
// Returns a warning message listing certificates that are close to expiry.
func getCertificateChainMsg(certs []*x509.Certificate) string {
msg := colorBlue("\nCertificate expiry info:\n")
totalCerts := len(certs)
var expiringCerts int
for i := totalCerts - 1; i >= 0; i-- {
cert := certs[i]
if cert.NotAfter.Before(UTCNow().Add(globalMinioCertExpireWarnDays)) {
expiringCerts++
msg += fmt.Sprintf(colorBold("#%d %s will expire on %s\n"), expiringCerts, cert.Subject.CommonName, cert.NotAfter)
}
}
if expiringCerts > 0 {
return msg
}
return ""
}
// Prints the certificate expiry message.
func printCertificateMsg(certs []*x509.Certificate) {
logger.StartupMessage(getCertificateChainMsg(certs))
}
| cmd/server-startup-msg.go | 1 | https://github.com/minio/minio/commit/21c8693d9cca8f3ff1cedf2aee9f3bde89387880 | [
0.9980067610740662,
0.09558088332414627,
0.00016776469419710338,
0.0004387489752843976,
0.28266239166259766
] |
{
"id": 1,
"code_window": [
"\n",
"\tapiEndpointStr := strings.Join(apiEndpoints, \" \")\n",
"\n",
"\t// Colorize the message and print.\n",
"\tlogger.StartupMessage(colorBlue(\"Endpoint: \") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))\n",
"\tlogger.StartupMessage(colorBlue(\"AccessKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.AccessKey)))\n",
"\tlogger.StartupMessage(colorBlue(\"SecretKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.SecretKey)))\n",
"\tif region != \"\" {\n",
"\t\tlogger.StartupMessage(colorBlue(\"Region: \") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region)))\n",
"\t}\n",
"\tprintEventNotifiers()\n",
"\n",
"\tif globalIsBrowserEnabled {\n",
"\t\tlogger.StartupMessage(colorBlue(\"\\nBrowser Access:\"))\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif isTerminal() {\n",
"\t\tlogger.StartupMessage(colorBlue(\"AccessKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.AccessKey)))\n",
"\t\tlogger.StartupMessage(colorBlue(\"SecretKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.SecretKey)))\n",
"\t\tif region != \"\" {\n",
"\t\t\tlogger.StartupMessage(colorBlue(\"Region: \") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region)))\n",
"\t\t}\n"
],
"file_path": "cmd/server-startup-msg.go",
"type": "replace",
"edit_start_line_idx": 118
} | // Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2016, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import (
"reflect"
"time"
)
var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
type timestamp struct {
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
}
func (m *timestamp) Reset() { *m = timestamp{} }
func (*timestamp) ProtoMessage() {}
func (*timestamp) String() string { return "timestamp<string>" }
func init() {
RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp")
}
func (o *Buffer) decTimestamp() (time.Time, error) {
b, err := o.DecodeRawBytes(true)
if err != nil {
return time.Time{}, err
}
	tproto := &timestamp{}
if err := Unmarshal(b, tproto); err != nil {
return time.Time{}, err
}
return timestampFromProto(tproto)
}
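// Round-trip sketch (illustrative, not part of the original file): a
// time.Time survives conversion to the internal timestamp message and back,
// using the same helpers the decoder above relies on.
func exampleTimestampRoundTrip(t time.Time) (time.Time, error) {
	tproto, err := timestampProto(t) // seconds + nanos since the Unix epoch
	if err != nil {
		return time.Time{}, err
	}
	return timestampFromProto(tproto)
}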
func (o *Buffer) dec_time(p *Properties, base structPointer) error {
t, err := o.decTimestamp()
if err != nil {
return err
}
setPtrCustomType(base, p.field, &t)
return nil
}
func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error {
t, err := o.decTimestamp()
if err != nil {
return err
}
setCustomType(base, p.field, &t)
return nil
}
func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error {
t, err := o.decTimestamp()
if err != nil {
return err
}
newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType)))
var zero field
setPtrCustomType(newBas, zero, &t)
return nil
}
func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error {
t, err := o.decTimestamp()
if err != nil {
return err
}
newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType))
var zero field
setCustomType(newBas, zero, &t)
return nil
}
func size_time(p *Properties, base structPointer) (n int) {
structp := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(structp) {
return 0
}
tim := structPointer_Interface(structp, timeType).(*time.Time)
t, err := timestampProto(*tim)
if err != nil {
return 0
}
size := Size(t)
return size + sizeVarint(uint64(size)) + len(p.tagcode)
}
func (o *Buffer) enc_time(p *Properties, base structPointer) error {
structp := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(structp) {
return ErrNil
}
tim := structPointer_Interface(structp, timeType).(*time.Time)
t, err := timestampProto(*tim)
if err != nil {
return err
}
data, err := Marshal(t)
if err != nil {
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(data)
return nil
}
func size_ref_time(p *Properties, base structPointer) (n int) {
tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time)
t, err := timestampProto(*tim)
if err != nil {
return 0
}
size := Size(t)
return size + sizeVarint(uint64(size)) + len(p.tagcode)
}
func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error {
tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time)
t, err := timestampProto(*tim)
if err != nil {
return err
}
data, err := Marshal(t)
if err != nil {
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(data)
return nil
}
func size_slice_time(p *Properties, base structPointer) (n int) {
ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time)
tims := *ptims
for i := 0; i < len(tims); i++ {
if tims[i] == nil {
return 0
}
tproto, err := timestampProto(*tims[i])
if err != nil {
return 0
}
size := Size(tproto)
n += len(p.tagcode) + size + sizeVarint(uint64(size))
}
return n
}
func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error {
ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time)
tims := *ptims
for i := 0; i < len(tims); i++ {
if tims[i] == nil {
return errRepeatedHasNil
}
tproto, err := timestampProto(*tims[i])
if err != nil {
return err
}
data, err := Marshal(tproto)
if err != nil {
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(data)
}
return nil
}
func size_slice_ref_time(p *Properties, base structPointer) (n int) {
ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time)
tims := *ptims
for i := 0; i < len(tims); i++ {
tproto, err := timestampProto(tims[i])
if err != nil {
return 0
}
size := Size(tproto)
n += len(p.tagcode) + size + sizeVarint(uint64(size))
}
return n
}
func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error {
ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time)
tims := *ptims
for i := 0; i < len(tims); i++ {
tproto, err := timestampProto(tims[i])
if err != nil {
return err
}
data, err := Marshal(tproto)
if err != nil {
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(data)
}
return nil
}
| vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go | 0 | https://github.com/minio/minio/commit/21c8693d9cca8f3ff1cedf2aee9f3bde89387880 | [
0.00017792063590604812,
0.0001726946356939152,
0.00016588924336247146,
0.00017348492110613734,
0.0000029244076813483844
] |
{
"id": 1,
"code_window": [
"\n",
"\tapiEndpointStr := strings.Join(apiEndpoints, \" \")\n",
"\n",
"\t// Colorize the message and print.\n",
"\tlogger.StartupMessage(colorBlue(\"Endpoint: \") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))\n",
"\tlogger.StartupMessage(colorBlue(\"AccessKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.AccessKey)))\n",
"\tlogger.StartupMessage(colorBlue(\"SecretKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.SecretKey)))\n",
"\tif region != \"\" {\n",
"\t\tlogger.StartupMessage(colorBlue(\"Region: \") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region)))\n",
"\t}\n",
"\tprintEventNotifiers()\n",
"\n",
"\tif globalIsBrowserEnabled {\n",
"\t\tlogger.StartupMessage(colorBlue(\"\\nBrowser Access:\"))\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif isTerminal() {\n",
"\t\tlogger.StartupMessage(colorBlue(\"AccessKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.AccessKey)))\n",
"\t\tlogger.StartupMessage(colorBlue(\"SecretKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.SecretKey)))\n",
"\t\tif region != \"\" {\n",
"\t\t\tlogger.StartupMessage(colorBlue(\"Region: \") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region)))\n",
"\t\t}\n"
],
"file_path": "cmd/server-startup-msg.go",
"type": "replace",
"edit_start_line_idx": 118
} | // Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"bytes"
"fmt"
"log"
"net/http"
"net/url"
"time"
"golang.org/x/net/http2/hpack"
"golang.org/x/net/lex/httplex"
)
// writeFramer is implemented by any type that is used to write frames.
type writeFramer interface {
writeFrame(writeContext) error
// staysWithinBuffer reports whether this writer promises that
// it will only write less than or equal to size bytes, and it
// won't Flush the write context.
staysWithinBuffer(size int) bool
}
// writeContext is the interface needed by the various frame writer
// types below. All the writeFrame methods below are scheduled via the
// frame writing scheduler (see writeScheduler in writesched.go).
//
// This interface is implemented by *serverConn.
//
// TODO: decide whether to a) use this in the client code (which didn't
// end up using this yet, because it has a simpler design, not
// currently implementing priorities), or b) delete this and
// make the server code a bit more concrete.
type writeContext interface {
Framer() *Framer
Flush() error
CloseConn() error
// HeaderEncoder returns an HPACK encoder that writes to the
// returned buffer.
HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
}
// writeEndsStream reports whether w writes a frame that will transition
// the stream to a half-closed local state. This returns false for RST_STREAM,
// which closes the entire stream (not just the local half).
func writeEndsStream(w writeFramer) bool {
switch v := w.(type) {
case *writeData:
return v.endStream
case *writeResHeaders:
return v.endStream
case nil:
// This can only happen if the caller reuses w after it's
// been intentionally nil'ed out to prevent use. Keep this
// here to catch future refactoring breaking it.
panic("writeEndsStream called on nil writeFramer")
}
return false
}
type flushFrameWriter struct{}
func (flushFrameWriter) writeFrame(ctx writeContext) error {
return ctx.Flush()
}
func (flushFrameWriter) staysWithinBuffer(max int) bool { return false }
type writeSettings []Setting
func (s writeSettings) staysWithinBuffer(max int) bool {
const settingSize = 6 // uint16 + uint32
return frameHeaderLen+settingSize*len(s) <= max
}
func (s writeSettings) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteSettings([]Setting(s)...)
}
type writeGoAway struct {
maxStreamID uint32
code ErrCode
}
func (p *writeGoAway) writeFrame(ctx writeContext) error {
err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
if p.code != 0 {
ctx.Flush() // ignore error: we're hanging up on them anyway
time.Sleep(50 * time.Millisecond)
ctx.CloseConn()
}
return err
}
func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes
type writeData struct {
streamID uint32
p []byte
endStream bool
}
func (w *writeData) String() string {
return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
}
func (w *writeData) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
}
func (w *writeData) staysWithinBuffer(max int) bool {
return frameHeaderLen+len(w.p) <= max
}
// handlerPanicRST is the message sent from handler goroutines when
// the handler panics.
type handlerPanicRST struct {
StreamID uint32
}
func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
}
func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
func (se StreamError) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
}
func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
type writePingAck struct{ pf *PingFrame }
func (w writePingAck) writeFrame(ctx writeContext) error {
return ctx.Framer().WritePing(true, w.pf.Data)
}
func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max }
type writeSettingsAck struct{}
func (writeSettingsAck) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteSettingsAck()
}
func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max }
// splitHeaderBlock splits headerBlock into fragments so that each fragment fits
// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true
// for the first/last fragment, respectively.
func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error {
// For now we're lazy and just pick the minimum MAX_FRAME_SIZE
// that all peers must support (16KB). Later we could care
// more and send larger frames if the peer advertised it, but
// there's little point. Most headers are small anyway (so we
// generally won't have CONTINUATION frames), and extra frames
// only waste 9 bytes anyway.
const maxFrameSize = 16384
first := true
for len(headerBlock) > 0 {
frag := headerBlock
if len(frag) > maxFrameSize {
frag = frag[:maxFrameSize]
}
headerBlock = headerBlock[len(frag):]
if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
return err
}
first = false
}
return nil
}
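// Worked example (illustrative): a 40000-byte header block is split into
// fragments of at most 16384 bytes, so fn is invoked three times:
//
//	frag len 16384, firstFrag=true,  lastFrag=false  -> HEADERS frame
//	frag len 16384, firstFrag=false, lastFrag=false  -> CONTINUATION frame
//	frag len  7232, firstFrag=false, lastFrag=true   -> final CONTINUATION
//
// (the frame mapping shown is the one used by writeResHeaders below).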
// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
// for HTTP response headers or trailers from a server handler.
type writeResHeaders struct {
streamID uint32
httpResCode int // 0 means no ":status" line
h http.Header // may be nil
trailers []string // if non-nil, which keys of h to write. nil means all.
endStream bool
date string
contentType string
contentLength string
}
func encKV(enc *hpack.Encoder, k, v string) {
if VerboseLogs {
log.Printf("http2: server encoding header %q = %q", k, v)
}
enc.WriteField(hpack.HeaderField{Name: k, Value: v})
}
func (w *writeResHeaders) staysWithinBuffer(max int) bool {
// TODO: this is a common one. It'd be nice to return true
// here and get into the fast path if we could be clever and
// calculate the size fast enough, or at least a conservative
	// upper bound that usually fires. (Maybe if w.h and
// w.trailers are nil, so we don't need to enumerate it.)
// Otherwise I'm afraid that just calculating the length to
// answer this question would be slower than the ~2µs benefit.
return false
}
func (w *writeResHeaders) writeFrame(ctx writeContext) error {
enc, buf := ctx.HeaderEncoder()
buf.Reset()
if w.httpResCode != 0 {
encKV(enc, ":status", httpCodeString(w.httpResCode))
}
encodeHeaders(enc, w.h, w.trailers)
if w.contentType != "" {
encKV(enc, "content-type", w.contentType)
}
if w.contentLength != "" {
encKV(enc, "content-length", w.contentLength)
}
if w.date != "" {
encKV(enc, "date", w.date)
}
headerBlock := buf.Bytes()
if len(headerBlock) == 0 && w.trailers == nil {
panic("unexpected empty hpack")
}
return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
}
func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
if firstFrag {
return ctx.Framer().WriteHeaders(HeadersFrameParam{
StreamID: w.streamID,
BlockFragment: frag,
EndStream: w.endStream,
EndHeaders: lastFrag,
})
} else {
return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
}
}
// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
type writePushPromise struct {
streamID uint32 // pusher stream
method string // for :method
url *url.URL // for :scheme, :authority, :path
h http.Header
// Creates an ID for a pushed stream. This runs on serveG just before
// the frame is written. The returned ID is copied to promisedID.
allocatePromisedID func() (uint32, error)
promisedID uint32
}
func (w *writePushPromise) staysWithinBuffer(max int) bool {
// TODO: see writeResHeaders.staysWithinBuffer
return false
}
func (w *writePushPromise) writeFrame(ctx writeContext) error {
enc, buf := ctx.HeaderEncoder()
buf.Reset()
encKV(enc, ":method", w.method)
encKV(enc, ":scheme", w.url.Scheme)
encKV(enc, ":authority", w.url.Host)
encKV(enc, ":path", w.url.RequestURI())
encodeHeaders(enc, w.h, nil)
headerBlock := buf.Bytes()
if len(headerBlock) == 0 {
panic("unexpected empty hpack")
}
return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
}
func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
if firstFrag {
return ctx.Framer().WritePushPromise(PushPromiseParam{
StreamID: w.streamID,
PromiseID: w.promisedID,
BlockFragment: frag,
EndHeaders: lastFrag,
})
} else {
return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
}
}
type write100ContinueHeadersFrame struct {
streamID uint32
}
func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
enc, buf := ctx.HeaderEncoder()
buf.Reset()
encKV(enc, ":status", "100")
return ctx.Framer().WriteHeaders(HeadersFrameParam{
StreamID: w.streamID,
BlockFragment: buf.Bytes(),
EndStream: false,
EndHeaders: true,
})
}
func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
// Sloppy but conservative:
return 9+2*(len(":status")+len("100")) <= max
}
type writeWindowUpdate struct {
streamID uint32 // or 0 for conn-level
n uint32
}
func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
}
// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
// is encoded only if k is in keys.
func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
if keys == nil {
sorter := sorterPool.Get().(*sorter)
// Using defer here, since the returned keys from the
// sorter.Keys method is only valid until the sorter
// is returned:
defer sorterPool.Put(sorter)
keys = sorter.Keys(h)
}
for _, k := range keys {
vv := h[k]
k = lowerHeader(k)
if !validWireHeaderFieldName(k) {
// Skip it as backup paranoia. Per
// golang.org/issue/14048, these should
// already be rejected at a higher level.
continue
}
isTE := k == "transfer-encoding"
for _, v := range vv {
if !httplex.ValidHeaderFieldValue(v) {
// TODO: return an error? golang.org/issue/14048
// For now just omit it.
continue
}
// TODO: more of "8.1.2.2 Connection-Specific Header Fields"
if isTE && v != "trailers" {
continue
}
encKV(enc, k, v)
}
}
}
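// Behavior sketch (illustrative input): given h = {"Content-Type":
// ["text/plain"], "Transfer-Encoding": ["chunked"]} and keys == nil,
// encodeHeaders emits only "content-type: text/plain": names are lowercased
// for HPACK, and any Transfer-Encoding value other than "trailers" is
// dropped as connection-specific.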
| vendor/golang.org/x/net/http2/write.go | 0 | https://github.com/minio/minio/commit/21c8693d9cca8f3ff1cedf2aee9f3bde89387880 | [
0.00022653045016340911,
0.00017228156502824277,
0.00016063217481132597,
0.00017199473222717643,
0.000009837843208515551
] |
{
"id": 1,
"code_window": [
"\n",
"\tapiEndpointStr := strings.Join(apiEndpoints, \" \")\n",
"\n",
"\t// Colorize the message and print.\n",
"\tlogger.StartupMessage(colorBlue(\"Endpoint: \") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))\n",
"\tlogger.StartupMessage(colorBlue(\"AccessKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.AccessKey)))\n",
"\tlogger.StartupMessage(colorBlue(\"SecretKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.SecretKey)))\n",
"\tif region != \"\" {\n",
"\t\tlogger.StartupMessage(colorBlue(\"Region: \") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region)))\n",
"\t}\n",
"\tprintEventNotifiers()\n",
"\n",
"\tif globalIsBrowserEnabled {\n",
"\t\tlogger.StartupMessage(colorBlue(\"\\nBrowser Access:\"))\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif isTerminal() {\n",
"\t\tlogger.StartupMessage(colorBlue(\"AccessKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.AccessKey)))\n",
"\t\tlogger.StartupMessage(colorBlue(\"SecretKey: \") + colorBold(fmt.Sprintf(\"%s \", cred.SecretKey)))\n",
"\t\tif region != \"\" {\n",
"\t\t\tlogger.StartupMessage(colorBlue(\"Region: \") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region)))\n",
"\t\t}\n"
],
"file_path": "cmd/server-startup-msg.go",
"type": "replace",
"edit_start_line_idx": 118
} | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
// This file contains Form-specific logic and wrappers for data in tables.go.
// Rune info is stored in a separate trie per composing form. A composing form
// and its corresponding decomposing form share the same trie. Each trie maps
// a rune to a uint16. The values take two forms. For v >= 0x8000:
// bits
// 15: 1 (inverse of NFD_QC bit of qcInfo)
// 13..7: qcInfo (see below). isYesD is always true (no decomposition).
// 6..0: ccc (compressed CCC value).
// For v < 0x8000, the respective rune has a decomposition and v is an index
// into a byte array of UTF-8 decomposition sequences and additional info and
// has the form:
// <header> <decomp_byte>* [<tccc> [<lccc>]]
// The header contains the number of bytes in the decomposition (excluding this
// length byte). The two most significant bits of this length byte correspond
// to bit 5 and 4 of qcInfo (see below). The byte sequence itself starts at v+1.
// The byte sequence is followed by a trailing and leading CCC if the values
// for these are not zero. The value of v determines which ccc are appended
// to the sequences. For v < firstCCC, there are none, for v >= firstCCC,
// the sequence is followed by a trailing ccc, and for v >= firstLeadingCC
// there is an additional leading ccc. The value of tccc itself is the
// trailing CCC shifted left 2 bits. The two least-significant bits of tccc
// are the number of trailing non-starters.
const (
qcInfoMask = 0x3F // to clear all but the relevant bits in a qcInfo
headerLenMask = 0x3F // extract the length value from the header byte
headerFlagsMask = 0xC0 // extract the qcInfo bits from the header byte
)
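// Decoding sketch (illustrative value, mirroring the v >= 0x8000 branch of
// compInfo below): for a trie value v = 0x8123,
//
//	ccc/tccc index: uint8(v)           = 0x23
//	qcInfo bits:    uint8(v>>8) & 0x3F = 0x01
//
// so the rune has no decomposition and its CCC is ccc[0x23].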
// Properties provides access to normalization properties of a rune.
type Properties struct {
pos uint8 // start position in reorderBuffer; used in composition.go
size uint8 // length of UTF-8 encoding of this rune
ccc uint8 // leading canonical combining class (ccc if not decomposition)
tccc uint8 // trailing canonical combining class (ccc if not decomposition)
nLead uint8 // number of leading non-starters.
flags qcInfo // quick check flags
index uint16
}
// functions dispatchable per form
type lookupFunc func(b input, i int) Properties
// formInfo holds Form-specific functions and tables.
type formInfo struct {
form Form
composing, compatibility bool // form type
info lookupFunc
nextMain iterFunc
}
var formTable = []*formInfo{{
form: NFC,
composing: true,
compatibility: false,
info: lookupInfoNFC,
nextMain: nextComposed,
}, {
form: NFD,
composing: false,
compatibility: false,
info: lookupInfoNFC,
nextMain: nextDecomposed,
}, {
form: NFKC,
composing: true,
compatibility: true,
info: lookupInfoNFKC,
nextMain: nextComposed,
}, {
form: NFKD,
composing: false,
compatibility: true,
info: lookupInfoNFKC,
nextMain: nextDecomposed,
}}
// We do not distinguish between boundaries for NFC, NFD, etc. to avoid
// unexpected behavior for the user. For example, in NFD, there is a boundary
// after 'a'. However, 'a' might combine with modifiers, so from the application's
// perspective it is not a good boundary. We will therefore always use the
// boundaries for the combining variants.
// BoundaryBefore returns true if this rune starts a new segment and
// cannot combine with any rune on the left.
func (p Properties) BoundaryBefore() bool {
if p.ccc == 0 && !p.combinesBackward() {
return true
}
// We assume that the CCC of the first character in a decomposition
// is always non-zero if different from info.ccc and that we can return
// false at this point. This is verified by maketables.
return false
}
// BoundaryAfter returns true if runes cannot combine with or otherwise
// interact with this or previous runes.
func (p Properties) BoundaryAfter() bool {
// TODO: loosen these conditions.
return p.isInert()
}
// We pack quick check data in 4 bits:
// 5: Combines forward (0 == false, 1 == true)
// 4..3: NFC_QC Yes(00), No (10), or Maybe (11)
// 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition.
// 1..0: Number of trailing non-starters.
//
// When all 4 bits are zero, the character is inert, meaning it is never
// influenced by normalization.
type qcInfo uint8
func (p Properties) isYesC() bool { return p.flags&0x10 == 0 }
func (p Properties) isYesD() bool { return p.flags&0x4 == 0 }
func (p Properties) combinesForward() bool { return p.flags&0x20 != 0 }
func (p Properties) combinesBackward() bool { return p.flags&0x8 != 0 } // == isMaybe
func (p Properties) hasDecomposition() bool { return p.flags&0x4 != 0 } // == isNoD
func (p Properties) isInert() bool {
return p.flags&qcInfoMask == 0 && p.ccc == 0
}
func (p Properties) multiSegment() bool {
return p.index >= firstMulti && p.index < endMulti
}
func (p Properties) nLeadingNonStarters() uint8 {
return p.nLead
}
func (p Properties) nTrailingNonStarters() uint8 {
return uint8(p.flags & 0x03)
}
// Decomposition returns the decomposition for the underlying rune
// or nil if there is none.
func (p Properties) Decomposition() []byte {
// TODO: create the decomposition for Hangul?
if p.index == 0 {
return nil
}
i := p.index
n := decomps[i] & headerLenMask
i++
return decomps[i : i+uint16(n)]
}
// Size returns the length of UTF-8 encoding of the rune.
func (p Properties) Size() int {
return int(p.size)
}
// CCC returns the canonical combining class of the underlying rune.
func (p Properties) CCC() uint8 {
if p.index >= firstCCCZeroExcept {
return 0
}
return ccc[p.ccc]
}
// LeadCCC returns the CCC of the first rune in the decomposition.
// If there is no decomposition, LeadCCC equals CCC.
func (p Properties) LeadCCC() uint8 {
return ccc[p.ccc]
}
// TrailCCC returns the CCC of the last rune in the decomposition.
// If there is no decomposition, TrailCCC equals CCC.
func (p Properties) TrailCCC() uint8 {
return ccc[p.tccc]
}
// Recomposition
// We use 32-bit keys instead of 64-bit for the two codepoint keys.
// This clips off the bits of three entries, but we know this will not
// result in a collision. In the unlikely event that changes to
// UnicodeData.txt introduce collisions, the compiler will catch it.
// Note that the recomposition map for NFC and NFKC are identical.
// combine returns the combined rune or 0 if it doesn't exist.
func combine(a, b rune) rune {
key := uint32(uint16(a))<<16 + uint32(uint16(b))
return recompMap[key]
}
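// Worked example (illustrative): composing 'e' (U+0065) with the combining
// acute accent (U+0301) packs into key 0x00650301, which recompMap resolves
// to 'é' (U+00E9): combine(0x0065, 0x0301) == 0x00E9.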
func lookupInfoNFC(b input, i int) Properties {
v, sz := b.charinfoNFC(i)
return compInfo(v, sz)
}
func lookupInfoNFKC(b input, i int) Properties {
v, sz := b.charinfoNFKC(i)
return compInfo(v, sz)
}
// Properties returns properties for the first rune in s.
func (f Form) Properties(s []byte) Properties {
if f == NFC || f == NFD {
return compInfo(nfcData.lookup(s))
}
return compInfo(nfkcData.lookup(s))
}
// PropertiesString returns properties for the first rune in s.
func (f Form) PropertiesString(s string) Properties {
if f == NFC || f == NFD {
return compInfo(nfcData.lookupString(s))
}
return compInfo(nfkcData.lookupString(s))
}
// compInfo converts the information contained in v and sz
// to a Properties. See the comment at the top of the file
// for more information on the format.
func compInfo(v uint16, sz int) Properties {
if v == 0 {
return Properties{size: uint8(sz)}
} else if v >= 0x8000 {
p := Properties{
size: uint8(sz),
ccc: uint8(v),
tccc: uint8(v),
flags: qcInfo(v >> 8),
}
if p.ccc > 0 || p.combinesBackward() {
p.nLead = uint8(p.flags & 0x3)
}
return p
}
// has decomposition
h := decomps[v]
f := (qcInfo(h&headerFlagsMask) >> 2) | 0x4
p := Properties{size: uint8(sz), flags: f, index: v}
if v >= firstCCC {
v += uint16(h&headerLenMask) + 1
c := decomps[v]
p.tccc = c >> 2
p.flags |= qcInfo(c & 0x3)
if v >= firstLeadingCCC {
p.nLead = c & 0x3
if v >= firstStarterWithNLead {
// We were tricked. Remove the decomposition.
p.flags &= 0x03
p.index = 0
return p
}
p.ccc = decomps[v+1]
}
}
return p
}
| vendor/golang.org/x/text/unicode/norm/forminfo.go | 0 | https://github.com/minio/minio/commit/21c8693d9cca8f3ff1cedf2aee9f3bde89387880 | [
0.00017838300846051425,
0.00017435848712921143,
0.0001668655313551426,
0.0001753221295075491,
0.0000029034404178673867
] |
{
"id": 2,
"code_window": [
"func printCLIAccessMsg(endPoint string, alias string) {\n",
"\t// Get saved credentials.\n",
"\tcred := globalServerConfig.GetCredential()\n",
"\n",
"\t// Configure 'mc', following block prints platform specific information for minio client.\n",
"\tlogger.StartupMessage(colorBlue(\"\\nCommand-line Access: \") + mcQuickStartGuide)\n",
"\tif runtime.GOOS == globalWindowsOSName {\n",
"\t\tmcMessage := fmt.Sprintf(\"$ mc.exe config host add %s %s %s %s\", alias, endPoint, cred.AccessKey, cred.SecretKey)\n",
"\t\tlogger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))\n",
"\t} else {\n",
"\t\tmcMessage := fmt.Sprintf(\"$ mc config host add %s %s %s %s\", alias, endPoint, cred.AccessKey, cred.SecretKey)\n",
"\t\tlogger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))\n",
"\t}\n",
"}\n",
"\n",
"// Prints startup message for Object API acces, prints link to our SDK documentation.\n",
"func printObjectAPIMsg() {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif isTerminal() {\n",
"\t\tlogger.StartupMessage(colorBlue(\"\\nCommand-line Access: \") + mcQuickStartGuide)\n",
"\t\tif runtime.GOOS == globalWindowsOSName {\n",
"\t\t\tmcMessage := fmt.Sprintf(\"$ mc.exe config host add %s %s %s %s\", alias, endPoint, cred.AccessKey, cred.SecretKey)\n",
"\t\t\tlogger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))\n",
"\t\t} else {\n",
"\t\t\tmcMessage := fmt.Sprintf(\"$ mc config host add %s %s %s %s\", alias, endPoint, cred.AccessKey, cred.SecretKey)\n",
"\t\t\tlogger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))\n",
"\t\t}\n"
],
"file_path": "cmd/server-startup-msg.go",
"type": "replace",
"edit_start_line_idx": 153
} | /*
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"crypto/x509"
"fmt"
"net/url"
"runtime"
"strings"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/cmd/logger"
)
// Documentation links, these are part of message printing code.
const (
mcQuickStartGuide = "https://docs.minio.io/docs/minio-client-quickstart-guide"
goQuickStartGuide = "https://docs.minio.io/docs/golang-client-quickstart-guide"
jsQuickStartGuide = "https://docs.minio.io/docs/javascript-client-quickstart-guide"
javaQuickStartGuide = "https://docs.minio.io/docs/java-client-quickstart-guide"
pyQuickStartGuide = "https://docs.minio.io/docs/python-client-quickstart-guide"
dotnetQuickStartGuide = "https://docs.minio.io/docs/dotnet-client-quickstart-guide"
)
// generates format string depending on the string length and padding.
func getFormatStr(strLen int, padding int) string {
formatStr := fmt.Sprintf("%ds", strLen+padding)
return "%" + formatStr
}
// Prints the formatted startup message.
func printStartupMessage(apiEndPoints []string) {
strippedAPIEndpoints := stripStandardPorts(apiEndPoints)
// If cache layer is enabled, print cache capacity.
cacheObjectAPI := newCacheObjectsFn()
if cacheObjectAPI != nil {
printCacheStorageInfo(cacheObjectAPI.StorageInfo(context.Background()))
}
// Object layer is initialized then print StorageInfo.
objAPI := newObjectLayerFn()
if objAPI != nil {
printStorageInfo(objAPI.StorageInfo(context.Background()))
}
// Prints credential, region and browser access.
printServerCommonMsg(strippedAPIEndpoints)
// Prints `mc` cli configuration message chooses
// first endpoint as default.
printCLIAccessMsg(strippedAPIEndpoints[0], "myminio")
// Prints documentation message.
printObjectAPIMsg()
// SSL is configured reads certification chain, prints
// authority and expiry.
if globalIsSSL {
printCertificateMsg(globalPublicCerts)
}
}
// Strips standard ports such as "80" and "443" from the API
// endpoint list before displaying it on the startup banner.
// Returns a new list of API endpoints.
func stripStandardPorts(apiEndpoints []string) (newAPIEndpoints []string) {
newAPIEndpoints = make([]string, len(apiEndpoints))
// Check all API endpoints for standard ports and strip them.
for i, apiEndpoint := range apiEndpoints {
url, err := url.Parse(apiEndpoint)
if err != nil {
newAPIEndpoints[i] = apiEndpoint
continue
}
host, port := mustSplitHostPort(url.Host)
		// For standard HTTP(s) ports such as "80" and "443",
		// the endpoint should contain only the host, without the port.
switch {
case url.Scheme == "http" && port == "80":
fallthrough
case url.Scheme == "https" && port == "443":
url.Host = host
newAPIEndpoints[i] = url.String()
default:
newAPIEndpoints[i] = apiEndpoint
}
}
return newAPIEndpoints
}
// Prints common server startup message. Prints credential, region and browser access.
func printServerCommonMsg(apiEndpoints []string) {
// Get saved credentials.
cred := globalServerConfig.GetCredential()
// Get saved region.
region := globalServerConfig.GetRegion()
apiEndpointStr := strings.Join(apiEndpoints, " ")
// Colorize the message and print.
logger.StartupMessage(colorBlue("Endpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))
logger.StartupMessage(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKey)))
logger.StartupMessage(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretKey)))
if region != "" {
logger.StartupMessage(colorBlue("Region: ") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region)))
}
printEventNotifiers()
if globalIsBrowserEnabled {
logger.StartupMessage(colorBlue("\nBrowser Access:"))
logger.StartupMessage(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 3), apiEndpointStr))
}
}
// Prints bucket notification configurations.
func printEventNotifiers() {
arns := globalNotificationSys.GetARNList()
if len(arns) == 0 {
return
}
arnMsg := colorBlue("SQS ARNs: ")
for _, arn := range arns {
arnMsg += colorBold(fmt.Sprintf(getFormatStr(len(arn), 1), arn))
}
logger.StartupMessage(arnMsg)
}
// Prints startup message for command line access. Prints link to our documentation
// and custom platform specific message.
func printCLIAccessMsg(endPoint string, alias string) {
// Get saved credentials.
cred := globalServerConfig.GetCredential()
// Configure 'mc'; the following block prints platform-specific information for the minio client.
logger.StartupMessage(colorBlue("\nCommand-line Access: ") + mcQuickStartGuide)
if runtime.GOOS == globalWindowsOSName {
mcMessage := fmt.Sprintf("$ mc.exe config host add %s %s %s %s", alias, endPoint, cred.AccessKey, cred.SecretKey)
logger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
} else {
mcMessage := fmt.Sprintf("$ mc config host add %s %s %s %s", alias, endPoint, cred.AccessKey, cred.SecretKey)
logger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
}
}
// Prints startup message for Object API access, prints link to our SDK documentation.
func printObjectAPIMsg() {
logger.StartupMessage(colorBlue("\nObject API (Amazon S3 compatible):"))
logger.StartupMessage(colorBlue(" Go: ") + fmt.Sprintf(getFormatStr(len(goQuickStartGuide), 8), goQuickStartGuide))
logger.StartupMessage(colorBlue(" Java: ") + fmt.Sprintf(getFormatStr(len(javaQuickStartGuide), 6), javaQuickStartGuide))
logger.StartupMessage(colorBlue(" Python: ") + fmt.Sprintf(getFormatStr(len(pyQuickStartGuide), 4), pyQuickStartGuide))
logger.StartupMessage(colorBlue(" JavaScript: ") + jsQuickStartGuide)
logger.StartupMessage(colorBlue(" .NET: ") + fmt.Sprintf(getFormatStr(len(dotnetQuickStartGuide), 6), dotnetQuickStartGuide))
}
// Get formatted disk/storage info message.
func getStorageInfoMsg(storageInfo StorageInfo) string {
var msg string
if storageInfo.Backend.Type == BackendErasure {
diskInfo := fmt.Sprintf(" %d Online, %d Offline. ", storageInfo.Backend.OnlineDisks, storageInfo.Backend.OfflineDisks)
msg += colorBlue("Status:") + fmt.Sprintf(getFormatStr(len(diskInfo), 8), diskInfo)
}
return msg
}
// Prints startup message of storage capacity and erasure information.
func printStorageInfo(storageInfo StorageInfo) {
if msg := getStorageInfoMsg(storageInfo); msg != "" {
logger.StartupMessage(msg)
}
}
func printCacheStorageInfo(storageInfo CacheStorageInfo) {
msg := fmt.Sprintf("%s %s Free, %s Total", colorBlue("Cache Capacity:"),
humanize.IBytes(uint64(storageInfo.Free)),
humanize.IBytes(uint64(storageInfo.Total)))
logger.StartupMessage(msg)
}
// Builds a warning message listing certificates that are close to expiry.
func getCertificateChainMsg(certs []*x509.Certificate) string {
msg := colorBlue("\nCertificate expiry info:\n")
totalCerts := len(certs)
var expiringCerts int
for i := totalCerts - 1; i >= 0; i-- {
cert := certs[i]
if cert.NotAfter.Before(UTCNow().Add(globalMinioCertExpireWarnDays)) {
expiringCerts++
msg += fmt.Sprintf(colorBold("#%d %s will expire on %s\n"), expiringCerts, cert.Subject.CommonName, cert.NotAfter)
}
}
if expiringCerts > 0 {
return msg
}
return ""
}
// Prints the certificate expiry message.
func printCertificateMsg(certs []*x509.Certificate) {
logger.StartupMessage(getCertificateChainMsg(certs))
}
| cmd/server-startup-msg.go | 1 | https://github.com/minio/minio/commit/21c8693d9cca8f3ff1cedf2aee9f3bde89387880 | [
0.9983097314834595,
0.09455926716327667,
0.00016324974421877414,
0.0006173868896439672,
0.2853582501411438
] |
{
"id": 2,
"code_window": [
"func printCLIAccessMsg(endPoint string, alias string) {\n",
"\t// Get saved credentials.\n",
"\tcred := globalServerConfig.GetCredential()\n",
"\n",
"\t// Configure 'mc', following block prints platform specific information for minio client.\n",
"\tlogger.StartupMessage(colorBlue(\"\\nCommand-line Access: \") + mcQuickStartGuide)\n",
"\tif runtime.GOOS == globalWindowsOSName {\n",
"\t\tmcMessage := fmt.Sprintf(\"$ mc.exe config host add %s %s %s %s\", alias, endPoint, cred.AccessKey, cred.SecretKey)\n",
"\t\tlogger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))\n",
"\t} else {\n",
"\t\tmcMessage := fmt.Sprintf(\"$ mc config host add %s %s %s %s\", alias, endPoint, cred.AccessKey, cred.SecretKey)\n",
"\t\tlogger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))\n",
"\t}\n",
"}\n",
"\n",
"// Prints startup message for Object API acces, prints link to our SDK documentation.\n",
"func printObjectAPIMsg() {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif isTerminal() {\n",
"\t\tlogger.StartupMessage(colorBlue(\"\\nCommand-line Access: \") + mcQuickStartGuide)\n",
"\t\tif runtime.GOOS == globalWindowsOSName {\n",
"\t\t\tmcMessage := fmt.Sprintf(\"$ mc.exe config host add %s %s %s %s\", alias, endPoint, cred.AccessKey, cred.SecretKey)\n",
"\t\t\tlogger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))\n",
"\t\t} else {\n",
"\t\t\tmcMessage := fmt.Sprintf(\"$ mc config host add %s %s %s %s\", alias, endPoint, cred.AccessKey, cred.SecretKey)\n",
"\t\t\tlogger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))\n",
"\t\t}\n"
],
"file_path": "cmd/server-startup-msg.go",
"type": "replace",
"edit_start_line_idx": 153
} | version: "{build}"
platform: x64
clone_folder: c:\gopath\src\go.opencensus.io
environment:
GOPATH: 'c:\gopath'
GOVERSION: '1.11'
GO111MODULE: 'on'
CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613
install:
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- go version
- go env
build: false
deploy: false
test_script:
- cd %APPVEYOR_BUILD_FOLDER%
- go build -v .\...
- go test -v .\... # No -race because cgo is disabled
| vendor/go.opencensus.io/appveyor.yml | 0 | https://github.com/minio/minio/commit/21c8693d9cca8f3ff1cedf2aee9f3bde89387880 | [
0.0001669852208578959,
0.00016612563922535628,
0.0001655087835388258,
0.00016588291327934712,
6.267129606385424e-7
] |
{
"id": 2,
"code_window": [
"func printCLIAccessMsg(endPoint string, alias string) {\n",
"\t// Get saved credentials.\n",
"\tcred := globalServerConfig.GetCredential()\n",
"\n",
"\t// Configure 'mc', following block prints platform specific information for minio client.\n",
"\tlogger.StartupMessage(colorBlue(\"\\nCommand-line Access: \") + mcQuickStartGuide)\n",
"\tif runtime.GOOS == globalWindowsOSName {\n",
"\t\tmcMessage := fmt.Sprintf(\"$ mc.exe config host add %s %s %s %s\", alias, endPoint, cred.AccessKey, cred.SecretKey)\n",
"\t\tlogger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))\n",
"\t} else {\n",
"\t\tmcMessage := fmt.Sprintf(\"$ mc config host add %s %s %s %s\", alias, endPoint, cred.AccessKey, cred.SecretKey)\n",
"\t\tlogger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))\n",
"\t}\n",
"}\n",
"\n",
"// Prints startup message for Object API acces, prints link to our SDK documentation.\n",
"func printObjectAPIMsg() {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif isTerminal() {\n",
"\t\tlogger.StartupMessage(colorBlue(\"\\nCommand-line Access: \") + mcQuickStartGuide)\n",
"\t\tif runtime.GOOS == globalWindowsOSName {\n",
"\t\t\tmcMessage := fmt.Sprintf(\"$ mc.exe config host add %s %s %s %s\", alias, endPoint, cred.AccessKey, cred.SecretKey)\n",
"\t\t\tlogger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))\n",
"\t\t} else {\n",
"\t\t\tmcMessage := fmt.Sprintf(\"$ mc config host add %s %s %s %s\", alias, endPoint, cred.AccessKey, cred.SecretKey)\n",
"\t\t\tlogger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))\n",
"\t\t}\n"
],
"file_path": "cmd/server-startup-msg.go",
"type": "replace",
"edit_start_line_idx": 153
} | /*
* Isomorphic Javascript library for Minio Browser JSON-RPC API, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var webpack = require('webpack')
var path = require('path')
var CopyWebpackPlugin = require('copy-webpack-plugin')
var purify = require("purifycss-webpack-plugin")
var exports = {
context: __dirname,
entry: [
path.resolve(__dirname, 'app/index.js')
],
output: {
path: path.resolve(__dirname, 'production'),
filename: 'index_bundle.js'
},
module: {
rules: [{
test: /\.js$/,
exclude: /(node_modules|bower_components)/,
use: [{
loader: 'babel-loader',
options: {
presets: ['react', 'es2015']
}
}]
}, {
test: /\.less$/,
use: [{
loader: 'style-loader'
}, {
loader: 'css-loader'
}, {
loader: 'less-loader'
}]
}, {
test: /\.css$/,
use: [{
loader: 'style-loader'
}, {
loader: 'css-loader'
}]
}, {
test: /\.(eot|woff|woff2|ttf|svg|png)/,
use: [{
loader: 'url-loader'
}]
}]
},
node:{
fs:'empty'
},
plugins: [
new CopyWebpackPlugin([
{from: 'app/css/loader.css'},
{from: 'app/img/favicon.ico'},
{from: 'app/img/browsers/chrome.png'},
{from: 'app/img/browsers/firefox.png'},
{from: 'app/img/browsers/safari.png'},
{from: 'app/img/logo.svg'},
{from: 'app/index.html'}
]),
new webpack.DefinePlugin({
'process.env.NODE_ENV': '"production"'
}),
new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
new purify({
basePath: __dirname,
paths: [
"app/index.html",
"app/js/*.js"
]
})
]
}
if (process.env.NODE_ENV === 'dev') {
exports.entry = [
'webpack-dev-server/client?http://localhost:8080',
path.resolve(__dirname, 'app/index.js')
]
}
module.exports = exports
| browser/webpack.production.config.js | 0 | https://github.com/minio/minio/commit/21c8693d9cca8f3ff1cedf2aee9f3bde89387880 | [
0.00017255691636819392,
0.00016946368850767612,
0.000165039804414846,
0.00017017259960994124,
0.000002347872850805288
] |
{
"id": 2,
"code_window": [
"func printCLIAccessMsg(endPoint string, alias string) {\n",
"\t// Get saved credentials.\n",
"\tcred := globalServerConfig.GetCredential()\n",
"\n",
"\t// Configure 'mc', following block prints platform specific information for minio client.\n",
"\tlogger.StartupMessage(colorBlue(\"\\nCommand-line Access: \") + mcQuickStartGuide)\n",
"\tif runtime.GOOS == globalWindowsOSName {\n",
"\t\tmcMessage := fmt.Sprintf(\"$ mc.exe config host add %s %s %s %s\", alias, endPoint, cred.AccessKey, cred.SecretKey)\n",
"\t\tlogger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))\n",
"\t} else {\n",
"\t\tmcMessage := fmt.Sprintf(\"$ mc config host add %s %s %s %s\", alias, endPoint, cred.AccessKey, cred.SecretKey)\n",
"\t\tlogger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))\n",
"\t}\n",
"}\n",
"\n",
"// Prints startup message for Object API acces, prints link to our SDK documentation.\n",
"func printObjectAPIMsg() {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif isTerminal() {\n",
"\t\tlogger.StartupMessage(colorBlue(\"\\nCommand-line Access: \") + mcQuickStartGuide)\n",
"\t\tif runtime.GOOS == globalWindowsOSName {\n",
"\t\t\tmcMessage := fmt.Sprintf(\"$ mc.exe config host add %s %s %s %s\", alias, endPoint, cred.AccessKey, cred.SecretKey)\n",
"\t\t\tlogger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))\n",
"\t\t} else {\n",
"\t\t\tmcMessage := fmt.Sprintf(\"$ mc config host add %s %s %s %s\", alias, endPoint, cred.AccessKey, cred.SecretKey)\n",
"\t\t\tlogger.StartupMessage(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))\n",
"\t\t}\n"
],
"file_path": "cmd/server-startup-msg.go",
"type": "replace",
"edit_start_line_idx": 153
} | Copyright (c) 2015 Olivier Poitrey <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished
to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
| vendor/github.com/rs/xhandler/LICENSE | 0 | https://github.com/minio/minio/commit/21c8693d9cca8f3ff1cedf2aee9f3bde89387880 | [
0.00017513599595986307,
0.00017446541460230947,
0.0001737948477966711,
0.00017446541460230947,
6.705740815959871e-7
] |
{
"id": 0,
"code_window": [
"\t\tc.nodeLister,\n",
"\t\tc.maxEndpointsPerSlice,\n",
"\t\tc.endpointSliceTracker,\n",
"\t\tc.topologyCache,\n",
"\t\tutilfeature.DefaultFeatureGate.Enabled(features.ServiceTrafficDistribution),\n",
"\t\tc.eventRecorder,\n",
"\t\tcontrollerName,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/controller/endpointslice/endpointslice_controller.go",
"type": "replace",
"edit_start_line_idx": 175
} | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package endpointslice
import (
"context"
"fmt"
"sort"
"time"
corev1 "k8s.io/api/core/v1"
discovery "k8s.io/api/discovery/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/endpointslice/metrics"
"k8s.io/endpointslice/topologycache"
"k8s.io/endpointslice/trafficdist"
endpointsliceutil "k8s.io/endpointslice/util"
"k8s.io/klog/v2"
)
// Reconciler is responsible for transforming current EndpointSlice state into
// desired state
type Reconciler struct {
client clientset.Interface
nodeLister corelisters.NodeLister
maxEndpointsPerSlice int32
endpointSliceTracker *endpointsliceutil.EndpointSliceTracker
metricsCache *metrics.Cache
// topologyCache tracks the distribution of Nodes and endpoints across zones
// to enable TopologyAwareHints.
topologyCache *topologycache.TopologyCache
// trafficDistributionEnabled determines if the trafficDistribution field is to
// be considered when reconciling EndpointSlice hints.
trafficDistributionEnabled bool
// eventRecorder allows Reconciler to record and publish events.
eventRecorder record.EventRecorder
controllerName string
}
// endpointMeta includes the attributes we group slices on, this type helps with
// that logic in Reconciler
type endpointMeta struct {
ports []discovery.EndpointPort
addressType discovery.AddressType
}
// Reconcile takes a set of pods currently matching a service selector and
// compares them with the endpoints already present in any existing endpoint
// slices for the given service. It creates, updates, or deletes endpoint slices
// to ensure the desired set of pods are represented by endpoint slices.
func (r *Reconciler) Reconcile(logger klog.Logger, service *corev1.Service, pods []*corev1.Pod, existingSlices []*discovery.EndpointSlice, triggerTime time.Time) error {
slicesToDelete := []*discovery.EndpointSlice{} // slices that are no longer matching any address the service has
errs := []error{} // all errors generated in the process of reconciling
slicesByAddressType := make(map[discovery.AddressType][]*discovery.EndpointSlice) // slices by address type
// addresses that this service supports [o(1) find]
serviceSupportedAddressesTypes := getAddressTypesForService(logger, service)
// loop through slices identifying their address type.
// slices that no longer match address type supported by services
// go to delete, other slices go to the Reconciler machinery
// for further adjustment
for _, existingSlice := range existingSlices {
// service no longer supports that address type, add it to deleted slices
if !serviceSupportedAddressesTypes.Has(existingSlice.AddressType) {
if r.topologyCache != nil {
svcKey, err := ServiceControllerKey(existingSlice)
if err != nil {
logger.Info("Couldn't get key to remove EndpointSlice from topology cache", "existingSlice", existingSlice, "err", err)
} else {
r.topologyCache.RemoveHints(svcKey, existingSlice.AddressType)
}
}
slicesToDelete = append(slicesToDelete, existingSlice)
continue
}
// add list if it is not on our map
if _, ok := slicesByAddressType[existingSlice.AddressType]; !ok {
slicesByAddressType[existingSlice.AddressType] = make([]*discovery.EndpointSlice, 0, 1)
}
slicesByAddressType[existingSlice.AddressType] = append(slicesByAddressType[existingSlice.AddressType], existingSlice)
}
// reconcile for existing.
for addressType := range serviceSupportedAddressesTypes {
existingSlices := slicesByAddressType[addressType]
err := r.reconcileByAddressType(logger, service, pods, existingSlices, triggerTime, addressType)
if err != nil {
errs = append(errs, err)
}
}
// delete those which are of addressType that is no longer supported
// by the service
for _, sliceToDelete := range slicesToDelete {
err := r.client.DiscoveryV1().EndpointSlices(service.Namespace).Delete(context.TODO(), sliceToDelete.Name, metav1.DeleteOptions{})
if err != nil {
errs = append(errs, fmt.Errorf("error deleting %s EndpointSlice for Service %s/%s: %w", sliceToDelete.Name, service.Namespace, service.Name, err))
} else {
r.endpointSliceTracker.ExpectDeletion(sliceToDelete)
metrics.EndpointSliceChanges.WithLabelValues("delete").Inc()
}
}
return utilerrors.NewAggregate(errs)
}
// reconcileByAddressType takes a set of pods currently matching a service selector and
// compares them with the endpoints already present in any existing endpoint
// slices (by address type) for the given service. It creates, updates, or deletes endpoint slices
// to ensure the desired set of pods are represented by endpoint slices.
func (r *Reconciler) reconcileByAddressType(logger klog.Logger, service *corev1.Service, pods []*corev1.Pod, existingSlices []*discovery.EndpointSlice, triggerTime time.Time, addressType discovery.AddressType) error {
errs := []error{}
slicesToCreate := []*discovery.EndpointSlice{}
slicesToUpdate := []*discovery.EndpointSlice{}
slicesToDelete := []*discovery.EndpointSlice{}
events := []*topologycache.EventBuilder{}
// Build data structures for existing state.
existingSlicesByPortMap := map[endpointsliceutil.PortMapKey][]*discovery.EndpointSlice{}
for _, existingSlice := range existingSlices {
if ownedBy(existingSlice, service) {
epHash := endpointsliceutil.NewPortMapKey(existingSlice.Ports)
existingSlicesByPortMap[epHash] = append(existingSlicesByPortMap[epHash], existingSlice)
} else {
slicesToDelete = append(slicesToDelete, existingSlice)
}
}
// Build data structures for desired state.
desiredMetaByPortMap := map[endpointsliceutil.PortMapKey]*endpointMeta{}
desiredEndpointsByPortMap := map[endpointsliceutil.PortMapKey]endpointsliceutil.EndpointSet{}
for _, pod := range pods {
if !endpointsliceutil.ShouldPodBeInEndpoints(pod, true) {
continue
}
endpointPorts := getEndpointPorts(logger, service, pod)
epHash := endpointsliceutil.NewPortMapKey(endpointPorts)
if _, ok := desiredEndpointsByPortMap[epHash]; !ok {
desiredEndpointsByPortMap[epHash] = endpointsliceutil.EndpointSet{}
}
if _, ok := desiredMetaByPortMap[epHash]; !ok {
desiredMetaByPortMap[epHash] = &endpointMeta{
addressType: addressType,
ports: endpointPorts,
}
}
node, err := r.nodeLister.Get(pod.Spec.NodeName)
if err != nil {
// we are getting the information from the local informer;
// an error other than IsNotFound should not happen
if !errors.IsNotFound(err) {
return err
}
// If the Node specified by the Pod doesn't exist we want to requeue the Service so we
// retry later, but also update the EndpointSlice without the problematic Pod.
// Theoretically, the pod Garbage Collector will remove the Pod, but we want to avoid
// situations where a reference from a Pod to a missing node can leave the EndpointSlice
// stuck forever.
// On the other hand, if service.Spec.PublishNotReadyAddresses is set we just add the
// Pod, since the user is explicitly indicating that the Pod address should be published.
if !service.Spec.PublishNotReadyAddresses {
logger.Info("skipping Pod for Service, Node not found", "pod", klog.KObj(pod), "service", klog.KObj(service), "node", klog.KRef("", pod.Spec.NodeName))
errs = append(errs, fmt.Errorf("skipping Pod %s for Service %s/%s: Node %s Not Found", pod.Name, service.Namespace, service.Name, pod.Spec.NodeName))
continue
}
}
endpoint := podToEndpoint(pod, node, service, addressType)
if len(endpoint.Addresses) > 0 {
desiredEndpointsByPortMap[epHash].Insert(&endpoint)
}
}
spMetrics := metrics.NewServicePortCache()
totalAdded := 0
totalRemoved := 0
// Determine changes necessary for each group of slices by port map.
for portMap, desiredEndpoints := range desiredEndpointsByPortMap {
numEndpoints := len(desiredEndpoints)
pmSlicesToCreate, pmSlicesToUpdate, pmSlicesToDelete, added, removed := r.reconcileByPortMapping(
logger, service, existingSlicesByPortMap[portMap], desiredEndpoints, desiredMetaByPortMap[portMap])
totalAdded += added
totalRemoved += removed
spMetrics.Set(portMap, metrics.EfficiencyInfo{
Endpoints: numEndpoints,
Slices: len(existingSlicesByPortMap[portMap]) + len(pmSlicesToCreate) - len(pmSlicesToDelete),
})
slicesToCreate = append(slicesToCreate, pmSlicesToCreate...)
slicesToUpdate = append(slicesToUpdate, pmSlicesToUpdate...)
slicesToDelete = append(slicesToDelete, pmSlicesToDelete...)
}
// If there are unique sets of ports that are no longer desired, mark
// the corresponding endpoint slices for deletion.
for portMap, existingSlices := range existingSlicesByPortMap {
if _, ok := desiredEndpointsByPortMap[portMap]; !ok {
slicesToDelete = append(slicesToDelete, existingSlices...)
}
}
// When no endpoint slices would usually exist, we need to add a placeholder.
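// (A placeholder is a single EndpointSlice with no endpoints; keeping one
// around means a service whose selector matches no pods still has a slice,
// rather than none at all.)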
if len(existingSlices) == len(slicesToDelete) && len(slicesToCreate) < 1 {
// Check for existing placeholder slice outside of the core control flow
placeholderSlice := newEndpointSlice(logger, service, &endpointMeta{ports: []discovery.EndpointPort{}, addressType: addressType}, r.controllerName)
if len(slicesToDelete) == 1 && placeholderSliceCompare.DeepEqual(slicesToDelete[0], placeholderSlice) {
// We are about to unnecessarily delete/recreate the placeholder, remove it now.
slicesToDelete = slicesToDelete[:0]
} else {
slicesToCreate = append(slicesToCreate, placeholderSlice)
}
spMetrics.Set(endpointsliceutil.NewPortMapKey(placeholderSlice.Ports), metrics.EfficiencyInfo{
Endpoints: 0,
Slices: 1,
})
}
metrics.EndpointsAddedPerSync.WithLabelValues().Observe(float64(totalAdded))
metrics.EndpointsRemovedPerSync.WithLabelValues().Observe(float64(totalRemoved))
serviceNN := types.NamespacedName{Name: service.Name, Namespace: service.Namespace}
r.metricsCache.UpdateServicePortCache(serviceNN, spMetrics)
// Topology hints are assigned per address type. This means it is
// theoretically possible for endpoints of one address type to be assigned
// hints while endpoints of another address type are not.
si := &topologycache.SliceInfo{
ServiceKey: fmt.Sprintf("%s/%s", service.Namespace, service.Name),
AddressType: addressType,
ToCreate: slicesToCreate,
ToUpdate: slicesToUpdate,
Unchanged: unchangedSlices(existingSlices, slicesToUpdate, slicesToDelete),
}
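// Note that the topology-aware hints annotation takes precedence here: when
// hintsEnabled is true, the trafficDistribution field is not consulted.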
canUseTrafficDistribution := r.trafficDistributionEnabled && !hintsEnabled(service.Annotations)
// Check if we need to add/remove hints based on the topology annotation.
//
// This if/else clause can be removed once the annotation has been deprecated.
// Ref: https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/4444-service-routing-preference
if r.topologyCache != nil && hintsEnabled(service.Annotations) {
// Reaching this point means that we need to configure hints based on the
// topology annotation.
slicesToCreate, slicesToUpdate, events = r.topologyCache.AddHints(logger, si)
} else {
// Reaching this point means that we will not be configuring hints based on
// the topology annotation. We need to do 2 things:
// 1. If hints were added previously based on the annotation, we need to
// clear up any locally cached hints from the topologyCache object.
// 2. Optionally remove the actual hints from the EndpointSlice if we know
// that the `trafficDistribution` field is also NOT being used. In other
// words, if we know that the `trafficDistribution` field has been
// correctly configured by the customer, we DO NOT remove the hints and
// wait for the trafficDist handlers to correctly configure them. Always
// unconditionally removing hints here (and letting them get readded by
// the trafficDist) adds extra overhead in the form of DeepCopy (done
// within topologyCache.RemoveHints)
// Check 1.
if r.topologyCache != nil {
if r.topologyCache.HasPopulatedHints(si.ServiceKey) {
logger.Info("TopologyAwareHints annotation has changed, removing hints", "serviceKey", si.ServiceKey, "addressType", si.AddressType)
events = append(events, &topologycache.EventBuilder{
EventType: corev1.EventTypeWarning,
Reason: "TopologyAwareHintsDisabled",
Message: topologycache.FormatWithAddressType(topologycache.TopologyAwareHintsDisabled, si.AddressType),
})
}
r.topologyCache.RemoveHints(si.ServiceKey, addressType)
}
// Check 2.
if !canUseTrafficDistribution {
slicesToCreate, slicesToUpdate = topologycache.RemoveHintsFromSlices(si)
}
}
if canUseTrafficDistribution {
r.metricsCache.UpdateTrafficDistributionForService(serviceNN, service.Spec.TrafficDistribution)
slicesToCreate, slicesToUpdate, _ = trafficdist.ReconcileHints(service.Spec.TrafficDistribution, slicesToCreate, slicesToUpdate, unchangedSlices(existingSlices, slicesToUpdate, slicesToDelete))
} else {
r.metricsCache.UpdateTrafficDistributionForService(serviceNN, nil)
}
err := r.finalize(service, slicesToCreate, slicesToUpdate, slicesToDelete, triggerTime)
if err != nil {
errs = append(errs, err)
}
for _, event := range events {
r.eventRecorder.Event(service, event.EventType, event.Reason, event.Message)
}
return utilerrors.NewAggregate(errs)
}
func NewReconciler(client clientset.Interface, nodeLister corelisters.NodeLister, maxEndpointsPerSlice int32, endpointSliceTracker *endpointsliceutil.EndpointSliceTracker, topologyCache *topologycache.TopologyCache, trafficDistributionEnabled bool, eventRecorder record.EventRecorder, controllerName string) *Reconciler {
return &Reconciler{
client: client,
nodeLister: nodeLister,
maxEndpointsPerSlice: maxEndpointsPerSlice,
endpointSliceTracker: endpointSliceTracker,
metricsCache: metrics.NewCache(maxEndpointsPerSlice),
topologyCache: topologyCache,
trafficDistributionEnabled: trafficDistributionEnabled,
eventRecorder: eventRecorder,
controllerName: controllerName,
}
}
// placeholderSliceCompare is a conversion func for comparing two placeholder endpoint slices.
// It only compares the specific fields we care about.
var placeholderSliceCompare = conversion.EqualitiesOrDie(
func(a, b metav1.OwnerReference) bool {
return a.String() == b.String()
},
func(a, b metav1.ObjectMeta) bool {
if a.Namespace != b.Namespace {
return false
}
for k, v := range a.Labels {
if b.Labels[k] != v {
return false
}
}
for k, v := range b.Labels {
if a.Labels[k] != v {
return false
}
}
return true
},
)
// finalize creates, updates, and deletes slices as specified
func (r *Reconciler) finalize(
service *corev1.Service,
slicesToCreate,
slicesToUpdate,
slicesToDelete []*discovery.EndpointSlice,
triggerTime time.Time,
) error {
// If there are slices to create and delete, change the creates to updates
// of the slices that would otherwise be deleted.
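// Reusing the name of a slice that would otherwise be deleted turns a
// create+delete pair into a single update, which needs fewer API requests.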
for i := 0; i < len(slicesToDelete); {
if len(slicesToCreate) == 0 {
break
}
sliceToDelete := slicesToDelete[i]
slice := slicesToCreate[len(slicesToCreate)-1]
// Only update EndpointSlices that are owned by this Service and have
// the same AddressType. We need to avoid updating EndpointSlices that
// are being garbage collected for an old Service with the same name.
// The AddressType field is immutable. Since Services also consider
// IPFamily immutable, the only case where this should matter will be
// the migration from IP to IPv4 and IPv6 AddressTypes, where there's a
// chance EndpointSlices with an IP AddressType would otherwise be
// updated to IPv4 or IPv6 without this check.
if sliceToDelete.AddressType == slice.AddressType && ownedBy(sliceToDelete, service) {
slice.Name = sliceToDelete.Name
slicesToCreate = slicesToCreate[:len(slicesToCreate)-1]
slicesToUpdate = append(slicesToUpdate, slice)
slicesToDelete = append(slicesToDelete[:i], slicesToDelete[i+1:]...)
} else {
i++
}
}
// Don't create new EndpointSlices if the Service is pending deletion. This
// is to avoid a potential race condition with the garbage collector where
// it tries to delete EndpointSlices as this controller replaces them.
if service.DeletionTimestamp == nil {
for _, endpointSlice := range slicesToCreate {
addTriggerTimeAnnotation(endpointSlice, triggerTime)
createdSlice, err := r.client.DiscoveryV1().EndpointSlices(service.Namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{})
if err != nil {
// If the namespace is terminating, creates will continue to fail. Simply drop the item.
if errors.HasStatusCause(err, corev1.NamespaceTerminatingCause) {
return nil
}
return fmt.Errorf("failed to create EndpointSlice for Service %s/%s: %v", service.Namespace, service.Name, err)
}
r.endpointSliceTracker.Update(createdSlice)
metrics.EndpointSliceChanges.WithLabelValues("create").Inc()
}
}
for _, endpointSlice := range slicesToUpdate {
addTriggerTimeAnnotation(endpointSlice, triggerTime)
updatedSlice, err := r.client.DiscoveryV1().EndpointSlices(service.Namespace).Update(context.TODO(), endpointSlice, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("failed to update %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err)
}
r.endpointSliceTracker.Update(updatedSlice)
metrics.EndpointSliceChanges.WithLabelValues("update").Inc()
}
for _, endpointSlice := range slicesToDelete {
err := r.client.DiscoveryV1().EndpointSlices(service.Namespace).Delete(context.TODO(), endpointSlice.Name, metav1.DeleteOptions{})
if err != nil {
return fmt.Errorf("failed to delete %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err)
}
r.endpointSliceTracker.ExpectDeletion(endpointSlice)
metrics.EndpointSliceChanges.WithLabelValues("delete").Inc()
}
topologyLabel := "Disabled"
if r.topologyCache != nil && hintsEnabled(service.Annotations) {
topologyLabel = "Auto"
}
var trafficDistribution string
if r.trafficDistributionEnabled && !hintsEnabled(service.Annotations) {
if service.Spec.TrafficDistribution != nil && *service.Spec.TrafficDistribution == corev1.ServiceTrafficDistributionPreferClose {
trafficDistribution = *service.Spec.TrafficDistribution
}
}
numSlicesChanged := len(slicesToCreate) + len(slicesToUpdate) + len(slicesToDelete)
metrics.EndpointSlicesChangedPerSync.WithLabelValues(topologyLabel, trafficDistribution).Observe(float64(numSlicesChanged))
return nil
}
// reconcileByPortMapping compares the endpoints found in existing slices with
// the list of desired endpoints and returns lists of slices to create, update,
// and delete. It also checks that the slices mirror the parent services labels.
// The logic is split up into several main steps:
// 1. Iterate through existing slices, delete endpoints that are no longer
// desired and update matching endpoints that have changed. It also checks
// if the slices have the labels of the parent services, and updates them if not.
// 2. Iterate through slices that have been modified in 1 and fill them up with
// any remaining desired endpoints.
// 3. If there are still desired endpoints left, try to fit them into a previously
// unchanged slice and/or create new ones.
func (r *Reconciler) reconcileByPortMapping(
logger klog.Logger,
service *corev1.Service,
existingSlices []*discovery.EndpointSlice,
desiredSet endpointsliceutil.EndpointSet,
endpointMeta *endpointMeta,
) ([]*discovery.EndpointSlice, []*discovery.EndpointSlice, []*discovery.EndpointSlice, int, int) {
slicesByName := map[string]*discovery.EndpointSlice{}
sliceNamesUnchanged := sets.New[string]()
sliceNamesToUpdate := sets.New[string]()
sliceNamesToDelete := sets.New[string]()
numRemoved := 0
// 1. Iterate through existing slices to delete endpoints no longer desired
// and update endpoints that have changed
for _, existingSlice := range existingSlices {
slicesByName[existingSlice.Name] = existingSlice
newEndpoints := []discovery.Endpoint{}
endpointUpdated := false
for _, endpoint := range existingSlice.Endpoints {
got := desiredSet.Get(&endpoint)
// If endpoint is desired add it to list of endpoints to keep.
if got != nil {
newEndpoints = append(newEndpoints, *got)
// If existing version of endpoint doesn't match desired version
// set endpointUpdated to ensure endpoint changes are persisted.
if !endpointsliceutil.EndpointsEqualBeyondHash(got, &endpoint) {
endpointUpdated = true
}
// once an endpoint has been placed/found in a slice, it no
// longer needs to be handled
desiredSet.Delete(&endpoint)
}
}
// generate the slice labels and check if parent labels have changed
labels, labelsChanged := setEndpointSliceLabels(logger, existingSlice, service, r.controllerName)
// If an endpoint was updated or removed, mark for update or delete
if endpointUpdated || len(existingSlice.Endpoints) != len(newEndpoints) {
if len(existingSlice.Endpoints) > len(newEndpoints) {
numRemoved += len(existingSlice.Endpoints) - len(newEndpoints)
}
if len(newEndpoints) == 0 {
// if no endpoints desired in this slice, mark for deletion
sliceNamesToDelete.Insert(existingSlice.Name)
} else {
// otherwise, copy and mark for update
epSlice := existingSlice.DeepCopy()
epSlice.Endpoints = newEndpoints
epSlice.Labels = labels
slicesByName[existingSlice.Name] = epSlice
sliceNamesToUpdate.Insert(epSlice.Name)
}
} else if labelsChanged {
// if labels have changed, copy and mark for update
epSlice := existingSlice.DeepCopy()
epSlice.Labels = labels
slicesByName[existingSlice.Name] = epSlice
sliceNamesToUpdate.Insert(epSlice.Name)
} else {
// slices with no changes will be useful if there are leftover endpoints
sliceNamesUnchanged.Insert(existingSlice.Name)
}
}
numAdded := desiredSet.Len()
// 2. If we still have desired endpoints to add and slices marked for update,
// iterate through the slices and fill them up with the desired endpoints.
if desiredSet.Len() > 0 && sliceNamesToUpdate.Len() > 0 {
slices := []*discovery.EndpointSlice{}
for _, sliceName := range sliceNamesToUpdate.UnsortedList() {
slices = append(slices, slicesByName[sliceName])
}
// Sort endpoint slices by length so we're filling up the fullest ones
// first.
sort.Sort(endpointSliceEndpointLen(slices))
// Iterate through slices and fill them up with desired endpoints.
for _, slice := range slices {
for desiredSet.Len() > 0 && len(slice.Endpoints) < int(r.maxEndpointsPerSlice) {
endpoint, _ := desiredSet.PopAny()
slice.Endpoints = append(slice.Endpoints, *endpoint)
}
}
}
// 3. If there are still desired endpoints left at this point, we try to fit
// the endpoints in a single existing slice. If there are no slices with
// that capacity, we create new slices for the endpoints.
slicesToCreate := []*discovery.EndpointSlice{}
for desiredSet.Len() > 0 {
var sliceToFill *discovery.EndpointSlice
// If the remaining number of endpoints is smaller than the max
// endpoints per slice and we have slices that haven't already been
// filled, try to fit them in one.
if desiredSet.Len() < int(r.maxEndpointsPerSlice) && sliceNamesUnchanged.Len() > 0 {
unchangedSlices := []*discovery.EndpointSlice{}
for _, sliceName := range sliceNamesUnchanged.UnsortedList() {
unchangedSlices = append(unchangedSlices, slicesByName[sliceName])
}
sliceToFill = getSliceToFill(unchangedSlices, desiredSet.Len(), int(r.maxEndpointsPerSlice))
}
// If we didn't find a sliceToFill, generate a new empty one.
if sliceToFill == nil {
sliceToFill = newEndpointSlice(logger, service, endpointMeta, r.controllerName)
} else {
// deep copy required to modify this slice.
sliceToFill = sliceToFill.DeepCopy()
slicesByName[sliceToFill.Name] = sliceToFill
}
// Fill the slice up with remaining endpoints.
for desiredSet.Len() > 0 && len(sliceToFill.Endpoints) < int(r.maxEndpointsPerSlice) {
endpoint, _ := desiredSet.PopAny()
sliceToFill.Endpoints = append(sliceToFill.Endpoints, *endpoint)
}
// New slices will not have a Name set; use this to determine whether
// this should be an update or create.
if sliceToFill.Name != "" {
sliceNamesToUpdate.Insert(sliceToFill.Name)
sliceNamesUnchanged.Delete(sliceToFill.Name)
} else {
slicesToCreate = append(slicesToCreate, sliceToFill)
}
}
// Build slicesToUpdate from slice names.
slicesToUpdate := []*discovery.EndpointSlice{}
for _, sliceName := range sliceNamesToUpdate.UnsortedList() {
slicesToUpdate = append(slicesToUpdate, slicesByName[sliceName])
}
// Build slicesToDelete from slice names.
slicesToDelete := []*discovery.EndpointSlice{}
for _, sliceName := range sliceNamesToDelete.UnsortedList() {
slicesToDelete = append(slicesToDelete, slicesByName[sliceName])
}
return slicesToCreate, slicesToUpdate, slicesToDelete, numAdded, numRemoved
}
func (r *Reconciler) DeleteService(namespace, name string) {
r.metricsCache.DeleteService(types.NamespacedName{Namespace: namespace, Name: name})
}
func (r *Reconciler) GetControllerName() string {
return r.controllerName
}
// ManagedByChanged returns true if one of the provided EndpointSlices is
// managed by the EndpointSlice controller while the other is not.
func (r *Reconciler) ManagedByChanged(endpointSlice1, endpointSlice2 *discovery.EndpointSlice) bool {
return r.ManagedByController(endpointSlice1) != r.ManagedByController(endpointSlice2)
}
// ManagedByController returns true if the controller of the provided
// EndpointSlices is the EndpointSlice controller.
func (r *Reconciler) ManagedByController(endpointSlice *discovery.EndpointSlice) bool {
managedBy := endpointSlice.Labels[discovery.LabelManagedBy]
return managedBy == r.controllerName
}
| staging/src/k8s.io/endpointslice/reconciler.go | 1 | https://github.com/kubernetes/kubernetes/commit/ec6fd2befac30ebb7dfb55b94d8c9874489a3902 | [
0.016052622348070145,
0.0013853604905307293,
0.0001638388930587098,
0.0002579843858256936,
0.002843814203515649
] |
{
"id": 0,
"code_window": [
"\t\tc.nodeLister,\n",
"\t\tc.maxEndpointsPerSlice,\n",
"\t\tc.endpointSliceTracker,\n",
"\t\tc.topologyCache,\n",
"\t\tutilfeature.DefaultFeatureGate.Enabled(features.ServiceTrafficDistribution),\n",
"\t\tc.eventRecorder,\n",
"\t\tcontrollerName,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/controller/endpointslice/endpointslice_controller.go",
"type": "replace",
"edit_start_line_idx": 175
} | apiVersion: v1
kind: Pod
metadata:
name: restrictedvolumes9
spec:
containers:
- image: registry.k8s.io/pause
name: container1
securityContext:
allowPrivilegeEscalation: false
initContainers:
- image: registry.k8s.io/pause
name: initcontainer1
securityContext:
allowPrivilegeEscalation: false
securityContext:
runAsNonRoot: true
volumes:
- cephfs:
monitors:
- test
name: volume1
| staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.13/fail/restrictedvolumes9.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/ec6fd2befac30ebb7dfb55b94d8c9874489a3902 | [
0.00017668328655418009,
0.0001734253455651924,
0.00016842536570038646,
0.00017516738444101065,
0.0000035892746836907463
] |
{
"id": 0,
"code_window": [
"\t\tc.nodeLister,\n",
"\t\tc.maxEndpointsPerSlice,\n",
"\t\tc.endpointSliceTracker,\n",
"\t\tc.topologyCache,\n",
"\t\tutilfeature.DefaultFeatureGate.Enabled(features.ServiceTrafficDistribution),\n",
"\t\tc.eventRecorder,\n",
"\t\tcontrollerName,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/controller/endpointslice/endpointslice_controller.go",
"type": "replace",
"edit_start_line_idx": 175
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1
import (
v1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// CSINodeLister helps list CSINodes.
// All objects returned here must be treated as read-only.
type CSINodeLister interface {
// List lists all CSINodes in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1.CSINode, err error)
// Get retrieves the CSINode from the index for a given name.
// Objects returned here must be treated as read-only.
Get(name string) (*v1.CSINode, error)
CSINodeListerExpansion
}
// cSINodeLister implements the CSINodeLister interface.
type cSINodeLister struct {
indexer cache.Indexer
}
// NewCSINodeLister returns a new CSINodeLister.
func NewCSINodeLister(indexer cache.Indexer) CSINodeLister {
return &cSINodeLister{indexer: indexer}
}
// List lists all CSINodes in the indexer.
func (s *cSINodeLister) List(selector labels.Selector) (ret []*v1.CSINode, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1.CSINode))
})
return ret, err
}
// Get retrieves the CSINode from the index for a given name.
func (s *cSINodeLister) Get(name string) (*v1.CSINode, error) {
obj, exists, err := s.indexer.GetByKey(name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1.Resource("csinode"), name)
}
return obj.(*v1.CSINode), nil
}
| staging/src/k8s.io/client-go/listers/storage/v1/csinode.go | 0 | https://github.com/kubernetes/kubernetes/commit/ec6fd2befac30ebb7dfb55b94d8c9874489a3902 | [
0.000187592493603006,
0.00017386696708854288,
0.00016441245679743588,
0.0001732298405840993,
0.000006763125838915585
] |
{
"id": 0,
"code_window": [
"\t\tc.nodeLister,\n",
"\t\tc.maxEndpointsPerSlice,\n",
"\t\tc.endpointSliceTracker,\n",
"\t\tc.topologyCache,\n",
"\t\tutilfeature.DefaultFeatureGate.Enabled(features.ServiceTrafficDistribution),\n",
"\t\tc.eventRecorder,\n",
"\t\tcontrollerName,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/controller/endpointslice/endpointslice_controller.go",
"type": "replace",
"edit_start_line_idx": 175
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
"net/http"
v1 "k8s.io/api/rbac/v1"
"k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
type RbacV1Interface interface {
RESTClient() rest.Interface
ClusterRolesGetter
ClusterRoleBindingsGetter
RolesGetter
RoleBindingsGetter
}
// RbacV1Client is used to interact with features provided by the rbac.authorization.k8s.io group.
type RbacV1Client struct {
restClient rest.Interface
}
func (c *RbacV1Client) ClusterRoles() ClusterRoleInterface {
return newClusterRoles(c)
}
func (c *RbacV1Client) ClusterRoleBindings() ClusterRoleBindingInterface {
return newClusterRoleBindings(c)
}
func (c *RbacV1Client) Roles(namespace string) RoleInterface {
return newRoles(c, namespace)
}
func (c *RbacV1Client) RoleBindings(namespace string) RoleBindingInterface {
return newRoleBindings(c, namespace)
}
// NewForConfig creates a new RbacV1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*RbacV1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
httpClient, err := rest.HTTPClientFor(&config)
if err != nil {
return nil, err
}
return NewForConfigAndClient(&config, httpClient)
}
// NewForConfigAndClient creates a new RbacV1Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*RbacV1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
client, err := rest.RESTClientForConfigAndClient(&config, h)
if err != nil {
return nil, err
}
return &RbacV1Client{client}, nil
}
// NewForConfigOrDie creates a new RbacV1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *RbacV1Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
}
return client
}
// New creates a new RbacV1Client for the given RESTClient.
func New(c rest.Interface) *RbacV1Client {
return &RbacV1Client{c}
}
func setConfigDefaults(config *rest.Config) error {
gv := v1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
}
return nil
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *RbacV1Client) RESTClient() rest.Interface {
if c == nil {
return nil
}
return c.restClient
}
| staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go | 0 | https://github.com/kubernetes/kubernetes/commit/ec6fd2befac30ebb7dfb55b94d8c9874489a3902 | [
0.0030166683718562126,
0.0004698796838056296,
0.00016508449334651232,
0.00017503048002254218,
0.0007576635689474642
] |
{
"id": 1,
"code_window": [
"\t\tc.eventRecorder,\n",
"\t\tcontrollerName,\n",
"\t)\n",
"\n",
"\treturn c\n",
"}\n",
"\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tendpointslicerec.WithTrafficDistributionEnabled(utilfeature.DefaultFeatureGate.Enabled(features.ServiceTrafficDistribution)),\n"
],
"file_path": "pkg/controller/endpointslice/endpointslice_controller.go",
"type": "add",
"edit_start_line_idx": 178
} | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package endpointslice
import (
"context"
"fmt"
"sort"
"time"
corev1 "k8s.io/api/core/v1"
discovery "k8s.io/api/discovery/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/endpointslice/metrics"
"k8s.io/endpointslice/topologycache"
"k8s.io/endpointslice/trafficdist"
endpointsliceutil "k8s.io/endpointslice/util"
"k8s.io/klog/v2"
)
// Reconciler is responsible for transforming current EndpointSlice state into
// desired state
type Reconciler struct {
client clientset.Interface
nodeLister corelisters.NodeLister
maxEndpointsPerSlice int32
endpointSliceTracker *endpointsliceutil.EndpointSliceTracker
metricsCache *metrics.Cache
// topologyCache tracks the distribution of Nodes and endpoints across zones
// to enable TopologyAwareHints.
topologyCache *topologycache.TopologyCache
// trafficDistributionEnabled determines if the trafficDistribution field is to
// be considered when reconciling EndpointSlice hints.
trafficDistributionEnabled bool
// eventRecorder allows Reconciler to record and publish events.
eventRecorder record.EventRecorder
controllerName string
}
// endpointMeta includes the attributes we group slices on, this type helps with
// that logic in Reconciler
type endpointMeta struct {
ports []discovery.EndpointPort
addressType discovery.AddressType
}
// Reconcile takes a set of pods currently matching a service selector and
// compares them with the endpoints already present in any existing endpoint
// slices for the given service. It creates, updates, or deletes endpoint slices
// to ensure the desired set of pods are represented by endpoint slices.
func (r *Reconciler) Reconcile(logger klog.Logger, service *corev1.Service, pods []*corev1.Pod, existingSlices []*discovery.EndpointSlice, triggerTime time.Time) error {
slicesToDelete := []*discovery.EndpointSlice{} // slices that are no longer matching any address the service has
errs := []error{} // all errors generated in the process of reconciling
slicesByAddressType := make(map[discovery.AddressType][]*discovery.EndpointSlice) // slices by address type
// addresses that this service supports [o(1) find]
serviceSupportedAddressesTypes := getAddressTypesForService(logger, service)
// loop through slices identifying their address type.
// slices that no longer match address type supported by services
// go to delete, other slices go to the Reconciler machinery
// for further adjustment
for _, existingSlice := range existingSlices {
// service no longer supports that address type, add it to deleted slices
if !serviceSupportedAddressesTypes.Has(existingSlice.AddressType) {
if r.topologyCache != nil {
svcKey, err := ServiceControllerKey(existingSlice)
if err != nil {
logger.Info("Couldn't get key to remove EndpointSlice from topology cache", "existingSlice", existingSlice, "err", err)
} else {
r.topologyCache.RemoveHints(svcKey, existingSlice.AddressType)
}
}
slicesToDelete = append(slicesToDelete, existingSlice)
continue
}
// add list if it is not on our map
if _, ok := slicesByAddressType[existingSlice.AddressType]; !ok {
slicesByAddressType[existingSlice.AddressType] = make([]*discovery.EndpointSlice, 0, 1)
}
slicesByAddressType[existingSlice.AddressType] = append(slicesByAddressType[existingSlice.AddressType], existingSlice)
}
// reconcile for existing.
for addressType := range serviceSupportedAddressesTypes {
existingSlices := slicesByAddressType[addressType]
err := r.reconcileByAddressType(logger, service, pods, existingSlices, triggerTime, addressType)
if err != nil {
errs = append(errs, err)
}
}
// delete those which are of addressType that is no longer supported
// by the service
for _, sliceToDelete := range slicesToDelete {
err := r.client.DiscoveryV1().EndpointSlices(service.Namespace).Delete(context.TODO(), sliceToDelete.Name, metav1.DeleteOptions{})
if err != nil {
errs = append(errs, fmt.Errorf("error deleting %s EndpointSlice for Service %s/%s: %w", sliceToDelete.Name, service.Namespace, service.Name, err))
} else {
r.endpointSliceTracker.ExpectDeletion(sliceToDelete)
metrics.EndpointSliceChanges.WithLabelValues("delete").Inc()
}
}
return utilerrors.NewAggregate(errs)
}
// reconcileByAddressType takes a set of pods currently matching a service selector and
// compares them with the endpoints already present in any existing endpoint
// slices (by address type) for the given service. It creates, updates, or deletes endpoint slices
// to ensure the desired set of pods are represented by endpoint slices.
func (r *Reconciler) reconcileByAddressType(logger klog.Logger, service *corev1.Service, pods []*corev1.Pod, existingSlices []*discovery.EndpointSlice, triggerTime time.Time, addressType discovery.AddressType) error {
errs := []error{}
slicesToCreate := []*discovery.EndpointSlice{}
slicesToUpdate := []*discovery.EndpointSlice{}
slicesToDelete := []*discovery.EndpointSlice{}
events := []*topologycache.EventBuilder{}
// Build data structures for existing state.
existingSlicesByPortMap := map[endpointsliceutil.PortMapKey][]*discovery.EndpointSlice{}
for _, existingSlice := range existingSlices {
if ownedBy(existingSlice, service) {
epHash := endpointsliceutil.NewPortMapKey(existingSlice.Ports)
existingSlicesByPortMap[epHash] = append(existingSlicesByPortMap[epHash], existingSlice)
} else {
slicesToDelete = append(slicesToDelete, existingSlice)
}
}
// Build data structures for desired state.
desiredMetaByPortMap := map[endpointsliceutil.PortMapKey]*endpointMeta{}
desiredEndpointsByPortMap := map[endpointsliceutil.PortMapKey]endpointsliceutil.EndpointSet{}
for _, pod := range pods {
if !endpointsliceutil.ShouldPodBeInEndpoints(pod, true) {
continue
}
endpointPorts := getEndpointPorts(logger, service, pod)
epHash := endpointsliceutil.NewPortMapKey(endpointPorts)
if _, ok := desiredEndpointsByPortMap[epHash]; !ok {
desiredEndpointsByPortMap[epHash] = endpointsliceutil.EndpointSet{}
}
if _, ok := desiredMetaByPortMap[epHash]; !ok {
desiredMetaByPortMap[epHash] = &endpointMeta{
addressType: addressType,
ports: endpointPorts,
}
}
node, err := r.nodeLister.Get(pod.Spec.NodeName)
if err != nil {
// we are getting the information from the local informer;
// an error other than IsNotFound should not happen
if !errors.IsNotFound(err) {
return err
}
// If the Node specified by the Pod doesn't exist we want to requeue the Service so we
// retry later, but also update the EndpointSlice without the problematic Pod.
// Theoretically, the pod Garbage Collector will remove the Pod, but we want to avoid
// situations where a reference from a Pod to a missing node can leave the EndpointSlice
// stuck forever.
// On the other hand, if service.Spec.PublishNotReadyAddresses is set we just add the
// Pod, since the user is explicitly indicating that the Pod address should be published.
if !service.Spec.PublishNotReadyAddresses {
logger.Info("skipping Pod for Service, Node not found", "pod", klog.KObj(pod), "service", klog.KObj(service), "node", klog.KRef("", pod.Spec.NodeName))
errs = append(errs, fmt.Errorf("skipping Pod %s for Service %s/%s: Node %s Not Found", pod.Name, service.Namespace, service.Name, pod.Spec.NodeName))
continue
}
}
endpoint := podToEndpoint(pod, node, service, addressType)
if len(endpoint.Addresses) > 0 {
desiredEndpointsByPortMap[epHash].Insert(&endpoint)
}
}
spMetrics := metrics.NewServicePortCache()
totalAdded := 0
totalRemoved := 0
// Determine changes necessary for each group of slices by port map.
for portMap, desiredEndpoints := range desiredEndpointsByPortMap {
numEndpoints := len(desiredEndpoints)
pmSlicesToCreate, pmSlicesToUpdate, pmSlicesToDelete, added, removed := r.reconcileByPortMapping(
logger, service, existingSlicesByPortMap[portMap], desiredEndpoints, desiredMetaByPortMap[portMap])
totalAdded += added
totalRemoved += removed
spMetrics.Set(portMap, metrics.EfficiencyInfo{
Endpoints: numEndpoints,
Slices: len(existingSlicesByPortMap[portMap]) + len(pmSlicesToCreate) - len(pmSlicesToDelete),
})
slicesToCreate = append(slicesToCreate, pmSlicesToCreate...)
slicesToUpdate = append(slicesToUpdate, pmSlicesToUpdate...)
slicesToDelete = append(slicesToDelete, pmSlicesToDelete...)
}
// If there are unique sets of ports that are no longer desired, mark
// the corresponding endpoint slices for deletion.
for portMap, existingSlices := range existingSlicesByPortMap {
if _, ok := desiredEndpointsByPortMap[portMap]; !ok {
slicesToDelete = append(slicesToDelete, existingSlices...)
}
}
	// If no endpoint slices would otherwise exist, we need to add a placeholder.
if len(existingSlices) == len(slicesToDelete) && len(slicesToCreate) < 1 {
// Check for existing placeholder slice outside of the core control flow
placeholderSlice := newEndpointSlice(logger, service, &endpointMeta{ports: []discovery.EndpointPort{}, addressType: addressType}, r.controllerName)
if len(slicesToDelete) == 1 && placeholderSliceCompare.DeepEqual(slicesToDelete[0], placeholderSlice) {
			// We are about to unnecessarily delete/recreate the placeholder; remove it now.
slicesToDelete = slicesToDelete[:0]
} else {
slicesToCreate = append(slicesToCreate, placeholderSlice)
}
spMetrics.Set(endpointsliceutil.NewPortMapKey(placeholderSlice.Ports), metrics.EfficiencyInfo{
Endpoints: 0,
Slices: 1,
})
}
metrics.EndpointsAddedPerSync.WithLabelValues().Observe(float64(totalAdded))
metrics.EndpointsRemovedPerSync.WithLabelValues().Observe(float64(totalRemoved))
serviceNN := types.NamespacedName{Name: service.Name, Namespace: service.Namespace}
r.metricsCache.UpdateServicePortCache(serviceNN, spMetrics)
// Topology hints are assigned per address type. This means it is
// theoretically possible for endpoints of one address type to be assigned
	// hints while endpoints of another address type are not.
si := &topologycache.SliceInfo{
ServiceKey: fmt.Sprintf("%s/%s", service.Namespace, service.Name),
AddressType: addressType,
ToCreate: slicesToCreate,
ToUpdate: slicesToUpdate,
Unchanged: unchangedSlices(existingSlices, slicesToUpdate, slicesToDelete),
}
canUseTrafficDistribution := r.trafficDistributionEnabled && !hintsEnabled(service.Annotations)
// Check if we need to add/remove hints based on the topology annotation.
//
// This if/else clause can be removed once the annotation has been deprecated.
// Ref: https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/4444-service-routing-preference
if r.topologyCache != nil && hintsEnabled(service.Annotations) {
// Reaching this point means that we need to configure hints based on the
// topology annotation.
slicesToCreate, slicesToUpdate, events = r.topologyCache.AddHints(logger, si)
} else {
// Reaching this point means that we will not be configuring hints based on
// the topology annotation. We need to do 2 things:
// 1. If hints were added previously based on the annotation, we need to
// clear up any locally cached hints from the topologyCache object.
// 2. Optionally remove the actual hints from the EndpointSlice if we know
// that the `trafficDistribution` field is also NOT being used. In other
// words, if we know that the `trafficDistribution` field has been
// correctly configured by the customer, we DO NOT remove the hints and
// wait for the trafficDist handlers to correctly configure them. Always
// unconditionally removing hints here (and letting them get readded by
// the trafficDist) adds extra overhead in the form of DeepCopy (done
// within topologyCache.RemoveHints)
// Check 1.
if r.topologyCache != nil {
if r.topologyCache.HasPopulatedHints(si.ServiceKey) {
logger.Info("TopologyAwareHints annotation has changed, removing hints", "serviceKey", si.ServiceKey, "addressType", si.AddressType)
events = append(events, &topologycache.EventBuilder{
EventType: corev1.EventTypeWarning,
Reason: "TopologyAwareHintsDisabled",
Message: topologycache.FormatWithAddressType(topologycache.TopologyAwareHintsDisabled, si.AddressType),
})
}
r.topologyCache.RemoveHints(si.ServiceKey, addressType)
}
// Check 2.
if !canUseTrafficDistribution {
slicesToCreate, slicesToUpdate = topologycache.RemoveHintsFromSlices(si)
}
}
if canUseTrafficDistribution {
r.metricsCache.UpdateTrafficDistributionForService(serviceNN, service.Spec.TrafficDistribution)
slicesToCreate, slicesToUpdate, _ = trafficdist.ReconcileHints(service.Spec.TrafficDistribution, slicesToCreate, slicesToUpdate, unchangedSlices(existingSlices, slicesToUpdate, slicesToDelete))
} else {
r.metricsCache.UpdateTrafficDistributionForService(serviceNN, nil)
}
err := r.finalize(service, slicesToCreate, slicesToUpdate, slicesToDelete, triggerTime)
if err != nil {
errs = append(errs, err)
}
for _, event := range events {
r.eventRecorder.Event(service, event.EventType, event.Reason, event.Message)
}
return utilerrors.NewAggregate(errs)
}
func NewReconciler(client clientset.Interface, nodeLister corelisters.NodeLister, maxEndpointsPerSlice int32, endpointSliceTracker *endpointsliceutil.EndpointSliceTracker, topologyCache *topologycache.TopologyCache, trafficDistributionEnabled bool, eventRecorder record.EventRecorder, controllerName string) *Reconciler {
return &Reconciler{
client: client,
nodeLister: nodeLister,
maxEndpointsPerSlice: maxEndpointsPerSlice,
endpointSliceTracker: endpointSliceTracker,
metricsCache: metrics.NewCache(maxEndpointsPerSlice),
topologyCache: topologyCache,
trafficDistributionEnabled: trafficDistributionEnabled,
eventRecorder: eventRecorder,
controllerName: controllerName,
}
}
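// Illustrative usage sketch (the variable names below are hypothetical wiring,
// not taken from this file; in practice they come from the controller's setup
// code):
//
//	r := NewReconciler(
//		client,                // clientset.Interface
//		nodeInformer.Lister(), // corelisters.NodeLister
//		100,                   // maxEndpointsPerSlice
//		tracker,               // *endpointsliceutil.EndpointSliceTracker
//		nil,                   // topologyCache: nil disables topology-aware hints
//		false,                 // trafficDistributionEnabled
//		recorder,              // record.EventRecorder
//		"endpointslice-controller.k8s.io",
//	)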
// placeholderSliceCompare is a conversion func for comparing two placeholder endpoint slices.
// It only compares the specific fields we care about.
var placeholderSliceCompare = conversion.EqualitiesOrDie(
func(a, b metav1.OwnerReference) bool {
return a.String() == b.String()
},
func(a, b metav1.ObjectMeta) bool {
if a.Namespace != b.Namespace {
return false
}
for k, v := range a.Labels {
if b.Labels[k] != v {
return false
}
}
for k, v := range b.Labels {
if a.Labels[k] != v {
return false
}
}
return true
},
)
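// Illustrative note (not upstream text): with the two custom funcs registered
// above, placeholderSliceCompare.DeepEqual treats two placeholder slices as
// equal when they agree on namespace, labels, and the string form of their
// owner references; other ObjectMeta fields such as name, UID, and
// resourceVersion are ignored, while non-metadata fields like Ports and
// AddressType are still compared field by field.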
// finalize creates, updates, and deletes slices as specified
func (r *Reconciler) finalize(
service *corev1.Service,
slicesToCreate,
slicesToUpdate,
slicesToDelete []*discovery.EndpointSlice,
triggerTime time.Time,
) error {
// If there are slices to create and delete, change the creates to updates
// of the slices that would otherwise be deleted.
for i := 0; i < len(slicesToDelete); {
if len(slicesToCreate) == 0 {
break
}
sliceToDelete := slicesToDelete[i]
slice := slicesToCreate[len(slicesToCreate)-1]
// Only update EndpointSlices that are owned by this Service and have
// the same AddressType. We need to avoid updating EndpointSlices that
// are being garbage collected for an old Service with the same name.
// The AddressType field is immutable. Since Services also consider
// IPFamily immutable, the only case where this should matter will be
// the migration from IP to IPv4 and IPv6 AddressTypes, where there's a
// chance EndpointSlices with an IP AddressType would otherwise be
// updated to IPv4 or IPv6 without this check.
if sliceToDelete.AddressType == slice.AddressType && ownedBy(sliceToDelete, service) {
slice.Name = sliceToDelete.Name
slicesToCreate = slicesToCreate[:len(slicesToCreate)-1]
slicesToUpdate = append(slicesToUpdate, slice)
slicesToDelete = append(slicesToDelete[:i], slicesToDelete[i+1:]...)
} else {
i++
}
}
// Don't create new EndpointSlices if the Service is pending deletion. This
// is to avoid a potential race condition with the garbage collector where
// it tries to delete EndpointSlices as this controller replaces them.
if service.DeletionTimestamp == nil {
for _, endpointSlice := range slicesToCreate {
addTriggerTimeAnnotation(endpointSlice, triggerTime)
createdSlice, err := r.client.DiscoveryV1().EndpointSlices(service.Namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{})
if err != nil {
// If the namespace is terminating, creates will continue to fail. Simply drop the item.
if errors.HasStatusCause(err, corev1.NamespaceTerminatingCause) {
return nil
}
return fmt.Errorf("failed to create EndpointSlice for Service %s/%s: %v", service.Namespace, service.Name, err)
}
r.endpointSliceTracker.Update(createdSlice)
metrics.EndpointSliceChanges.WithLabelValues("create").Inc()
}
}
for _, endpointSlice := range slicesToUpdate {
addTriggerTimeAnnotation(endpointSlice, triggerTime)
updatedSlice, err := r.client.DiscoveryV1().EndpointSlices(service.Namespace).Update(context.TODO(), endpointSlice, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("failed to update %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err)
}
r.endpointSliceTracker.Update(updatedSlice)
metrics.EndpointSliceChanges.WithLabelValues("update").Inc()
}
for _, endpointSlice := range slicesToDelete {
err := r.client.DiscoveryV1().EndpointSlices(service.Namespace).Delete(context.TODO(), endpointSlice.Name, metav1.DeleteOptions{})
if err != nil {
return fmt.Errorf("failed to delete %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err)
}
r.endpointSliceTracker.ExpectDeletion(endpointSlice)
metrics.EndpointSliceChanges.WithLabelValues("delete").Inc()
}
topologyLabel := "Disabled"
if r.topologyCache != nil && hintsEnabled(service.Annotations) {
topologyLabel = "Auto"
}
var trafficDistribution string
if r.trafficDistributionEnabled && !hintsEnabled(service.Annotations) {
if service.Spec.TrafficDistribution != nil && *service.Spec.TrafficDistribution == corev1.ServiceTrafficDistributionPreferClose {
trafficDistribution = *service.Spec.TrafficDistribution
}
}
numSlicesChanged := len(slicesToCreate) + len(slicesToUpdate) + len(slicesToDelete)
metrics.EndpointSlicesChangedPerSync.WithLabelValues(topologyLabel, trafficDistribution).Observe(float64(numSlicesChanged))
return nil
}
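// Worked example (illustrative): if finalize receives one slice to create and
// one to delete, both owned by the Service and sharing an AddressType, the
// loop at the top recycles the doomed slice's name onto the new slice, so the
// API server sees a single Update instead of a Delete followed by a Create;
// that means fewer watch events and no window in which the Service briefly
// has no slice for that port mapping.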
// reconcileByPortMapping compares the endpoints found in existing slices with
// the list of desired endpoints and returns lists of slices to create, update,
// and delete. It also checks that the slices mirror the parent service's labels.
// The logic is split up into several main steps:
// 1. Iterate through existing slices, delete endpoints that are no longer
// desired and update matching endpoints that have changed. It also checks
//    if the slices have the labels of the parent service, and updates them if not.
// 2. Iterate through slices that have been modified in 1 and fill them up with
// any remaining desired endpoints.
// 3. If there are still desired endpoints left, try to fit them into a previously
// unchanged slice and/or create new ones.
func (r *Reconciler) reconcileByPortMapping(
logger klog.Logger,
service *corev1.Service,
existingSlices []*discovery.EndpointSlice,
desiredSet endpointsliceutil.EndpointSet,
endpointMeta *endpointMeta,
) ([]*discovery.EndpointSlice, []*discovery.EndpointSlice, []*discovery.EndpointSlice, int, int) {
slicesByName := map[string]*discovery.EndpointSlice{}
sliceNamesUnchanged := sets.New[string]()
sliceNamesToUpdate := sets.New[string]()
sliceNamesToDelete := sets.New[string]()
numRemoved := 0
// 1. Iterate through existing slices to delete endpoints no longer desired
// and update endpoints that have changed
for _, existingSlice := range existingSlices {
slicesByName[existingSlice.Name] = existingSlice
newEndpoints := []discovery.Endpoint{}
endpointUpdated := false
for _, endpoint := range existingSlice.Endpoints {
got := desiredSet.Get(&endpoint)
// If endpoint is desired add it to list of endpoints to keep.
if got != nil {
newEndpoints = append(newEndpoints, *got)
				// If the existing version of the endpoint doesn't match the desired
				// version, set endpointUpdated to ensure endpoint changes are persisted.
if !endpointsliceutil.EndpointsEqualBeyondHash(got, &endpoint) {
endpointUpdated = true
}
// once an endpoint has been placed/found in a slice, it no
// longer needs to be handled
desiredSet.Delete(&endpoint)
}
}
// generate the slice labels and check if parent labels have changed
labels, labelsChanged := setEndpointSliceLabels(logger, existingSlice, service, r.controllerName)
// If an endpoint was updated or removed, mark for update or delete
if endpointUpdated || len(existingSlice.Endpoints) != len(newEndpoints) {
if len(existingSlice.Endpoints) > len(newEndpoints) {
numRemoved += len(existingSlice.Endpoints) - len(newEndpoints)
}
if len(newEndpoints) == 0 {
// if no endpoints desired in this slice, mark for deletion
sliceNamesToDelete.Insert(existingSlice.Name)
} else {
// otherwise, copy and mark for update
epSlice := existingSlice.DeepCopy()
epSlice.Endpoints = newEndpoints
epSlice.Labels = labels
slicesByName[existingSlice.Name] = epSlice
sliceNamesToUpdate.Insert(epSlice.Name)
}
} else if labelsChanged {
// if labels have changed, copy and mark for update
epSlice := existingSlice.DeepCopy()
epSlice.Labels = labels
slicesByName[existingSlice.Name] = epSlice
sliceNamesToUpdate.Insert(epSlice.Name)
} else {
// slices with no changes will be useful if there are leftover endpoints
sliceNamesUnchanged.Insert(existingSlice.Name)
}
}
numAdded := desiredSet.Len()
// 2. If we still have desired endpoints to add and slices marked for update,
// iterate through the slices and fill them up with the desired endpoints.
if desiredSet.Len() > 0 && sliceNamesToUpdate.Len() > 0 {
slices := []*discovery.EndpointSlice{}
for _, sliceName := range sliceNamesToUpdate.UnsortedList() {
slices = append(slices, slicesByName[sliceName])
}
// Sort endpoint slices by length so we're filling up the fullest ones
// first.
sort.Sort(endpointSliceEndpointLen(slices))
// Iterate through slices and fill them up with desired endpoints.
for _, slice := range slices {
for desiredSet.Len() > 0 && len(slice.Endpoints) < int(r.maxEndpointsPerSlice) {
endpoint, _ := desiredSet.PopAny()
slice.Endpoints = append(slice.Endpoints, *endpoint)
}
}
}
// 3. If there are still desired endpoints left at this point, we try to fit
// the endpoints in a single existing slice. If there are no slices with
// that capacity, we create new slices for the endpoints.
slicesToCreate := []*discovery.EndpointSlice{}
for desiredSet.Len() > 0 {
var sliceToFill *discovery.EndpointSlice
		// If the remaining number of endpoints is smaller than the max
// endpoints per slice and we have slices that haven't already been
// filled, try to fit them in one.
if desiredSet.Len() < int(r.maxEndpointsPerSlice) && sliceNamesUnchanged.Len() > 0 {
unchangedSlices := []*discovery.EndpointSlice{}
for _, sliceName := range sliceNamesUnchanged.UnsortedList() {
unchangedSlices = append(unchangedSlices, slicesByName[sliceName])
}
sliceToFill = getSliceToFill(unchangedSlices, desiredSet.Len(), int(r.maxEndpointsPerSlice))
}
// If we didn't find a sliceToFill, generate a new empty one.
if sliceToFill == nil {
sliceToFill = newEndpointSlice(logger, service, endpointMeta, r.controllerName)
} else {
// deep copy required to modify this slice.
sliceToFill = sliceToFill.DeepCopy()
slicesByName[sliceToFill.Name] = sliceToFill
}
// Fill the slice up with remaining endpoints.
for desiredSet.Len() > 0 && len(sliceToFill.Endpoints) < int(r.maxEndpointsPerSlice) {
endpoint, _ := desiredSet.PopAny()
sliceToFill.Endpoints = append(sliceToFill.Endpoints, *endpoint)
}
		// New slices will not have a Name set; use this to determine whether
// this should be an update or create.
if sliceToFill.Name != "" {
sliceNamesToUpdate.Insert(sliceToFill.Name)
sliceNamesUnchanged.Delete(sliceToFill.Name)
} else {
slicesToCreate = append(slicesToCreate, sliceToFill)
}
}
// Build slicesToUpdate from slice names.
slicesToUpdate := []*discovery.EndpointSlice{}
for _, sliceName := range sliceNamesToUpdate.UnsortedList() {
slicesToUpdate = append(slicesToUpdate, slicesByName[sliceName])
}
// Build slicesToDelete from slice names.
slicesToDelete := []*discovery.EndpointSlice{}
for _, sliceName := range sliceNamesToDelete.UnsortedList() {
slicesToDelete = append(slicesToDelete, slicesByName[sliceName])
}
return slicesToCreate, slicesToUpdate, slicesToDelete, numAdded, numRemoved
}
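// Worked example (illustrative, maxEndpointsPerSlice=2): one existing slice
// holds endpoints {A, B} and the desired set is {A, C, D}.
//   Step 1 keeps A, drops B, and marks the slice for update (numRemoved=1);
//   the desired set is now {C, D}, so numAdded=2.
//   Step 2 tops the updated slice back up to capacity with one popped
//   endpoint, say C.
//   Step 3 finds no unchanged slice to fill, so it creates a new slice for D.
// Result: slicesToUpdate=[{A, C}], slicesToCreate=[{D}], slicesToDelete=[].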
func (r *Reconciler) DeleteService(namespace, name string) {
r.metricsCache.DeleteService(types.NamespacedName{Namespace: namespace, Name: name})
}
func (r *Reconciler) GetControllerName() string {
return r.controllerName
}
// ManagedByChanged returns true if one of the provided EndpointSlices is
// managed by the EndpointSlice controller while the other is not.
func (r *Reconciler) ManagedByChanged(endpointSlice1, endpointSlice2 *discovery.EndpointSlice) bool {
return r.ManagedByController(endpointSlice1) != r.ManagedByController(endpointSlice2)
}
// ManagedByController returns true if the controller of the provided
// EndpointSlices is the EndpointSlice controller.
func (r *Reconciler) ManagedByController(endpointSlice *discovery.EndpointSlice) bool {
managedBy := endpointSlice.Labels[discovery.LabelManagedBy]
return managedBy == r.controllerName
}
| staging/src/k8s.io/endpointslice/reconciler.go | 1 | https://github.com/kubernetes/kubernetes/commit/ec6fd2befac30ebb7dfb55b94d8c9874489a3902 | [
0.020573200657963753,
0.0006776320515200496,
0.0001646680320845917,
0.000169869395904243,
0.002725253812968731
] |
{
"id": 1,
"code_window": [
"\t\tc.eventRecorder,\n",
"\t\tcontrollerName,\n",
"\t)\n",
"\n",
"\treturn c\n",
"}\n",
"\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tendpointslicerec.WithTrafficDistributionEnabled(utilfeature.DefaultFeatureGate.Enabled(features.ServiceTrafficDistribution)),\n"
],
"file_path": "pkg/controller/endpointslice/endpointslice_controller.go",
"type": "add",
"edit_start_line_idx": 178
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"os"
"strconv"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/utils/exec"
)
// FlexVolumeMounter is the disk that will be exposed by this plugin.
type flexVolumeMounter struct {
*flexVolume
// Runner used to setup the volume.
runner exec.Interface
// the considered volume spec
spec *volume.Spec
readOnly bool
}
var _ volume.Mounter = &flexVolumeMounter{}
// Mounter interface
// SetUp creates a new directory.
func (f *flexVolumeMounter) SetUp(mounterArgs volume.MounterArgs) error {
return f.SetUpAt(f.GetPath(), mounterArgs)
}
// SetUpAt creates a new directory.
func (f *flexVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
// Mount only once.
alreadyMounted, err := prepareForMount(f.mounter, dir)
if err != nil {
return err
}
if alreadyMounted {
return nil
}
call := f.plugin.NewDriverCall(mountCmd)
// Interface parameters
call.Append(dir)
extraOptions := make(map[string]string)
// pod metadata
extraOptions[optionKeyPodName] = f.podName
extraOptions[optionKeyPodNamespace] = f.podNamespace
extraOptions[optionKeyPodUID] = string(f.podUID)
// service account metadata
extraOptions[optionKeyServiceAccountName] = f.podServiceAccountName
// Extract secret and pass it as options.
if err := addSecretsToOptions(extraOptions, f.spec, f.podNamespace, f.driverName, f.plugin.host); err != nil {
os.Remove(dir)
return err
}
// Implicit parameters
if mounterArgs.FsGroup != nil {
extraOptions[optionFSGroup] = strconv.FormatInt(int64(*mounterArgs.FsGroup), 10)
}
call.AppendSpec(f.spec, f.plugin.host, extraOptions)
_, err = call.Run()
if isCmdNotSupportedErr(err) {
err = (*mounterDefaults)(f).SetUpAt(dir, mounterArgs)
}
if err != nil {
os.Remove(dir)
return err
}
if !f.readOnly {
if f.plugin.capabilities.FSGroup {
// fullPluginName helps to distinguish different driver from flex volume plugin
volume.SetVolumeOwnership(f, dir, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(f.plugin, f.spec))
}
}
return nil
}
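// Illustrative sketch (hypothetical driver name and values): the DriverCall
// assembled above ultimately shells out to the flex driver binary, roughly
//
//	.../volume/exec/vendor~driver/driver mount <dir> '{"kubernetes.io/pod.name":"mypod", ...}'
//
// where the trailing JSON argument carries the volume spec options merged
// with the extraOptions built in this function.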
// GetAttributes gets the flex volume attributes. The attributes will be queried
// using plugin callout after we finalize the callout syntax.
func (f *flexVolumeMounter) GetAttributes() volume.Attributes {
return (*mounterDefaults)(f).GetAttributes()
}
| pkg/volume/flexvolume/mounter.go | 0 | https://github.com/kubernetes/kubernetes/commit/ec6fd2befac30ebb7dfb55b94d8c9874489a3902 | [
0.005395421292632818,
0.0006475703557953238,
0.00016756706463638693,
0.000172886298969388,
0.0015014056116342545
] |
{
"id": 1,
"code_window": [
"\t\tc.eventRecorder,\n",
"\t\tcontrollerName,\n",
"\t)\n",
"\n",
"\treturn c\n",
"}\n",
"\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tendpointslicerec.WithTrafficDistributionEnabled(utilfeature.DefaultFeatureGate.Enabled(features.ServiceTrafficDistribution)),\n"
],
"file_path": "pkg/controller/endpointslice/endpointslice_controller.go",
"type": "add",
"edit_start_line_idx": 178
} | file {
name: "github.com/containerd/cgroups/stats/v1/metrics.proto"
package: "io.containerd.cgroups.v1"
dependency: "gogoproto/gogo.proto"
message_type {
name: "Metrics"
field {
name: "hugetlb"
number: 1
label: LABEL_REPEATED
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.HugetlbStat"
json_name: "hugetlb"
}
field {
name: "pids"
number: 2
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.PidsStat"
json_name: "pids"
}
field {
name: "cpu"
number: 3
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.CPUStat"
options {
65004: "CPU"
}
json_name: "cpu"
}
field {
name: "memory"
number: 4
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.MemoryStat"
json_name: "memory"
}
field {
name: "blkio"
number: 5
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.BlkIOStat"
json_name: "blkio"
}
field {
name: "rdma"
number: 6
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.RdmaStat"
json_name: "rdma"
}
field {
name: "network"
number: 7
label: LABEL_REPEATED
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.NetworkStat"
json_name: "network"
}
field {
name: "cgroup_stats"
number: 8
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.CgroupStats"
json_name: "cgroupStats"
}
field {
name: "memory_oom_control"
number: 9
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.MemoryOomControl"
json_name: "memoryOomControl"
}
}
message_type {
name: "HugetlbStat"
field {
name: "usage"
number: 1
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "usage"
}
field {
name: "max"
number: 2
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "max"
}
field {
name: "failcnt"
number: 3
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "failcnt"
}
field {
name: "pagesize"
number: 4
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "pagesize"
}
}
message_type {
name: "PidsStat"
field {
name: "current"
number: 1
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "current"
}
field {
name: "limit"
number: 2
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "limit"
}
}
message_type {
name: "CPUStat"
field {
name: "usage"
number: 1
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.CPUUsage"
json_name: "usage"
}
field {
name: "throttling"
number: 2
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.Throttle"
json_name: "throttling"
}
}
message_type {
name: "CPUUsage"
field {
name: "total"
number: 1
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "total"
}
field {
name: "kernel"
number: 2
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "kernel"
}
field {
name: "user"
number: 3
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "user"
}
field {
name: "per_cpu"
number: 4
label: LABEL_REPEATED
type: TYPE_UINT64
options {
65004: "PerCPU"
}
json_name: "perCpu"
}
}
message_type {
name: "Throttle"
field {
name: "periods"
number: 1
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "periods"
}
field {
name: "throttled_periods"
number: 2
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "throttledPeriods"
}
field {
name: "throttled_time"
number: 3
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "throttledTime"
}
}
message_type {
name: "MemoryStat"
field {
name: "cache"
number: 1
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "cache"
}
field {
name: "rss"
number: 2
label: LABEL_OPTIONAL
type: TYPE_UINT64
options {
65004: "RSS"
}
json_name: "rss"
}
field {
name: "rss_huge"
number: 3
label: LABEL_OPTIONAL
type: TYPE_UINT64
options {
65004: "RSSHuge"
}
json_name: "rssHuge"
}
field {
name: "mapped_file"
number: 4
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "mappedFile"
}
field {
name: "dirty"
number: 5
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "dirty"
}
field {
name: "writeback"
number: 6
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "writeback"
}
field {
name: "pg_pg_in"
number: 7
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "pgPgIn"
}
field {
name: "pg_pg_out"
number: 8
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "pgPgOut"
}
field {
name: "pg_fault"
number: 9
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "pgFault"
}
field {
name: "pg_maj_fault"
number: 10
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "pgMajFault"
}
field {
name: "inactive_anon"
number: 11
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "inactiveAnon"
}
field {
name: "active_anon"
number: 12
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "activeAnon"
}
field {
name: "inactive_file"
number: 13
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "inactiveFile"
}
field {
name: "active_file"
number: 14
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "activeFile"
}
field {
name: "unevictable"
number: 15
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "unevictable"
}
field {
name: "hierarchical_memory_limit"
number: 16
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "hierarchicalMemoryLimit"
}
field {
name: "hierarchical_swap_limit"
number: 17
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "hierarchicalSwapLimit"
}
field {
name: "total_cache"
number: 18
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "totalCache"
}
field {
name: "total_rss"
number: 19
label: LABEL_OPTIONAL
type: TYPE_UINT64
options {
65004: "TotalRSS"
}
json_name: "totalRss"
}
field {
name: "total_rss_huge"
number: 20
label: LABEL_OPTIONAL
type: TYPE_UINT64
options {
65004: "TotalRSSHuge"
}
json_name: "totalRssHuge"
}
field {
name: "total_mapped_file"
number: 21
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "totalMappedFile"
}
field {
name: "total_dirty"
number: 22
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "totalDirty"
}
field {
name: "total_writeback"
number: 23
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "totalWriteback"
}
field {
name: "total_pg_pg_in"
number: 24
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "totalPgPgIn"
}
field {
name: "total_pg_pg_out"
number: 25
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "totalPgPgOut"
}
field {
name: "total_pg_fault"
number: 26
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "totalPgFault"
}
field {
name: "total_pg_maj_fault"
number: 27
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "totalPgMajFault"
}
field {
name: "total_inactive_anon"
number: 28
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "totalInactiveAnon"
}
field {
name: "total_active_anon"
number: 29
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "totalActiveAnon"
}
field {
name: "total_inactive_file"
number: 30
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "totalInactiveFile"
}
field {
name: "total_active_file"
number: 31
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "totalActiveFile"
}
field {
name: "total_unevictable"
number: 32
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "totalUnevictable"
}
field {
name: "usage"
number: 33
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.MemoryEntry"
json_name: "usage"
}
field {
name: "swap"
number: 34
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.MemoryEntry"
json_name: "swap"
}
field {
name: "kernel"
number: 35
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.MemoryEntry"
json_name: "kernel"
}
field {
name: "kernel_tcp"
number: 36
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.MemoryEntry"
options {
65004: "KernelTCP"
}
json_name: "kernelTcp"
}
}
message_type {
name: "MemoryEntry"
field {
name: "limit"
number: 1
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "limit"
}
field {
name: "usage"
number: 2
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "usage"
}
field {
name: "max"
number: 3
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "max"
}
field {
name: "failcnt"
number: 4
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "failcnt"
}
}
message_type {
name: "MemoryOomControl"
field {
name: "oom_kill_disable"
number: 1
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "oomKillDisable"
}
field {
name: "under_oom"
number: 2
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "underOom"
}
field {
name: "oom_kill"
number: 3
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "oomKill"
}
}
message_type {
name: "BlkIOStat"
field {
name: "io_service_bytes_recursive"
number: 1
label: LABEL_REPEATED
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.BlkIOEntry"
json_name: "ioServiceBytesRecursive"
}
field {
name: "io_serviced_recursive"
number: 2
label: LABEL_REPEATED
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.BlkIOEntry"
json_name: "ioServicedRecursive"
}
field {
name: "io_queued_recursive"
number: 3
label: LABEL_REPEATED
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.BlkIOEntry"
json_name: "ioQueuedRecursive"
}
field {
name: "io_service_time_recursive"
number: 4
label: LABEL_REPEATED
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.BlkIOEntry"
json_name: "ioServiceTimeRecursive"
}
field {
name: "io_wait_time_recursive"
number: 5
label: LABEL_REPEATED
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.BlkIOEntry"
json_name: "ioWaitTimeRecursive"
}
field {
name: "io_merged_recursive"
number: 6
label: LABEL_REPEATED
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.BlkIOEntry"
json_name: "ioMergedRecursive"
}
field {
name: "io_time_recursive"
number: 7
label: LABEL_REPEATED
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.BlkIOEntry"
json_name: "ioTimeRecursive"
}
field {
name: "sectors_recursive"
number: 8
label: LABEL_REPEATED
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.BlkIOEntry"
json_name: "sectorsRecursive"
}
}
message_type {
name: "BlkIOEntry"
field {
name: "op"
number: 1
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "op"
}
field {
name: "device"
number: 2
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "device"
}
field {
name: "major"
number: 3
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "major"
}
field {
name: "minor"
number: 4
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "minor"
}
field {
name: "value"
number: 5
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "value"
}
}
message_type {
name: "RdmaStat"
field {
name: "current"
number: 1
label: LABEL_REPEATED
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.RdmaEntry"
json_name: "current"
}
field {
name: "limit"
number: 2
label: LABEL_REPEATED
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.RdmaEntry"
json_name: "limit"
}
}
message_type {
name: "RdmaEntry"
field {
name: "device"
number: 1
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "device"
}
field {
name: "hca_handles"
number: 2
label: LABEL_OPTIONAL
type: TYPE_UINT32
json_name: "hcaHandles"
}
field {
name: "hca_objects"
number: 3
label: LABEL_OPTIONAL
type: TYPE_UINT32
json_name: "hcaObjects"
}
}
message_type {
name: "NetworkStat"
field {
name: "name"
number: 1
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "name"
}
field {
name: "rx_bytes"
number: 2
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "rxBytes"
}
field {
name: "rx_packets"
number: 3
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "rxPackets"
}
field {
name: "rx_errors"
number: 4
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "rxErrors"
}
field {
name: "rx_dropped"
number: 5
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "rxDropped"
}
field {
name: "tx_bytes"
number: 6
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "txBytes"
}
field {
name: "tx_packets"
number: 7
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "txPackets"
}
field {
name: "tx_errors"
number: 8
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "txErrors"
}
field {
name: "tx_dropped"
number: 9
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "txDropped"
}
}
message_type {
name: "CgroupStats"
field {
name: "nr_sleeping"
number: 1
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "nrSleeping"
}
field {
name: "nr_running"
number: 2
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "nrRunning"
}
field {
name: "nr_stopped"
number: 3
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "nrStopped"
}
field {
name: "nr_uninterruptible"
number: 4
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "nrUninterruptible"
}
field {
name: "nr_io_wait"
number: 5
label: LABEL_OPTIONAL
type: TYPE_UINT64
json_name: "nrIoWait"
}
}
syntax: "proto3"
}
| vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt | 0 | https://github.com/kubernetes/kubernetes/commit/ec6fd2befac30ebb7dfb55b94d8c9874489a3902 | [
0.00018290890147909522,
0.00017401458171661943,
0.00016738774138502777,
0.0001742905587889254,
0.000002241148649773095
] |
{
"id": 1,
"code_window": [
"\t\tc.eventRecorder,\n",
"\t\tcontrollerName,\n",
"\t)\n",
"\n",
"\treturn c\n",
"}\n",
"\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tendpointslicerec.WithTrafficDistributionEnabled(utilfeature.DefaultFeatureGate.Enabled(features.ServiceTrafficDistribution)),\n"
],
"file_path": "pkg/controller/endpointslice/endpointslice_controller.go",
"type": "add",
"edit_start_line_idx": 178
} | # See the OWNERS docs at https://go.k8s.io/owners
approvers:
- bentheelder
- spiffxp
reviewers:
- bentheelder
- spiffxp
labels:
- sig/testing
| pkg/util/coverage/OWNERS | 0 | https://github.com/kubernetes/kubernetes/commit/ec6fd2befac30ebb7dfb55b94d8c9874489a3902 | [
0.00018290890147909522,
0.00017812629812397063,
0.00017334369476884604,
0.00017812629812397063,
0.000004782603355124593
] |
{
"id": 2,
"code_window": [
"\t// eventRecorder allows Reconciler to record and publish events.\n",
"\teventRecorder record.EventRecorder\n",
"\tcontrollerName string\n",
"}\n",
"\n",
"// endpointMeta includes the attributes we group slices on, this type helps with\n",
"// that logic in Reconciler\n",
"type endpointMeta struct {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"type ReconcilerOption func(*Reconciler)\n",
"\n",
"// WithTrafficDistributionEnabled controls whether the Reconciler considers the\n",
"// `trafficDistribution` field while reconciling EndpointSlices.\n",
"func WithTrafficDistributionEnabled(enabled bool) ReconcilerOption {\n",
"\treturn func(r *Reconciler) {\n",
"\t\tr.trafficDistributionEnabled = enabled\n",
"\t}\n",
"}\n",
"\n"
],
"file_path": "staging/src/k8s.io/endpointslice/reconciler.go",
"type": "add",
"edit_start_line_idx": 61
} | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package endpointslice
import (
"context"
"fmt"
"sort"
"time"
corev1 "k8s.io/api/core/v1"
discovery "k8s.io/api/discovery/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/endpointslice/metrics"
"k8s.io/endpointslice/topologycache"
"k8s.io/endpointslice/trafficdist"
endpointsliceutil "k8s.io/endpointslice/util"
"k8s.io/klog/v2"
)
// Reconciler is responsible for transforming current EndpointSlice state into
// desired state
type Reconciler struct {
client clientset.Interface
nodeLister corelisters.NodeLister
maxEndpointsPerSlice int32
endpointSliceTracker *endpointsliceutil.EndpointSliceTracker
metricsCache *metrics.Cache
// topologyCache tracks the distribution of Nodes and endpoints across zones
// to enable TopologyAwareHints.
topologyCache *topologycache.TopologyCache
	// trafficDistributionEnabled determines if the trafficDistribution field is to
// be considered when reconciling EndpointSlice hints.
trafficDistributionEnabled bool
// eventRecorder allows Reconciler to record and publish events.
eventRecorder record.EventRecorder
controllerName string
}
// endpointMeta includes the attributes we group slices on, this type helps with
// that logic in Reconciler
type endpointMeta struct {
ports []discovery.EndpointPort
addressType discovery.AddressType
}
// Reconcile takes a set of pods currently matching a service selector and
// compares them with the endpoints already present in any existing endpoint
// slices for the given service. It creates, updates, or deletes endpoint slices
// to ensure the desired set of pods are represented by endpoint slices.
func (r *Reconciler) Reconcile(logger klog.Logger, service *corev1.Service, pods []*corev1.Pod, existingSlices []*discovery.EndpointSlice, triggerTime time.Time) error {
slicesToDelete := []*discovery.EndpointSlice{} // slices that are no longer matching any address the service has
errs := []error{} // all errors generated in the process of reconciling
slicesByAddressType := make(map[discovery.AddressType][]*discovery.EndpointSlice) // slices by address type
// addresses that this service supports [o(1) find]
serviceSupportedAddressesTypes := getAddressTypesForService(logger, service)
// loop through slices identifying their address type.
// slices that no longer match address type supported by services
// go to delete, other slices goes to the Reconciler machinery
// for further adjustment
for _, existingSlice := range existingSlices {
// service no longer supports that address type, add it to deleted slices
if !serviceSupportedAddressesTypes.Has(existingSlice.AddressType) {
if r.topologyCache != nil {
svcKey, err := ServiceControllerKey(existingSlice)
if err != nil {
logger.Info("Couldn't get key to remove EndpointSlice from topology cache", "existingSlice", existingSlice, "err", err)
} else {
r.topologyCache.RemoveHints(svcKey, existingSlice.AddressType)
}
}
slicesToDelete = append(slicesToDelete, existingSlice)
continue
}
// add list if it is not on our map
if _, ok := slicesByAddressType[existingSlice.AddressType]; !ok {
slicesByAddressType[existingSlice.AddressType] = make([]*discovery.EndpointSlice, 0, 1)
}
slicesByAddressType[existingSlice.AddressType] = append(slicesByAddressType[existingSlice.AddressType], existingSlice)
}
// reconcile for existing.
for addressType := range serviceSupportedAddressesTypes {
existingSlices := slicesByAddressType[addressType]
err := r.reconcileByAddressType(logger, service, pods, existingSlices, triggerTime, addressType)
if err != nil {
errs = append(errs, err)
}
}
// delete those which are of addressType that is no longer supported
// by the service
for _, sliceToDelete := range slicesToDelete {
err := r.client.DiscoveryV1().EndpointSlices(service.Namespace).Delete(context.TODO(), sliceToDelete.Name, metav1.DeleteOptions{})
if err != nil {
errs = append(errs, fmt.Errorf("error deleting %s EndpointSlice for Service %s/%s: %w", sliceToDelete.Name, service.Namespace, service.Name, err))
} else {
r.endpointSliceTracker.ExpectDeletion(sliceToDelete)
metrics.EndpointSliceChanges.WithLabelValues("delete").Inc()
}
}
return utilerrors.NewAggregate(errs)
}
// reconcileByAddressType takes a set of pods currently matching a service selector and
// compares them with the endpoints already present in any existing endpoint
// slices (by address type) for the given service. It creates, updates, or deletes endpoint slices
// to ensure the desired set of pods are represented by endpoint slices.
func (r *Reconciler) reconcileByAddressType(logger klog.Logger, service *corev1.Service, pods []*corev1.Pod, existingSlices []*discovery.EndpointSlice, triggerTime time.Time, addressType discovery.AddressType) error {
errs := []error{}
slicesToCreate := []*discovery.EndpointSlice{}
slicesToUpdate := []*discovery.EndpointSlice{}
slicesToDelete := []*discovery.EndpointSlice{}
events := []*topologycache.EventBuilder{}
// Build data structures for existing state.
existingSlicesByPortMap := map[endpointsliceutil.PortMapKey][]*discovery.EndpointSlice{}
for _, existingSlice := range existingSlices {
if ownedBy(existingSlice, service) {
epHash := endpointsliceutil.NewPortMapKey(existingSlice.Ports)
existingSlicesByPortMap[epHash] = append(existingSlicesByPortMap[epHash], existingSlice)
} else {
slicesToDelete = append(slicesToDelete, existingSlice)
}
}
// Build data structures for desired state.
desiredMetaByPortMap := map[endpointsliceutil.PortMapKey]*endpointMeta{}
desiredEndpointsByPortMap := map[endpointsliceutil.PortMapKey]endpointsliceutil.EndpointSet{}
for _, pod := range pods {
if !endpointsliceutil.ShouldPodBeInEndpoints(pod, true) {
continue
}
endpointPorts := getEndpointPorts(logger, service, pod)
epHash := endpointsliceutil.NewPortMapKey(endpointPorts)
if _, ok := desiredEndpointsByPortMap[epHash]; !ok {
desiredEndpointsByPortMap[epHash] = endpointsliceutil.EndpointSet{}
}
if _, ok := desiredMetaByPortMap[epHash]; !ok {
desiredMetaByPortMap[epHash] = &endpointMeta{
addressType: addressType,
ports: endpointPorts,
}
}
node, err := r.nodeLister.Get(pod.Spec.NodeName)
if err != nil {
			// we are getting the information from the local informer;
			// an error other than IsNotFound should not happen
if !errors.IsNotFound(err) {
return err
}
// If the Node specified by the Pod doesn't exist we want to requeue the Service so we
// retry later, but also update the EndpointSlice without the problematic Pod.
// Theoretically, the pod Garbage Collector will remove the Pod, but we want to avoid
// situations where a reference from a Pod to a missing node can leave the EndpointSlice
// stuck forever.
			// On the other hand, if service.Spec.PublishNotReadyAddresses is set we just add the
			// Pod, since the user is explicitly indicating that the Pod address should be published.
if !service.Spec.PublishNotReadyAddresses {
logger.Info("skipping Pod for Service, Node not found", "pod", klog.KObj(pod), "service", klog.KObj(service), "node", klog.KRef("", pod.Spec.NodeName))
errs = append(errs, fmt.Errorf("skipping Pod %s for Service %s/%s: Node %s Not Found", pod.Name, service.Namespace, service.Name, pod.Spec.NodeName))
continue
}
}
endpoint := podToEndpoint(pod, node, service, addressType)
if len(endpoint.Addresses) > 0 {
desiredEndpointsByPortMap[epHash].Insert(&endpoint)
}
}
spMetrics := metrics.NewServicePortCache()
totalAdded := 0
totalRemoved := 0
// Determine changes necessary for each group of slices by port map.
for portMap, desiredEndpoints := range desiredEndpointsByPortMap {
numEndpoints := len(desiredEndpoints)
pmSlicesToCreate, pmSlicesToUpdate, pmSlicesToDelete, added, removed := r.reconcileByPortMapping(
logger, service, existingSlicesByPortMap[portMap], desiredEndpoints, desiredMetaByPortMap[portMap])
totalAdded += added
totalRemoved += removed
spMetrics.Set(portMap, metrics.EfficiencyInfo{
Endpoints: numEndpoints,
Slices: len(existingSlicesByPortMap[portMap]) + len(pmSlicesToCreate) - len(pmSlicesToDelete),
})
slicesToCreate = append(slicesToCreate, pmSlicesToCreate...)
slicesToUpdate = append(slicesToUpdate, pmSlicesToUpdate...)
slicesToDelete = append(slicesToDelete, pmSlicesToDelete...)
}
// If there are unique sets of ports that are no longer desired, mark
// the corresponding endpoint slices for deletion.
for portMap, existingSlices := range existingSlicesByPortMap {
if _, ok := desiredEndpointsByPortMap[portMap]; !ok {
slicesToDelete = append(slicesToDelete, existingSlices...)
}
}
	// If no endpoint slices would otherwise exist, we need to add a placeholder.
if len(existingSlices) == len(slicesToDelete) && len(slicesToCreate) < 1 {
// Check for existing placeholder slice outside of the core control flow
placeholderSlice := newEndpointSlice(logger, service, &endpointMeta{ports: []discovery.EndpointPort{}, addressType: addressType}, r.controllerName)
if len(slicesToDelete) == 1 && placeholderSliceCompare.DeepEqual(slicesToDelete[0], placeholderSlice) {
			// We are about to unnecessarily delete/recreate the placeholder; remove it now.
slicesToDelete = slicesToDelete[:0]
} else {
slicesToCreate = append(slicesToCreate, placeholderSlice)
}
spMetrics.Set(endpointsliceutil.NewPortMapKey(placeholderSlice.Ports), metrics.EfficiencyInfo{
Endpoints: 0,
Slices: 1,
})
}
metrics.EndpointsAddedPerSync.WithLabelValues().Observe(float64(totalAdded))
metrics.EndpointsRemovedPerSync.WithLabelValues().Observe(float64(totalRemoved))
serviceNN := types.NamespacedName{Name: service.Name, Namespace: service.Namespace}
r.metricsCache.UpdateServicePortCache(serviceNN, spMetrics)
// Topology hints are assigned per address type. This means it is
// theoretically possible for endpoints of one address type to be assigned
	// hints while endpoints of another address type are not.
si := &topologycache.SliceInfo{
ServiceKey: fmt.Sprintf("%s/%s", service.Namespace, service.Name),
AddressType: addressType,
ToCreate: slicesToCreate,
ToUpdate: slicesToUpdate,
Unchanged: unchangedSlices(existingSlices, slicesToUpdate, slicesToDelete),
}
canUseTrafficDistribution := r.trafficDistributionEnabled && !hintsEnabled(service.Annotations)
// Check if we need to add/remove hints based on the topology annotation.
//
// This if/else clause can be removed once the annotation has been deprecated.
// Ref: https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/4444-service-routing-preference
if r.topologyCache != nil && hintsEnabled(service.Annotations) {
// Reaching this point means that we need to configure hints based on the
// topology annotation.
slicesToCreate, slicesToUpdate, events = r.topologyCache.AddHints(logger, si)
} else {
// Reaching this point means that we will not be configuring hints based on
// the topology annotation. We need to do 2 things:
// 1. If hints were added previously based on the annotation, we need to
// clear up any locally cached hints from the topologyCache object.
// 2. Optionally remove the actual hints from the EndpointSlice if we know
// that the `trafficDistribution` field is also NOT being used. In other
// words, if we know that the `trafficDistribution` field has been
// correctly configured by the customer, we DO NOT remove the hints and
// wait for the trafficDist handlers to correctly configure them. Always
// unconditionally removing hints here (and letting them get readded by
// the trafficDist) adds extra overhead in the form of DeepCopy (done
// within topologyCache.RemoveHints)
// Check 1.
if r.topologyCache != nil {
if r.topologyCache.HasPopulatedHints(si.ServiceKey) {
logger.Info("TopologyAwareHints annotation has changed, removing hints", "serviceKey", si.ServiceKey, "addressType", si.AddressType)
events = append(events, &topologycache.EventBuilder{
EventType: corev1.EventTypeWarning,
Reason: "TopologyAwareHintsDisabled",
Message: topologycache.FormatWithAddressType(topologycache.TopologyAwareHintsDisabled, si.AddressType),
})
}
r.topologyCache.RemoveHints(si.ServiceKey, addressType)
}
// Check 2.
if !canUseTrafficDistribution {
slicesToCreate, slicesToUpdate = topologycache.RemoveHintsFromSlices(si)
}
}
if canUseTrafficDistribution {
r.metricsCache.UpdateTrafficDistributionForService(serviceNN, service.Spec.TrafficDistribution)
slicesToCreate, slicesToUpdate, _ = trafficdist.ReconcileHints(service.Spec.TrafficDistribution, slicesToCreate, slicesToUpdate, unchangedSlices(existingSlices, slicesToUpdate, slicesToDelete))
} else {
r.metricsCache.UpdateTrafficDistributionForService(serviceNN, nil)
}
err := r.finalize(service, slicesToCreate, slicesToUpdate, slicesToDelete, triggerTime)
if err != nil {
errs = append(errs, err)
}
for _, event := range events {
r.eventRecorder.Event(service, event.EventType, event.Reason, event.Message)
}
return utilerrors.NewAggregate(errs)
}
func NewReconciler(client clientset.Interface, nodeLister corelisters.NodeLister, maxEndpointsPerSlice int32, endpointSliceTracker *endpointsliceutil.EndpointSliceTracker, topologyCache *topologycache.TopologyCache, trafficDistributionEnabled bool, eventRecorder record.EventRecorder, controllerName string) *Reconciler {
return &Reconciler{
client: client,
nodeLister: nodeLister,
maxEndpointsPerSlice: maxEndpointsPerSlice,
endpointSliceTracker: endpointSliceTracker,
metricsCache: metrics.NewCache(maxEndpointsPerSlice),
topologyCache: topologyCache,
trafficDistributionEnabled: trafficDistributionEnabled,
eventRecorder: eventRecorder,
controllerName: controllerName,
}
}
// placeholderSliceCompare is a conversion func for comparing two placeholder endpoint slices.
// It only compares the specific fields we care about.
var placeholderSliceCompare = conversion.EqualitiesOrDie(
func(a, b metav1.OwnerReference) bool {
return a.String() == b.String()
},
func(a, b metav1.ObjectMeta) bool {
if a.Namespace != b.Namespace {
return false
}
for k, v := range a.Labels {
if b.Labels[k] != v {
return false
}
}
for k, v := range b.Labels {
if a.Labels[k] != v {
return false
}
}
return true
},
)
// finalize creates, updates, and deletes slices as specified
func (r *Reconciler) finalize(
service *corev1.Service,
slicesToCreate,
slicesToUpdate,
slicesToDelete []*discovery.EndpointSlice,
triggerTime time.Time,
) error {
// If there are slices to create and delete, change the creates to updates
// of the slices that would otherwise be deleted.
for i := 0; i < len(slicesToDelete); {
if len(slicesToCreate) == 0 {
break
}
sliceToDelete := slicesToDelete[i]
slice := slicesToCreate[len(slicesToCreate)-1]
// Only update EndpointSlices that are owned by this Service and have
// the same AddressType. We need to avoid updating EndpointSlices that
// are being garbage collected for an old Service with the same name.
// The AddressType field is immutable. Since Services also consider
// IPFamily immutable, the only case where this should matter will be
// the migration from IP to IPv4 and IPv6 AddressTypes, where there's a
// chance EndpointSlices with an IP AddressType would otherwise be
// updated to IPv4 or IPv6 without this check.
if sliceToDelete.AddressType == slice.AddressType && ownedBy(sliceToDelete, service) {
slice.Name = sliceToDelete.Name
slicesToCreate = slicesToCreate[:len(slicesToCreate)-1]
slicesToUpdate = append(slicesToUpdate, slice)
slicesToDelete = append(slicesToDelete[:i], slicesToDelete[i+1:]...)
} else {
i++
}
}
// Don't create new EndpointSlices if the Service is pending deletion. This
// is to avoid a potential race condition with the garbage collector where
// it tries to delete EndpointSlices as this controller replaces them.
if service.DeletionTimestamp == nil {
for _, endpointSlice := range slicesToCreate {
addTriggerTimeAnnotation(endpointSlice, triggerTime)
createdSlice, err := r.client.DiscoveryV1().EndpointSlices(service.Namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{})
if err != nil {
// If the namespace is terminating, creates will continue to fail. Simply drop the item.
if errors.HasStatusCause(err, corev1.NamespaceTerminatingCause) {
return nil
}
return fmt.Errorf("failed to create EndpointSlice for Service %s/%s: %v", service.Namespace, service.Name, err)
}
r.endpointSliceTracker.Update(createdSlice)
metrics.EndpointSliceChanges.WithLabelValues("create").Inc()
}
}
for _, endpointSlice := range slicesToUpdate {
addTriggerTimeAnnotation(endpointSlice, triggerTime)
updatedSlice, err := r.client.DiscoveryV1().EndpointSlices(service.Namespace).Update(context.TODO(), endpointSlice, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("failed to update %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err)
}
r.endpointSliceTracker.Update(updatedSlice)
metrics.EndpointSliceChanges.WithLabelValues("update").Inc()
}
for _, endpointSlice := range slicesToDelete {
err := r.client.DiscoveryV1().EndpointSlices(service.Namespace).Delete(context.TODO(), endpointSlice.Name, metav1.DeleteOptions{})
if err != nil {
return fmt.Errorf("failed to delete %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err)
}
r.endpointSliceTracker.ExpectDeletion(endpointSlice)
metrics.EndpointSliceChanges.WithLabelValues("delete").Inc()
}
topologyLabel := "Disabled"
if r.topologyCache != nil && hintsEnabled(service.Annotations) {
topologyLabel = "Auto"
}
var trafficDistribution string
if r.trafficDistributionEnabled && !hintsEnabled(service.Annotations) {
if service.Spec.TrafficDistribution != nil && *service.Spec.TrafficDistribution == corev1.ServiceTrafficDistributionPreferClose {
trafficDistribution = *service.Spec.TrafficDistribution
}
}
numSlicesChanged := len(slicesToCreate) + len(slicesToUpdate) + len(slicesToDelete)
metrics.EndpointSlicesChangedPerSync.WithLabelValues(topologyLabel, trafficDistribution).Observe(float64(numSlicesChanged))
return nil
}
// reconcileByPortMapping compares the endpoints found in existing slices with
// the list of desired endpoints and returns lists of slices to create, update,
// and delete. It also checks that the slices mirror the parent service's labels.
// The logic is split up into several main steps:
// 1. Iterate through existing slices, delete endpoints that are no longer
// desired and update matching endpoints that have changed. It also checks
//    if the slices have the labels of the parent service, and updates them if not.
// 2. Iterate through slices that have been modified in 1 and fill them up with
// any remaining desired endpoints.
// 3. If there are still desired endpoints left, try to fit them into a previously
// unchanged slice and/or create new ones.
func (r *Reconciler) reconcileByPortMapping(
logger klog.Logger,
service *corev1.Service,
existingSlices []*discovery.EndpointSlice,
desiredSet endpointsliceutil.EndpointSet,
endpointMeta *endpointMeta,
) ([]*discovery.EndpointSlice, []*discovery.EndpointSlice, []*discovery.EndpointSlice, int, int) {
slicesByName := map[string]*discovery.EndpointSlice{}
sliceNamesUnchanged := sets.New[string]()
sliceNamesToUpdate := sets.New[string]()
sliceNamesToDelete := sets.New[string]()
numRemoved := 0
// 1. Iterate through existing slices to delete endpoints no longer desired
// and update endpoints that have changed
for _, existingSlice := range existingSlices {
slicesByName[existingSlice.Name] = existingSlice
newEndpoints := []discovery.Endpoint{}
endpointUpdated := false
for _, endpoint := range existingSlice.Endpoints {
got := desiredSet.Get(&endpoint)
// If endpoint is desired add it to list of endpoints to keep.
if got != nil {
newEndpoints = append(newEndpoints, *got)
				// If the existing version of the endpoint doesn't match the desired
				// version, set endpointUpdated to ensure endpoint changes are persisted.
if !endpointsliceutil.EndpointsEqualBeyondHash(got, &endpoint) {
endpointUpdated = true
}
// once an endpoint has been placed/found in a slice, it no
// longer needs to be handled
desiredSet.Delete(&endpoint)
}
}
// generate the slice labels and check if parent labels have changed
labels, labelsChanged := setEndpointSliceLabels(logger, existingSlice, service, r.controllerName)
// If an endpoint was updated or removed, mark for update or delete
if endpointUpdated || len(existingSlice.Endpoints) != len(newEndpoints) {
if len(existingSlice.Endpoints) > len(newEndpoints) {
numRemoved += len(existingSlice.Endpoints) - len(newEndpoints)
}
if len(newEndpoints) == 0 {
// if no endpoints desired in this slice, mark for deletion
sliceNamesToDelete.Insert(existingSlice.Name)
} else {
// otherwise, copy and mark for update
epSlice := existingSlice.DeepCopy()
epSlice.Endpoints = newEndpoints
epSlice.Labels = labels
slicesByName[existingSlice.Name] = epSlice
sliceNamesToUpdate.Insert(epSlice.Name)
}
} else if labelsChanged {
// if labels have changed, copy and mark for update
epSlice := existingSlice.DeepCopy()
epSlice.Labels = labels
slicesByName[existingSlice.Name] = epSlice
sliceNamesToUpdate.Insert(epSlice.Name)
} else {
// slices with no changes will be useful if there are leftover endpoints
sliceNamesUnchanged.Insert(existingSlice.Name)
}
}
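// Everything still in desiredSet at this point was not found in any existing
// slice, so it counts as an addition.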
numAdded := desiredSet.Len()
// 2. If we still have desired endpoints to add and slices marked for update,
// iterate through the slices and fill them up with the desired endpoints.
if desiredSet.Len() > 0 && sliceNamesToUpdate.Len() > 0 {
slices := []*discovery.EndpointSlice{}
for _, sliceName := range sliceNamesToUpdate.UnsortedList() {
slices = append(slices, slicesByName[sliceName])
}
// Sort endpoint slices by length so we're filling up the fullest ones
// first.
sort.Sort(endpointSliceEndpointLen(slices))
// Iterate through slices and fill them up with desired endpoints.
for _, slice := range slices {
for desiredSet.Len() > 0 && len(slice.Endpoints) < int(r.maxEndpointsPerSlice) {
endpoint, _ := desiredSet.PopAny()
slice.Endpoints = append(slice.Endpoints, *endpoint)
}
}
}
// 3. If there are still desired endpoints left at this point, we try to fit
// the endpoints in a single existing slice. If there are no slices with
// that capacity, we create new slices for the endpoints.
slicesToCreate := []*discovery.EndpointSlice{}
for desiredSet.Len() > 0 {
var sliceToFill *discovery.EndpointSlice
// If the remaining number of endpoints is smaller than the max
// endpoints per slice and we have slices that haven't already been
// filled, try to fit them in one.
if desiredSet.Len() < int(r.maxEndpointsPerSlice) && sliceNamesUnchanged.Len() > 0 {
unchangedSlices := []*discovery.EndpointSlice{}
for _, sliceName := range sliceNamesUnchanged.UnsortedList() {
unchangedSlices = append(unchangedSlices, slicesByName[sliceName])
}
sliceToFill = getSliceToFill(unchangedSlices, desiredSet.Len(), int(r.maxEndpointsPerSlice))
}
// If we didn't find a sliceToFill, generate a new empty one.
if sliceToFill == nil {
sliceToFill = newEndpointSlice(logger, service, endpointMeta, r.controllerName)
} else {
// deep copy required to modify this slice.
sliceToFill = sliceToFill.DeepCopy()
slicesByName[sliceToFill.Name] = sliceToFill
}
// Fill the slice up with remaining endpoints.
for desiredSet.Len() > 0 && len(sliceToFill.Endpoints) < int(r.maxEndpointsPerSlice) {
endpoint, _ := desiredSet.PopAny()
sliceToFill.Endpoints = append(sliceToFill.Endpoints, *endpoint)
}
// New slices will not have a Name set, use this to determine whether
// this should be an update or create.
if sliceToFill.Name != "" {
sliceNamesToUpdate.Insert(sliceToFill.Name)
sliceNamesUnchanged.Delete(sliceToFill.Name)
} else {
slicesToCreate = append(slicesToCreate, sliceToFill)
}
}
// Build slicesToUpdate from slice names.
slicesToUpdate := []*discovery.EndpointSlice{}
for _, sliceName := range sliceNamesToUpdate.UnsortedList() {
slicesToUpdate = append(slicesToUpdate, slicesByName[sliceName])
}
// Build slicesToDelete from slice names.
slicesToDelete := []*discovery.EndpointSlice{}
for _, sliceName := range sliceNamesToDelete.UnsortedList() {
slicesToDelete = append(slicesToDelete, slicesByName[sliceName])
}
return slicesToCreate, slicesToUpdate, slicesToDelete, numAdded, numRemoved
}
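// DeleteService removes the cached metrics entry for the given Service.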
func (r *Reconciler) DeleteService(namespace, name string) {
r.metricsCache.DeleteService(types.NamespacedName{Namespace: namespace, Name: name})
}
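// GetControllerName returns the controller name this Reconciler uses when
// managing EndpointSlices.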
func (r *Reconciler) GetControllerName() string {
return r.controllerName
}
// ManagedByChanged returns true if one of the provided EndpointSlices is
// managed by the EndpointSlice controller while the other is not.
func (r *Reconciler) ManagedByChanged(endpointSlice1, endpointSlice2 *discovery.EndpointSlice) bool {
return r.ManagedByController(endpointSlice1) != r.ManagedByController(endpointSlice2)
}
// ManagedByController returns true if the controller of the provided
// EndpointSlices is the EndpointSlice controller.
func (r *Reconciler) ManagedByController(endpointSlice *discovery.EndpointSlice) bool {
managedBy := endpointSlice.Labels[discovery.LabelManagedBy]
return managedBy == r.controllerName
}
| staging/src/k8s.io/endpointslice/reconciler.go | 1 | https://github.com/kubernetes/kubernetes/commit/ec6fd2befac30ebb7dfb55b94d8c9874489a3902 | [
0.9967104196548462,
0.057152971625328064,
0.0001654152147239074,
0.00036971960798837245,
0.2161145657300949
] |
{
"id": 2,
"code_window": [
"\t// eventRecorder allows Reconciler to record and publish events.\n",
"\teventRecorder record.EventRecorder\n",
"\tcontrollerName string\n",
"}\n",
"\n",
"// endpointMeta includes the attributes we group slices on, this type helps with\n",
"// that logic in Reconciler\n",
"type endpointMeta struct {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"type ReconcilerOption func(*Reconciler)\n",
"\n",
"// WithTrafficDistributionEnabled controls whether the Reconciler considers the\n",
"// `trafficDistribution` field while reconciling EndpointSlices.\n",
"func WithTrafficDistributionEnabled(enabled bool) ReconcilerOption {\n",
"\treturn func(r *Reconciler) {\n",
"\t\tr.trafficDistributionEnabled = enabled\n",
"\t}\n",
"}\n",
"\n"
],
"file_path": "staging/src/k8s.io/endpointslice/reconciler.go",
"type": "add",
"edit_start_line_idx": 61
} | /*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package healthz
import (
"fmt"
"net/http"
"net/http/httptest"
"sync/atomic"
"testing"
"time"
"k8s.io/apiserver/pkg/server/healthz"
)
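// TestMutableHealthzHandler verifies that checks can be installed in batches,
// that verbose output and subcheck paths are served correctly, and that
// checks added after construction take effect immediately.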
func TestMutableHealthzHandler(t *testing.T) {
badChecker := healthz.NamedCheck("bad", func(r *http.Request) error {
return fmt.Errorf("bad")
})
for _, tc := range []struct {
name string
checkBatches [][]healthz.HealthChecker
appendBad bool // appends bad check after batches above, and see if it fails afterwards
path string
expectedBody string
expectedStatus int
}{
{
name: "empty",
checkBatches: [][]healthz.HealthChecker{},
path: "/healthz",
expectedBody: "ok",
expectedStatus: http.StatusOK,
},
{
name: "good",
checkBatches: [][]healthz.HealthChecker{
{NamedPingChecker("good")},
},
path: "/healthz",
expectedBody: "ok",
expectedStatus: http.StatusOK,
},
{
name: "good verbose", // verbose only applies for successful checks
checkBatches: [][]healthz.HealthChecker{
{NamedPingChecker("good")}, // batch 1: good
},
path: "/healthz?verbose=true",
expectedBody: "[+]good ok\nhealthz check passed\n",
expectedStatus: http.StatusOK,
},
{
name: "good and bad, same batch",
checkBatches: [][]healthz.HealthChecker{
{NamedPingChecker("good"), badChecker}, // batch 1: good, bad
},
path: "/healthz",
expectedBody: "[+]good ok\n[-]bad failed: reason withheld\nhealthz check failed\n",
expectedStatus: http.StatusInternalServerError,
},
{
name: "good and bad, two batches",
checkBatches: [][]healthz.HealthChecker{
{NamedPingChecker("good")}, // batch 1: good
{badChecker}, // batch 2: bad
},
path: "/healthz",
expectedBody: "[+]good ok\n[-]bad failed: reason withheld\nhealthz check failed\n",
expectedStatus: http.StatusInternalServerError,
},
{
name: "two checks and append bad",
checkBatches: [][]healthz.HealthChecker{
{NamedPingChecker("foo"), NamedPingChecker("bar")},
},
path: "/healthz",
expectedBody: "ok",
expectedStatus: http.StatusOK,
appendBad: true,
},
{
name: "subcheck",
checkBatches: [][]healthz.HealthChecker{
{NamedPingChecker("good")}, // batch 1: good
{badChecker}, // batch 2: bad
},
path: "/healthz/good",
expectedBody: "ok",
expectedStatus: http.StatusOK,
},
} {
t.Run(tc.name, func(t *testing.T) {
h := NewMutableHealthzHandler()
for _, batch := range tc.checkBatches {
h.AddHealthChecker(batch...)
}
req, err := http.NewRequest("GET", fmt.Sprintf("https://example.com%v", tc.path), nil)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
w := httptest.NewRecorder()
h.ServeHTTP(w, req)
if w.Code != tc.expectedStatus {
t.Errorf("unexpected status: expected %v, got %v", tc.expectedStatus, w.Result().StatusCode)
}
if w.Body.String() != tc.expectedBody {
t.Errorf("unexpected body: expected %v, got %v", tc.expectedBody, w.Body.String())
}
if tc.appendBad {
h.AddHealthChecker(badChecker)
w := httptest.NewRecorder()
h.ServeHTTP(w, req)
// should fail
if w.Code != http.StatusInternalServerError {
t.Errorf("did not fail after adding bad checker")
}
}
})
}
}
// TestConcurrentChecks tests that the handler would not block on concurrent healthz requests.
func TestConcurrentChecks(t *testing.T) {
const N = 5
stopChan := make(chan interface{})
defer close(stopChan) // always close no matter passing or not
concurrentChan := make(chan interface{}, N)
var concurrentCount int32
pausingCheck := healthz.NamedCheck("pausing", func(r *http.Request) error {
atomic.AddInt32(&concurrentCount, 1)
concurrentChan <- nil
<-stopChan
return nil
})
h := NewMutableHealthzHandler(pausingCheck)
for i := 0; i < N; i++ {
go func() {
req, _ := http.NewRequest(http.MethodGet, "https://example.com/healthz", nil)
w := httptest.NewRecorder()
h.ServeHTTP(w, req)
}()
}
giveUp := time.After(1 * time.Second) // should take <1ms if passing
for i := 0; i < N; i++ {
select {
case <-giveUp:
t.Errorf("given up waiting for concurrent checks to start.")
return
case <-concurrentChan:
continue
}
}
if concurrentCount != N {
t.Errorf("expected %v concurrency, got %v", N, concurrentCount)
}
}
| staging/src/k8s.io/controller-manager/pkg/healthz/handler_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/ec6fd2befac30ebb7dfb55b94d8c9874489a3902 | [
0.0001806392683647573,
0.0001728023198666051,
0.0001677451655268669,
0.0001714210375212133,
0.0000034123784189432627
] |
{
"id": 2,
"code_window": [
"\t// eventRecorder allows Reconciler to record and publish events.\n",
"\teventRecorder record.EventRecorder\n",
"\tcontrollerName string\n",
"}\n",
"\n",
"// endpointMeta includes the attributes we group slices on, this type helps with\n",
"// that logic in Reconciler\n",
"type endpointMeta struct {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"type ReconcilerOption func(*Reconciler)\n",
"\n",
"// WithTrafficDistributionEnabled controls whether the Reconciler considers the\n",
"// `trafficDistribution` field while reconciling EndpointSlices.\n",
"func WithTrafficDistributionEnabled(enabled bool) ReconcilerOption {\n",
"\treturn func(r *Reconciler) {\n",
"\t\tr.trafficDistributionEnabled = enabled\n",
"\t}\n",
"}\n",
"\n"
],
"file_path": "staging/src/k8s.io/endpointslice/reconciler.go",
"type": "add",
"edit_start_line_idx": 61
} | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global // import "go.opentelemetry.io/otel/internal/global"
import (
"log"
"os"
"sync/atomic"
)
var (
// GlobalErrorHandler provides an ErrorHandler that can be used
// throughout an OpenTelemetry instrumented project. When a user
// specified ErrorHandler is registered (`SetErrorHandler`) all calls to
// `Handle` and will be delegated to the registered ErrorHandler.
GlobalErrorHandler = defaultErrorHandler()
// Compile-time check that delegator implements ErrorHandler.
_ ErrorHandler = (*ErrDelegator)(nil)
// Compile-time check that errLogger implements ErrorHandler.
_ ErrorHandler = (*ErrLogger)(nil)
)
// ErrorHandler handles irremediable events.
type ErrorHandler interface {
// Handle handles any error deemed irremediable by an OpenTelemetry
// component.
Handle(error)
}
type ErrDelegator struct {
delegate atomic.Pointer[ErrorHandler]
}
func (d *ErrDelegator) Handle(err error) {
d.getDelegate().Handle(err)
}
func (d *ErrDelegator) getDelegate() ErrorHandler {
return *d.delegate.Load()
}
// setDelegate sets the ErrorHandler delegate.
func (d *ErrDelegator) setDelegate(eh ErrorHandler) {
d.delegate.Store(&eh)
}
func defaultErrorHandler() *ErrDelegator {
d := &ErrDelegator{}
d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)})
return d
}
// ErrLogger logs errors if no delegate is set, otherwise they are delegated.
type ErrLogger struct {
l *log.Logger
}
// Handle logs err if no delegate is set, otherwise it is delegated.
func (h *ErrLogger) Handle(err error) {
h.l.Print(err)
}
// GetErrorHandler returns the global ErrorHandler instance.
//
// The default ErrorHandler instance returned will log all errors to STDERR
// until an override ErrorHandler is set with SetErrorHandler. All
// ErrorHandlers returned prior to this will automatically forward errors to
// the set instance instead of logging.
//
// Subsequent calls to SetErrorHandler after the first will not forward errors
// to the new ErrorHandler for prior returned instances.
func GetErrorHandler() ErrorHandler {
return GlobalErrorHandler
}
// SetErrorHandler sets the global ErrorHandler to h.
//
// The first time this is called all ErrorHandler previously returned from
// GetErrorHandler will send errors to h instead of the default logging
// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
// delegate errors to h.
func SetErrorHandler(h ErrorHandler) {
GlobalErrorHandler.setDelegate(h)
}
// Handle is a convenience function for ErrorHandler().Handle(err).
func Handle(err error) {
GetErrorHandler().Handle(err)
}
| vendor/go.opentelemetry.io/otel/internal/global/handler.go | 0 | https://github.com/kubernetes/kubernetes/commit/ec6fd2befac30ebb7dfb55b94d8c9874489a3902 | [
0.0012097215512767434,
0.0002673144917935133,
0.00016774867253843695,
0.00017353057046420872,
0.00029803853249177337
] |
{
"id": 2,
"code_window": [
"\t// eventRecorder allows Reconciler to record and publish events.\n",
"\teventRecorder record.EventRecorder\n",
"\tcontrollerName string\n",
"}\n",
"\n",
"// endpointMeta includes the attributes we group slices on, this type helps with\n",
"// that logic in Reconciler\n",
"type endpointMeta struct {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"type ReconcilerOption func(*Reconciler)\n",
"\n",
"// WithTrafficDistributionEnabled controls whether the Reconciler considers the\n",
"// `trafficDistribution` field while reconciling EndpointSlices.\n",
"func WithTrafficDistributionEnabled(enabled bool) ReconcilerOption {\n",
"\treturn func(r *Reconciler) {\n",
"\t\tr.trafficDistributionEnabled = enabled\n",
"\t}\n",
"}\n",
"\n"
],
"file_path": "staging/src/k8s.io/endpointslice/reconciler.go",
"type": "add",
"edit_start_line_idx": 61
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1alpha1
// PolicyRuleApplyConfiguration represents an declarative configuration of the PolicyRule type for use
// with apply.
type PolicyRuleApplyConfiguration struct {
Verbs []string `json:"verbs,omitempty"`
APIGroups []string `json:"apiGroups,omitempty"`
Resources []string `json:"resources,omitempty"`
ResourceNames []string `json:"resourceNames,omitempty"`
NonResourceURLs []string `json:"nonResourceURLs,omitempty"`
}
// PolicyRuleApplyConfiguration constructs an declarative configuration of the PolicyRule type for use with
// apply.
func PolicyRule() *PolicyRuleApplyConfiguration {
return &PolicyRuleApplyConfiguration{}
}
// WithVerbs adds the given value to the Verbs field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Verbs field.
func (b *PolicyRuleApplyConfiguration) WithVerbs(values ...string) *PolicyRuleApplyConfiguration {
for i := range values {
b.Verbs = append(b.Verbs, values[i])
}
return b
}
// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the APIGroups field.
func (b *PolicyRuleApplyConfiguration) WithAPIGroups(values ...string) *PolicyRuleApplyConfiguration {
for i := range values {
b.APIGroups = append(b.APIGroups, values[i])
}
return b
}
// WithResources adds the given value to the Resources field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Resources field.
func (b *PolicyRuleApplyConfiguration) WithResources(values ...string) *PolicyRuleApplyConfiguration {
for i := range values {
b.Resources = append(b.Resources, values[i])
}
return b
}
// WithResourceNames adds the given value to the ResourceNames field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the ResourceNames field.
func (b *PolicyRuleApplyConfiguration) WithResourceNames(values ...string) *PolicyRuleApplyConfiguration {
for i := range values {
b.ResourceNames = append(b.ResourceNames, values[i])
}
return b
}
// WithNonResourceURLs adds the given value to the NonResourceURLs field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the NonResourceURLs field.
func (b *PolicyRuleApplyConfiguration) WithNonResourceURLs(values ...string) *PolicyRuleApplyConfiguration {
for i := range values {
b.NonResourceURLs = append(b.NonResourceURLs, values[i])
}
return b
}
| staging/src/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go | 0 | https://github.com/kubernetes/kubernetes/commit/ec6fd2befac30ebb7dfb55b94d8c9874489a3902 | [
0.0004312087257858366,
0.00022588383581023663,
0.00016665595467202365,
0.00017953226051758975,
0.00009419838897883892
] |
{
"id": 3,
"code_window": [
"\n",
"}\n",
"\n",
"func NewReconciler(client clientset.Interface, nodeLister corelisters.NodeLister, maxEndpointsPerSlice int32, endpointSliceTracker *endpointsliceutil.EndpointSliceTracker, topologyCache *topologycache.TopologyCache, trafficDistributionEnabled bool, eventRecorder record.EventRecorder, controllerName string) *Reconciler {\n",
"\treturn &Reconciler{\n",
"\t\tclient: client,\n",
"\t\tnodeLister: nodeLister,\n",
"\t\tmaxEndpointsPerSlice: maxEndpointsPerSlice,\n",
"\t\tendpointSliceTracker: endpointSliceTracker,\n",
"\t\tmetricsCache: metrics.NewCache(maxEndpointsPerSlice),\n",
"\t\ttopologyCache: topologyCache,\n",
"\t\ttrafficDistributionEnabled: trafficDistributionEnabled,\n",
"\t\teventRecorder: eventRecorder,\n",
"\t\tcontrollerName: controllerName,\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"func NewReconciler(client clientset.Interface, nodeLister corelisters.NodeLister, maxEndpointsPerSlice int32, endpointSliceTracker *endpointsliceutil.EndpointSliceTracker, topologyCache *topologycache.TopologyCache, eventRecorder record.EventRecorder, controllerName string, options ...ReconcilerOption) *Reconciler {\n",
"\tr := &Reconciler{\n",
"\t\tclient: client,\n",
"\t\tnodeLister: nodeLister,\n",
"\t\tmaxEndpointsPerSlice: maxEndpointsPerSlice,\n",
"\t\tendpointSliceTracker: endpointSliceTracker,\n",
"\t\tmetricsCache: metrics.NewCache(maxEndpointsPerSlice),\n",
"\t\ttopologyCache: topologyCache,\n",
"\t\teventRecorder: eventRecorder,\n",
"\t\tcontrollerName: controllerName,\n",
"\t}\n",
"\tfor _, option := range options {\n",
"\t\toption(r)\n"
],
"file_path": "staging/src/k8s.io/endpointslice/reconciler.go",
"type": "replace",
"edit_start_line_idx": 329
} | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package endpointslice
import (
"context"
"fmt"
"time"
"golang.org/x/time/rate"
v1 "k8s.io/api/core/v1"
discovery "k8s.io/api/discovery/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
coreinformers "k8s.io/client-go/informers/core/v1"
discoveryinformers "k8s.io/client-go/informers/discovery/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
discoverylisters "k8s.io/client-go/listers/discovery/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
endpointslicerec "k8s.io/endpointslice"
endpointslicemetrics "k8s.io/endpointslice/metrics"
"k8s.io/endpointslice/topologycache"
endpointsliceutil "k8s.io/endpointslice/util"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/controller"
endpointslicepkg "k8s.io/kubernetes/pkg/controller/util/endpointslice"
"k8s.io/kubernetes/pkg/features"
)
const (
// maxRetries is the number of times a service will be retried before it is
// dropped out of the queue. Any sync error, such as a failure to create or
// update an EndpointSlice could trigger a retry. With the current
// rate-limiter in use (1s*2^(numRetries-1)) the following numbers represent
// the sequence of delays between successive queuings of a service.
//
// 1s, 2s, 4s, 8s, 16s, 32s, 64s, 128s, 256s, 512s, 1000s (max)
maxRetries = 15
// endpointSliceChangeMinSyncDelay indicates the minimum delay before
// queuing a syncService call after an EndpointSlice changes. If
// endpointUpdatesBatchPeriod is greater than this value, it will be used
// instead. This helps batch processing of changes to multiple
// EndpointSlices.
endpointSliceChangeMinSyncDelay = 1 * time.Second
// defaultSyncBackOff is the default backoff period for syncService calls.
defaultSyncBackOff = 1 * time.Second
// maxSyncBackOff is the max backoff period for syncService calls.
maxSyncBackOff = 1000 * time.Second
// controllerName is a unique value used with LabelManagedBy to indicate
// the component managing an EndpointSlice.
controllerName = "endpointslice-controller.k8s.io"
)
// NewController creates and initializes a new Controller
func NewController(ctx context.Context, podInformer coreinformers.PodInformer,
serviceInformer coreinformers.ServiceInformer,
nodeInformer coreinformers.NodeInformer,
endpointSliceInformer discoveryinformers.EndpointSliceInformer,
maxEndpointsPerSlice int32,
client clientset.Interface,
endpointUpdatesBatchPeriod time.Duration,
) *Controller {
broadcaster := record.NewBroadcaster(record.WithContext(ctx))
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-slice-controller"})
endpointslicemetrics.RegisterMetrics()
c := &Controller{
client: client,
// This is similar to the DefaultControllerRateLimiter, just with a
// significantly higher default backoff (1s vs 5ms). This controller
// processes events that can require significant EndpointSlice changes,
// such as an update to a Service or Deployment. A more significant
// rate limit back off here helps ensure that the Controller does not
// overwhelm the API Server.
queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewMaxOfRateLimiter(
workqueue.NewItemExponentialFailureRateLimiter(defaultSyncBackOff, maxSyncBackOff),
// 10 qps, 100 bucket size. This is only for retry speed and its
// only the overall factor (not per item).
&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
), "endpoint_slice"),
workerLoopPeriod: time.Second,
}
serviceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.onServiceUpdate,
UpdateFunc: func(old, cur interface{}) {
c.onServiceUpdate(cur)
},
DeleteFunc: c.onServiceDelete,
})
c.serviceLister = serviceInformer.Lister()
c.servicesSynced = serviceInformer.Informer().HasSynced
podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addPod,
UpdateFunc: c.updatePod,
DeleteFunc: c.deletePod,
})
c.podLister = podInformer.Lister()
c.podsSynced = podInformer.Informer().HasSynced
c.nodeLister = nodeInformer.Lister()
c.nodesSynced = nodeInformer.Informer().HasSynced
logger := klog.FromContext(ctx)
endpointSliceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.onEndpointSliceAdd,
UpdateFunc: func(oldObj, newObj interface{}) {
c.onEndpointSliceUpdate(logger, oldObj, newObj)
},
DeleteFunc: c.onEndpointSliceDelete,
})
c.endpointSliceLister = endpointSliceInformer.Lister()
c.endpointSlicesSynced = endpointSliceInformer.Informer().HasSynced
c.endpointSliceTracker = endpointsliceutil.NewEndpointSliceTracker()
c.maxEndpointsPerSlice = maxEndpointsPerSlice
c.triggerTimeTracker = endpointsliceutil.NewTriggerTimeTracker()
c.eventBroadcaster = broadcaster
c.eventRecorder = recorder
c.endpointUpdatesBatchPeriod = endpointUpdatesBatchPeriod
if utilfeature.DefaultFeatureGate.Enabled(features.TopologyAwareHints) {
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
c.addNode(logger, obj)
},
UpdateFunc: func(oldObj, newObj interface{}) {
c.updateNode(logger, oldObj, newObj)
},
DeleteFunc: func(obj interface{}) {
c.deleteNode(logger, obj)
},
})
c.topologyCache = topologycache.NewTopologyCache()
}
c.reconciler = endpointslicerec.NewReconciler(
c.client,
c.nodeLister,
c.maxEndpointsPerSlice,
c.endpointSliceTracker,
c.topologyCache,
utilfeature.DefaultFeatureGate.Enabled(features.ServiceTrafficDistribution),
c.eventRecorder,
controllerName,
)
return c
}
// Controller manages selector-based service endpoint slices
type Controller struct {
client clientset.Interface
eventBroadcaster record.EventBroadcaster
eventRecorder record.EventRecorder
// serviceLister is able to list/get services and is populated by the
// shared informer passed to NewController
serviceLister corelisters.ServiceLister
// servicesSynced returns true if the service shared informer has been synced at least once.
// Added as a member to the struct to allow injection for testing.
servicesSynced cache.InformerSynced
// podLister is able to list/get pods and is populated by the
// shared informer passed to NewController
podLister corelisters.PodLister
// podsSynced returns true if the pod shared informer has been synced at least once.
// Added as a member to the struct to allow injection for testing.
podsSynced cache.InformerSynced
// endpointSliceLister is able to list/get endpoint slices and is populated by the
// shared informer passed to NewController
endpointSliceLister discoverylisters.EndpointSliceLister
// endpointSlicesSynced returns true if the endpoint slice shared informer has been synced at least once.
// Added as a member to the struct to allow injection for testing.
endpointSlicesSynced cache.InformerSynced
// endpointSliceTracker tracks the list of EndpointSlices and associated
// resource versions expected for each Service. It can help determine if a
// cached EndpointSlice is out of date.
endpointSliceTracker *endpointsliceutil.EndpointSliceTracker
// nodeLister is able to list/get nodes and is populated by the
// shared informer passed to NewController
nodeLister corelisters.NodeLister
// nodesSynced returns true if the node shared informer has been synced at least once.
// Added as a member to the struct to allow injection for testing.
nodesSynced cache.InformerSynced
// reconciler is an util used to reconcile EndpointSlice changes.
reconciler *endpointslicerec.Reconciler
// triggerTimeTracker is an util used to compute and export the
// EndpointsLastChangeTriggerTime annotation.
triggerTimeTracker *endpointsliceutil.TriggerTimeTracker
// Services that need to be updated. A channel is inappropriate here,
// because it allows services with lots of pods to be serviced much
// more often than services with few pods; it also would cause a
// service that's inserted multiple times to be processed more than
// necessary.
queue workqueue.RateLimitingInterface
// maxEndpointsPerSlice references the maximum number of endpoints that
// should be added to an EndpointSlice
maxEndpointsPerSlice int32
// workerLoopPeriod is the time between worker runs. The workers
// process the queue of service and pod changes
workerLoopPeriod time.Duration
// endpointUpdatesBatchPeriod is an artificial delay added to all service syncs triggered by pod changes.
// This can be used to reduce overall number of all endpoint slice updates.
endpointUpdatesBatchPeriod time.Duration
// topologyCache tracks the distribution of Nodes and endpoints across zones
// to enable TopologyAwareHints.
topologyCache *topologycache.TopologyCache
}
// Run will not return until stopCh is closed.
func (c *Controller) Run(ctx context.Context, workers int) {
defer utilruntime.HandleCrash()
// Start events processing pipeline.
c.eventBroadcaster.StartLogging(klog.Infof)
c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.client.CoreV1().Events("")})
defer c.eventBroadcaster.Shutdown()
defer c.queue.ShutDown()
logger := klog.FromContext(ctx)
logger.Info("Starting endpoint slice controller")
defer logger.Info("Shutting down endpoint slice controller")
if !cache.WaitForNamedCacheSync("endpoint_slice", ctx.Done(), c.podsSynced, c.servicesSynced, c.endpointSlicesSynced, c.nodesSynced) {
return
}
logger.V(2).Info("Starting worker threads", "total", workers)
for i := 0; i < workers; i++ {
go wait.Until(func() { c.worker(logger) }, c.workerLoopPeriod, ctx.Done())
}
<-ctx.Done()
}
// worker runs a worker thread that just dequeues items, processes them, and
// marks them done. You may run as many of these in parallel as you wish; the
// workqueue guarantees that they will not end up processing the same service
// at the same time
func (c *Controller) worker(logger klog.Logger) {
for c.processNextWorkItem(logger) {
}
}
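// processNextWorkItem dequeues a single service key, syncs it and records the
// result; it returns false only when the queue is shutting down.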
func (c *Controller) processNextWorkItem(logger klog.Logger) bool {
cKey, quit := c.queue.Get()
if quit {
return false
}
defer c.queue.Done(cKey)
err := c.syncService(logger, cKey.(string))
c.handleErr(logger, err, cKey)
return true
}
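// handleErr requeues the service key with rate limiting until maxRetries is
// exhausted, at which point the key is dropped from the queue.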
func (c *Controller) handleErr(logger klog.Logger, err error, key interface{}) {
trackSync(err)
if err == nil {
c.queue.Forget(key)
return
}
if c.queue.NumRequeues(key) < maxRetries {
logger.Info("Error syncing endpoint slices for service, retrying", "key", key, "err", err)
c.queue.AddRateLimited(key)
return
}
logger.Info("Retry budget exceeded, dropping service out of the queue", "key", key, "err", err)
c.queue.Forget(key)
utilruntime.HandleError(err)
}
func (c *Controller) syncService(logger klog.Logger, key string) error {
startTime := time.Now()
defer func() {
logger.V(4).Info("Finished syncing service endpoint slices", "key", key, "elapsedTime", time.Since(startTime))
}()
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
service, err := c.serviceLister.Services(namespace).Get(name)
if err != nil {
if !apierrors.IsNotFound(err) {
return err
}
c.triggerTimeTracker.DeleteService(namespace, name)
c.reconciler.DeleteService(namespace, name)
c.endpointSliceTracker.DeleteService(namespace, name)
// The service has been deleted, return nil so that it won't be retried.
return nil
}
if service.Spec.Type == v1.ServiceTypeExternalName {
// services with Type ExternalName receive no endpoints from this controller;
// Ref: https://issues.k8s.io/105986
return nil
}
if service.Spec.Selector == nil {
// services without a selector receive no endpoint slices from this controller;
// these services will receive endpoint slices that are created out-of-band via the REST API.
return nil
}
logger.V(5).Info("About to update endpoint slices for service", "key", key)
podLabelSelector := labels.Set(service.Spec.Selector).AsSelectorPreValidated()
pods, err := c.podLister.Pods(service.Namespace).List(podLabelSelector)
if err != nil {
// Since we're getting stuff from a local cache, it is basically
// impossible to get this error.
c.eventRecorder.Eventf(service, v1.EventTypeWarning, "FailedToListPods",
"Error listing Pods for Service %s/%s: %v", service.Namespace, service.Name, err)
return err
}
esLabelSelector := labels.Set(map[string]string{
discovery.LabelServiceName: service.Name,
discovery.LabelManagedBy: c.reconciler.GetControllerName(),
}).AsSelectorPreValidated()
endpointSlices, err := c.endpointSliceLister.EndpointSlices(service.Namespace).List(esLabelSelector)
if err != nil {
// Since we're getting stuff from a local cache, it is basically
// impossible to get this error.
c.eventRecorder.Eventf(service, v1.EventTypeWarning, "FailedToListEndpointSlices",
"Error listing Endpoint Slices for Service %s/%s: %v", service.Namespace, service.Name, err)
return err
}
// Drop EndpointSlices that have been marked for deletion to prevent the controller from getting stuck.
endpointSlices = dropEndpointSlicesPendingDeletion(endpointSlices)
if c.endpointSliceTracker.StaleSlices(service, endpointSlices) {
return endpointslicepkg.NewStaleInformerCache("EndpointSlice informer cache is out of date")
}
// We call ComputeEndpointLastChangeTriggerTime here to make sure that the
// state of the trigger time tracker gets updated even if the sync turns out
// to be no-op and we don't update the EndpointSlice objects.
lastChangeTriggerTime := c.triggerTimeTracker.
ComputeEndpointLastChangeTriggerTime(namespace, service, pods)
err = c.reconciler.Reconcile(logger, service, pods, endpointSlices, lastChangeTriggerTime)
if err != nil {
c.eventRecorder.Eventf(service, v1.EventTypeWarning, "FailedToUpdateEndpointSlices",
"Error updating Endpoint Slices for Service %s/%s: %v", service.Namespace, service.Name, err)
return err
}
return nil
}
// onServiceUpdate updates the Service Selector in the cache and queues the Service for processing.
func (c *Controller) onServiceUpdate(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
return
}
c.queue.Add(key)
}
// onServiceDelete removes the Service Selector from the cache and queues the Service for processing.
func (c *Controller) onServiceDelete(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
return
}
c.queue.Add(key)
}
// onEndpointSliceAdd queues a sync for the relevant Service if the
// EndpointSlice resource version does not match the expected version in the
// endpointSliceTracker.
func (c *Controller) onEndpointSliceAdd(obj interface{}) {
endpointSlice := obj.(*discovery.EndpointSlice)
if endpointSlice == nil {
utilruntime.HandleError(fmt.Errorf("Invalid EndpointSlice provided to onEndpointSliceAdd()"))
return
}
if c.reconciler.ManagedByController(endpointSlice) && c.endpointSliceTracker.ShouldSync(endpointSlice) {
c.queueServiceForEndpointSlice(endpointSlice)
}
}
// onEndpointSliceUpdate queues a sync for the relevant Service if
// the EndpointSlice resource version does not match the expected version in the
// endpointSliceTracker or the managed-by value of the EndpointSlice has changed
// from or to this controller.
func (c *Controller) onEndpointSliceUpdate(logger klog.Logger, prevObj, obj interface{}) {
prevEndpointSlice := prevObj.(*discovery.EndpointSlice)
endpointSlice := obj.(*discovery.EndpointSlice)
if endpointSlice == nil || prevEndpointSlice == nil {
utilruntime.HandleError(fmt.Errorf("Invalid EndpointSlice provided to onEndpointSliceUpdate()"))
return
}
// EndpointSlice generation does not change when labels change. Although the
// controller will never change LabelServiceName, users might. This check
// ensures that we handle changes to this label.
svcName := endpointSlice.Labels[discovery.LabelServiceName]
prevSvcName := prevEndpointSlice.Labels[discovery.LabelServiceName]
if svcName != prevSvcName {
logger.Info("label changed", "label", discovery.LabelServiceName, "oldService", prevSvcName, "newService", svcName, "endpointslice", klog.KObj(endpointSlice))
c.queueServiceForEndpointSlice(endpointSlice)
c.queueServiceForEndpointSlice(prevEndpointSlice)
return
}
if c.reconciler.ManagedByChanged(prevEndpointSlice, endpointSlice) || (c.reconciler.ManagedByController(endpointSlice) && c.endpointSliceTracker.ShouldSync(endpointSlice)) {
c.queueServiceForEndpointSlice(endpointSlice)
}
}
// onEndpointSliceDelete queues a sync for the relevant Service if the
// EndpointSlice resource version does not match the expected version in the
// endpointSliceTracker.
func (c *Controller) onEndpointSliceDelete(obj interface{}) {
endpointSlice := getEndpointSliceFromDeleteAction(obj)
if endpointSlice != nil && c.reconciler.ManagedByController(endpointSlice) && c.endpointSliceTracker.Has(endpointSlice) {
// This returns false if we didn't expect the EndpointSlice to be
// deleted. If that is the case, we queue the Service for another sync.
if !c.endpointSliceTracker.HandleDeletion(endpointSlice) {
c.queueServiceForEndpointSlice(endpointSlice)
}
}
}
// queueServiceForEndpointSlice attempts to queue the corresponding Service for
// the provided EndpointSlice.
func (c *Controller) queueServiceForEndpointSlice(endpointSlice *discovery.EndpointSlice) {
key, err := endpointslicerec.ServiceControllerKey(endpointSlice)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for EndpointSlice %+v: %v", endpointSlice, err))
return
}
// queue after the max of endpointSliceChangeMinSyncDelay and
// endpointUpdatesBatchPeriod.
delay := endpointSliceChangeMinSyncDelay
if c.endpointUpdatesBatchPeriod > delay {
delay = c.endpointUpdatesBatchPeriod
}
c.queue.AddAfter(key, delay)
}
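// addPod queues a sync for every Service the Pod is a member of, delayed by
// the configured batch period.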
func (c *Controller) addPod(obj interface{}) {
pod := obj.(*v1.Pod)
services, err := endpointsliceutil.GetPodServiceMemberships(c.serviceLister, pod)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to get pod %s/%s's service memberships: %v", pod.Namespace, pod.Name, err))
return
}
for key := range services {
c.queue.AddAfter(key, c.endpointUpdatesBatchPeriod)
}
}
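// updatePod queues a sync for every Service affected by the Pod change,
// delayed by the configured batch period.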
func (c *Controller) updatePod(old, cur interface{}) {
services := endpointsliceutil.GetServicesToUpdateOnPodChange(c.serviceLister, old, cur)
for key := range services {
c.queue.AddAfter(key, c.endpointUpdatesBatchPeriod)
}
}
// When a pod is deleted, enqueue the services the pod used to be a member of
// obj could be an *v1.Pod, or a DeletionFinalStateUnknown marker item.
func (c *Controller) deletePod(obj interface{}) {
pod := endpointsliceutil.GetPodFromDeleteAction(obj)
if pod != nil {
c.addPod(pod)
}
}
func (c *Controller) addNode(logger klog.Logger, obj interface{}) {
c.checkNodeTopologyDistribution(logger)
}
func (c *Controller) updateNode(logger klog.Logger, old, cur interface{}) {
oldNode := old.(*v1.Node)
curNode := cur.(*v1.Node)
// LabelTopologyZone may be added by cloud provider asynchronously after the Node is created.
// The topology cache should be updated in this case.
if isNodeReady(oldNode) != isNodeReady(curNode) ||
oldNode.Labels[v1.LabelTopologyZone] != curNode.Labels[v1.LabelTopologyZone] {
c.checkNodeTopologyDistribution(logger)
}
}
func (c *Controller) deleteNode(logger klog.Logger, obj interface{}) {
c.checkNodeTopologyDistribution(logger)
}
// checkNodeTopologyDistribution updates Nodes in the topology cache and then
// queues any Services that are past the threshold.
func (c *Controller) checkNodeTopologyDistribution(logger klog.Logger) {
if c.topologyCache == nil {
return
}
nodes, err := c.nodeLister.List(labels.Everything())
if err != nil {
logger.Error(err, "Error listing Nodes")
return
}
c.topologyCache.SetNodes(logger, nodes)
serviceKeys := c.topologyCache.GetOverloadedServices()
for _, serviceKey := range serviceKeys {
logger.V(2).Info("Queuing Service after Node change due to overloading", "key", serviceKey)
c.queue.Add(serviceKey)
}
}
// trackSync increments the EndpointSliceSyncs metric with the result of a sync.
func trackSync(err error) {
metricLabel := "success"
if err != nil {
if endpointslicepkg.IsStaleInformerCacheErr(err) {
metricLabel = "stale"
} else {
metricLabel = "error"
}
}
endpointslicemetrics.EndpointSliceSyncs.WithLabelValues(metricLabel).Inc()
}
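// dropEndpointSlicesPendingDeletion filters out EndpointSlices that already
// have a DeletionTimestamp set, compacting the input slice in place.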
func dropEndpointSlicesPendingDeletion(endpointSlices []*discovery.EndpointSlice) []*discovery.EndpointSlice {
n := 0
for _, endpointSlice := range endpointSlices {
if endpointSlice.DeletionTimestamp == nil {
endpointSlices[n] = endpointSlice
n++
}
}
return endpointSlices[:n]
}
// getEndpointSliceFromDeleteAction parses an EndpointSlice from a delete action.
func getEndpointSliceFromDeleteAction(obj interface{}) *discovery.EndpointSlice {
if endpointSlice, ok := obj.(*discovery.EndpointSlice); ok {
// The deleted object was still a typed EndpointSlice; return it directly.
return endpointSlice
}
// If we reached here it means the EndpointSlice was deleted but its final state is unrecorded.
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return nil
}
endpointSlice, ok := tombstone.Obj.(*discovery.EndpointSlice)
if !ok {
utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a EndpointSlice: %#v", obj))
return nil
}
return endpointSlice
}
// isNodeReady returns true if a node is ready; false otherwise.
func isNodeReady(node *v1.Node) bool {
for _, c := range node.Status.Conditions {
if c.Type == v1.NodeReady {
return c.Status == v1.ConditionTrue
}
}
return false
}
| pkg/controller/endpointslice/endpointslice_controller.go | 1 | https://github.com/kubernetes/kubernetes/commit/ec6fd2befac30ebb7dfb55b94d8c9874489a3902 | [
0.9986333250999451,
0.03554198890924454,
0.00016057062020990998,
0.0002572101657278836,
0.1720409244298935
] |
{
"id": 3,
"code_window": [
"\n",
"}\n",
"\n",
"func NewReconciler(client clientset.Interface, nodeLister corelisters.NodeLister, maxEndpointsPerSlice int32, endpointSliceTracker *endpointsliceutil.EndpointSliceTracker, topologyCache *topologycache.TopologyCache, trafficDistributionEnabled bool, eventRecorder record.EventRecorder, controllerName string) *Reconciler {\n",
"\treturn &Reconciler{\n",
"\t\tclient: client,\n",
"\t\tnodeLister: nodeLister,\n",
"\t\tmaxEndpointsPerSlice: maxEndpointsPerSlice,\n",
"\t\tendpointSliceTracker: endpointSliceTracker,\n",
"\t\tmetricsCache: metrics.NewCache(maxEndpointsPerSlice),\n",
"\t\ttopologyCache: topologyCache,\n",
"\t\ttrafficDistributionEnabled: trafficDistributionEnabled,\n",
"\t\teventRecorder: eventRecorder,\n",
"\t\tcontrollerName: controllerName,\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"func NewReconciler(client clientset.Interface, nodeLister corelisters.NodeLister, maxEndpointsPerSlice int32, endpointSliceTracker *endpointsliceutil.EndpointSliceTracker, topologyCache *topologycache.TopologyCache, eventRecorder record.EventRecorder, controllerName string, options ...ReconcilerOption) *Reconciler {\n",
"\tr := &Reconciler{\n",
"\t\tclient: client,\n",
"\t\tnodeLister: nodeLister,\n",
"\t\tmaxEndpointsPerSlice: maxEndpointsPerSlice,\n",
"\t\tendpointSliceTracker: endpointSliceTracker,\n",
"\t\tmetricsCache: metrics.NewCache(maxEndpointsPerSlice),\n",
"\t\ttopologyCache: topologyCache,\n",
"\t\teventRecorder: eventRecorder,\n",
"\t\tcontrollerName: controllerName,\n",
"\t}\n",
"\tfor _, option := range options {\n",
"\t\toption(r)\n"
],
"file_path": "staging/src/k8s.io/endpointslice/reconciler.go",
"type": "replace",
"edit_start_line_idx": 329
} | apiVersion: v1
kind: Pod
metadata:
name: etcd-version-monitor
namespace: kube-system
spec:
hostNetwork: true
containers:
- name: etcd-version-monitor
image: registry.k8s.io/etcd-version-monitor:0.1.3
command:
- /etcd-version-monitor
| cluster/images/etcd-version-monitor/etcd-version-monitor.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/ec6fd2befac30ebb7dfb55b94d8c9874489a3902 | [
0.00016876471636351198,
0.00016745267203077674,
0.0001661406276980415,
0.00016745267203077674,
0.0000013120443327352405
] |
{
"id": 3,
"code_window": [
"\n",
"}\n",
"\n",
"func NewReconciler(client clientset.Interface, nodeLister corelisters.NodeLister, maxEndpointsPerSlice int32, endpointSliceTracker *endpointsliceutil.EndpointSliceTracker, topologyCache *topologycache.TopologyCache, trafficDistributionEnabled bool, eventRecorder record.EventRecorder, controllerName string) *Reconciler {\n",
"\treturn &Reconciler{\n",
"\t\tclient: client,\n",
"\t\tnodeLister: nodeLister,\n",
"\t\tmaxEndpointsPerSlice: maxEndpointsPerSlice,\n",
"\t\tendpointSliceTracker: endpointSliceTracker,\n",
"\t\tmetricsCache: metrics.NewCache(maxEndpointsPerSlice),\n",
"\t\ttopologyCache: topologyCache,\n",
"\t\ttrafficDistributionEnabled: trafficDistributionEnabled,\n",
"\t\teventRecorder: eventRecorder,\n",
"\t\tcontrollerName: controllerName,\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"func NewReconciler(client clientset.Interface, nodeLister corelisters.NodeLister, maxEndpointsPerSlice int32, endpointSliceTracker *endpointsliceutil.EndpointSliceTracker, topologyCache *topologycache.TopologyCache, eventRecorder record.EventRecorder, controllerName string, options ...ReconcilerOption) *Reconciler {\n",
"\tr := &Reconciler{\n",
"\t\tclient: client,\n",
"\t\tnodeLister: nodeLister,\n",
"\t\tmaxEndpointsPerSlice: maxEndpointsPerSlice,\n",
"\t\tendpointSliceTracker: endpointSliceTracker,\n",
"\t\tmetricsCache: metrics.NewCache(maxEndpointsPerSlice),\n",
"\t\ttopologyCache: topologyCache,\n",
"\t\teventRecorder: eventRecorder,\n",
"\t\tcontrollerName: controllerName,\n",
"\t}\n",
"\tfor _, option := range options {\n",
"\t\toption(r)\n"
],
"file_path": "staging/src/k8s.io/endpointslice/reconciler.go",
"type": "replace",
"edit_start_line_idx": 329
} | apiVersion: v1
kind: ReplicationController
metadata:
name: update-demo-kitten
spec:
selector:
name: update-demo
version: kitten
template:
metadata:
labels:
name: update-demo
version: kitten
spec:
containers:
- image: registry.k8s.io/update-demo:kitten
name: update-demo
ports:
- containerPort: 80
protocol: TCP
| staging/src/k8s.io/cli-runtime/artifacts/kitten-rc.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/ec6fd2befac30ebb7dfb55b94d8c9874489a3902 | [
0.00018280863878317177,
0.00017416437913198024,
0.00016811488603707403,
0.00017156958347186446,
0.000006273025974223856
] |
{
"id": 3,
"code_window": [
"\n",
"}\n",
"\n",
"func NewReconciler(client clientset.Interface, nodeLister corelisters.NodeLister, maxEndpointsPerSlice int32, endpointSliceTracker *endpointsliceutil.EndpointSliceTracker, topologyCache *topologycache.TopologyCache, trafficDistributionEnabled bool, eventRecorder record.EventRecorder, controllerName string) *Reconciler {\n",
"\treturn &Reconciler{\n",
"\t\tclient: client,\n",
"\t\tnodeLister: nodeLister,\n",
"\t\tmaxEndpointsPerSlice: maxEndpointsPerSlice,\n",
"\t\tendpointSliceTracker: endpointSliceTracker,\n",
"\t\tmetricsCache: metrics.NewCache(maxEndpointsPerSlice),\n",
"\t\ttopologyCache: topologyCache,\n",
"\t\ttrafficDistributionEnabled: trafficDistributionEnabled,\n",
"\t\teventRecorder: eventRecorder,\n",
"\t\tcontrollerName: controllerName,\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"func NewReconciler(client clientset.Interface, nodeLister corelisters.NodeLister, maxEndpointsPerSlice int32, endpointSliceTracker *endpointsliceutil.EndpointSliceTracker, topologyCache *topologycache.TopologyCache, eventRecorder record.EventRecorder, controllerName string, options ...ReconcilerOption) *Reconciler {\n",
"\tr := &Reconciler{\n",
"\t\tclient: client,\n",
"\t\tnodeLister: nodeLister,\n",
"\t\tmaxEndpointsPerSlice: maxEndpointsPerSlice,\n",
"\t\tendpointSliceTracker: endpointSliceTracker,\n",
"\t\tmetricsCache: metrics.NewCache(maxEndpointsPerSlice),\n",
"\t\ttopologyCache: topologyCache,\n",
"\t\teventRecorder: eventRecorder,\n",
"\t\tcontrollerName: controllerName,\n",
"\t}\n",
"\tfor _, option := range options {\n",
"\t\toption(r)\n"
],
"file_path": "staging/src/k8s.io/endpointslice/reconciler.go",
"type": "replace",
"edit_start_line_idx": 329
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"reflect"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
)
var (
productionLabel = map[string]string{"type": "production"}
testLabel = map[string]string{"type": "testing"}
productionLabelSelector = labels.Set{"type": "production"}.AsSelector()
controllerUID = "123"
)
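// newPod returns a minimal Pod with the given name and labels; when owner is
// non-nil it is recorded as the Pod's controller owner reference.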
func newPod(podName string, label map[string]string, owner metav1.Object) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: label,
Namespace: metav1.NamespaceDefault,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "foo/bar",
},
},
},
}
if owner != nil {
pod.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(owner, apps.SchemeGroupVersion.WithKind("Fake"))}
}
return pod
}
func TestClaimPods(t *testing.T) {
controllerKind := schema.GroupVersionKind{}
type test struct {
name string
manager *PodControllerRefManager
pods []*v1.Pod
claimed []*v1.Pod
patches int
}
var tests = []test{
func() test {
controller := v1.ReplicationController{}
controller.Namespace = metav1.NamespaceDefault
return test{
name: "Claim pods with correct label",
manager: NewPodControllerRefManager(&FakePodControl{},
&controller,
productionLabelSelector,
controllerKind,
func(ctx context.Context) error { return nil }),
pods: []*v1.Pod{newPod("pod1", productionLabel, nil), newPod("pod2", testLabel, nil)},
claimed: []*v1.Pod{newPod("pod1", productionLabel, nil)},
patches: 1,
}
}(),
func() test {
controller := v1.ReplicationController{}
controller.Namespace = metav1.NamespaceDefault
controller.UID = types.UID(controllerUID)
now := metav1.Now()
controller.DeletionTimestamp = &now
return test{
name: "Controller marked for deletion can not claim pods",
manager: NewPodControllerRefManager(&FakePodControl{},
&controller,
productionLabelSelector,
controllerKind,
func(ctx context.Context) error { return nil }),
pods: []*v1.Pod{newPod("pod1", productionLabel, nil), newPod("pod2", productionLabel, nil)},
claimed: nil,
}
}(),
func() test {
controller := v1.ReplicationController{}
controller.Namespace = metav1.NamespaceDefault
controller.UID = types.UID(controllerUID)
now := metav1.Now()
controller.DeletionTimestamp = &now
return test{
name: "Controller marked for deletion can not claim new pods",
manager: NewPodControllerRefManager(&FakePodControl{},
&controller,
productionLabelSelector,
controllerKind,
func(ctx context.Context) error { return nil }),
pods: []*v1.Pod{newPod("pod1", productionLabel, &controller), newPod("pod2", productionLabel, nil)},
claimed: []*v1.Pod{newPod("pod1", productionLabel, &controller)},
}
}(),
func() test {
controller := v1.ReplicationController{}
controller2 := v1.ReplicationController{}
controller.UID = types.UID(controllerUID)
controller.Namespace = metav1.NamespaceDefault
controller2.UID = types.UID("AAAAA")
controller2.Namespace = metav1.NamespaceDefault
return test{
name: "Controller can not claim pods owned by another controller",
manager: NewPodControllerRefManager(&FakePodControl{},
&controller,
productionLabelSelector,
controllerKind,
func(ctx context.Context) error { return nil }),
pods: []*v1.Pod{newPod("pod1", productionLabel, &controller), newPod("pod2", productionLabel, &controller2)},
claimed: []*v1.Pod{newPod("pod1", productionLabel, &controller)},
}
}(),
func() test {
controller := v1.ReplicationController{}
controller.Namespace = metav1.NamespaceDefault
controller.UID = types.UID(controllerUID)
return test{
name: "Controller releases claimed pods when selector doesn't match",
manager: NewPodControllerRefManager(&FakePodControl{},
&controller,
productionLabelSelector,
controllerKind,
func(ctx context.Context) error { return nil }),
pods: []*v1.Pod{newPod("pod1", productionLabel, &controller), newPod("pod2", testLabel, &controller)},
claimed: []*v1.Pod{newPod("pod1", productionLabel, &controller)},
patches: 1,
}
}(),
func() test {
controller := v1.ReplicationController{}
controller.Namespace = metav1.NamespaceDefault
controller.UID = types.UID(controllerUID)
podToDelete1 := newPod("pod1", productionLabel, &controller)
podToDelete2 := newPod("pod2", productionLabel, nil)
now := metav1.Now()
podToDelete1.DeletionTimestamp = &now
podToDelete2.DeletionTimestamp = &now
return test{
name: "Controller does not claim orphaned pods marked for deletion",
manager: NewPodControllerRefManager(&FakePodControl{},
&controller,
productionLabelSelector,
controllerKind,
func(ctx context.Context) error { return nil }),
pods: []*v1.Pod{podToDelete1, podToDelete2},
claimed: []*v1.Pod{podToDelete1},
}
}(),
func() test {
controller := v1.ReplicationController{}
controller.Namespace = metav1.NamespaceDefault
controller.UID = types.UID(controllerUID)
return test{
				name:    "Controller claims or releases pods according to selector with finalizers",
manager: NewPodControllerRefManager(&FakePodControl{},
&controller,
productionLabelSelector,
controllerKind,
func(ctx context.Context) error { return nil },
"foo-finalizer", "bar-finalizer"),
pods: []*v1.Pod{newPod("pod1", productionLabel, &controller), newPod("pod2", testLabel, &controller), newPod("pod3", productionLabel, nil)},
claimed: []*v1.Pod{newPod("pod1", productionLabel, &controller), newPod("pod3", productionLabel, nil)},
patches: 2,
}
}(),
func() test {
controller := v1.ReplicationController{}
controller.Namespace = metav1.NamespaceDefault
controller.UID = types.UID(controllerUID)
pod1 := newPod("pod1", productionLabel, nil)
pod2 := newPod("pod2", productionLabel, nil)
pod2.Namespace = "fakens"
return test{
name: "Controller does not claim pods of different namespace",
manager: NewPodControllerRefManager(&FakePodControl{},
&controller,
productionLabelSelector,
controllerKind,
func(ctx context.Context) error { return nil }),
pods: []*v1.Pod{pod1, pod2},
claimed: []*v1.Pod{pod1},
patches: 1,
}
}(),
func() test {
// act as a cluster-scoped controller
controller := v1.ReplicationController{}
controller.Namespace = ""
controller.UID = types.UID(controllerUID)
pod1 := newPod("pod1", productionLabel, nil)
pod2 := newPod("pod2", productionLabel, nil)
pod2.Namespace = "fakens"
return test{
				name:    "Cluster-scoped controller claims pods in all namespaces",
manager: NewPodControllerRefManager(&FakePodControl{},
&controller,
productionLabelSelector,
controllerKind,
func(ctx context.Context) error { return nil }),
pods: []*v1.Pod{pod1, pod2},
claimed: []*v1.Pod{pod1, pod2},
patches: 2,
}
}(),
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
claimed, err := test.manager.ClaimPods(context.TODO(), test.pods)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if diff := cmp.Diff(test.claimed, claimed); diff != "" {
t.Errorf("Claimed wrong pods (-want,+got):\n%s", diff)
}
fakePodControl, ok := test.manager.podControl.(*FakePodControl)
if !ok {
return
}
if p := len(fakePodControl.Patches); p != test.patches {
t.Errorf("ClaimPods issues %d patches, want %d", p, test.patches)
}
for _, p := range fakePodControl.Patches {
patch := string(p)
if uid := string(test.manager.Controller.GetUID()); !strings.Contains(patch, uid) {
t.Errorf("Patch doesn't contain controller UID %s", uid)
}
for _, f := range test.manager.finalizers {
if !strings.Contains(patch, f) {
t.Errorf("Patch doesn't contain finalizer %s, %q", patch, f)
}
}
}
})
}
}
func TestGeneratePatchBytesForDelete(t *testing.T) {
tests := []struct {
name string
ownerUID []types.UID
dependentUID types.UID
finalizers []string
want []byte
}{
{
name: "check the structure of patch bytes",
ownerUID: []types.UID{"ss1"},
dependentUID: "ss2",
finalizers: []string{},
want: []byte(`{"metadata":{"uid":"ss2","ownerReferences":[{"$patch":"delete","uid":"ss1"}]}}`),
},
{
name: "check if parent uid is escaped",
ownerUID: []types.UID{`ss1"hello`},
dependentUID: "ss2",
finalizers: []string{},
want: []byte(`{"metadata":{"uid":"ss2","ownerReferences":[{"$patch":"delete","uid":"ss1\"hello"}]}}`),
},
{
			name:         "check if revision uid is escaped",
ownerUID: []types.UID{`ss1`},
dependentUID: `ss2"hello`,
finalizers: []string{},
want: []byte(`{"metadata":{"uid":"ss2\"hello","ownerReferences":[{"$patch":"delete","uid":"ss1"}]}}`),
},
{
name: "check the structure of patch bytes with multiple owners",
ownerUID: []types.UID{"ss1", "ss2"},
dependentUID: "ss2",
finalizers: []string{},
want: []byte(`{"metadata":{"uid":"ss2","ownerReferences":[{"$patch":"delete","uid":"ss1"},{"$patch":"delete","uid":"ss2"}]}}`),
},
{
name: "check the structure of patch bytes with a finalizer and multiple owners",
ownerUID: []types.UID{"ss1", "ss2"},
dependentUID: "ss2",
finalizers: []string{"f1"},
want: []byte(`{"metadata":{"uid":"ss2","ownerReferences":[{"$patch":"delete","uid":"ss1"},{"$patch":"delete","uid":"ss2"}],"$deleteFromPrimitiveList/finalizers":["f1"]}}`),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, _ := GenerateDeleteOwnerRefStrategicMergeBytes(tt.dependentUID, tt.ownerUID, tt.finalizers...)
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("generatePatchBytesForDelete() got = %s, want %s", got, tt.want)
}
})
}
}
| pkg/controller/controller_ref_manager_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/ec6fd2befac30ebb7dfb55b94d8c9874489a3902 | [
0.00018002714205067605,
0.00016946918913163245,
0.00016539424541406333,
0.00016860768664628267,
0.0000031401325486513088
] |
{
"id": 5,
"code_window": [
"\t\tclient,\n",
"\t\tcorelisters.NewNodeLister(indexer),\n",
"\t\tmaxEndpointsPerSlice,\n",
"\t\tendpointsliceutil.NewEndpointSliceTracker(),\n",
"\t\tnil,\n",
"\t\tfalse,\n",
"\t\teventRecorder,\n",
"\t\tcontrollerName,\n",
"\t)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "staging/src/k8s.io/endpointslice/reconciler_test.go",
"type": "replace",
"edit_start_line_idx": 1000
} | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package endpointslice
import (
"context"
"fmt"
"time"
"golang.org/x/time/rate"
v1 "k8s.io/api/core/v1"
discovery "k8s.io/api/discovery/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
coreinformers "k8s.io/client-go/informers/core/v1"
discoveryinformers "k8s.io/client-go/informers/discovery/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
discoverylisters "k8s.io/client-go/listers/discovery/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
endpointslicerec "k8s.io/endpointslice"
endpointslicemetrics "k8s.io/endpointslice/metrics"
"k8s.io/endpointslice/topologycache"
endpointsliceutil "k8s.io/endpointslice/util"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/controller"
endpointslicepkg "k8s.io/kubernetes/pkg/controller/util/endpointslice"
"k8s.io/kubernetes/pkg/features"
)
const (
// maxRetries is the number of times a service will be retried before it is
// dropped out of the queue. Any sync error, such as a failure to create or
// update an EndpointSlice could trigger a retry. With the current
// rate-limiter in use (1s*2^(numRetries-1)) the following numbers represent
// the sequence of delays between successive queuings of a service.
//
// 1s, 2s, 4s, 8s, 16s, 32s, 64s, 128s, 256s, 512s, 1000s (max)
maxRetries = 15
// endpointSliceChangeMinSyncDelay indicates the minimum delay before
// queuing a syncService call after an EndpointSlice changes. If
// endpointUpdatesBatchPeriod is greater than this value, it will be used
// instead. This helps batch processing of changes to multiple
// EndpointSlices.
endpointSliceChangeMinSyncDelay = 1 * time.Second
// defaultSyncBackOff is the default backoff period for syncService calls.
defaultSyncBackOff = 1 * time.Second
// maxSyncBackOff is the max backoff period for syncService calls.
maxSyncBackOff = 1000 * time.Second
	// controllerName is a unique value used with LabelManagedBy to indicate
	// the component managing an EndpointSlice.
controllerName = "endpointslice-controller.k8s.io"
)
// NewController creates and initializes a new Controller
func NewController(ctx context.Context, podInformer coreinformers.PodInformer,
serviceInformer coreinformers.ServiceInformer,
nodeInformer coreinformers.NodeInformer,
endpointSliceInformer discoveryinformers.EndpointSliceInformer,
maxEndpointsPerSlice int32,
client clientset.Interface,
endpointUpdatesBatchPeriod time.Duration,
) *Controller {
broadcaster := record.NewBroadcaster(record.WithContext(ctx))
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-slice-controller"})
endpointslicemetrics.RegisterMetrics()
c := &Controller{
client: client,
// This is similar to the DefaultControllerRateLimiter, just with a
// significantly higher default backoff (1s vs 5ms). This controller
// processes events that can require significant EndpointSlice changes,
// such as an update to a Service or Deployment. A more significant
// rate limit back off here helps ensure that the Controller does not
// overwhelm the API Server.
queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewMaxOfRateLimiter(
workqueue.NewItemExponentialFailureRateLimiter(defaultSyncBackOff, maxSyncBackOff),
			// 10 qps, 100 bucket size. This is only for retry speed and it's
			// only the overall factor (not per item).
&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
), "endpoint_slice"),
workerLoopPeriod: time.Second,
}
serviceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.onServiceUpdate,
UpdateFunc: func(old, cur interface{}) {
c.onServiceUpdate(cur)
},
DeleteFunc: c.onServiceDelete,
})
c.serviceLister = serviceInformer.Lister()
c.servicesSynced = serviceInformer.Informer().HasSynced
podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addPod,
UpdateFunc: c.updatePod,
DeleteFunc: c.deletePod,
})
c.podLister = podInformer.Lister()
c.podsSynced = podInformer.Informer().HasSynced
c.nodeLister = nodeInformer.Lister()
c.nodesSynced = nodeInformer.Informer().HasSynced
logger := klog.FromContext(ctx)
endpointSliceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.onEndpointSliceAdd,
UpdateFunc: func(oldObj, newObj interface{}) {
c.onEndpointSliceUpdate(logger, oldObj, newObj)
},
DeleteFunc: c.onEndpointSliceDelete,
})
c.endpointSliceLister = endpointSliceInformer.Lister()
c.endpointSlicesSynced = endpointSliceInformer.Informer().HasSynced
c.endpointSliceTracker = endpointsliceutil.NewEndpointSliceTracker()
c.maxEndpointsPerSlice = maxEndpointsPerSlice
c.triggerTimeTracker = endpointsliceutil.NewTriggerTimeTracker()
c.eventBroadcaster = broadcaster
c.eventRecorder = recorder
c.endpointUpdatesBatchPeriod = endpointUpdatesBatchPeriod
if utilfeature.DefaultFeatureGate.Enabled(features.TopologyAwareHints) {
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
c.addNode(logger, obj)
},
UpdateFunc: func(oldObj, newObj interface{}) {
c.updateNode(logger, oldObj, newObj)
},
DeleteFunc: func(obj interface{}) {
c.deleteNode(logger, obj)
},
})
c.topologyCache = topologycache.NewTopologyCache()
}
c.reconciler = endpointslicerec.NewReconciler(
c.client,
c.nodeLister,
c.maxEndpointsPerSlice,
c.endpointSliceTracker,
c.topologyCache,
utilfeature.DefaultFeatureGate.Enabled(features.ServiceTrafficDistribution),
c.eventRecorder,
controllerName,
)
return c
}
// Controller manages selector-based service endpoint slices
type Controller struct {
client clientset.Interface
eventBroadcaster record.EventBroadcaster
eventRecorder record.EventRecorder
// serviceLister is able to list/get services and is populated by the
// shared informer passed to NewController
serviceLister corelisters.ServiceLister
// servicesSynced returns true if the service shared informer has been synced at least once.
// Added as a member to the struct to allow injection for testing.
servicesSynced cache.InformerSynced
// podLister is able to list/get pods and is populated by the
// shared informer passed to NewController
podLister corelisters.PodLister
// podsSynced returns true if the pod shared informer has been synced at least once.
// Added as a member to the struct to allow injection for testing.
podsSynced cache.InformerSynced
// endpointSliceLister is able to list/get endpoint slices and is populated by the
// shared informer passed to NewController
endpointSliceLister discoverylisters.EndpointSliceLister
// endpointSlicesSynced returns true if the endpoint slice shared informer has been synced at least once.
// Added as a member to the struct to allow injection for testing.
endpointSlicesSynced cache.InformerSynced
// endpointSliceTracker tracks the list of EndpointSlices and associated
// resource versions expected for each Service. It can help determine if a
// cached EndpointSlice is out of date.
endpointSliceTracker *endpointsliceutil.EndpointSliceTracker
// nodeLister is able to list/get nodes and is populated by the
// shared informer passed to NewController
nodeLister corelisters.NodeLister
// nodesSynced returns true if the node shared informer has been synced at least once.
// Added as a member to the struct to allow injection for testing.
nodesSynced cache.InformerSynced
// reconciler is an util used to reconcile EndpointSlice changes.
reconciler *endpointslicerec.Reconciler
// triggerTimeTracker is an util used to compute and export the
// EndpointsLastChangeTriggerTime annotation.
triggerTimeTracker *endpointsliceutil.TriggerTimeTracker
// Services that need to be updated. A channel is inappropriate here,
// because it allows services with lots of pods to be serviced much
// more often than services with few pods; it also would cause a
// service that's inserted multiple times to be processed more than
// necessary.
queue workqueue.RateLimitingInterface
// maxEndpointsPerSlice references the maximum number of endpoints that
// should be added to an EndpointSlice
maxEndpointsPerSlice int32
// workerLoopPeriod is the time between worker runs. The workers
// process the queue of service and pod changes
workerLoopPeriod time.Duration
// endpointUpdatesBatchPeriod is an artificial delay added to all service syncs triggered by pod changes.
// This can be used to reduce overall number of all endpoint slice updates.
endpointUpdatesBatchPeriod time.Duration
// topologyCache tracks the distribution of Nodes and endpoints across zones
// to enable TopologyAwareHints.
topologyCache *topologycache.TopologyCache
}
// Run will not return until stopCh is closed.
func (c *Controller) Run(ctx context.Context, workers int) {
defer utilruntime.HandleCrash()
// Start events processing pipeline.
c.eventBroadcaster.StartLogging(klog.Infof)
c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.client.CoreV1().Events("")})
defer c.eventBroadcaster.Shutdown()
defer c.queue.ShutDown()
logger := klog.FromContext(ctx)
logger.Info("Starting endpoint slice controller")
defer logger.Info("Shutting down endpoint slice controller")
if !cache.WaitForNamedCacheSync("endpoint_slice", ctx.Done(), c.podsSynced, c.servicesSynced, c.endpointSlicesSynced, c.nodesSynced) {
return
}
logger.V(2).Info("Starting worker threads", "total", workers)
for i := 0; i < workers; i++ {
go wait.Until(func() { c.worker(logger) }, c.workerLoopPeriod, ctx.Done())
}
<-ctx.Done()
}
// worker runs a worker thread that just dequeues items, processes them, and
// marks them done. You may run as many of these in parallel as you wish; the
// workqueue guarantees that they will not end up processing the same service
// at the same time
func (c *Controller) worker(logger klog.Logger) {
for c.processNextWorkItem(logger) {
}
}
func (c *Controller) processNextWorkItem(logger klog.Logger) bool {
cKey, quit := c.queue.Get()
if quit {
return false
}
defer c.queue.Done(cKey)
err := c.syncService(logger, cKey.(string))
c.handleErr(logger, err, cKey)
return true
}
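// handleErr records the outcome of a sync for metrics and decides whether to
// retry: failed keys are requeued with rate limiting until maxRetries is
// exhausted, after which the key is dropped from the queue.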
func (c *Controller) handleErr(logger klog.Logger, err error, key interface{}) {
trackSync(err)
if err == nil {
c.queue.Forget(key)
return
}
if c.queue.NumRequeues(key) < maxRetries {
logger.Info("Error syncing endpoint slices for service, retrying", "key", key, "err", err)
c.queue.AddRateLimited(key)
return
}
logger.Info("Retry budget exceeded, dropping service out of the queue", "key", key, "err", err)
c.queue.Forget(key)
utilruntime.HandleError(err)
}
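// syncService reconciles the EndpointSlices for the Service identified by
// key, creating, updating or deleting slices so that they match the pods
// selected by the Service.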
func (c *Controller) syncService(logger klog.Logger, key string) error {
startTime := time.Now()
defer func() {
logger.V(4).Info("Finished syncing service endpoint slices", "key", key, "elapsedTime", time.Since(startTime))
}()
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
service, err := c.serviceLister.Services(namespace).Get(name)
if err != nil {
if !apierrors.IsNotFound(err) {
return err
}
c.triggerTimeTracker.DeleteService(namespace, name)
c.reconciler.DeleteService(namespace, name)
c.endpointSliceTracker.DeleteService(namespace, name)
// The service has been deleted, return nil so that it won't be retried.
return nil
}
if service.Spec.Type == v1.ServiceTypeExternalName {
// services with Type ExternalName receive no endpoints from this controller;
// Ref: https://issues.k8s.io/105986
return nil
}
if service.Spec.Selector == nil {
// services without a selector receive no endpoint slices from this controller;
// these services will receive endpoint slices that are created out-of-band via the REST API.
return nil
}
logger.V(5).Info("About to update endpoint slices for service", "key", key)
podLabelSelector := labels.Set(service.Spec.Selector).AsSelectorPreValidated()
pods, err := c.podLister.Pods(service.Namespace).List(podLabelSelector)
if err != nil {
// Since we're getting stuff from a local cache, it is basically
// impossible to get this error.
c.eventRecorder.Eventf(service, v1.EventTypeWarning, "FailedToListPods",
"Error listing Pods for Service %s/%s: %v", service.Namespace, service.Name, err)
return err
}
esLabelSelector := labels.Set(map[string]string{
discovery.LabelServiceName: service.Name,
discovery.LabelManagedBy: c.reconciler.GetControllerName(),
}).AsSelectorPreValidated()
endpointSlices, err := c.endpointSliceLister.EndpointSlices(service.Namespace).List(esLabelSelector)
if err != nil {
// Since we're getting stuff from a local cache, it is basically
// impossible to get this error.
c.eventRecorder.Eventf(service, v1.EventTypeWarning, "FailedToListEndpointSlices",
"Error listing Endpoint Slices for Service %s/%s: %v", service.Namespace, service.Name, err)
return err
}
// Drop EndpointSlices that have been marked for deletion to prevent the controller from getting stuck.
endpointSlices = dropEndpointSlicesPendingDeletion(endpointSlices)
if c.endpointSliceTracker.StaleSlices(service, endpointSlices) {
return endpointslicepkg.NewStaleInformerCache("EndpointSlice informer cache is out of date")
}
// We call ComputeEndpointLastChangeTriggerTime here to make sure that the
// state of the trigger time tracker gets updated even if the sync turns out
// to be no-op and we don't update the EndpointSlice objects.
lastChangeTriggerTime := c.triggerTimeTracker.
ComputeEndpointLastChangeTriggerTime(namespace, service, pods)
err = c.reconciler.Reconcile(logger, service, pods, endpointSlices, lastChangeTriggerTime)
if err != nil {
c.eventRecorder.Eventf(service, v1.EventTypeWarning, "FailedToUpdateEndpointSlices",
"Error updating Endpoint Slices for Service %s/%s: %v", service.Namespace, service.Name, err)
return err
}
return nil
}
// onServiceUpdate queues the Service for processing.
func (c *Controller) onServiceUpdate(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
return
}
c.queue.Add(key)
}
// onServiceDelete queues the Service for processing.
func (c *Controller) onServiceDelete(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
return
}
c.queue.Add(key)
}
// onEndpointSliceAdd queues a sync for the relevant Service if the
// EndpointSlice resource version does not match the expected version in the
// endpointSliceTracker.
func (c *Controller) onEndpointSliceAdd(obj interface{}) {
endpointSlice := obj.(*discovery.EndpointSlice)
if endpointSlice == nil {
utilruntime.HandleError(fmt.Errorf("Invalid EndpointSlice provided to onEndpointSliceAdd()"))
return
}
if c.reconciler.ManagedByController(endpointSlice) && c.endpointSliceTracker.ShouldSync(endpointSlice) {
c.queueServiceForEndpointSlice(endpointSlice)
}
}
// onEndpointSliceUpdate queues a sync for the relevant Service if the
// EndpointSlice resource version does not match the expected version in the
// endpointSliceTracker or the managed-by value of the EndpointSlice has changed
// from or to this controller.
func (c *Controller) onEndpointSliceUpdate(logger klog.Logger, prevObj, obj interface{}) {
prevEndpointSlice := prevObj.(*discovery.EndpointSlice)
endpointSlice := obj.(*discovery.EndpointSlice)
if endpointSlice == nil || prevEndpointSlice == nil {
utilruntime.HandleError(fmt.Errorf("Invalid EndpointSlice provided to onEndpointSliceUpdate()"))
return
}
// EndpointSlice generation does not change when labels change. Although the
// controller will never change LabelServiceName, users might. This check
// ensures that we handle changes to this label.
svcName := endpointSlice.Labels[discovery.LabelServiceName]
prevSvcName := prevEndpointSlice.Labels[discovery.LabelServiceName]
if svcName != prevSvcName {
logger.Info("label changed", "label", discovery.LabelServiceName, "oldService", prevSvcName, "newService", svcName, "endpointslice", klog.KObj(endpointSlice))
c.queueServiceForEndpointSlice(endpointSlice)
c.queueServiceForEndpointSlice(prevEndpointSlice)
return
}
if c.reconciler.ManagedByChanged(prevEndpointSlice, endpointSlice) || (c.reconciler.ManagedByController(endpointSlice) && c.endpointSliceTracker.ShouldSync(endpointSlice)) {
c.queueServiceForEndpointSlice(endpointSlice)
}
}
// onEndpointSliceDelete queues a sync for the relevant Service if the
// EndpointSlice resource version does not match the expected version in the
// endpointSliceTracker.
func (c *Controller) onEndpointSliceDelete(obj interface{}) {
endpointSlice := getEndpointSliceFromDeleteAction(obj)
if endpointSlice != nil && c.reconciler.ManagedByController(endpointSlice) && c.endpointSliceTracker.Has(endpointSlice) {
// This returns false if we didn't expect the EndpointSlice to be
// deleted. If that is the case, we queue the Service for another sync.
if !c.endpointSliceTracker.HandleDeletion(endpointSlice) {
c.queueServiceForEndpointSlice(endpointSlice)
}
}
}
// queueServiceForEndpointSlice attempts to queue the corresponding Service for
// the provided EndpointSlice.
func (c *Controller) queueServiceForEndpointSlice(endpointSlice *discovery.EndpointSlice) {
key, err := endpointslicerec.ServiceControllerKey(endpointSlice)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for EndpointSlice %+v: %v", endpointSlice, err))
return
}
// queue after the max of endpointSliceChangeMinSyncDelay and
// endpointUpdatesBatchPeriod.
delay := endpointSliceChangeMinSyncDelay
if c.endpointUpdatesBatchPeriod > delay {
delay = c.endpointUpdatesBatchPeriod
}
c.queue.AddAfter(key, delay)
}
func (c *Controller) addPod(obj interface{}) {
pod := obj.(*v1.Pod)
services, err := endpointsliceutil.GetPodServiceMemberships(c.serviceLister, pod)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to get pod %s/%s's service memberships: %v", pod.Namespace, pod.Name, err))
return
}
for key := range services {
c.queue.AddAfter(key, c.endpointUpdatesBatchPeriod)
}
}
func (c *Controller) updatePod(old, cur interface{}) {
services := endpointsliceutil.GetServicesToUpdateOnPodChange(c.serviceLister, old, cur)
for key := range services {
c.queue.AddAfter(key, c.endpointUpdatesBatchPeriod)
}
}
// When a pod is deleted, enqueue the services the pod used to be a member of
// obj could be an *v1.Pod, or a DeletionFinalStateUnknown marker item.
func (c *Controller) deletePod(obj interface{}) {
pod := endpointsliceutil.GetPodFromDeleteAction(obj)
if pod != nil {
c.addPod(pod)
}
}
func (c *Controller) addNode(logger klog.Logger, obj interface{}) {
c.checkNodeTopologyDistribution(logger)
}
func (c *Controller) updateNode(logger klog.Logger, old, cur interface{}) {
oldNode := old.(*v1.Node)
curNode := cur.(*v1.Node)
// LabelTopologyZone may be added by cloud provider asynchronously after the Node is created.
// The topology cache should be updated in this case.
if isNodeReady(oldNode) != isNodeReady(curNode) ||
oldNode.Labels[v1.LabelTopologyZone] != curNode.Labels[v1.LabelTopologyZone] {
c.checkNodeTopologyDistribution(logger)
}
}
func (c *Controller) deleteNode(logger klog.Logger, obj interface{}) {
c.checkNodeTopologyDistribution(logger)
}
// checkNodeTopologyDistribution updates Nodes in the topology cache and then
// queues any Services that are past the threshold.
func (c *Controller) checkNodeTopologyDistribution(logger klog.Logger) {
if c.topologyCache == nil {
return
}
nodes, err := c.nodeLister.List(labels.Everything())
if err != nil {
logger.Error(err, "Error listing Nodes")
return
}
c.topologyCache.SetNodes(logger, nodes)
serviceKeys := c.topologyCache.GetOverloadedServices()
for _, serviceKey := range serviceKeys {
logger.V(2).Info("Queuing Service after Node change due to overloading", "key", serviceKey)
c.queue.Add(serviceKey)
}
}
// trackSync increments the EndpointSliceSyncs metric with the result of a sync.
func trackSync(err error) {
metricLabel := "success"
if err != nil {
if endpointslicepkg.IsStaleInformerCacheErr(err) {
metricLabel = "stale"
} else {
metricLabel = "error"
}
}
endpointslicemetrics.EndpointSliceSyncs.WithLabelValues(metricLabel).Inc()
}
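// dropEndpointSlicesPendingDeletion filters the given list in place, keeping
// only EndpointSlices that are not marked for deletion.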
func dropEndpointSlicesPendingDeletion(endpointSlices []*discovery.EndpointSlice) []*discovery.EndpointSlice {
n := 0
for _, endpointSlice := range endpointSlices {
if endpointSlice.DeletionTimestamp == nil {
endpointSlices[n] = endpointSlice
n++
}
}
return endpointSlices[:n]
}
// getEndpointSliceFromDeleteAction parses an EndpointSlice from a delete action.
func getEndpointSliceFromDeleteAction(obj interface{}) *discovery.EndpointSlice {
if endpointSlice, ok := obj.(*discovery.EndpointSlice); ok {
		// The object is a plain EndpointSlice, so it can be returned directly.
return endpointSlice
}
	// If we reached here it means the EndpointSlice was deleted but its final state is unrecorded.
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return nil
}
endpointSlice, ok := tombstone.Obj.(*discovery.EndpointSlice)
if !ok {
utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a EndpointSlice: %#v", obj))
return nil
}
return endpointSlice
}
// isNodeReady returns true if a node is ready; false otherwise.
func isNodeReady(node *v1.Node) bool {
for _, c := range node.Status.Conditions {
if c.Type == v1.NodeReady {
return c.Status == v1.ConditionTrue
}
}
return false
}
| pkg/controller/endpointslice/endpointslice_controller.go | 1 | https://github.com/kubernetes/kubernetes/commit/ec6fd2befac30ebb7dfb55b94d8c9874489a3902 | [
0.028233306482434273,
0.0016874424181878567,
0.0001631643099244684,
0.0001791474933270365,
0.004362231586128473
] |
{
"id": 5,
"code_window": [
"\t\tclient,\n",
"\t\tcorelisters.NewNodeLister(indexer),\n",
"\t\tmaxEndpointsPerSlice,\n",
"\t\tendpointsliceutil.NewEndpointSliceTracker(),\n",
"\t\tnil,\n",
"\t\tfalse,\n",
"\t\teventRecorder,\n",
"\t\tcontrollerName,\n",
"\t)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "staging/src/k8s.io/endpointslice/reconciler_test.go",
"type": "replace",
"edit_start_line_idx": 1000
} | apiVersion: v1
kind: Pod
metadata:
name: selinuxoptions0
spec:
containers:
- image: registry.k8s.io/pause
name: container1
securityContext: {}
initContainers:
- image: registry.k8s.io/pause
name: initcontainer1
securityContext:
seLinuxOptions: {}
securityContext: {}
| staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.11/pass/selinuxoptions0.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/ec6fd2befac30ebb7dfb55b94d8c9874489a3902 | [
0.00017730383842717856,
0.00017596420366317034,
0.00017462455434724689,
0.00017596420366317034,
0.0000013396420399658382
] |
{
"id": 5,
"code_window": [
"\t\tclient,\n",
"\t\tcorelisters.NewNodeLister(indexer),\n",
"\t\tmaxEndpointsPerSlice,\n",
"\t\tendpointsliceutil.NewEndpointSliceTracker(),\n",
"\t\tnil,\n",
"\t\tfalse,\n",
"\t\teventRecorder,\n",
"\t\tcontrollerName,\n",
"\t)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "staging/src/k8s.io/endpointslice/reconciler_test.go",
"type": "replace",
"edit_start_line_idx": 1000
} | /*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package meta
import (
"fmt"
"regexp"
)
// Key for a GCP resource.
type Key struct {
Name string
Zone string
Region string
}
// KeyType is the type of the key.
type KeyType string
const (
// Zonal key type.
Zonal = "zonal"
// Regional key type.
Regional = "regional"
// Global key type.
Global = "global"
)
var (
	// locationRegexp is the format of region/zone names in GCE.
locationRegexp = regexp.MustCompile("^[a-z](?:[-a-z0-9]+)?$")
)
// ZonalKey returns the key for a zonal resource.
func ZonalKey(name, zone string) *Key {
return &Key{name, zone, ""}
}
// RegionalKey returns the key for a regional resource.
func RegionalKey(name, region string) *Key {
return &Key{name, "", region}
}
// GlobalKey returns the key for a global resource.
func GlobalKey(name string) *Key {
return &Key{name, "", ""}
}
// Type returns the type of the key.
func (k *Key) Type() KeyType {
switch {
case k.Zone != "":
return Zonal
case k.Region != "":
return Regional
default:
return Global
}
}
// String returns a string representation of the key.
func (k Key) String() string {
switch k.Type() {
case Zonal:
return fmt.Sprintf("Key{%q, zone: %q}", k.Name, k.Zone)
case Regional:
return fmt.Sprintf("Key{%q, region: %q}", k.Name, k.Region)
default:
return fmt.Sprintf("Key{%q}", k.Name)
}
}
// Valid is true if the key is valid.
func (k *Key) Valid() bool {
if k.Zone != "" && k.Region != "" {
return false
}
switch {
case k.Region != "":
return locationRegexp.Match([]byte(k.Region))
case k.Zone != "":
return locationRegexp.Match([]byte(k.Zone))
}
return true
}
// KeysToMap creates a map[Key]bool from a list of keys.
func KeysToMap(keys ...Key) map[Key]bool {
ret := map[Key]bool{}
for _, k := range keys {
ret[k] = true
}
return ret
}
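// Illustrative usage sketch (not part of the upstream file; it only uses the
// helpers defined above):
//
//	k := ZonalKey("my-instance", "us-central1-b")
//	fmt.Println(k.Type())  // "zonal"
//	fmt.Println(k.Valid()) // true, since the zone matches locationRegexp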
| vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta/key.go | 0 | https://github.com/kubernetes/kubernetes/commit/ec6fd2befac30ebb7dfb55b94d8c9874489a3902 | [
0.00017914628551807255,
0.00017206236952915788,
0.0001658177498029545,
0.00017345993546769023,
0.000004396470103529282
] |
{
"id": 5,
"code_window": [
"\t\tclient,\n",
"\t\tcorelisters.NewNodeLister(indexer),\n",
"\t\tmaxEndpointsPerSlice,\n",
"\t\tendpointsliceutil.NewEndpointSliceTracker(),\n",
"\t\tnil,\n",
"\t\tfalse,\n",
"\t\teventRecorder,\n",
"\t\tcontrollerName,\n",
"\t)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "staging/src/k8s.io/endpointslice/reconciler_test.go",
"type": "replace",
"edit_start_line_idx": 1000
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podtolerationrestriction
import (
"context"
"encoding/json"
"testing"
"time"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/admission"
genericadmissioninitializer "k8s.io/apiserver/pkg/admission/initializer"
admissiontesting "k8s.io/apiserver/pkg/admission/testing"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
api "k8s.io/kubernetes/pkg/apis/core"
pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
)
// TestPodAdmission verifies various scenarios involving pod/namespace tolerations
func TestPodAdmission(t *testing.T) {
CPU1000m := resource.MustParse("1000m")
CPU500m := resource.MustParse("500m")
burstablePod := &api.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "testPod", Namespace: "testNamespace"},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "test",
Resources: api.ResourceRequirements{
Limits: api.ResourceList{api.ResourceCPU: CPU1000m},
Requests: api.ResourceList{api.ResourceCPU: CPU500m},
},
},
},
},
}
guaranteedPod := &api.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "testPod", Namespace: "testNamespace"},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "test",
Resources: api.ResourceRequirements{
Limits: api.ResourceList{api.ResourceCPU: CPU1000m},
Requests: api.ResourceList{api.ResourceCPU: CPU1000m},
},
},
},
},
}
bestEffortPod := &api.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "testPod", Namespace: "testNamespace"},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "test",
},
},
},
}
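	// Note on precedence, as inferred from the cases below: namespace-level
	// annotations (NSDefaultTolerations / NSWLTolerations), when present,
	// override the cluster-level defaults and whitelist from the plugin
	// configuration; an empty annotation overrides with "no tolerations".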
tests := []struct {
pod *api.Pod
defaultClusterTolerations []api.Toleration
namespaceTolerations []api.Toleration
whitelist []api.Toleration
clusterWhitelist []api.Toleration
podTolerations []api.Toleration
mergedTolerations []api.Toleration
admit bool
testName string
}{
{
pod: bestEffortPod,
defaultClusterTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil}},
namespaceTolerations: nil,
podTolerations: []api.Toleration{},
mergedTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil}},
admit: true,
testName: "default cluster tolerations with empty pod tolerations and nil namespace tolerations",
},
{
pod: bestEffortPod,
defaultClusterTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil}},
namespaceTolerations: []api.Toleration{},
podTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil}},
mergedTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil}},
admit: true,
testName: "default cluster tolerations with pod tolerations specified",
},
{
pod: bestEffortPod,
defaultClusterTolerations: []api.Toleration{},
namespaceTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil}},
podTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil}},
mergedTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil}},
admit: true,
testName: "namespace tolerations",
},
{
pod: bestEffortPod,
defaultClusterTolerations: []api.Toleration{},
namespaceTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil}},
podTolerations: []api.Toleration{},
mergedTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil}},
admit: true,
testName: "no pod tolerations",
},
{
pod: bestEffortPod,
defaultClusterTolerations: []api.Toleration{},
namespaceTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule"}},
podTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue1", Effect: "NoSchedule"}},
mergedTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule"}, {Key: "testKey", Operator: "Equal", Value: "testValue1", Effect: "NoSchedule"}},
admit: true,
testName: "duplicate key pod and namespace tolerations",
},
{
pod: bestEffortPod,
defaultClusterTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue2", Effect: "NoSchedule", TolerationSeconds: nil}},
namespaceTolerations: []api.Toleration{},
podTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue1", Effect: "NoSchedule", TolerationSeconds: nil}},
mergedTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue1", Effect: "NoSchedule", TolerationSeconds: nil}},
admit: true,
testName: "conflicting pod and default cluster tolerations but overridden by empty namespace tolerations",
},
{
pod: bestEffortPod,
defaultClusterTolerations: []api.Toleration{},
namespaceTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil}},
whitelist: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil}},
podTolerations: []api.Toleration{},
mergedTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil}},
admit: true,
testName: "merged pod tolerations satisfy whitelist",
},
{
pod: bestEffortPod,
defaultClusterTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil}},
namespaceTolerations: []api.Toleration{},
podTolerations: []api.Toleration{},
mergedTolerations: []api.Toleration{},
admit: true,
testName: "Override default cluster toleration by empty namespace level toleration",
},
{
pod: bestEffortPod,
whitelist: []api.Toleration{},
clusterWhitelist: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue1", Effect: "NoSchedule", TolerationSeconds: nil}},
podTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil}},
mergedTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil}},
admit: true,
testName: "pod toleration conflicts with default cluster white list which is overridden by empty namespace whitelist",
},
{
pod: bestEffortPod,
defaultClusterTolerations: []api.Toleration{},
namespaceTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil}},
whitelist: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue1", Effect: "NoSchedule", TolerationSeconds: nil}},
podTolerations: []api.Toleration{},
admit: false,
testName: "merged pod tolerations conflict with the whitelist",
},
{
pod: burstablePod,
defaultClusterTolerations: []api.Toleration{},
namespaceTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil}},
whitelist: []api.Toleration{},
podTolerations: []api.Toleration{},
mergedTolerations: []api.Toleration{
{Key: corev1.TaintNodeMemoryPressure, Operator: api.TolerationOpExists, Effect: api.TaintEffectNoSchedule, TolerationSeconds: nil},
{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil},
},
admit: true,
testName: "added memoryPressure/DiskPressure for Burstable pod",
},
{
pod: bestEffortPod,
defaultClusterTolerations: []api.Toleration{},
namespaceTolerations: []api.Toleration{},
whitelist: []api.Toleration{},
podTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil}, {Key: "testKey", Operator: "Equal", Value: "testValue1", Effect: "NoSchedule", TolerationSeconds: nil}},
mergedTolerations: []api.Toleration{
{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil},
{Key: "testKey", Operator: "Equal", Value: "testValue1", Effect: "NoSchedule", TolerationSeconds: nil},
},
admit: true,
testName: "Pod with duplicate key tolerations should not be modified",
},
{
pod: guaranteedPod,
defaultClusterTolerations: []api.Toleration{},
namespaceTolerations: []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil}},
whitelist: []api.Toleration{},
podTolerations: []api.Toleration{},
mergedTolerations: []api.Toleration{
{Key: corev1.TaintNodeMemoryPressure, Operator: api.TolerationOpExists, Effect: api.TaintEffectNoSchedule, TolerationSeconds: nil},
{Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil},
},
admit: true,
testName: "added memoryPressure/DiskPressure for Guaranteed pod",
},
}
for _, test := range tests {
t.Run(test.testName, func(t *testing.T) {
namespace := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "testNamespace",
Namespace: "",
Annotations: map[string]string{},
},
}
if test.namespaceTolerations != nil {
tolerationStr, err := json.Marshal(test.namespaceTolerations)
if err != nil {
t.Errorf("error in marshalling namespace tolerations %v", test.namespaceTolerations)
}
namespace.Annotations = map[string]string{NSDefaultTolerations: string(tolerationStr)}
}
if test.whitelist != nil {
tolerationStr, err := json.Marshal(test.whitelist)
if err != nil {
t.Errorf("error in marshalling namespace whitelist %v", test.whitelist)
}
namespace.Annotations[NSWLTolerations] = string(tolerationStr)
}
mockClient := fake.NewSimpleClientset(namespace)
handler, informerFactory, err := newHandlerForTest(mockClient)
if err != nil {
t.Fatalf("unexpected error initializing handler: %v", err)
}
stopCh := make(chan struct{})
defer close(stopCh)
informerFactory.Start(stopCh)
handler.pluginConfig = &pluginapi.Configuration{Default: test.defaultClusterTolerations, Whitelist: test.clusterWhitelist}
pod := test.pod
pod.Spec.Tolerations = test.podTolerations
err = admissiontesting.WithReinvocationTesting(t, handler).Admit(context.TODO(), admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "testNamespace", namespace.ObjectMeta.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil)
if test.admit && err != nil {
t.Errorf("Test: %s, expected no error but got: %s", test.testName, err)
} else if !test.admit && err == nil {
t.Errorf("Test: %s, expected an error", test.testName)
}
updatedPodTolerations := pod.Spec.Tolerations
if test.admit {
assert.ElementsMatch(t, updatedPodTolerations, test.mergedTolerations)
}
})
}
}
func TestHandles(t *testing.T) {
for op, shouldHandle := range map[admission.Operation]bool{
admission.Create: true,
admission.Update: true,
admission.Connect: false,
admission.Delete: false,
} {
pluginConfig, err := loadConfiguration(nil)
// must not fail
if err != nil {
t.Errorf("%v: error reading default configuration", op)
}
ptPlugin := NewPodTolerationsPlugin(pluginConfig)
if e, a := shouldHandle, ptPlugin.Handles(op); e != a {
t.Errorf("%v: shouldHandle=%t, handles=%t", op, e, a)
}
}
}
func TestIgnoreUpdatingInitializedPod(t *testing.T) {
mockClient := &fake.Clientset{}
handler, informerFactory, err := newHandlerForTest(mockClient)
if err != nil {
t.Errorf("unexpected error initializing handler: %v", err)
}
handler.SetReadyFunc(func() bool { return true })
pod := &api.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "testPod", Namespace: "testNamespace"},
Spec: api.PodSpec{},
}
podToleration := api.Toleration{
Key: "testKey",
Operator: "Equal",
Value: "testValue1",
Effect: "NoSchedule",
TolerationSeconds: nil,
}
pod.Spec.Tolerations = []api.Toleration{podToleration}
// this conflicts with pod's Tolerations
namespaceToleration := podToleration
namespaceToleration.Value = "testValue2"
namespaceTolerations := []api.Toleration{namespaceToleration}
tolerationsStr, err := json.Marshal(namespaceTolerations)
if err != nil {
t.Errorf("error in marshalling namespace tolerations %v", namespaceTolerations)
}
namespace := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "testNamespace",
Namespace: "",
},
}
namespace.Annotations = map[string]string{NSDefaultTolerations: string(tolerationsStr)}
err = informerFactory.Core().V1().Namespaces().Informer().GetStore().Update(namespace)
if err != nil {
t.Fatal(err)
}
// if the update of initialized pod is not ignored, an error will be returned because the pod's Tolerations conflicts with namespace's Tolerations.
err = admissiontesting.WithReinvocationTesting(t, handler).Admit(context.TODO(), admission.NewAttributesRecord(pod, pod, api.Kind("Pod").WithVersion("version"), "testNamespace", pod.ObjectMeta.Name, api.Resource("pods").WithVersion("version"), "", admission.Update, &metav1.CreateOptions{}, false, nil), nil)
if err != nil {
t.Errorf("expected no error, got: %v", err)
}
}
// newHandlerForTest returns the admission controller configured for testing.
func newHandlerForTest(c kubernetes.Interface) (*Plugin, informers.SharedInformerFactory, error) {
f := informers.NewSharedInformerFactory(c, 5*time.Minute)
pluginConfig, err := loadConfiguration(nil)
// must not fail
if err != nil {
return nil, nil, err
}
handler := NewPodTolerationsPlugin(pluginConfig)
pluginInitializer := genericadmissioninitializer.New(c, nil, f, nil, nil, nil)
pluginInitializer.Initialize(handler)
err = admission.ValidateInitialization(handler)
return handler, f, err
}
| plugin/pkg/admission/podtolerationrestriction/admission_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/ec6fd2befac30ebb7dfb55b94d8c9874489a3902 | [
0.00023287504154723138,
0.00017418833158444613,
0.00016571722517255694,
0.0001729679061099887,
0.000010110412404173985
] |
{
"id": 0,
"code_window": [
" // Do the same thing in case we only have zero byte files to sync.\n",
" return 95;\n",
" }\n",
" var pct = 100 * $scope.model[folder].inSyncBytes / $scope.model[folder].globalBytes;\n",
" return Math.floor(pct);\n",
" };\n",
"\n",
" $scope.scanPercentage = function (folder) {\n",
" if (!$scope.scanProgress[folder]) {\n",
" return undefined;\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" return progressIntegerPercentage($scope.model[folder].inSyncBytes, $scope.model[folder].globalBytes);\n"
],
"file_path": "gui/default/syncthing/core/syncthingController.js",
"type": "replace",
"edit_start_line_idx": 1000
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package scanner
import (
"context"
"errors"
"fmt"
"path/filepath"
"strings"
"sync/atomic"
"time"
"unicode/utf8"
metrics "github.com/rcrowley/go-metrics"
"github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/ignore"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol"
"golang.org/x/text/unicode/norm"
)
type Config struct {
// Folder for which the walker has been created
Folder string
// Limit walking to these paths within Dir, or no limit if Sub is empty
Subs []string
// If Matcher is not nil, it is used to identify files to ignore which were specified by the user.
Matcher *ignore.Matcher
// Number of hours to keep temporary files for
TempLifetime time.Duration
// If CurrentFiler is not nil, it is queried for the current file before rescanning.
CurrentFiler CurrentFiler
// The Filesystem provides an abstraction on top of the actual filesystem.
Filesystem fs.Filesystem
// If IgnorePerms is true, changes to permission bits will not be
// detected.
IgnorePerms bool
// When AutoNormalize is set, file names that are in UTF8 but incorrect
// normalization form will be corrected.
AutoNormalize bool
// Number of routines to use for hashing
Hashers int
// Our vector clock id
ShortID protocol.ShortID
// Optional progress tick interval which defines how often FolderScanProgress
// events are emitted. Negative number means disabled.
ProgressTickIntervalS int
// Local flags to set on scanned files
LocalFlags uint32
// Modification time is to be considered unchanged if the difference is lower.
ModTimeWindow time.Duration
// Event logger to which the scan progress events are sent
EventLogger events.Logger
// If ScanOwnership is true, we pick up ownership information on files while scanning.
ScanOwnership bool
// If ScanXattrs is true, we pick up extended attributes on files while scanning.
ScanXattrs bool
// Filter for extended attributes
XattrFilter XattrFilter
}
type CurrentFiler interface {
// CurrentFile returns the file as seen at last scan.
CurrentFile(name string) (protocol.FileInfo, bool)
}
type XattrFilter interface {
Permit(string) bool
GetMaxSingleEntrySize() int
GetMaxTotalSize() int
}
type ScanResult struct {
File protocol.FileInfo
Err error
	Path string // to be set in case Err != nil and File is unset
}
func Walk(ctx context.Context, cfg Config) chan ScanResult {
return newWalker(cfg).walk(ctx)
}
func WalkWithoutHashing(ctx context.Context, cfg Config) chan ScanResult {
return newWalker(cfg).walkWithoutHashing(ctx)
}
func newWalker(cfg Config) *walker {
w := &walker{cfg}
if w.CurrentFiler == nil {
w.CurrentFiler = noCurrentFiler{}
}
if w.Filesystem == nil {
panic("no filesystem specified")
}
if w.Matcher == nil {
w.Matcher = ignore.New(w.Filesystem)
}
registerFolderMetrics(w.Folder)
return w
}
var (
errUTF8Invalid = errors.New("item is not in UTF8 encoding")
errUTF8Normalization = errors.New("item is not in the correct UTF8 normalization form")
errUTF8Conflict = errors.New("item has UTF8 encoding conflict with another item")
)
type walker struct {
Config
}
// Walk returns the list of files found in the local folder by scanning the
// file system. Files are blockwise hashed.
func (w *walker) walk(ctx context.Context) chan ScanResult {
l.Debugln(w, "Walk", w.Subs, w.Matcher)
toHashChan := make(chan protocol.FileInfo)
finishedChan := make(chan ScanResult)
// A routine which walks the filesystem tree, and sends files which have
// been modified to the counter routine.
go w.scan(ctx, toHashChan, finishedChan)
// We're not required to emit scan progress events, just kick off hashers,
// and feed inputs directly from the walker.
if w.ProgressTickIntervalS < 0 {
newParallelHasher(ctx, w.Folder, w.Filesystem, w.Hashers, finishedChan, toHashChan, nil, nil)
return finishedChan
}
// Defaults to every 2 seconds.
if w.ProgressTickIntervalS == 0 {
w.ProgressTickIntervalS = 2
}
ticker := time.NewTicker(time.Duration(w.ProgressTickIntervalS) * time.Second)
// We need to emit progress events, hence we create a routine which buffers
// the list of files to be hashed, counts the total number of
// bytes to hash, and once no more files need to be hashed (chan gets closed),
	// starts a routine which periodically emits FolderScanProgress events,
// until a stop signal is sent by the parallel hasher.
// Parallel hasher is stopped by this routine when we close the channel over
// which it receives the files we ask it to hash.
go func() {
var filesToHash []protocol.FileInfo
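		// Starting the total at 1 keeps the progress expression below
		// (current*100/total) safe from division by zero when there is
		// nothing to hash.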
var total int64 = 1
for file := range toHashChan {
filesToHash = append(filesToHash, file)
total += file.Size
}
realToHashChan := make(chan protocol.FileInfo)
done := make(chan struct{})
progress := newByteCounter()
newParallelHasher(ctx, w.Folder, w.Filesystem, w.Hashers, finishedChan, realToHashChan, progress, done)
// A routine which actually emits the FolderScanProgress events
// every w.ProgressTicker ticks, until the hasher routines terminate.
go func() {
defer progress.Close()
for {
select {
case <-done:
l.Debugln(w, "Walk progress done", w.Folder, w.Subs, w.Matcher)
ticker.Stop()
return
case <-ticker.C:
current := progress.Total()
rate := progress.Rate()
l.Debugf("%v: Walk %s %s current progress %d/%d at %.01f MiB/s (%d%%)", w, w.Folder, w.Subs, current, total, rate/1024/1024, current*100/total)
w.EventLogger.Log(events.FolderScanProgress, map[string]interface{}{
"folder": w.Folder,
"current": current,
"total": total,
"rate": rate, // bytes per second
})
case <-ctx.Done():
ticker.Stop()
return
}
}
}()
loop:
for _, file := range filesToHash {
l.Debugln(w, "real to hash:", file.Name)
select {
case realToHashChan <- file:
case <-ctx.Done():
break loop
}
}
close(realToHashChan)
}()
return finishedChan
}
func (w *walker) walkWithoutHashing(ctx context.Context) chan ScanResult {
l.Debugln(w, "Walk without hashing", w.Subs, w.Matcher)
toHashChan := make(chan protocol.FileInfo)
finishedChan := make(chan ScanResult)
// A routine which walks the filesystem tree, and sends files which have
// been modified to the counter routine.
go w.scan(ctx, toHashChan, finishedChan)
go func() {
for file := range toHashChan {
finishedChan <- ScanResult{File: file}
}
close(finishedChan)
}()
return finishedChan
}
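// scan walks the configured roots (all of Subs, or the folder root if Subs is
// empty), sending changed files to toHashChan and errors to finishedChan, and
// closes toHashChan when the walk completes.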
func (w *walker) scan(ctx context.Context, toHashChan chan<- protocol.FileInfo, finishedChan chan<- ScanResult) {
hashFiles := w.walkAndHashFiles(ctx, toHashChan, finishedChan)
if len(w.Subs) == 0 {
w.Filesystem.Walk(".", hashFiles)
} else {
for _, sub := range w.Subs {
if err := osutil.TraversesSymlink(w.Filesystem, filepath.Dir(sub)); err != nil {
l.Debugf("%v: Skip walking %v as it is below a symlink", w, sub)
continue
}
w.Filesystem.Walk(sub, hashFiles)
}
}
close(toHashChan)
}
func (w *walker) walkAndHashFiles(ctx context.Context, toHashChan chan<- protocol.FileInfo, finishedChan chan<- ScanResult) fs.WalkFunc {
now := time.Now()
ignoredParent := ""
return func(path string, info fs.FileInfo, err error) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
metricScannedItems.WithLabelValues(w.Folder).Inc()
// Return value used when we are returning early and don't want to
// process the item. For directories, this means do-not-descend.
var skip error // nil
// info nil when error is not nil
if info != nil && info.IsDir() {
skip = fs.SkipDir
}
if !utf8.ValidString(path) {
handleError(ctx, "scan", path, errUTF8Invalid, finishedChan)
return skip
}
if fs.IsTemporary(path) {
l.Debugln(w, "temporary:", path, "err:", err)
if err == nil && info.IsRegular() && info.ModTime().Add(w.TempLifetime).Before(now) {
w.Filesystem.Remove(path)
l.Debugln(w, "removing temporary:", path, info.ModTime())
}
return nil
}
if fs.IsInternal(path) {
l.Debugln(w, "ignored (internal):", path)
return skip
}
if w.Matcher.Match(path).IsIgnored() {
l.Debugln(w, "ignored (patterns):", path)
// Only descend if matcher says so and the current file is not a symlink.
if err != nil || w.Matcher.SkipIgnoredDirs() || info.IsSymlink() {
return skip
}
// If the parent wasn't ignored already, set this path as the "highest" ignored parent
if info.IsDir() && (ignoredParent == "" || !fs.IsParent(path, ignoredParent)) {
ignoredParent = path
}
return nil
}
if err != nil {
			// No need to report errors for files that don't exist (e.g. scan
// due to filesystem watcher)
if !fs.IsNotExist(err) {
handleError(ctx, "scan", path, err, finishedChan)
}
return skip
}
if path == "." {
return nil
}
if ignoredParent == "" {
// parent isn't ignored, nothing special
return w.handleItem(ctx, path, info, toHashChan, finishedChan, skip)
}
// Part of current path below the ignored (potential) parent
rel := strings.TrimPrefix(path, ignoredParent+string(fs.PathSeparator))
// ignored path isn't actually a parent of the current path
if rel == path {
ignoredParent = ""
return w.handleItem(ctx, path, info, toHashChan, finishedChan, skip)
}
// The previously ignored parent directories of the current, not
// ignored path need to be handled as well.
// Prepend an empty string to handle ignoredParent without anything
// appended in the first iteration.
for _, name := range append([]string{""}, fs.PathComponents(rel)...) {
ignoredParent = filepath.Join(ignoredParent, name)
info, err = w.Filesystem.Lstat(ignoredParent)
// An error here would be weird as we've already gotten to this point, but act on it nonetheless
if err != nil {
handleError(ctx, "scan", ignoredParent, err, finishedChan)
return skip
}
if err = w.handleItem(ctx, ignoredParent, info, toHashChan, finishedChan, skip); err != nil {
return err
}
}
ignoredParent = ""
return nil
}
}
func (w *walker) handleItem(ctx context.Context, path string, info fs.FileInfo, toHashChan chan<- protocol.FileInfo, finishedChan chan<- ScanResult, skip error) error {
oldPath := path
path, err := w.normalizePath(path, info)
if err != nil {
handleError(ctx, "normalizing path", oldPath, err, finishedChan)
return skip
}
switch {
case info.IsSymlink():
if err := w.walkSymlink(ctx, path, info, finishedChan); err != nil {
return err
}
if info.IsDir() {
// under no circumstances shall we descend into a symlink
return fs.SkipDir
}
return nil
case info.IsDir():
err = w.walkDir(ctx, path, info, finishedChan)
case info.IsRegular():
err = w.walkRegular(ctx, path, info, toHashChan)
}
return err
}
func (w *walker) walkRegular(ctx context.Context, relPath string, info fs.FileInfo, toHashChan chan<- protocol.FileInfo) error {
curFile, hasCurFile := w.CurrentFiler.CurrentFile(relPath)
blockSize := protocol.BlockSize(info.Size())
if hasCurFile {
// Check if we should retain current block size.
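// Retaining the old size within a factor of two is hysteresis: it avoids
// rehashing the whole file with a new block size every time the file
// crosses a size boundary by a small amount.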
curBlockSize := curFile.BlockSize()
if blockSize > curBlockSize && blockSize/curBlockSize <= 2 {
// New block size is larger, but not more than twice larger.
// Retain.
blockSize = curBlockSize
} else if curBlockSize > blockSize && curBlockSize/blockSize <= 2 {
// Old block size is larger, but not more than twice larger.
// Retain.
blockSize = curBlockSize
}
}
f, err := CreateFileInfo(info, relPath, w.Filesystem, w.ScanOwnership, w.ScanXattrs, w.XattrFilter)
if err != nil {
return err
}
f = w.updateFileInfo(f, curFile)
f.NoPermissions = w.IgnorePerms
f.RawBlockSize = blockSize
l.Debugln(w, "checking:", f)
if hasCurFile {
if curFile.IsEquivalentOptional(f, protocol.FileInfoComparison{
ModTimeWindow: w.ModTimeWindow,
IgnorePerms: w.IgnorePerms,
IgnoreBlocks: true,
IgnoreFlags: w.LocalFlags,
IgnoreOwnership: !w.ScanOwnership,
IgnoreXattrs: !w.ScanXattrs,
}) {
l.Debugln(w, "unchanged:", curFile)
return nil
}
if curFile.ShouldConflict() {
// The old file was invalid for whatever reason and probably not
// up to date with what was out there in the cluster. Drop all
// others from the version vector to indicate that we haven't
// taken their version into account, and possibly cause a
// conflict.
f.Version = f.Version.DropOthers(w.ShortID)
}
l.Debugln(w, "rescan:", curFile)
}
l.Debugln(w, "to hash:", relPath, f)
select {
case toHashChan <- f:
case <-ctx.Done():
return ctx.Err()
}
return nil
}
func (w *walker) walkDir(ctx context.Context, relPath string, info fs.FileInfo, finishedChan chan<- ScanResult) error {
curFile, hasCurFile := w.CurrentFiler.CurrentFile(relPath)
f, err := CreateFileInfo(info, relPath, w.Filesystem, w.ScanOwnership, w.ScanXattrs, w.XattrFilter)
if err != nil {
return err
}
f = w.updateFileInfo(f, curFile)
f.NoPermissions = w.IgnorePerms
l.Debugln(w, "checking:", f)
if hasCurFile {
if curFile.IsEquivalentOptional(f, protocol.FileInfoComparison{
ModTimeWindow: w.ModTimeWindow,
IgnorePerms: w.IgnorePerms,
IgnoreBlocks: true,
IgnoreFlags: w.LocalFlags,
IgnoreOwnership: !w.ScanOwnership,
IgnoreXattrs: !w.ScanXattrs,
}) {
l.Debugln(w, "unchanged:", curFile)
return nil
}
if curFile.ShouldConflict() {
// The old file was invalid for whatever reason and probably not
// up to date with what was out there in the cluster. Drop all
// others from the version vector to indicate that we haven't
// taken their version into account, and possibly cause a
// conflict.
f.Version = f.Version.DropOthers(w.ShortID)
}
l.Debugln(w, "rescan:", curFile)
}
l.Debugln(w, "dir:", relPath, f)
select {
case finishedChan <- ScanResult{File: f}:
case <-ctx.Done():
return ctx.Err()
}
return nil
}
// walkSymlink returns nil, or an error if the error is of a nature that
// should stop the entire walk.
func (w *walker) walkSymlink(ctx context.Context, relPath string, info fs.FileInfo, finishedChan chan<- ScanResult) error {
// Symlinks are not supported on Windows. We ignore instead of returning
// an error.
if build.IsWindows {
return nil
}
f, err := CreateFileInfo(info, relPath, w.Filesystem, w.ScanOwnership, w.ScanXattrs, w.XattrFilter)
if err != nil {
handleError(ctx, "reading link", relPath, err, finishedChan)
return nil
}
curFile, hasCurFile := w.CurrentFiler.CurrentFile(relPath)
f = w.updateFileInfo(f, curFile)
l.Debugln(w, "checking:", f)
if hasCurFile {
if curFile.IsEquivalentOptional(f, protocol.FileInfoComparison{
ModTimeWindow: w.ModTimeWindow,
IgnorePerms: w.IgnorePerms,
IgnoreBlocks: true,
IgnoreFlags: w.LocalFlags,
IgnoreOwnership: !w.ScanOwnership,
IgnoreXattrs: !w.ScanXattrs,
}) {
l.Debugln(w, "unchanged:", curFile, info.ModTime().Unix(), info.Mode()&fs.ModePerm)
return nil
}
if curFile.ShouldConflict() {
// The old file was invalid for whatever reason and probably not
// up to date with what was out there in the cluster. Drop all
// others from the version vector to indicate that we haven't
// taken their version into account, and possibly cause a
// conflict.
f.Version = f.Version.DropOthers(w.ShortID)
}
l.Debugln(w, "rescan:", curFile)
}
l.Debugln(w, "symlink:", relPath, f)
select {
case finishedChan <- ScanResult{File: f}:
case <-ctx.Done():
return ctx.Err()
}
return nil
}
// normalizePath returns the normalized relative path (possibly after fixing
// it on disk), or an error if normalization failed or is not permitted.
func (w *walker) normalizePath(path string, info fs.FileInfo) (normPath string, err error) {
if build.IsDarwin {
// Mac OS X file names should always be NFD normalized.
normPath = norm.NFD.String(path)
} else {
// Every other OS in the known universe uses NFC or just plain
// doesn't bother to define an encoding. In our case *we* do care,
// so we enforce NFC regardless.
normPath = norm.NFC.String(path)
}
if path == normPath {
// The file name is already normalized: nothing to do
return path, nil
}
if !w.AutoNormalize {
// We're not authorized to do anything about it, so complain and skip.
return "", errUTF8Normalization
}
// We will attempt to normalize it.
normInfo, err := w.Filesystem.Lstat(normPath)
if fs.IsNotExist(err) {
// Nothing exists with the normalized filename. Good.
if err = w.Filesystem.Rename(path, normPath); err != nil {
return "", err
}
l.Infof(`Normalized UTF8 encoding of file name "%s".`, path)
return normPath, nil
}
if w.Filesystem.SameFile(info, normInfo) {
// With some filesystems (ZFS), if there is an un-normalized path and you ask whether the normalized
// version exists, it responds with true. Therefore we need to check fs.SameFile as well.
// In this case, a call to Rename won't do anything, so we have to rename via a temp file.
// We don't want to use the standard syncthing prefix here, as that will result in the file being ignored
// and eventually deleted by Syncthing if the rename back fails.
tempPath := fs.TempNameWithPrefix(normPath, "")
if err = w.Filesystem.Rename(path, tempPath); err != nil {
return "", err
}
if err = w.Filesystem.Rename(tempPath, normPath); err != nil {
// I don't ever expect this to happen, but if it does, we should probably tell our caller that the normalized
// path is the temp path: that way at least the user's data still gets synced.
l.Warnf(`Error renaming "%s" to "%s" while normalizing UTF8 encoding: %v. You will want to rename this file back manually`, tempPath, normPath, err)
return tempPath, nil
}
return normPath, nil
}
// There is something already in the way at the normalized
// file name.
return "", errUTF8Conflict
}
// updateFileInfo updates walker specific members of protocol.FileInfo that
// do not depend on type, and things that should be preserved from the
// previous version of the FileInfo.
func (w *walker) updateFileInfo(dst, src protocol.FileInfo) protocol.FileInfo {
if dst.Type == protocol.FileInfoTypeFile && build.IsWindows {
// If we have an existing index entry, copy the executable bits
// from there.
dst.Permissions |= (src.Permissions & 0o111)
}
dst.Version = src.Version.Update(w.ShortID)
dst.ModifiedBy = w.ShortID
dst.LocalFlags = w.LocalFlags
// Copy OS data from src to dst, unless it was already set on dst.
dst.Platform.MergeWith(&src.Platform)
return dst
}
func handleError(ctx context.Context, context, path string, err error, finishedChan chan<- ScanResult) {
select {
case finishedChan <- ScanResult{
Err: fmt.Errorf("%s: %w", context, err),
Path: path,
}:
case <-ctx.Done():
}
}
func (w *walker) String() string {
return fmt.Sprintf("walker/%s@%p", w.Folder, w)
}
// A byteCounter gets bytes added to it via Update() and then provides the
// Total() and one minute moving average Rate() in bytes per second.
type byteCounter struct {
total atomic.Int64
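// The embedded EWMA supplies the one-minute moving average Rate() method.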
metrics.EWMA
stop chan struct{}
}
func newByteCounter() *byteCounter {
c := &byteCounter{
EWMA: metrics.NewEWMA1(), // a one minute exponentially weighted moving average
stop: make(chan struct{}),
}
go c.ticker()
return c
}
func (c *byteCounter) ticker() {
// The metrics.EWMA expects clock ticks every five seconds in order to
// decay the average properly.
t := time.NewTicker(5 * time.Second)
for {
select {
case <-t.C:
c.Tick()
case <-c.stop:
t.Stop()
return
}
}
}
func (c *byteCounter) Update(bytes int64) {
c.total.Add(bytes)
c.EWMA.Update(bytes)
}
func (c *byteCounter) Total() int64 { return c.total.Load() }
func (c *byteCounter) Close() {
close(c.stop)
}
// A no-op CurrentFiler
type noCurrentFiler struct{}
func (noCurrentFiler) CurrentFile(_ string) (protocol.FileInfo, bool) {
return protocol.FileInfo{}, false
}
func CreateFileInfo(fi fs.FileInfo, name string, filesystem fs.Filesystem, scanOwnership bool, scanXattrs bool, xattrFilter XattrFilter) (protocol.FileInfo, error) {
f := protocol.FileInfo{Name: name}
if scanOwnership || scanXattrs {
if plat, err := filesystem.PlatformData(name, scanOwnership, scanXattrs, xattrFilter); err == nil {
f.Platform = plat
} else {
return protocol.FileInfo{}, fmt.Errorf("reading platform data: %w", err)
}
}
if ct := fi.InodeChangeTime(); !ct.IsZero() {
f.InodeChangeNs = ct.UnixNano()
} else {
f.InodeChangeNs = 0
}
if fi.IsSymlink() {
f.Type = protocol.FileInfoTypeSymlink
target, err := filesystem.ReadSymlink(name)
if err != nil {
return protocol.FileInfo{}, err
}
f.SymlinkTarget = target
f.NoPermissions = true // Symlinks don't have permissions of their own
return f, nil
}
f.Permissions = uint32(fi.Mode() & fs.ModePerm)
f.ModifiedS = fi.ModTime().Unix()
f.ModifiedNs = fi.ModTime().Nanosecond()
if fi.IsDir() {
f.Type = protocol.FileInfoTypeDirectory
return f, nil
}
f.Size = fi.Size()
f.Type = protocol.FileInfoTypeFile
return f, nil
}
| lib/scanner/walk.go | 1 | https://github.com/syncthing/syncthing/commit/2f3eacdb6c1c33650ccdd91f42e842c116200d92 | [
0.0007529562572017312,
0.0001779165759216994,
0.00016098792548291385,
0.00017091904010158032,
0.0000679080476402305
] |
{
"id": 0,
"code_window": [
" // Do the same thing in case we only have zero byte files to sync.\n",
" return 95;\n",
" }\n",
" var pct = 100 * $scope.model[folder].inSyncBytes / $scope.model[folder].globalBytes;\n",
" return Math.floor(pct);\n",
" };\n",
"\n",
" $scope.scanPercentage = function (folder) {\n",
" if (!$scope.scanProgress[folder]) {\n",
" return undefined;\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" return progressIntegerPercentage($scope.model[folder].inSyncBytes, $scope.model[folder].globalBytes);\n"
],
"file_path": "gui/default/syncthing/core/syncthingController.js",
"type": "replace",
"edit_start_line_idx": 1000
} | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: database.proto
package main
import (
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type DatabaseRecord struct {
Addresses []DatabaseAddress `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses"`
Misses int32 `protobuf:"varint,2,opt,name=misses,proto3" json:"misses,omitempty"`
Seen int64 `protobuf:"varint,3,opt,name=seen,proto3" json:"seen,omitempty"`
Missed int64 `protobuf:"varint,4,opt,name=missed,proto3" json:"missed,omitempty"`
}
func (m *DatabaseRecord) Reset() { *m = DatabaseRecord{} }
func (m *DatabaseRecord) String() string { return proto.CompactTextString(m) }
func (*DatabaseRecord) ProtoMessage() {}
func (*DatabaseRecord) Descriptor() ([]byte, []int) {
return fileDescriptor_b90fe3356ea5df07, []int{0}
}
func (m *DatabaseRecord) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DatabaseRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_DatabaseRecord.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *DatabaseRecord) XXX_Merge(src proto.Message) {
xxx_messageInfo_DatabaseRecord.Merge(m, src)
}
func (m *DatabaseRecord) XXX_Size() int {
return m.Size()
}
func (m *DatabaseRecord) XXX_DiscardUnknown() {
xxx_messageInfo_DatabaseRecord.DiscardUnknown(m)
}
var xxx_messageInfo_DatabaseRecord proto.InternalMessageInfo
type ReplicationRecord struct {
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Addresses []DatabaseAddress `protobuf:"bytes,2,rep,name=addresses,proto3" json:"addresses"`
Seen int64 `protobuf:"varint,3,opt,name=seen,proto3" json:"seen,omitempty"`
}
func (m *ReplicationRecord) Reset() { *m = ReplicationRecord{} }
func (m *ReplicationRecord) String() string { return proto.CompactTextString(m) }
func (*ReplicationRecord) ProtoMessage() {}
func (*ReplicationRecord) Descriptor() ([]byte, []int) {
return fileDescriptor_b90fe3356ea5df07, []int{1}
}
func (m *ReplicationRecord) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ReplicationRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ReplicationRecord.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ReplicationRecord) XXX_Merge(src proto.Message) {
xxx_messageInfo_ReplicationRecord.Merge(m, src)
}
func (m *ReplicationRecord) XXX_Size() int {
return m.Size()
}
func (m *ReplicationRecord) XXX_DiscardUnknown() {
xxx_messageInfo_ReplicationRecord.DiscardUnknown(m)
}
var xxx_messageInfo_ReplicationRecord proto.InternalMessageInfo
type DatabaseAddress struct {
Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
Expires int64 `protobuf:"varint,2,opt,name=expires,proto3" json:"expires,omitempty"`
}
func (m *DatabaseAddress) Reset() { *m = DatabaseAddress{} }
func (m *DatabaseAddress) String() string { return proto.CompactTextString(m) }
func (*DatabaseAddress) ProtoMessage() {}
func (*DatabaseAddress) Descriptor() ([]byte, []int) {
return fileDescriptor_b90fe3356ea5df07, []int{2}
}
func (m *DatabaseAddress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DatabaseAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_DatabaseAddress.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *DatabaseAddress) XXX_Merge(src proto.Message) {
xxx_messageInfo_DatabaseAddress.Merge(m, src)
}
func (m *DatabaseAddress) XXX_Size() int {
return m.Size()
}
func (m *DatabaseAddress) XXX_DiscardUnknown() {
xxx_messageInfo_DatabaseAddress.DiscardUnknown(m)
}
var xxx_messageInfo_DatabaseAddress proto.InternalMessageInfo
func init() {
proto.RegisterType((*DatabaseRecord)(nil), "main.DatabaseRecord")
proto.RegisterType((*ReplicationRecord)(nil), "main.ReplicationRecord")
proto.RegisterType((*DatabaseAddress)(nil), "main.DatabaseAddress")
}
func init() { proto.RegisterFile("database.proto", fileDescriptor_b90fe3356ea5df07) }
var fileDescriptor_b90fe3356ea5df07 = []byte{
// 270 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x90, 0x41, 0x4a, 0xc4, 0x30,
0x18, 0x85, 0x9b, 0x49, 0x1d, 0x99, 0x08, 0xa3, 0x06, 0x94, 0x20, 0x12, 0x4b, 0xdd, 0x74, 0xd5,
0x01, 0x5d, 0xb9, 0x74, 0xd0, 0x0b, 0xe4, 0x06, 0xe9, 0xe4, 0x77, 0x08, 0x3a, 0x4d, 0x49, 0x2a,
0xe8, 0x29, 0xf4, 0x58, 0x5d, 0xce, 0xd2, 0x95, 0x68, 0x7b, 0x11, 0x69, 0x26, 0x55, 0x14, 0x37,
0xb3, 0x7b, 0xdf, 0xff, 0xbf, 0x97, 0xbc, 0x84, 0x4c, 0x95, 0xac, 0x65, 0x21, 0x1d, 0xe4, 0x95,
0x35, 0xb5, 0xa1, 0xf1, 0x4a, 0xea, 0xf2, 0xe4, 0xdc, 0x42, 0x65, 0xdc, 0xcc, 0x8f, 0x8a, 0xc7,
0xbb, 0xd9, 0xd2, 0x2c, 0x8d, 0x07, 0xaf, 0x36, 0xd6, 0xf4, 0x05, 0x91, 0xe9, 0x4d, 0x48, 0x0b,
0x58, 0x18, 0xab, 0xe8, 0x15, 0x99, 0x48, 0xa5, 0x2c, 0x38, 0x07, 0x8e, 0xa1, 0x04, 0x67, 0x7b,
0x17, 0x47, 0x79, 0x7f, 0x62, 0x3e, 0x18, 0xaf, 0x37, 0xeb, 0x79, 0xdc, 0xbc, 0x9f, 0x45, 0xe2,
0xc7, 0x4d, 0x8f, 0xc9, 0x78, 0xa5, 0x7d, 0x6e, 0x94, 0xa0, 0x6c, 0x47, 0x04, 0xa2, 0x94, 0xc4,
0x0e, 0xa0, 0x64, 0x38, 0x41, 0x19, 0x16, 0x5e, 0x7f, 0x7b, 0x15, 0x8b, 0xfd, 0x34, 0x50, 0x5a,
0x93, 0x43, 0x01, 0xd5, 0x83, 0x5e, 0xc8, 0x5a, 0x9b, 0x32, 0x74, 0x3a, 0x20, 0xf8, 0x1e, 0x9e,
0x19, 0x4a, 0x50, 0x36, 0x11, 0xbd, 0xfc, 0xdd, 0x72, 0xb4, 0x55, 0xcb, 0x7f, 0xda, 0xa4, 0xb7,
0x64, 0xff, 0x4f, 0x8e, 0x32, 0xb2, 0x1b, 0x32, 0xe1, 0xde, 0x01, 0xfb, 0x0d, 0x3c, 0x55, 0xda,
0x86, 0x77, 0x62, 0x31, 0xe0, 0xfc, 0xb4, 0xf9, 0xe4, 0x51, 0xd3, 0x72, 0xb4, 0x6e, 0x39, 0xfa,
0x68, 0x39, 0x7a, 0xed, 0x78, 0xb4, 0xee, 0x78, 0xf4, 0xd6, 0xf1, 0xa8, 0x18, 0xfb, 0x3f, 0xbf,
0xfc, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x7a, 0xa2, 0xf6, 0x1e, 0xb0, 0x01, 0x00, 0x00,
}
func (m *DatabaseRecord) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *DatabaseRecord) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *DatabaseRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Missed != 0 {
i = encodeVarintDatabase(dAtA, i, uint64(m.Missed))
i--
dAtA[i] = 0x20
}
if m.Seen != 0 {
i = encodeVarintDatabase(dAtA, i, uint64(m.Seen))
i--
dAtA[i] = 0x18
}
if m.Misses != 0 {
i = encodeVarintDatabase(dAtA, i, uint64(m.Misses))
i--
dAtA[i] = 0x10
}
if len(m.Addresses) > 0 {
for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintDatabase(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *ReplicationRecord) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ReplicationRecord) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ReplicationRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Seen != 0 {
i = encodeVarintDatabase(dAtA, i, uint64(m.Seen))
i--
dAtA[i] = 0x18
}
if len(m.Addresses) > 0 {
for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintDatabase(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
if len(m.Key) > 0 {
i -= len(m.Key)
copy(dAtA[i:], m.Key)
i = encodeVarintDatabase(dAtA, i, uint64(len(m.Key)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *DatabaseAddress) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *DatabaseAddress) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *DatabaseAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Expires != 0 {
i = encodeVarintDatabase(dAtA, i, uint64(m.Expires))
i--
dAtA[i] = 0x10
}
if len(m.Address) > 0 {
i -= len(m.Address)
copy(dAtA[i:], m.Address)
i = encodeVarintDatabase(dAtA, i, uint64(len(m.Address)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func encodeVarintDatabase(dAtA []byte, offset int, v uint64) int {
offset -= sovDatabase(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *DatabaseRecord) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Addresses) > 0 {
for _, e := range m.Addresses {
l = e.Size()
n += 1 + l + sovDatabase(uint64(l))
}
}
if m.Misses != 0 {
n += 1 + sovDatabase(uint64(m.Misses))
}
if m.Seen != 0 {
n += 1 + sovDatabase(uint64(m.Seen))
}
if m.Missed != 0 {
n += 1 + sovDatabase(uint64(m.Missed))
}
return n
}
func (m *ReplicationRecord) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Key)
if l > 0 {
n += 1 + l + sovDatabase(uint64(l))
}
if len(m.Addresses) > 0 {
for _, e := range m.Addresses {
l = e.Size()
n += 1 + l + sovDatabase(uint64(l))
}
}
if m.Seen != 0 {
n += 1 + sovDatabase(uint64(m.Seen))
}
return n
}
func (m *DatabaseAddress) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Address)
if l > 0 {
n += 1 + l + sovDatabase(uint64(l))
}
if m.Expires != 0 {
n += 1 + sovDatabase(uint64(m.Expires))
}
return n
}
func sovDatabase(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozDatabase(x uint64) (n int) {
return sovDatabase(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *DatabaseRecord) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabase
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: DatabaseRecord: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: DatabaseRecord: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabase
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthDatabase
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthDatabase
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Addresses = append(m.Addresses, DatabaseAddress{})
if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Misses", wireType)
}
m.Misses = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabase
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Misses |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Seen", wireType)
}
m.Seen = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabase
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Seen |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Missed", wireType)
}
m.Missed = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabase
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Missed |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipDatabase(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthDatabase
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ReplicationRecord) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabase
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ReplicationRecord: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ReplicationRecord: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabase
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthDatabase
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthDatabase
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabase
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthDatabase
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthDatabase
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Addresses = append(m.Addresses, DatabaseAddress{})
if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Seen", wireType)
}
m.Seen = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabase
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Seen |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipDatabase(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthDatabase
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *DatabaseAddress) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabase
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: DatabaseAddress: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: DatabaseAddress: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabase
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthDatabase
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthDatabase
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Address = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Expires", wireType)
}
m.Expires = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabase
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Expires |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipDatabase(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthDatabase
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipDatabase(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowDatabase
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowDatabase
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowDatabase
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthDatabase
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupDatabase
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthDatabase
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthDatabase = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowDatabase = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupDatabase = fmt.Errorf("proto: unexpected end of group")
)
| cmd/stdiscosrv/database.pb.go | 0 | https://github.com/syncthing/syncthing/commit/2f3eacdb6c1c33650ccdd91f42e842c116200d92 | [
0.00017627656052354723,
0.00017188438505399972,
0.00016220843826886266,
0.00017384439706802368,
0.000004136134521104395
] |
{
"id": 0,
"code_window": [
" // Do the same thing in case we only have zero byte files to sync.\n",
" return 95;\n",
" }\n",
" var pct = 100 * $scope.model[folder].inSyncBytes / $scope.model[folder].globalBytes;\n",
" return Math.floor(pct);\n",
" };\n",
"\n",
" $scope.scanPercentage = function (folder) {\n",
" if (!$scope.scanProgress[folder]) {\n",
" return undefined;\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" return progressIntegerPercentage($scope.model[folder].inSyncBytes, $scope.model[folder].globalBytes);\n"
],
"file_path": "gui/default/syncthing/core/syncthingController.js",
"type": "replace",
"edit_start_line_idx": 1000
} | // Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
// Package signature provides simple methods to create and verify signatures
// in PEM format.
package signature
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/x509"
"encoding/asn1"
"encoding/pem"
"errors"
"fmt"
"io"
"math/big"
"github.com/syncthing/syncthing/lib/rand"
"github.com/syncthing/syncthing/lib/sha256"
)
// GenerateKeys returns a new key pair, with the private and public key
// encoded in PEM format.
func GenerateKeys() (privKey []byte, pubKey []byte, err error) {
// Generate a new key pair
key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
if err != nil {
return nil, nil, err
}
// Marshal the private key
bs, err := x509.MarshalECPrivateKey(key)
if err != nil {
return nil, nil, err
}
// Encode it in PEM format
privKey = pem.EncodeToMemory(&pem.Block{
Type: "EC PRIVATE KEY",
Bytes: bs,
})
// Marshal the public key
bs, err = x509.MarshalPKIXPublicKey(&key.PublicKey)
if err != nil {
return nil, nil, err
}
// Encode it in PEM format
pubKey = pem.EncodeToMemory(&pem.Block{
Type: "EC PUBLIC KEY",
Bytes: bs,
})
return
}
// Sign computes the hash of data and signs it with the private key, returning
// a signature in PEM format.
func Sign(privKeyPEM []byte, data io.Reader) ([]byte, error) {
// Parse the private key
key, err := loadPrivateKey(privKeyPEM)
if err != nil {
return nil, err
}
// Hash the reader data
hash, err := hashReader(data)
if err != nil {
return nil, err
}
// Sign the hash
r, s, err := ecdsa.Sign(rand.Reader, key, hash)
if err != nil {
return nil, err
}
// Marshal the signature using ASN.1
sig, err := marshalSignature(r, s)
if err != nil {
return nil, err
}
// Encode it in a PEM block
bs := pem.EncodeToMemory(&pem.Block{
Type: "SIGNATURE",
Bytes: sig,
})
return bs, nil
}
// Verify computes the hash of data and compares it to the signature using the
// given public key. Returns nil if the signature is correct.
func Verify(pubKeyPEM []byte, signature []byte, data io.Reader) error {
// Parse the public key
key, err := loadPublicKey(pubKeyPEM)
if err != nil {
return err
}
// Parse the signature
block, _ := pem.Decode(signature)
if block == nil || block.Bytes == nil {
return errors.New("unsupported signature format")
}
r, s, err := unmarshalSignature(block.Bytes)
if err != nil {
return err
}
// Compute the hash of the data
hash, err := hashReader(data)
if err != nil {
return err
}
// Verify the signature
if !ecdsa.Verify(key, hash, r, s) {
return errors.New("incorrect signature")
}
return nil
}
// hashReader returns the SHA256 hash of the reader
func hashReader(r io.Reader) ([]byte, error) {
h := sha256.New()
if _, err := io.Copy(h, r); err != nil {
return nil, err
}
hash := []byte(fmt.Sprintf("%x", h.Sum(nil)))
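// Note that the hex-encoded digest, not the raw hash bytes, is what gets
// signed and verified; Sign and Verify agree because both go through here.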
return hash, nil
}
// loadPrivateKey returns the ECDSA private key structure for the given PEM
// data.
func loadPrivateKey(bs []byte) (*ecdsa.PrivateKey, error) {
block, _ := pem.Decode(bs)
if block == nil || block.Bytes == nil {
return nil, errors.New("unsupported private key format")
}
return x509.ParseECPrivateKey(block.Bytes)
}
// loadPublicKey returns the ECDSA public key structure for the given PEM
// data.
func loadPublicKey(bs []byte) (*ecdsa.PublicKey, error) {
// Decode and parse the public key PEM block
block, _ := pem.Decode(bs)
if block == nil || block.Bytes == nil {
return nil, errors.New("unsupported public key format")
}
intf, err := x509.ParsePKIXPublicKey(block.Bytes)
if err != nil {
return nil, err
}
// It should be an ECDSA public key
pk, ok := intf.(*ecdsa.PublicKey)
if !ok {
return nil, errors.New("unsupported public key format")
}
return pk, nil
}
// A wrapper around the signature integers so that we can marshal and
// unmarshal them.
type signature struct {
R, S *big.Int
}
// marshalSignature returns ASN.1 encoded bytes for the given integers,
// suitable for PEM encoding.
func marshalSignature(r, s *big.Int) ([]byte, error) {
sig := signature{
R: r,
S: s,
}
bs, err := asn1.Marshal(sig)
if err != nil {
return nil, err
}
return bs, nil
}
// unmarshalSignature returns the R and S integers from the given ASN.1
// encoded signature.
func unmarshalSignature(sig []byte) (r *big.Int, s *big.Int, err error) {
var ts signature
_, err = asn1.Unmarshal(sig, &ts)
if err != nil {
return nil, nil, err
}
return ts.R, ts.S, nil
}
| lib/signature/signature.go | 0 | https://github.com/syncthing/syncthing/commit/2f3eacdb6c1c33650ccdd91f42e842c116200d92 | [
0.00018137744336854666,
0.0001701253786450252,
0.00016387450159527361,
0.00016966761904768646,
0.000004195804194750963
] |
{
"id": 0,
"code_window": [
" // Do the same thing in case we only have zero byte files to sync.\n",
" return 95;\n",
" }\n",
" var pct = 100 * $scope.model[folder].inSyncBytes / $scope.model[folder].globalBytes;\n",
" return Math.floor(pct);\n",
" };\n",
"\n",
" $scope.scanPercentage = function (folder) {\n",
" if (!$scope.scanProgress[folder]) {\n",
" return undefined;\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" return progressIntegerPercentage($scope.model[folder].inSyncBytes, $scope.model[folder].globalBytes);\n"
],
"file_path": "gui/default/syncthing/core/syncthingController.js",
"type": "replace",
"edit_start_line_idx": 1000
} | // Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package osutil
import (
"errors"
"path/filepath"
"github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/fs"
)
var (
ErrClosed = errors.New("write to closed writer")
TempPrefix = ".syncthing.tmp."
)
// An AtomicWriter writes to a temporary file in the same directory as the
// final path. On successful Close the temporary file is renamed to the
// final path. Any error on Write or during Close is accumulated and
// returned on Close, so a lazy user can ignore errors until Close.
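//
// A minimal usage sketch (path and payload are illustrative; error
// handling elided):
//
//	w, _ := CreateAtomic("/some/dir/file.txt")
//	w.Write([]byte("payload"))
//	err := w.Close() // the rename to the final path happens here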
type AtomicWriter struct {
path string
next fs.File
fs fs.Filesystem
err error
}
// CreateAtomic is like os.Create, except a temporary file name is used
// instead of the given name. The file is created with secure (0600)
// permissions.
func CreateAtomic(path string) (*AtomicWriter, error) {
fs := fs.NewFilesystem(fs.FilesystemTypeBasic, filepath.Dir(path))
return CreateAtomicFilesystem(fs, filepath.Base(path))
}
// CreateAtomicFilesystem is like os.Create, except a temporary file name is used
// instead of the given name. The file is created with secure (0600)
// permissions.
func CreateAtomicFilesystem(filesystem fs.Filesystem, path string) (*AtomicWriter, error) {
// The security of this depends on the tempfile having secure
// permissions, 0600, from the beginning. This is what os.CreateTemp
// does. We have a test that verifies that that is the case, should this
// ever change in the standard library in the future.
fd, err := TempFile(filesystem, filepath.Dir(path), TempPrefix)
if err != nil {
return nil, err
}
w := &AtomicWriter{
path: path,
next: fd,
fs: filesystem,
}
return w, nil
}
// Write is like io.Writer, but is a no-op on an already failed AtomicWriter.
func (w *AtomicWriter) Write(bs []byte) (int, error) {
if w.err != nil {
return 0, w.err
}
n, err := w.next.Write(bs)
if err != nil {
w.err = err
w.next.Close()
}
return n, err
}
// Close closes the temporary file and renames it to the final path. It is
// invalid to call Write() or Close() after Close().
func (w *AtomicWriter) Close() error {
if w.err != nil {
return w.err
}
// Try to not leave temp file around, but ignore error.
defer w.fs.Remove(w.next.Name())
// sync() isn't supported everywhere; a best effort will suffice.
_ = w.next.Sync()
if err := w.next.Close(); err != nil {
w.err = err
return err
}
info, infoErr := w.fs.Lstat(w.path)
if infoErr != nil && !fs.IsNotExist(infoErr) {
w.err = infoErr
return infoErr
}
err := w.fs.Rename(w.next.Name(), w.path)
if build.IsWindows && fs.IsPermission(err) {
// On Windows, we might not be allowed to rename over the file
// because it's read-only. Get us some write permissions and try
// again.
_ = w.fs.Chmod(w.path, 0644)
err = w.fs.Rename(w.next.Name(), w.path)
}
if err != nil {
w.err = err
return err
}
if infoErr == nil {
// Restore chmod setting for final file to what it was
if err := w.fs.Chmod(w.path, info.Mode()); err != nil {
// Only fail if permissions differ, since some filesystems are expected to not allow chmod (e.g. error
// `operation not permitted`).
infoAfterRename, infoAfterRenameErr := w.fs.Lstat(w.path)
if infoAfterRenameErr != nil || infoAfterRename.Mode() != info.Mode() {
w.err = err
return err
}
}
}
// fsync the directory too
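// (best effort, to make the rename itself durable; errors are ignored)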
if fd, err := w.fs.Open(filepath.Dir(w.next.Name())); err == nil {
fd.Sync()
fd.Close()
}
// Set w.err to return appropriately for any future operations.
w.err = ErrClosed
return nil
}
| lib/osutil/atomic.go | 0 | https://github.com/syncthing/syncthing/commit/2f3eacdb6c1c33650ccdd91f42e842c116200d92 | [
0.00045375287299975753,
0.00018879948765970767,
0.00016092606529127806,
0.00016942634829320014,
0.00007362350152106956
] |
{
"id": 1,
"code_window": [
" $scope.scanPercentage = function (folder) {\n",
" if (!$scope.scanProgress[folder]) {\n",
" return undefined;\n",
" }\n",
" var pct = 100 * $scope.scanProgress[folder].current / $scope.scanProgress[folder].total;\n",
" return Math.floor(pct);\n",
" };\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
" return progressIntegerPercentage($scope.scanProgress[folder].current, $scope.scanProgress[folder].total);\n"
],
"file_path": "gui/default/syncthing/core/syncthingController.js",
"type": "replace",
"edit_start_line_idx": 1008
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package scanner
import (
"context"
"errors"
"fmt"
"path/filepath"
"strings"
"sync/atomic"
"time"
"unicode/utf8"
metrics "github.com/rcrowley/go-metrics"
"github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/ignore"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol"
"golang.org/x/text/unicode/norm"
)
type Config struct {
// Folder for which the walker has been created
Folder string
// Limit walking to these paths within the folder, or no limit if Subs is empty
Subs []string
// If Matcher is not nil, it is used to identify files to ignore which were specified by the user.
Matcher *ignore.Matcher
// Number of hours to keep temporary files for
TempLifetime time.Duration
// If CurrentFiler is not nil, it is queried for the current file before rescanning.
CurrentFiler CurrentFiler
// The Filesystem provides an abstraction on top of the actual filesystem.
Filesystem fs.Filesystem
// If IgnorePerms is true, changes to permission bits will not be
// detected.
IgnorePerms bool
// When AutoNormalize is set, file names that are in UTF8 but incorrect
// normalization form will be corrected.
AutoNormalize bool
// Number of routines to use for hashing
Hashers int
// Our vector clock id
ShortID protocol.ShortID
// Optional progress tick interval which defines how often FolderScanProgress
// events are emitted. Negative number means disabled.
ProgressTickIntervalS int
// Local flags to set on scanned files
LocalFlags uint32
// Modification time is to be considered unchanged if the difference is lower.
ModTimeWindow time.Duration
// Event logger to which the scan progress events are sent
EventLogger events.Logger
// If ScanOwnership is true, we pick up ownership information on files while scanning.
ScanOwnership bool
// If ScanXattrs is true, we pick up extended attributes on files while scanning.
ScanXattrs bool
// Filter for extended attributes
XattrFilter XattrFilter
}
type CurrentFiler interface {
// CurrentFile returns the file as seen at last scan.
CurrentFile(name string) (protocol.FileInfo, bool)
}
type XattrFilter interface {
Permit(string) bool
GetMaxSingleEntrySize() int
GetMaxTotalSize() int
}
type ScanResult struct {
File protocol.FileInfo
Err error
Path string // to be set in case Err != nil and File == nil
}
func Walk(ctx context.Context, cfg Config) chan ScanResult {
return newWalker(cfg).walk(ctx)
}
func WalkWithoutHashing(ctx context.Context, cfg Config) chan ScanResult {
return newWalker(cfg).walkWithoutHashing(ctx)
}
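// A minimal usage sketch (field values are illustrative; at least a
// Filesystem must be set, since newWalker panics without one):
//
//	results := Walk(ctx, Config{
//		Folder:      "default",
//		Filesystem:  fsys,
//		Hashers:     4,
//		EventLogger: logger,
//	})
//	for res := range results {
//		// inspect res.File or res.Err
//	}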
func newWalker(cfg Config) *walker {
w := &walker{cfg}
if w.CurrentFiler == nil {
w.CurrentFiler = noCurrentFiler{}
}
if w.Filesystem == nil {
panic("no filesystem specified")
}
if w.Matcher == nil {
w.Matcher = ignore.New(w.Filesystem)
}
registerFolderMetrics(w.Folder)
return w
}
var (
errUTF8Invalid = errors.New("item is not in UTF8 encoding")
errUTF8Normalization = errors.New("item is not in the correct UTF8 normalization form")
errUTF8Conflict = errors.New("item has UTF8 encoding conflict with another item")
)
type walker struct {
Config
}
// walk scans the local folder and returns the files found over a channel.
// Files are blockwise hashed.
func (w *walker) walk(ctx context.Context) chan ScanResult {
l.Debugln(w, "Walk", w.Subs, w.Matcher)
toHashChan := make(chan protocol.FileInfo)
finishedChan := make(chan ScanResult)
// A routine which walks the filesystem tree, and sends files which have
// been modified to the counter routine.
go w.scan(ctx, toHashChan, finishedChan)
// We're not required to emit scan progress events, so just kick off the
// hashers and feed them directly from the walker.
if w.ProgressTickIntervalS < 0 {
newParallelHasher(ctx, w.Folder, w.Filesystem, w.Hashers, finishedChan, toHashChan, nil, nil)
return finishedChan
}
// Defaults to every 2 seconds.
if w.ProgressTickIntervalS == 0 {
w.ProgressTickIntervalS = 2
}
ticker := time.NewTicker(time.Duration(w.ProgressTickIntervalS) * time.Second)
// We need to emit progress events, hence we create a routine which buffers
// the list of files to be hashed, counts the total number of
// bytes to hash, and once no more files need to be hashed (chan gets closed),
// start a routine which periodically emits FolderScanProgress events,
// until a stop signal is sent by the parallel hasher.
// Parallel hasher is stopped by this routine when we close the channel over
// which it receives the files we ask it to hash.
go func() {
var filesToHash []protocol.FileInfo
var total int64 = 1
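// total starts at 1 rather than 0 so the percentage math below never
// divides by zero, at the cost of a negligible off-by-one in the total.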
for file := range toHashChan {
filesToHash = append(filesToHash, file)
total += file.Size
}
realToHashChan := make(chan protocol.FileInfo)
done := make(chan struct{})
progress := newByteCounter()
newParallelHasher(ctx, w.Folder, w.Filesystem, w.Hashers, finishedChan, realToHashChan, progress, done)
// A routine which actually emits the FolderScanProgress events
// every w.ProgressTicker ticks, until the hasher routines terminate.
go func() {
defer progress.Close()
for {
select {
case <-done:
l.Debugln(w, "Walk progress done", w.Folder, w.Subs, w.Matcher)
ticker.Stop()
return
case <-ticker.C:
current := progress.Total()
rate := progress.Rate()
l.Debugf("%v: Walk %s %s current progress %d/%d at %.01f MiB/s (%d%%)", w, w.Folder, w.Subs, current, total, rate/1024/1024, current*100/total)
w.EventLogger.Log(events.FolderScanProgress, map[string]interface{}{
"folder": w.Folder,
"current": current,
"total": total,
"rate": rate, // bytes per second
})
case <-ctx.Done():
ticker.Stop()
return
}
}
}()
loop:
for _, file := range filesToHash {
l.Debugln(w, "real to hash:", file.Name)
select {
case realToHashChan <- file:
case <-ctx.Done():
break loop
}
}
close(realToHashChan)
}()
return finishedChan
}
func (w *walker) walkWithoutHashing(ctx context.Context) chan ScanResult {
l.Debugln(w, "Walk without hashing", w.Subs, w.Matcher)
toHashChan := make(chan protocol.FileInfo)
finishedChan := make(chan ScanResult)
// A routine which walks the filesystem tree, and sends files which have
// been modified to the counter routine.
go w.scan(ctx, toHashChan, finishedChan)
go func() {
for file := range toHashChan {
finishedChan <- ScanResult{File: file}
}
close(finishedChan)
}()
return finishedChan
}
func (w *walker) scan(ctx context.Context, toHashChan chan<- protocol.FileInfo, finishedChan chan<- ScanResult) {
hashFiles := w.walkAndHashFiles(ctx, toHashChan, finishedChan)
if len(w.Subs) == 0 {
w.Filesystem.Walk(".", hashFiles)
} else {
for _, sub := range w.Subs {
if err := osutil.TraversesSymlink(w.Filesystem, filepath.Dir(sub)); err != nil {
l.Debugf("%v: Skip walking %v as it is below a symlink", w, sub)
continue
}
w.Filesystem.Walk(sub, hashFiles)
}
}
close(toHashChan)
}
func (w *walker) walkAndHashFiles(ctx context.Context, toHashChan chan<- protocol.FileInfo, finishedChan chan<- ScanResult) fs.WalkFunc {
now := time.Now()
ignoredParent := ""
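// ignoredParent tracks the topmost ignored directory seen so far. If a
// non-ignored item later turns up beneath it, the chain of previously
// skipped parent directories is scanned after the fact (see below).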
return func(path string, info fs.FileInfo, err error) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
metricScannedItems.WithLabelValues(w.Folder).Inc()
// Return value used when we are returning early and don't want to
// process the item. For directories, this means do-not-descend.
var skip error // nil
// info is nil when err is not nil
if info != nil && info.IsDir() {
skip = fs.SkipDir
}
if !utf8.ValidString(path) {
handleError(ctx, "scan", path, errUTF8Invalid, finishedChan)
return skip
}
if fs.IsTemporary(path) {
l.Debugln(w, "temporary:", path, "err:", err)
if err == nil && info.IsRegular() && info.ModTime().Add(w.TempLifetime).Before(now) {
w.Filesystem.Remove(path)
l.Debugln(w, "removing temporary:", path, info.ModTime())
}
return nil
}
if fs.IsInternal(path) {
l.Debugln(w, "ignored (internal):", path)
return skip
}
if w.Matcher.Match(path).IsIgnored() {
l.Debugln(w, "ignored (patterns):", path)
// Only descend if matcher says so and the current file is not a symlink.
if err != nil || w.Matcher.SkipIgnoredDirs() || info.IsSymlink() {
return skip
}
// If the parent wasn't ignored already, set this path as the "highest" ignored parent
if info.IsDir() && (ignoredParent == "" || !fs.IsParent(path, ignoredParent)) {
ignoredParent = path
}
return nil
}
if err != nil {
// No need to report errors for files that don't exist (e.g. a scan
// triggered by the filesystem watcher)
if !fs.IsNotExist(err) {
handleError(ctx, "scan", path, err, finishedChan)
}
return skip
}
if path == "." {
return nil
}
if ignoredParent == "" {
// parent isn't ignored, nothing special
return w.handleItem(ctx, path, info, toHashChan, finishedChan, skip)
}
// Part of current path below the ignored (potential) parent
rel := strings.TrimPrefix(path, ignoredParent+string(fs.PathSeparator))
// ignored path isn't actually a parent of the current path
if rel == path {
ignoredParent = ""
return w.handleItem(ctx, path, info, toHashChan, finishedChan, skip)
}
// The previously ignored parent directories of the current, not
// ignored path need to be handled as well.
// Prepend an empty string to handle ignoredParent without anything
// appended in the first iteration.
for _, name := range append([]string{""}, fs.PathComponents(rel)...) {
ignoredParent = filepath.Join(ignoredParent, name)
info, err = w.Filesystem.Lstat(ignoredParent)
// An error here would be weird as we've already gotten to this point, but act on it nonetheless
if err != nil {
handleError(ctx, "scan", ignoredParent, err, finishedChan)
return skip
}
if err = w.handleItem(ctx, ignoredParent, info, toHashChan, finishedChan, skip); err != nil {
return err
}
}
ignoredParent = ""
return nil
}
}
func (w *walker) handleItem(ctx context.Context, path string, info fs.FileInfo, toHashChan chan<- protocol.FileInfo, finishedChan chan<- ScanResult, skip error) error {
oldPath := path
path, err := w.normalizePath(path, info)
if err != nil {
handleError(ctx, "normalizing path", oldPath, err, finishedChan)
return skip
}
switch {
case info.IsSymlink():
if err := w.walkSymlink(ctx, path, info, finishedChan); err != nil {
return err
}
if info.IsDir() {
// under no circumstances shall we descend into a symlink
return fs.SkipDir
}
return nil
case info.IsDir():
err = w.walkDir(ctx, path, info, finishedChan)
case info.IsRegular():
err = w.walkRegular(ctx, path, info, toHashChan)
}
return err
}
func (w *walker) walkRegular(ctx context.Context, relPath string, info fs.FileInfo, toHashChan chan<- protocol.FileInfo) error {
curFile, hasCurFile := w.CurrentFiler.CurrentFile(relPath)
blockSize := protocol.BlockSize(info.Size())
if hasCurFile {
// Check if we should retain current block size.
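// Retaining the old size within a factor of two is hysteresis: it avoids
// rehashing the whole file with a new block size every time the file
// crosses a size boundary by a small amount.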
curBlockSize := curFile.BlockSize()
if blockSize > curBlockSize && blockSize/curBlockSize <= 2 {
// New block size is larger, but not more than twice larger.
// Retain.
blockSize = curBlockSize
} else if curBlockSize > blockSize && curBlockSize/blockSize <= 2 {
// Old block size is larger, but not more than twice larger.
// Retain.
blockSize = curBlockSize
}
}
f, err := CreateFileInfo(info, relPath, w.Filesystem, w.ScanOwnership, w.ScanXattrs, w.XattrFilter)
if err != nil {
return err
}
f = w.updateFileInfo(f, curFile)
f.NoPermissions = w.IgnorePerms
f.RawBlockSize = blockSize
l.Debugln(w, "checking:", f)
if hasCurFile {
if curFile.IsEquivalentOptional(f, protocol.FileInfoComparison{
ModTimeWindow: w.ModTimeWindow,
IgnorePerms: w.IgnorePerms,
IgnoreBlocks: true,
IgnoreFlags: w.LocalFlags,
IgnoreOwnership: !w.ScanOwnership,
IgnoreXattrs: !w.ScanXattrs,
}) {
l.Debugln(w, "unchanged:", curFile)
return nil
}
if curFile.ShouldConflict() {
// The old file was invalid for whatever reason and probably not
// up to date with what was out there in the cluster. Drop all
// others from the version vector to indicate that we haven't
// taken their version into account, and possibly cause a
// conflict.
f.Version = f.Version.DropOthers(w.ShortID)
}
l.Debugln(w, "rescan:", curFile)
}
l.Debugln(w, "to hash:", relPath, f)
select {
case toHashChan <- f:
case <-ctx.Done():
return ctx.Err()
}
return nil
}
func (w *walker) walkDir(ctx context.Context, relPath string, info fs.FileInfo, finishedChan chan<- ScanResult) error {
curFile, hasCurFile := w.CurrentFiler.CurrentFile(relPath)
f, err := CreateFileInfo(info, relPath, w.Filesystem, w.ScanOwnership, w.ScanXattrs, w.XattrFilter)
if err != nil {
return err
}
f = w.updateFileInfo(f, curFile)
f.NoPermissions = w.IgnorePerms
l.Debugln(w, "checking:", f)
if hasCurFile {
if curFile.IsEquivalentOptional(f, protocol.FileInfoComparison{
ModTimeWindow: w.ModTimeWindow,
IgnorePerms: w.IgnorePerms,
IgnoreBlocks: true,
IgnoreFlags: w.LocalFlags,
IgnoreOwnership: !w.ScanOwnership,
IgnoreXattrs: !w.ScanXattrs,
}) {
l.Debugln(w, "unchanged:", curFile)
return nil
}
if curFile.ShouldConflict() {
// The old file was invalid for whatever reason and probably not
// up to date with what was out there in the cluster. Drop all
// others from the version vector to indicate that we haven't
// taken their version into account, and possibly cause a
// conflict.
f.Version = f.Version.DropOthers(w.ShortID)
}
l.Debugln(w, "rescan:", curFile)
}
l.Debugln(w, "dir:", relPath, f)
select {
case finishedChan <- ScanResult{File: f}:
case <-ctx.Done():
return ctx.Err()
}
return nil
}
// walkSymlink returns nil, or an error if the error is of a nature that
// should stop the entire walk.
func (w *walker) walkSymlink(ctx context.Context, relPath string, info fs.FileInfo, finishedChan chan<- ScanResult) error {
// Symlinks are not supported on Windows. We ignore instead of returning
// an error.
if build.IsWindows {
return nil
}
f, err := CreateFileInfo(info, relPath, w.Filesystem, w.ScanOwnership, w.ScanXattrs, w.XattrFilter)
if err != nil {
handleError(ctx, "reading link", relPath, err, finishedChan)
return nil
}
curFile, hasCurFile := w.CurrentFiler.CurrentFile(relPath)
f = w.updateFileInfo(f, curFile)
l.Debugln(w, "checking:", f)
if hasCurFile {
if curFile.IsEquivalentOptional(f, protocol.FileInfoComparison{
ModTimeWindow: w.ModTimeWindow,
IgnorePerms: w.IgnorePerms,
IgnoreBlocks: true,
IgnoreFlags: w.LocalFlags,
IgnoreOwnership: !w.ScanOwnership,
IgnoreXattrs: !w.ScanXattrs,
}) {
l.Debugln(w, "unchanged:", curFile, info.ModTime().Unix(), info.Mode()&fs.ModePerm)
return nil
}
if curFile.ShouldConflict() {
// The old file was invalid for whatever reason and probably not
// up to date with what was out there in the cluster. Drop all
// others from the version vector to indicate that we haven't
// taken their version into account, and possibly cause a
// conflict.
f.Version = f.Version.DropOthers(w.ShortID)
}
l.Debugln(w, "rescan:", curFile)
}
l.Debugln(w, "symlink:", relPath, f)
select {
case finishedChan <- ScanResult{File: f}:
case <-ctx.Done():
return ctx.Err()
}
return nil
}
// normalizePath returns the normalized relative path (possibly after
// fixing it on disk), or an error.
func (w *walker) normalizePath(path string, info fs.FileInfo) (normPath string, err error) {
if build.IsDarwin {
// Mac OS X file names should always be NFD normalized.
normPath = norm.NFD.String(path)
} else {
// Every other OS in the known universe uses NFC or just plain
// doesn't bother to define an encoding. In our case *we* do care,
// so we enforce NFC regardless.
normPath = norm.NFC.String(path)
}
if path == normPath {
// The file name is already normalized: nothing to do
return path, nil
}
if !w.AutoNormalize {
// We're not authorized to do anything about it, so complain and skip.
return "", errUTF8Normalization
}
// We will attempt to normalize it.
normInfo, err := w.Filesystem.Lstat(normPath)
if fs.IsNotExist(err) {
// Nothing exists with the normalized filename. Good.
if err = w.Filesystem.Rename(path, normPath); err != nil {
return "", err
}
l.Infof(`Normalized UTF8 encoding of file name "%s".`, path)
return normPath, nil
}
if w.Filesystem.SameFile(info, normInfo) {
// With some filesystems (ZFS), if there is an un-normalized path and you ask whether the normalized
// version exists, it responds with true. Therefore we need to check fs.SameFile as well.
// In this case, a call to Rename won't do anything, so we have to rename via a temp file.
// We don't want to use the standard syncthing prefix here, as that will result in the file being ignored
// and eventually deleted by Syncthing if the rename back fails.
tempPath := fs.TempNameWithPrefix(normPath, "")
if err = w.Filesystem.Rename(path, tempPath); err != nil {
return "", err
}
if err = w.Filesystem.Rename(tempPath, normPath); err != nil {
// I don't ever expect this to happen, but if it does, we should probably tell our caller that the normalized
// path is the temp path: that way at least the user's data still gets synced.
			l.Warnf(`Error renaming "%s" to "%s" while normalizing UTF8 encoding: %v. You will want to rename this file back manually`, tempPath, normPath, err)
return tempPath, nil
}
return normPath, nil
}
// There is something already in the way at the normalized
// file name.
return "", errUTF8Conflict
}
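// Illustrative sketch, not part of the original file: a quick check for
// whether a name is already in the normalization form normalizePath would
// choose on this platform. The helper name is hypothetical.
func isNormalized(path string) bool {
	if build.IsDarwin {
		return norm.NFD.IsNormalString(path)
	}
	return norm.NFC.IsNormalString(path)
}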
// updateFileInfo updates walker specific members of protocol.FileInfo that
// do not depend on type, and things that should be preserved from the
// previous version of the FileInfo.
func (w *walker) updateFileInfo(dst, src protocol.FileInfo) protocol.FileInfo {
if dst.Type == protocol.FileInfoTypeFile && build.IsWindows {
// If we have an existing index entry, copy the executable bits
// from there.
dst.Permissions |= (src.Permissions & 0o111)
}
dst.Version = src.Version.Update(w.ShortID)
dst.ModifiedBy = w.ShortID
dst.LocalFlags = w.LocalFlags
// Copy OS data from src to dst, unless it was already set on dst.
dst.Platform.MergeWith(&src.Platform)
return dst
}
func handleError(ctx context.Context, context, path string, err error, finishedChan chan<- ScanResult) {
select {
case finishedChan <- ScanResult{
Err: fmt.Errorf("%s: %w", context, err),
Path: path,
}:
case <-ctx.Done():
}
}
func (w *walker) String() string {
return fmt.Sprintf("walker/%s@%p", w.Folder, w)
}
// A byteCounter gets bytes added to it via Update() and then provides the
// Total() and one minute moving average Rate() in bytes per second.
type byteCounter struct {
total atomic.Int64
metrics.EWMA
stop chan struct{}
}
func newByteCounter() *byteCounter {
c := &byteCounter{
EWMA: metrics.NewEWMA1(), // a one minute exponentially weighted moving average
stop: make(chan struct{}),
}
go c.ticker()
return c
}
func (c *byteCounter) ticker() {
// The metrics.EWMA expects clock ticks every five seconds in order to
// decay the average properly.
t := time.NewTicker(5 * time.Second)
for {
select {
case <-t.C:
c.Tick()
case <-c.stop:
t.Stop()
return
}
}
}
func (c *byteCounter) Update(bytes int64) {
c.total.Add(bytes)
c.EWMA.Update(bytes)
}
func (c *byteCounter) Total() int64 { return c.total.Load() }
func (c *byteCounter) Close() {
close(c.stop)
}
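// Illustrative usage sketch, not part of the original file: feeding a
// byteCounter and reading it back. The byte count is hypothetical.
func exampleByteCounter() {
	c := newByteCounter()
	defer c.Close()
	c.Update(1 << 20)              // record 1 MiB of hashed data
	l.Debugln("total:", c.Total()) // cumulative bytes
	l.Debugln("rate:", c.Rate())   // bytes/s, one minute moving average
}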
// A no-op CurrentFiler
type noCurrentFiler struct{}
func (noCurrentFiler) CurrentFile(_ string) (protocol.FileInfo, bool) {
return protocol.FileInfo{}, false
}
func CreateFileInfo(fi fs.FileInfo, name string, filesystem fs.Filesystem, scanOwnership bool, scanXattrs bool, xattrFilter XattrFilter) (protocol.FileInfo, error) {
f := protocol.FileInfo{Name: name}
if scanOwnership || scanXattrs {
if plat, err := filesystem.PlatformData(name, scanOwnership, scanXattrs, xattrFilter); err == nil {
f.Platform = plat
} else {
return protocol.FileInfo{}, fmt.Errorf("reading platform data: %w", err)
}
}
if ct := fi.InodeChangeTime(); !ct.IsZero() {
f.InodeChangeNs = ct.UnixNano()
} else {
f.InodeChangeNs = 0
}
if fi.IsSymlink() {
f.Type = protocol.FileInfoTypeSymlink
target, err := filesystem.ReadSymlink(name)
if err != nil {
return protocol.FileInfo{}, err
}
f.SymlinkTarget = target
f.NoPermissions = true // Symlinks don't have permissions of their own
return f, nil
}
f.Permissions = uint32(fi.Mode() & fs.ModePerm)
f.ModifiedS = fi.ModTime().Unix()
f.ModifiedNs = fi.ModTime().Nanosecond()
if fi.IsDir() {
f.Type = protocol.FileInfoTypeDirectory
return f, nil
}
f.Size = fi.Size()
f.Type = protocol.FileInfoTypeFile
return f, nil
}
| lib/scanner/walk.go | 1 | https://github.com/syncthing/syncthing/commit/2f3eacdb6c1c33650ccdd91f42e842c116200d92 | [
0.0010172936599701643,
0.00019983200763817877,
0.0001620767143322155,
0.0001717095001367852,
0.00012737138604279608
] |
{
"id": 1,
"code_window": [
" $scope.scanPercentage = function (folder) {\n",
" if (!$scope.scanProgress[folder]) {\n",
" return undefined;\n",
" }\n",
" var pct = 100 * $scope.scanProgress[folder].current / $scope.scanProgress[folder].total;\n",
" return Math.floor(pct);\n",
" };\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
" return progressIntegerPercentage($scope.scanProgress[folder].current, $scope.scanProgress[folder].total);\n"
],
"file_path": "gui/default/syncthing/core/syncthingController.js",
"type": "replace",
"edit_start_line_idx": 1008
} | <?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 18.1.1, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 117.3 117.3" enable-background="new 0 0 117.3 117.3" xml:space="preserve">
<g>
<linearGradient id="SVGID_1_" gradientUnits="userSpaceOnUse" x1="58.666" y1="117.332" x2="58.666" y2="0">
<stop offset="0" style="stop-color:#0882C8"/>
<stop offset="1" style="stop-color:#26B6DB"/>
</linearGradient>
<circle fill="url(#SVGID_1_)" cx="58.7" cy="58.7" r="58.7"/>
<g>
<circle fill="none" stroke="#FFFFFF" stroke-width="6" stroke-miterlimit="10" cx="58.7" cy="58.5" r="43.7"/>
<g>
<path fill="#FFFFFF" d="M94.7,47.8c4.7,1.6,9.8-0.9,11.4-5.6c1.6-4.7-0.9-9.8-5.6-11.4c-4.7-1.6-9.8,0.9-11.4,5.6
C87.5,41.1,90,46.2,94.7,47.8z"/>
<line fill="none" stroke="#FFFFFF" stroke-width="6" stroke-miterlimit="10" x1="97.6" y1="39.4" x2="67.5" y2="64.4"/>
</g>
<g>
<path fill="#FFFFFF" d="M77.6,91c-0.4,4.9,3.2,9.3,8.2,9.8c5,0.4,9.3-3.2,9.8-8.2c0.4-4.9-3.2-9.3-8.2-9.8
C82.4,82.4,78,86,77.6,91z"/>
<line fill="none" stroke="#FFFFFF" stroke-width="6" stroke-miterlimit="10" x1="86.5" y1="91.8" x2="67.5" y2="64.4"/>
</g>
<path fill="#FFFFFF" d="M60,69.3c2.7,4.2,8.3,5.4,12.4,2.7c4.2-2.7,5.4-8.3,2.7-12.4c-2.7-4.2-8.3-5.4-12.4-2.7
C58.5,59.5,57.3,65.1,60,69.3z"/>
<g>
<path fill="#FFFFFF" d="M21.2,61.4c-4.3-2.5-9.8-1.1-12.3,3.1c-2.5,4.3-1.1,9.8,3.1,12.3c4.3,2.5,9.8,1.1,12.3-3.1
C26.8,69.5,25.4,64,21.2,61.4z"/>
<line fill="none" stroke="#FFFFFF" stroke-width="6" stroke-miterlimit="10" x1="16.6" y1="69.1" x2="67.5" y2="64.4"/>
</g>
</g>
</g>
</svg>
| assets/logo-only.svg | 0 | https://github.com/syncthing/syncthing/commit/2f3eacdb6c1c33650ccdd91f42e842c116200d92 | [
0.00018803696730174124,
0.0001752597454469651,
0.00016736396355554461,
0.00017281901091337204,
0.000007948204256535973
] |
{
"id": 1,
"code_window": [
" $scope.scanPercentage = function (folder) {\n",
" if (!$scope.scanProgress[folder]) {\n",
" return undefined;\n",
" }\n",
" var pct = 100 * $scope.scanProgress[folder].current / $scope.scanProgress[folder].total;\n",
" return Math.floor(pct);\n",
" };\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
" return progressIntegerPercentage($scope.scanProgress[folder].current, $scope.scanProgress[folder].total);\n"
],
"file_path": "gui/default/syncthing/core/syncthingController.js",
"type": "replace",
"edit_start_line_idx": 1008
} | <?xml version="1.0" encoding="UTF-8"?>
<svg xmlns:svg="http://www.w3.org/2000/svg" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16">
<defs>
<linearGradient id="grad" gradientUnits="userSpaceOnUse" x1="8" y1="0" x2="8" y2="16">
<stop offset="0" style="stop-color:#26B6DB"/>
<stop offset="1" style="stop-color:#0882C8"/>
</linearGradient>
<mask id="bitemask" maskUnits="userSpaceOnUse">
<g>
<rect id="mask-bg" x="0" y="0" width="16" height="16" style="fill:#ffffff"/>
<circle id="mask-subtract" cx="11.5" cy="11.5" r="5.5" style="fill:#000000"/>
</g>
</mask>
</defs>
<g id="syncthing-logo" mask="url(#bitemask)">
<circle id="outer" cx="8" cy="8" r="8" style="fill:url(#grad)"/>
<circle id="inner" cx="8" cy="7.9727402" r="5.9557071" style="fill:none;stroke:#ffffff;stroke-width:0.81771719"/>
<line id="arm-l" x1="9.1993189" y1="8.776825" x2="2.262351" y2="9.4173737" style="stroke:#ffffff;stroke-width:0.81771719"/>
<line id="arm-tr" x1="9.1993189" y1="8.776825" x2="13.301533" y2="5.3696747" style="stroke:#ffffff;stroke-width:0.81771719"/>
<line id="arm-br" x1="9.1993189" y1="8.776825" x2="11.788756" y2="12.51107" style="stroke:#ffffff;stroke-width:0.81771719"/>
<circle id="node-c" cx="9.1993189" cy="8.776825" r="1.22" style="fill:#ffffff"/>
<circle id="node-l" cx="2.262351" cy="9.4173737" r="1.22" style="fill:#ffffff"/>
<circle id="node-tr" cx="13.301533" cy="5.3696747" r="1.22" style="fill:#ffffff"/>
<circle id="node-br" cx="11.788756" cy="12.51107" r="1.22" style="fill:#ffffff"/>
</g>
<circle id="bubble" cx="11.5" cy="11.5" r="4.5" style="fill:#000000"/>
<g id="exclaim">
<rect id="exclaim-top" x="11" y="9" width="1" height="3" style="fill:#ffffff"/>
<rect id="exclaim-bottom" x="11" y="13" width="1" height="1" style="fill:#ffffff"/>
</g>
</svg> | assets/statusicons/notify.svg | 0 | https://github.com/syncthing/syncthing/commit/2f3eacdb6c1c33650ccdd91f42e842c116200d92 | [
0.00017365635721944273,
0.00017107464373111725,
0.00016667216550558805,
0.00017198501154780388,
0.0000026868244731304003
] |
{
"id": 1,
"code_window": [
" $scope.scanPercentage = function (folder) {\n",
" if (!$scope.scanProgress[folder]) {\n",
" return undefined;\n",
" }\n",
" var pct = 100 * $scope.scanProgress[folder].current / $scope.scanProgress[folder].total;\n",
" return Math.floor(pct);\n",
" };\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
" return progressIntegerPercentage($scope.scanProgress[folder].current, $scope.scanProgress[folder].total);\n"
],
"file_path": "gui/default/syncthing/core/syncthingController.js",
"type": "replace",
"edit_start_line_idx": 1008
} | <app-card>
<app-card-title>{{title | uppercase}}</app-card-title>
<app-card-content>
<div fxLayout="row" fxLayoutAlign="space-between stretch">
<app-donut-chart [elementID]="chartID" fxFlex="30" [title]="title" (stateEvent)="onItemSelect($event)">
</app-donut-chart>
        <div class="items" fxLayout="column" fxLayoutAlign="start end" fxFlex="70">
<app-chart-item *ngFor="let state of states" (click)="onItemSelect(state)" [state]="state.label"
[count]="state.count" [selected]="state.selected">
</app-chart-item>
</div>
</div>
</app-card-content>
</app-card> | next-gen-gui/src/app/charts/chart/chart.component.html | 0 | https://github.com/syncthing/syncthing/commit/2f3eacdb6c1c33650ccdd91f42e842c116200d92 | [
0.0001718254789011553,
0.00016970490105450153,
0.000167584337759763,
0.00016970490105450153,
0.0000021205705706961453
] |
{
"id": 3,
"code_window": [
"\t\t\ttotal += file.Size\n",
"\t\t}\n",
"\n",
"\t\trealToHashChan := make(chan protocol.FileInfo)\n",
"\t\tdone := make(chan struct{})\n",
"\t\tprogress := newByteCounter()\n",
"\n",
"\t\tnewParallelHasher(ctx, w.Folder, w.Filesystem, w.Hashers, finishedChan, realToHashChan, progress, done)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif len(filesToHash) == 0 {\n",
"\t\t\tclose(finishedChan)\n",
"\t\t\treturn\n",
"\t\t}\n",
"\n"
],
"file_path": "lib/scanner/walk.go",
"type": "add",
"edit_start_line_idx": 162
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package scanner
import (
"context"
"errors"
"fmt"
"path/filepath"
"strings"
"sync/atomic"
"time"
"unicode/utf8"
metrics "github.com/rcrowley/go-metrics"
"github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/ignore"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol"
"golang.org/x/text/unicode/norm"
)
type Config struct {
// Folder for which the walker has been created
Folder string
// Limit walking to these paths within Dir, or no limit if Sub is empty
Subs []string
// If Matcher is not nil, it is used to identify files to ignore which were specified by the user.
Matcher *ignore.Matcher
// Number of hours to keep temporary files for
TempLifetime time.Duration
// If CurrentFiler is not nil, it is queried for the current file before rescanning.
CurrentFiler CurrentFiler
// The Filesystem provides an abstraction on top of the actual filesystem.
Filesystem fs.Filesystem
// If IgnorePerms is true, changes to permission bits will not be
// detected.
IgnorePerms bool
// When AutoNormalize is set, file names that are in UTF8 but incorrect
// normalization form will be corrected.
AutoNormalize bool
// Number of routines to use for hashing
Hashers int
// Our vector clock id
ShortID protocol.ShortID
// Optional progress tick interval which defines how often FolderScanProgress
// events are emitted. Negative number means disabled.
ProgressTickIntervalS int
// Local flags to set on scanned files
LocalFlags uint32
// Modification time is to be considered unchanged if the difference is lower.
ModTimeWindow time.Duration
// Event logger to which the scan progress events are sent
EventLogger events.Logger
// If ScanOwnership is true, we pick up ownership information on files while scanning.
ScanOwnership bool
// If ScanXattrs is true, we pick up extended attributes on files while scanning.
ScanXattrs bool
// Filter for extended attributes
XattrFilter XattrFilter
}
type CurrentFiler interface {
// CurrentFile returns the file as seen at last scan.
CurrentFile(name string) (protocol.FileInfo, bool)
}
type XattrFilter interface {
Permit(string) bool
GetMaxSingleEntrySize() int
GetMaxTotalSize() int
}
type ScanResult struct {
File protocol.FileInfo
Err error
Path string // to be set in case Err != nil and File == nil
}
func Walk(ctx context.Context, cfg Config) chan ScanResult {
return newWalker(cfg).walk(ctx)
}
func WalkWithoutHashing(ctx context.Context, cfg Config) chan ScanResult {
return newWalker(cfg).walkWithoutHashing(ctx)
}
func newWalker(cfg Config) *walker {
w := &walker{cfg}
if w.CurrentFiler == nil {
w.CurrentFiler = noCurrentFiler{}
}
if w.Filesystem == nil {
panic("no filesystem specified")
}
if w.Matcher == nil {
w.Matcher = ignore.New(w.Filesystem)
}
registerFolderMetrics(w.Folder)
return w
}
var (
errUTF8Invalid = errors.New("item is not in UTF8 encoding")
errUTF8Normalization = errors.New("item is not in the correct UTF8 normalization form")
errUTF8Conflict = errors.New("item has UTF8 encoding conflict with another item")
)
type walker struct {
Config
}
// Walk returns the list of files found in the local folder by scanning the
// file system. Files are blockwise hashed.
func (w *walker) walk(ctx context.Context) chan ScanResult {
l.Debugln(w, "Walk", w.Subs, w.Matcher)
toHashChan := make(chan protocol.FileInfo)
finishedChan := make(chan ScanResult)
// A routine which walks the filesystem tree, and sends files which have
// been modified to the counter routine.
go w.scan(ctx, toHashChan, finishedChan)
// We're not required to emit scan progress events, just kick off hashers,
// and feed inputs directly from the walker.
if w.ProgressTickIntervalS < 0 {
newParallelHasher(ctx, w.Folder, w.Filesystem, w.Hashers, finishedChan, toHashChan, nil, nil)
return finishedChan
}
// Defaults to every 2 seconds.
if w.ProgressTickIntervalS == 0 {
w.ProgressTickIntervalS = 2
}
ticker := time.NewTicker(time.Duration(w.ProgressTickIntervalS) * time.Second)
// We need to emit progress events, hence we create a routine which buffers
// the list of files to be hashed, counts the total number of
// bytes to hash, and once no more files need to be hashed (chan gets closed),
// start a routine which periodically emits FolderScanProgress events,
// until a stop signal is sent by the parallel hasher.
// Parallel hasher is stopped by this routine when we close the channel over
// which it receives the files we ask it to hash.
go func() {
var filesToHash []protocol.FileInfo
var total int64 = 1
for file := range toHashChan {
filesToHash = append(filesToHash, file)
total += file.Size
}
realToHashChan := make(chan protocol.FileInfo)
done := make(chan struct{})
progress := newByteCounter()
newParallelHasher(ctx, w.Folder, w.Filesystem, w.Hashers, finishedChan, realToHashChan, progress, done)
// A routine which actually emits the FolderScanProgress events
// every w.ProgressTicker ticks, until the hasher routines terminate.
go func() {
defer progress.Close()
for {
select {
case <-done:
l.Debugln(w, "Walk progress done", w.Folder, w.Subs, w.Matcher)
ticker.Stop()
return
case <-ticker.C:
current := progress.Total()
rate := progress.Rate()
l.Debugf("%v: Walk %s %s current progress %d/%d at %.01f MiB/s (%d%%)", w, w.Folder, w.Subs, current, total, rate/1024/1024, current*100/total)
w.EventLogger.Log(events.FolderScanProgress, map[string]interface{}{
"folder": w.Folder,
"current": current,
"total": total,
"rate": rate, // bytes per second
})
case <-ctx.Done():
ticker.Stop()
return
}
}
}()
loop:
for _, file := range filesToHash {
l.Debugln(w, "real to hash:", file.Name)
select {
case realToHashChan <- file:
case <-ctx.Done():
break loop
}
}
close(realToHashChan)
}()
return finishedChan
}
func (w *walker) walkWithoutHashing(ctx context.Context) chan ScanResult {
l.Debugln(w, "Walk without hashing", w.Subs, w.Matcher)
toHashChan := make(chan protocol.FileInfo)
finishedChan := make(chan ScanResult)
// A routine which walks the filesystem tree, and sends files which have
// been modified to the counter routine.
go w.scan(ctx, toHashChan, finishedChan)
go func() {
for file := range toHashChan {
finishedChan <- ScanResult{File: file}
}
close(finishedChan)
}()
return finishedChan
}
func (w *walker) scan(ctx context.Context, toHashChan chan<- protocol.FileInfo, finishedChan chan<- ScanResult) {
hashFiles := w.walkAndHashFiles(ctx, toHashChan, finishedChan)
if len(w.Subs) == 0 {
w.Filesystem.Walk(".", hashFiles)
} else {
for _, sub := range w.Subs {
if err := osutil.TraversesSymlink(w.Filesystem, filepath.Dir(sub)); err != nil {
l.Debugf("%v: Skip walking %v as it is below a symlink", w, sub)
continue
}
w.Filesystem.Walk(sub, hashFiles)
}
}
close(toHashChan)
}
func (w *walker) walkAndHashFiles(ctx context.Context, toHashChan chan<- protocol.FileInfo, finishedChan chan<- ScanResult) fs.WalkFunc {
now := time.Now()
ignoredParent := ""
return func(path string, info fs.FileInfo, err error) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
metricScannedItems.WithLabelValues(w.Folder).Inc()
// Return value used when we are returning early and don't want to
// process the item. For directories, this means do-not-descend.
var skip error // nil
// info nil when error is not nil
if info != nil && info.IsDir() {
skip = fs.SkipDir
}
if !utf8.ValidString(path) {
handleError(ctx, "scan", path, errUTF8Invalid, finishedChan)
return skip
}
if fs.IsTemporary(path) {
l.Debugln(w, "temporary:", path, "err:", err)
if err == nil && info.IsRegular() && info.ModTime().Add(w.TempLifetime).Before(now) {
w.Filesystem.Remove(path)
l.Debugln(w, "removing temporary:", path, info.ModTime())
}
return nil
}
if fs.IsInternal(path) {
l.Debugln(w, "ignored (internal):", path)
return skip
}
if w.Matcher.Match(path).IsIgnored() {
l.Debugln(w, "ignored (patterns):", path)
// Only descend if matcher says so and the current file is not a symlink.
if err != nil || w.Matcher.SkipIgnoredDirs() || info.IsSymlink() {
return skip
}
// If the parent wasn't ignored already, set this path as the "highest" ignored parent
if info.IsDir() && (ignoredParent == "" || !fs.IsParent(path, ignoredParent)) {
ignoredParent = path
}
return nil
}
if err != nil {
// No need reporting errors for files that don't exist (e.g. scan
// due to filesystem watcher)
if !fs.IsNotExist(err) {
handleError(ctx, "scan", path, err, finishedChan)
}
return skip
}
if path == "." {
return nil
}
if ignoredParent == "" {
// parent isn't ignored, nothing special
return w.handleItem(ctx, path, info, toHashChan, finishedChan, skip)
}
// Part of current path below the ignored (potential) parent
rel := strings.TrimPrefix(path, ignoredParent+string(fs.PathSeparator))
// ignored path isn't actually a parent of the current path
if rel == path {
ignoredParent = ""
return w.handleItem(ctx, path, info, toHashChan, finishedChan, skip)
}
// The previously ignored parent directories of the current, not
// ignored path need to be handled as well.
// Prepend an empty string to handle ignoredParent without anything
// appended in the first iteration.
for _, name := range append([]string{""}, fs.PathComponents(rel)...) {
ignoredParent = filepath.Join(ignoredParent, name)
info, err = w.Filesystem.Lstat(ignoredParent)
// An error here would be weird as we've already gotten to this point, but act on it nonetheless
if err != nil {
handleError(ctx, "scan", ignoredParent, err, finishedChan)
return skip
}
if err = w.handleItem(ctx, ignoredParent, info, toHashChan, finishedChan, skip); err != nil {
return err
}
}
ignoredParent = ""
return nil
}
}
func (w *walker) handleItem(ctx context.Context, path string, info fs.FileInfo, toHashChan chan<- protocol.FileInfo, finishedChan chan<- ScanResult, skip error) error {
oldPath := path
path, err := w.normalizePath(path, info)
if err != nil {
handleError(ctx, "normalizing path", oldPath, err, finishedChan)
return skip
}
switch {
case info.IsSymlink():
if err := w.walkSymlink(ctx, path, info, finishedChan); err != nil {
return err
}
if info.IsDir() {
// under no circumstances shall we descend into a symlink
return fs.SkipDir
}
return nil
case info.IsDir():
err = w.walkDir(ctx, path, info, finishedChan)
case info.IsRegular():
err = w.walkRegular(ctx, path, info, toHashChan)
}
return err
}
func (w *walker) walkRegular(ctx context.Context, relPath string, info fs.FileInfo, toHashChan chan<- protocol.FileInfo) error {
curFile, hasCurFile := w.CurrentFiler.CurrentFile(relPath)
blockSize := protocol.BlockSize(info.Size())
if hasCurFile {
// Check if we should retain current block size.
curBlockSize := curFile.BlockSize()
if blockSize > curBlockSize && blockSize/curBlockSize <= 2 {
// New block size is larger, but not more than twice larger.
// Retain.
blockSize = curBlockSize
} else if curBlockSize > blockSize && curBlockSize/blockSize <= 2 {
// Old block size is larger, but not more than twice larger.
// Retain.
blockSize = curBlockSize
}
}
f, err := CreateFileInfo(info, relPath, w.Filesystem, w.ScanOwnership, w.ScanXattrs, w.XattrFilter)
if err != nil {
return err
}
f = w.updateFileInfo(f, curFile)
f.NoPermissions = w.IgnorePerms
f.RawBlockSize = blockSize
l.Debugln(w, "checking:", f)
if hasCurFile {
if curFile.IsEquivalentOptional(f, protocol.FileInfoComparison{
ModTimeWindow: w.ModTimeWindow,
IgnorePerms: w.IgnorePerms,
IgnoreBlocks: true,
IgnoreFlags: w.LocalFlags,
IgnoreOwnership: !w.ScanOwnership,
IgnoreXattrs: !w.ScanXattrs,
}) {
l.Debugln(w, "unchanged:", curFile)
return nil
}
if curFile.ShouldConflict() {
// The old file was invalid for whatever reason and probably not
// up to date with what was out there in the cluster. Drop all
// others from the version vector to indicate that we haven't
// taken their version into account, and possibly cause a
// conflict.
f.Version = f.Version.DropOthers(w.ShortID)
}
l.Debugln(w, "rescan:", curFile)
}
l.Debugln(w, "to hash:", relPath, f)
select {
case toHashChan <- f:
case <-ctx.Done():
return ctx.Err()
}
return nil
}
func (w *walker) walkDir(ctx context.Context, relPath string, info fs.FileInfo, finishedChan chan<- ScanResult) error {
curFile, hasCurFile := w.CurrentFiler.CurrentFile(relPath)
f, err := CreateFileInfo(info, relPath, w.Filesystem, w.ScanOwnership, w.ScanXattrs, w.XattrFilter)
if err != nil {
return err
}
f = w.updateFileInfo(f, curFile)
f.NoPermissions = w.IgnorePerms
l.Debugln(w, "checking:", f)
if hasCurFile {
if curFile.IsEquivalentOptional(f, protocol.FileInfoComparison{
ModTimeWindow: w.ModTimeWindow,
IgnorePerms: w.IgnorePerms,
IgnoreBlocks: true,
IgnoreFlags: w.LocalFlags,
IgnoreOwnership: !w.ScanOwnership,
IgnoreXattrs: !w.ScanXattrs,
}) {
l.Debugln(w, "unchanged:", curFile)
return nil
}
if curFile.ShouldConflict() {
// The old file was invalid for whatever reason and probably not
// up to date with what was out there in the cluster. Drop all
// others from the version vector to indicate that we haven't
// taken their version into account, and possibly cause a
// conflict.
f.Version = f.Version.DropOthers(w.ShortID)
}
l.Debugln(w, "rescan:", curFile)
}
l.Debugln(w, "dir:", relPath, f)
select {
case finishedChan <- ScanResult{File: f}:
case <-ctx.Done():
return ctx.Err()
}
return nil
}
// walkSymlink returns nil, or an error if the error is of a nature that
// should stop the entire walk.
func (w *walker) walkSymlink(ctx context.Context, relPath string, info fs.FileInfo, finishedChan chan<- ScanResult) error {
// Symlinks are not supported on Windows. We ignore instead of returning
// an error.
if build.IsWindows {
return nil
}
f, err := CreateFileInfo(info, relPath, w.Filesystem, w.ScanOwnership, w.ScanXattrs, w.XattrFilter)
if err != nil {
handleError(ctx, "reading link", relPath, err, finishedChan)
return nil
}
curFile, hasCurFile := w.CurrentFiler.CurrentFile(relPath)
f = w.updateFileInfo(f, curFile)
l.Debugln(w, "checking:", f)
if hasCurFile {
if curFile.IsEquivalentOptional(f, protocol.FileInfoComparison{
ModTimeWindow: w.ModTimeWindow,
IgnorePerms: w.IgnorePerms,
IgnoreBlocks: true,
IgnoreFlags: w.LocalFlags,
IgnoreOwnership: !w.ScanOwnership,
IgnoreXattrs: !w.ScanXattrs,
}) {
l.Debugln(w, "unchanged:", curFile, info.ModTime().Unix(), info.Mode()&fs.ModePerm)
return nil
}
if curFile.ShouldConflict() {
// The old file was invalid for whatever reason and probably not
// up to date with what was out there in the cluster. Drop all
// others from the version vector to indicate that we haven't
// taken their version into account, and possibly cause a
// conflict.
f.Version = f.Version.DropOthers(w.ShortID)
}
l.Debugln(w, "rescan:", curFile)
}
l.Debugln(w, "symlink:", relPath, f)
select {
case finishedChan <- ScanResult{File: f}:
case <-ctx.Done():
return ctx.Err()
}
return nil
}
// normalizePath returns the normalized relative path (possibly after
// fixing it on disk), or an error.
func (w *walker) normalizePath(path string, info fs.FileInfo) (normPath string, err error) {
if build.IsDarwin {
// Mac OS X file names should always be NFD normalized.
normPath = norm.NFD.String(path)
} else {
// Every other OS in the known universe uses NFC or just plain
// doesn't bother to define an encoding. In our case *we* do care,
// so we enforce NFC regardless.
normPath = norm.NFC.String(path)
}
if path == normPath {
// The file name is already normalized: nothing to do
return path, nil
}
if !w.AutoNormalize {
// We're not authorized to do anything about it, so complain and skip.
return "", errUTF8Normalization
}
// We will attempt to normalize it.
normInfo, err := w.Filesystem.Lstat(normPath)
if fs.IsNotExist(err) {
// Nothing exists with the normalized filename. Good.
if err = w.Filesystem.Rename(path, normPath); err != nil {
return "", err
}
l.Infof(`Normalized UTF8 encoding of file name "%s".`, path)
return normPath, nil
}
if w.Filesystem.SameFile(info, normInfo) {
// With some filesystems (ZFS), if there is an un-normalized path and you ask whether the normalized
// version exists, it responds with true. Therefore we need to check fs.SameFile as well.
// In this case, a call to Rename won't do anything, so we have to rename via a temp file.
// We don't want to use the standard syncthing prefix here, as that will result in the file being ignored
// and eventually deleted by Syncthing if the rename back fails.
tempPath := fs.TempNameWithPrefix(normPath, "")
if err = w.Filesystem.Rename(path, tempPath); err != nil {
return "", err
}
if err = w.Filesystem.Rename(tempPath, normPath); err != nil {
// I don't ever expect this to happen, but if it does, we should probably tell our caller that the normalized
// path is the temp path: that way at least the user's data still gets synced.
			l.Warnf(`Error renaming "%s" to "%s" while normalizing UTF8 encoding: %v. You will want to rename this file back manually`, tempPath, normPath, err)
return tempPath, nil
}
return normPath, nil
}
// There is something already in the way at the normalized
// file name.
return "", errUTF8Conflict
}
// updateFileInfo updates walker specific members of protocol.FileInfo that
// do not depend on type, and things that should be preserved from the
// previous version of the FileInfo.
func (w *walker) updateFileInfo(dst, src protocol.FileInfo) protocol.FileInfo {
if dst.Type == protocol.FileInfoTypeFile && build.IsWindows {
// If we have an existing index entry, copy the executable bits
// from there.
dst.Permissions |= (src.Permissions & 0o111)
}
dst.Version = src.Version.Update(w.ShortID)
dst.ModifiedBy = w.ShortID
dst.LocalFlags = w.LocalFlags
// Copy OS data from src to dst, unless it was already set on dst.
dst.Platform.MergeWith(&src.Platform)
return dst
}
func handleError(ctx context.Context, context, path string, err error, finishedChan chan<- ScanResult) {
select {
case finishedChan <- ScanResult{
Err: fmt.Errorf("%s: %w", context, err),
Path: path,
}:
case <-ctx.Done():
}
}
func (w *walker) String() string {
return fmt.Sprintf("walker/%s@%p", w.Folder, w)
}
// A byteCounter gets bytes added to it via Update() and then provides the
// Total() and one minute moving average Rate() in bytes per second.
type byteCounter struct {
total atomic.Int64
metrics.EWMA
stop chan struct{}
}
func newByteCounter() *byteCounter {
c := &byteCounter{
EWMA: metrics.NewEWMA1(), // a one minute exponentially weighted moving average
stop: make(chan struct{}),
}
go c.ticker()
return c
}
func (c *byteCounter) ticker() {
// The metrics.EWMA expects clock ticks every five seconds in order to
// decay the average properly.
t := time.NewTicker(5 * time.Second)
for {
select {
case <-t.C:
c.Tick()
case <-c.stop:
t.Stop()
return
}
}
}
func (c *byteCounter) Update(bytes int64) {
c.total.Add(bytes)
c.EWMA.Update(bytes)
}
func (c *byteCounter) Total() int64 { return c.total.Load() }
func (c *byteCounter) Close() {
close(c.stop)
}
// A no-op CurrentFiler
type noCurrentFiler struct{}
func (noCurrentFiler) CurrentFile(_ string) (protocol.FileInfo, bool) {
return protocol.FileInfo{}, false
}
func CreateFileInfo(fi fs.FileInfo, name string, filesystem fs.Filesystem, scanOwnership bool, scanXattrs bool, xattrFilter XattrFilter) (protocol.FileInfo, error) {
f := protocol.FileInfo{Name: name}
if scanOwnership || scanXattrs {
if plat, err := filesystem.PlatformData(name, scanOwnership, scanXattrs, xattrFilter); err == nil {
f.Platform = plat
} else {
return protocol.FileInfo{}, fmt.Errorf("reading platform data: %w", err)
}
}
if ct := fi.InodeChangeTime(); !ct.IsZero() {
f.InodeChangeNs = ct.UnixNano()
} else {
f.InodeChangeNs = 0
}
if fi.IsSymlink() {
f.Type = protocol.FileInfoTypeSymlink
target, err := filesystem.ReadSymlink(name)
if err != nil {
return protocol.FileInfo{}, err
}
f.SymlinkTarget = target
f.NoPermissions = true // Symlinks don't have permissions of their own
return f, nil
}
f.Permissions = uint32(fi.Mode() & fs.ModePerm)
f.ModifiedS = fi.ModTime().Unix()
f.ModifiedNs = fi.ModTime().Nanosecond()
if fi.IsDir() {
f.Type = protocol.FileInfoTypeDirectory
return f, nil
}
f.Size = fi.Size()
f.Type = protocol.FileInfoTypeFile
return f, nil
}
| lib/scanner/walk.go | 1 | https://github.com/syncthing/syncthing/commit/2f3eacdb6c1c33650ccdd91f42e842c116200d92 | [
0.998589813709259,
0.055626288056373596,
0.0001617114758118987,
0.00029999675462022424,
0.2268860787153244
] |
{
"id": 3,
"code_window": [
"\t\t\ttotal += file.Size\n",
"\t\t}\n",
"\n",
"\t\trealToHashChan := make(chan protocol.FileInfo)\n",
"\t\tdone := make(chan struct{})\n",
"\t\tprogress := newByteCounter()\n",
"\n",
"\t\tnewParallelHasher(ctx, w.Folder, w.Filesystem, w.Hashers, finishedChan, realToHashChan, progress, done)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif len(filesToHash) == 0 {\n",
"\t\t\tclose(finishedChan)\n",
"\t\t\treturn\n",
"\t\t}\n",
"\n"
],
"file_path": "lib/scanner/walk.go",
"type": "add",
"edit_start_line_idx": 162
} | Copyright (c) 2016 The angular-translate team, Pascal Precht; Licensed MIT
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
| gui/default/vendor/angular/angular-translate-loader-static-files.js.LICENSE | 0 | https://github.com/syncthing/syncthing/commit/2f3eacdb6c1c33650ccdd91f42e842c116200d92 | [
0.0001748476643115282,
0.00017436203779652715,
0.00017387639672961086,
0.00017436203779652715,
4.856337909586728e-7
] |
{
"id": 3,
"code_window": [
"\t\t\ttotal += file.Size\n",
"\t\t}\n",
"\n",
"\t\trealToHashChan := make(chan protocol.FileInfo)\n",
"\t\tdone := make(chan struct{})\n",
"\t\tprogress := newByteCounter()\n",
"\n",
"\t\tnewParallelHasher(ctx, w.Folder, w.Filesystem, w.Hashers, finishedChan, realToHashChan, progress, done)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif len(filesToHash) == 0 {\n",
"\t\t\tclose(finishedChan)\n",
"\t\t\treturn\n",
"\t\t}\n",
"\n"
],
"file_path": "lib/scanner/walk.go",
"type": "add",
"edit_start_line_idx": 162
} | export interface SystemStatus {
alloc: number;
connectionServiceStatus: any;
cpuPercent: number; // allows returns 0
discoveryEnabled: boolean;
discoveryErrors: any;
discoveryMethods: number;
goroutines: number;
lastDialStatus: any;
myID: string;
pathSeparator: string;
startTime: string;
sys: number;
themes: string[];
tilde: string;
uptime: number;
} | next-gen-gui/src/app/system-status.ts | 0 | https://github.com/syncthing/syncthing/commit/2f3eacdb6c1c33650ccdd91f42e842c116200d92 | [
0.0001734339602990076,
0.000169444945640862,
0.00016545591643080115,
0.000169444945640862,
0.000003989021934103221
] |
{
"id": 3,
"code_window": [
"\t\t\ttotal += file.Size\n",
"\t\t}\n",
"\n",
"\t\trealToHashChan := make(chan protocol.FileInfo)\n",
"\t\tdone := make(chan struct{})\n",
"\t\tprogress := newByteCounter()\n",
"\n",
"\t\tnewParallelHasher(ctx, w.Folder, w.Filesystem, w.Hashers, finishedChan, realToHashChan, progress, done)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif len(filesToHash) == 0 {\n",
"\t\t\tclose(finishedChan)\n",
"\t\t\treturn\n",
"\t\t}\n",
"\n"
],
"file_path": "lib/scanner/walk.go",
"type": "add",
"edit_start_line_idx": 162
} | // Copyright (C) 2014 The Protocol Authors.
package protocol
import (
"bytes"
"encoding/base32"
"encoding/binary"
"errors"
"fmt"
"strings"
"github.com/syncthing/syncthing/lib/sha256"
)
const (
DeviceIDLength = 32
ShortIDStringLength = 7
)
type (
DeviceID [DeviceIDLength]byte
ShortID uint64
)
var (
LocalDeviceID = repeatedDeviceID(0xff)
GlobalDeviceID = repeatedDeviceID(0xf8)
EmptyDeviceID = DeviceID{ /* all zeroes */ }
)
func repeatedDeviceID(v byte) (d DeviceID) {
for i := range d {
d[i] = v
}
return
}
// NewDeviceID generates a new device ID from the raw bytes of a certificate
func NewDeviceID(rawCert []byte) DeviceID {
return DeviceID(sha256.Sum256(rawCert))
}
func DeviceIDFromString(s string) (DeviceID, error) {
var n DeviceID
err := n.UnmarshalText([]byte(s))
return n, err
}
func DeviceIDFromBytes(bs []byte) (DeviceID, error) {
var n DeviceID
if len(bs) != len(n) {
return n, errors.New("incorrect length of byte slice representing device ID")
}
copy(n[:], bs)
return n, nil
}
// String returns the canonical string representation of the device ID
func (n DeviceID) String() string {
if n == EmptyDeviceID {
return ""
}
id := base32.StdEncoding.EncodeToString(n[:])
id = strings.Trim(id, "=")
id, err := luhnify(id)
if err != nil {
// Should never happen
panic(err)
}
id = chunkify(id)
return id
}
func (n DeviceID) GoString() string {
return n.String()
}
func (n DeviceID) Compare(other DeviceID) int {
return bytes.Compare(n[:], other[:])
}
func (n DeviceID) Equals(other DeviceID) bool {
return bytes.Equal(n[:], other[:])
}
// Short returns an integer representing bits 0-63 of the device ID.
func (n DeviceID) Short() ShortID {
return ShortID(binary.BigEndian.Uint64(n[:]))
}
func (n DeviceID) MarshalText() ([]byte, error) {
return []byte(n.String()), nil
}
func (s ShortID) String() string {
if s == 0 {
return ""
}
var bs [8]byte
binary.BigEndian.PutUint64(bs[:], uint64(s))
return base32.StdEncoding.EncodeToString(bs[:])[:ShortIDStringLength]
}
func (n *DeviceID) UnmarshalText(bs []byte) error {
id := string(bs)
id = strings.Trim(id, "=")
id = strings.ToUpper(id)
id = untypeoify(id)
id = unchunkify(id)
var err error
switch len(id) {
case 0:
*n = EmptyDeviceID
return nil
case 56:
// New style, with check digits
id, err = unluhnify(id)
if err != nil {
return err
}
fallthrough
case 52:
// Old style, no check digits
dec, err := base32.StdEncoding.DecodeString(id + "====")
if err != nil {
return err
}
copy(n[:], dec)
return nil
default:
return fmt.Errorf("%q: device ID invalid: incorrect length", bs)
}
}
func (*DeviceID) ProtoSize() int {
// Used by protobuf marshaller.
return DeviceIDLength
}
func (n *DeviceID) MarshalTo(bs []byte) (int, error) {
// Used by protobuf marshaller.
if len(bs) < DeviceIDLength {
return 0, errors.New("destination too short")
}
copy(bs, (*n)[:])
return DeviceIDLength, nil
}
func (n *DeviceID) Unmarshal(bs []byte) error {
// Used by protobuf marshaller.
if len(bs) < DeviceIDLength {
return fmt.Errorf("%q: not enough data", bs)
}
copy((*n)[:], bs)
return nil
}
func luhnify(s string) (string, error) {
if len(s) != 52 {
panic("unsupported string length")
}
res := make([]byte, 4*(13+1))
for i := 0; i < 4; i++ {
p := s[i*13 : (i+1)*13]
copy(res[i*(13+1):], p)
l, err := luhn32(p)
if err != nil {
return "", err
}
res[(i+1)*(13)+i] = byte(l)
}
return string(res), nil
}
func unluhnify(s string) (string, error) {
if len(s) != 56 {
return "", fmt.Errorf("%q: unsupported string length %d", s, len(s))
}
res := make([]byte, 52)
for i := 0; i < 4; i++ {
p := s[i*(13+1) : (i+1)*(13+1)-1]
copy(res[i*13:], p)
l, err := luhn32(p)
if err != nil {
return "", err
}
if s[(i+1)*14-1] != byte(l) {
return "", fmt.Errorf("%q: check digit incorrect", s)
}
}
return string(res), nil
}
func chunkify(s string) string {
chunks := len(s) / 7
res := make([]byte, chunks*(7+1)-1)
for i := 0; i < chunks; i++ {
if i > 0 {
res[i*(7+1)-1] = '-'
}
copy(res[i*(7+1):], s[i*7:(i+1)*7])
}
return string(res)
}
func unchunkify(s string) string {
s = strings.ReplaceAll(s, "-", "")
s = strings.ReplaceAll(s, " ", "")
return s
}
func untypeoify(s string) string {
s = strings.ReplaceAll(s, "0", "O")
s = strings.ReplaceAll(s, "1", "I")
s = strings.ReplaceAll(s, "8", "B")
return s
}
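// Illustrative sketch, not part of the original file: round-tripping a
// device ID through its canonical string form, exercising luhnify and
// chunkify via String and their inverses via UnmarshalText.
func exampleRoundTrip(raw [DeviceIDLength]byte) bool {
	id := DeviceID(raw)
	parsed, err := DeviceIDFromString(id.String())
	return err == nil && parsed.Equals(id)
}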
| lib/protocol/deviceid.go | 0 | https://github.com/syncthing/syncthing/commit/2f3eacdb6c1c33650ccdd91f42e842c116200d92 | [
0.0003781058476306498,
0.00017860646767076105,
0.00016354280523955822,
0.00017093827773351222,
0.0000427350023528561
] |
{
"id": 5,
"code_window": [
"\t\t\tfor {\n",
"\t\t\t\tselect {\n",
"\t\t\t\tcase <-done:\n",
"\t\t\t\t\tl.Debugln(w, \"Walk progress done\", w.Folder, w.Subs, w.Matcher)\n",
"\t\t\t\t\tticker.Stop()\n",
"\t\t\t\t\treturn\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\temitProgressEvent()\n"
],
"file_path": "lib/scanner/walk.go",
"type": "add",
"edit_start_line_idx": 176
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package scanner
import (
"context"
"errors"
"fmt"
"path/filepath"
"strings"
"sync/atomic"
"time"
"unicode/utf8"
metrics "github.com/rcrowley/go-metrics"
"github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/ignore"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol"
"golang.org/x/text/unicode/norm"
)
type Config struct {
// Folder for which the walker has been created
Folder string
// Limit walking to these paths within Dir, or no limit if Sub is empty
Subs []string
// If Matcher is not nil, it is used to identify files to ignore which were specified by the user.
Matcher *ignore.Matcher
// Number of hours to keep temporary files for
TempLifetime time.Duration
// If CurrentFiler is not nil, it is queried for the current file before rescanning.
CurrentFiler CurrentFiler
// The Filesystem provides an abstraction on top of the actual filesystem.
Filesystem fs.Filesystem
// If IgnorePerms is true, changes to permission bits will not be
// detected.
IgnorePerms bool
// When AutoNormalize is set, file names that are in UTF8 but incorrect
// normalization form will be corrected.
AutoNormalize bool
// Number of routines to use for hashing
Hashers int
// Our vector clock id
ShortID protocol.ShortID
// Optional progress tick interval which defines how often FolderScanProgress
// events are emitted. Negative number means disabled.
ProgressTickIntervalS int
// Local flags to set on scanned files
LocalFlags uint32
// Modification time is to be considered unchanged if the difference is lower.
ModTimeWindow time.Duration
// Event logger to which the scan progress events are sent
EventLogger events.Logger
// If ScanOwnership is true, we pick up ownership information on files while scanning.
ScanOwnership bool
// If ScanXattrs is true, we pick up extended attributes on files while scanning.
ScanXattrs bool
// Filter for extended attributes
XattrFilter XattrFilter
}
type CurrentFiler interface {
// CurrentFile returns the file as seen at last scan.
CurrentFile(name string) (protocol.FileInfo, bool)
}
type XattrFilter interface {
Permit(string) bool
GetMaxSingleEntrySize() int
GetMaxTotalSize() int
}
type ScanResult struct {
File protocol.FileInfo
Err error
Path string // to be set in case Err != nil and File == nil
}
func Walk(ctx context.Context, cfg Config) chan ScanResult {
return newWalker(cfg).walk(ctx)
}
func WalkWithoutHashing(ctx context.Context, cfg Config) chan ScanResult {
return newWalker(cfg).walkWithoutHashing(ctx)
}
func newWalker(cfg Config) *walker {
w := &walker{cfg}
if w.CurrentFiler == nil {
w.CurrentFiler = noCurrentFiler{}
}
if w.Filesystem == nil {
panic("no filesystem specified")
}
if w.Matcher == nil {
w.Matcher = ignore.New(w.Filesystem)
}
registerFolderMetrics(w.Folder)
return w
}
var (
errUTF8Invalid = errors.New("item is not in UTF8 encoding")
errUTF8Normalization = errors.New("item is not in the correct UTF8 normalization form")
errUTF8Conflict = errors.New("item has UTF8 encoding conflict with another item")
)
type walker struct {
Config
}
// Walk returns the list of files found in the local folder by scanning the
// file system. Files are blockwise hashed.
func (w *walker) walk(ctx context.Context) chan ScanResult {
l.Debugln(w, "Walk", w.Subs, w.Matcher)
toHashChan := make(chan protocol.FileInfo)
finishedChan := make(chan ScanResult)
// A routine which walks the filesystem tree, and sends files which have
// been modified to the counter routine.
go w.scan(ctx, toHashChan, finishedChan)
// We're not required to emit scan progress events, just kick off hashers,
// and feed inputs directly from the walker.
if w.ProgressTickIntervalS < 0 {
newParallelHasher(ctx, w.Folder, w.Filesystem, w.Hashers, finishedChan, toHashChan, nil, nil)
return finishedChan
}
// Defaults to every 2 seconds.
if w.ProgressTickIntervalS == 0 {
w.ProgressTickIntervalS = 2
}
ticker := time.NewTicker(time.Duration(w.ProgressTickIntervalS) * time.Second)
// We need to emit progress events, hence we create a routine which buffers
// the list of files to be hashed, counts the total number of
// bytes to hash, and once no more files need to be hashed (chan gets closed),
// start a routine which periodically emits FolderScanProgress events,
// until a stop signal is sent by the parallel hasher.
// Parallel hasher is stopped by this routine when we close the channel over
// which it receives the files we ask it to hash.
go func() {
var filesToHash []protocol.FileInfo
var total int64 = 1
for file := range toHashChan {
filesToHash = append(filesToHash, file)
total += file.Size
}
realToHashChan := make(chan protocol.FileInfo)
done := make(chan struct{})
progress := newByteCounter()
newParallelHasher(ctx, w.Folder, w.Filesystem, w.Hashers, finishedChan, realToHashChan, progress, done)
// A routine which actually emits the FolderScanProgress events
// every w.ProgressTicker ticks, until the hasher routines terminate.
go func() {
defer progress.Close()
for {
select {
case <-done:
l.Debugln(w, "Walk progress done", w.Folder, w.Subs, w.Matcher)
ticker.Stop()
return
case <-ticker.C:
current := progress.Total()
rate := progress.Rate()
l.Debugf("%v: Walk %s %s current progress %d/%d at %.01f MiB/s (%d%%)", w, w.Folder, w.Subs, current, total, rate/1024/1024, current*100/total)
w.EventLogger.Log(events.FolderScanProgress, map[string]interface{}{
"folder": w.Folder,
"current": current,
"total": total,
"rate": rate, // bytes per second
})
case <-ctx.Done():
ticker.Stop()
return
}
}
}()
loop:
for _, file := range filesToHash {
l.Debugln(w, "real to hash:", file.Name)
select {
case realToHashChan <- file:
case <-ctx.Done():
break loop
}
}
close(realToHashChan)
}()
return finishedChan
}
func (w *walker) walkWithoutHashing(ctx context.Context) chan ScanResult {
l.Debugln(w, "Walk without hashing", w.Subs, w.Matcher)
toHashChan := make(chan protocol.FileInfo)
finishedChan := make(chan ScanResult)
// A routine which walks the filesystem tree, and sends files which have
// been modified to the counter routine.
go w.scan(ctx, toHashChan, finishedChan)
go func() {
for file := range toHashChan {
finishedChan <- ScanResult{File: file}
}
close(finishedChan)
}()
return finishedChan
}
func (w *walker) scan(ctx context.Context, toHashChan chan<- protocol.FileInfo, finishedChan chan<- ScanResult) {
hashFiles := w.walkAndHashFiles(ctx, toHashChan, finishedChan)
if len(w.Subs) == 0 {
w.Filesystem.Walk(".", hashFiles)
} else {
for _, sub := range w.Subs {
if err := osutil.TraversesSymlink(w.Filesystem, filepath.Dir(sub)); err != nil {
l.Debugf("%v: Skip walking %v as it is below a symlink", w, sub)
continue
}
w.Filesystem.Walk(sub, hashFiles)
}
}
close(toHashChan)
}
func (w *walker) walkAndHashFiles(ctx context.Context, toHashChan chan<- protocol.FileInfo, finishedChan chan<- ScanResult) fs.WalkFunc {
now := time.Now()
ignoredParent := ""
return func(path string, info fs.FileInfo, err error) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
metricScannedItems.WithLabelValues(w.Folder).Inc()
// Return value used when we are returning early and don't want to
// process the item. For directories, this means do-not-descend.
var skip error // nil
// info nil when error is not nil
if info != nil && info.IsDir() {
skip = fs.SkipDir
}
if !utf8.ValidString(path) {
handleError(ctx, "scan", path, errUTF8Invalid, finishedChan)
return skip
}
if fs.IsTemporary(path) {
l.Debugln(w, "temporary:", path, "err:", err)
if err == nil && info.IsRegular() && info.ModTime().Add(w.TempLifetime).Before(now) {
w.Filesystem.Remove(path)
l.Debugln(w, "removing temporary:", path, info.ModTime())
}
return nil
}
if fs.IsInternal(path) {
l.Debugln(w, "ignored (internal):", path)
return skip
}
if w.Matcher.Match(path).IsIgnored() {
l.Debugln(w, "ignored (patterns):", path)
// Only descend if matcher says so and the current file is not a symlink.
if err != nil || w.Matcher.SkipIgnoredDirs() || info.IsSymlink() {
return skip
}
// If the parent wasn't ignored already, set this path as the "highest" ignored parent
if info.IsDir() && (ignoredParent == "" || !fs.IsParent(path, ignoredParent)) {
ignoredParent = path
}
return nil
}
if err != nil {
// No need reporting errors for files that don't exist (e.g. scan
// due to filesystem watcher)
if !fs.IsNotExist(err) {
handleError(ctx, "scan", path, err, finishedChan)
}
return skip
}
if path == "." {
return nil
}
if ignoredParent == "" {
// parent isn't ignored, nothing special
return w.handleItem(ctx, path, info, toHashChan, finishedChan, skip)
}
// Part of current path below the ignored (potential) parent
rel := strings.TrimPrefix(path, ignoredParent+string(fs.PathSeparator))
// ignored path isn't actually a parent of the current path
if rel == path {
ignoredParent = ""
return w.handleItem(ctx, path, info, toHashChan, finishedChan, skip)
}
// The previously ignored parent directories of the current, not
// ignored path need to be handled as well.
// Prepend an empty string to handle ignoredParent without anything
// appended in the first iteration.
for _, name := range append([]string{""}, fs.PathComponents(rel)...) {
ignoredParent = filepath.Join(ignoredParent, name)
info, err = w.Filesystem.Lstat(ignoredParent)
// An error here would be weird as we've already gotten to this point, but act on it nonetheless
if err != nil {
handleError(ctx, "scan", ignoredParent, err, finishedChan)
return skip
}
if err = w.handleItem(ctx, ignoredParent, info, toHashChan, finishedChan, skip); err != nil {
return err
}
}
ignoredParent = ""
return nil
}
}
func (w *walker) handleItem(ctx context.Context, path string, info fs.FileInfo, toHashChan chan<- protocol.FileInfo, finishedChan chan<- ScanResult, skip error) error {
oldPath := path
path, err := w.normalizePath(path, info)
if err != nil {
handleError(ctx, "normalizing path", oldPath, err, finishedChan)
return skip
}
switch {
case info.IsSymlink():
if err := w.walkSymlink(ctx, path, info, finishedChan); err != nil {
return err
}
if info.IsDir() {
// under no circumstances shall we descend into a symlink
return fs.SkipDir
}
return nil
case info.IsDir():
err = w.walkDir(ctx, path, info, finishedChan)
case info.IsRegular():
err = w.walkRegular(ctx, path, info, toHashChan)
}
return err
}
func (w *walker) walkRegular(ctx context.Context, relPath string, info fs.FileInfo, toHashChan chan<- protocol.FileInfo) error {
curFile, hasCurFile := w.CurrentFiler.CurrentFile(relPath)
blockSize := protocol.BlockSize(info.Size())
if hasCurFile {
// Check if we should retain current block size.
curBlockSize := curFile.BlockSize()
if blockSize > curBlockSize && blockSize/curBlockSize <= 2 {
// New block size is larger, but not more than twice larger.
// Retain.
blockSize = curBlockSize
} else if curBlockSize > blockSize && curBlockSize/blockSize <= 2 {
// Old block size is larger, but not more than twice larger.
// Retain.
blockSize = curBlockSize
}
}
f, err := CreateFileInfo(info, relPath, w.Filesystem, w.ScanOwnership, w.ScanXattrs, w.XattrFilter)
if err != nil {
return err
}
f = w.updateFileInfo(f, curFile)
f.NoPermissions = w.IgnorePerms
f.RawBlockSize = blockSize
l.Debugln(w, "checking:", f)
if hasCurFile {
if curFile.IsEquivalentOptional(f, protocol.FileInfoComparison{
ModTimeWindow: w.ModTimeWindow,
IgnorePerms: w.IgnorePerms,
IgnoreBlocks: true,
IgnoreFlags: w.LocalFlags,
IgnoreOwnership: !w.ScanOwnership,
IgnoreXattrs: !w.ScanXattrs,
}) {
l.Debugln(w, "unchanged:", curFile)
return nil
}
if curFile.ShouldConflict() {
// The old file was invalid for whatever reason and probably not
// up to date with what was out there in the cluster. Drop all
// others from the version vector to indicate that we haven't
// taken their version into account, and possibly cause a
// conflict.
f.Version = f.Version.DropOthers(w.ShortID)
}
l.Debugln(w, "rescan:", curFile)
}
l.Debugln(w, "to hash:", relPath, f)
select {
case toHashChan <- f:
case <-ctx.Done():
return ctx.Err()
}
return nil
}
func (w *walker) walkDir(ctx context.Context, relPath string, info fs.FileInfo, finishedChan chan<- ScanResult) error {
curFile, hasCurFile := w.CurrentFiler.CurrentFile(relPath)
f, err := CreateFileInfo(info, relPath, w.Filesystem, w.ScanOwnership, w.ScanXattrs, w.XattrFilter)
if err != nil {
return err
}
f = w.updateFileInfo(f, curFile)
f.NoPermissions = w.IgnorePerms
l.Debugln(w, "checking:", f)
if hasCurFile {
if curFile.IsEquivalentOptional(f, protocol.FileInfoComparison{
ModTimeWindow: w.ModTimeWindow,
IgnorePerms: w.IgnorePerms,
IgnoreBlocks: true,
IgnoreFlags: w.LocalFlags,
IgnoreOwnership: !w.ScanOwnership,
IgnoreXattrs: !w.ScanXattrs,
}) {
l.Debugln(w, "unchanged:", curFile)
return nil
}
if curFile.ShouldConflict() {
// The old file was invalid for whatever reason and probably not
// up to date with what was out there in the cluster. Drop all
// others from the version vector to indicate that we haven't
// taken their version into account, and possibly cause a
// conflict.
f.Version = f.Version.DropOthers(w.ShortID)
}
l.Debugln(w, "rescan:", curFile)
}
l.Debugln(w, "dir:", relPath, f)
select {
case finishedChan <- ScanResult{File: f}:
case <-ctx.Done():
return ctx.Err()
}
return nil
}
// walkSymlink returns nil, or an error if the error is of a nature that
// should stop the entire walk.
func (w *walker) walkSymlink(ctx context.Context, relPath string, info fs.FileInfo, finishedChan chan<- ScanResult) error {
// Symlinks are not supported on Windows. We ignore instead of returning
// an error.
if build.IsWindows {
return nil
}
f, err := CreateFileInfo(info, relPath, w.Filesystem, w.ScanOwnership, w.ScanXattrs, w.XattrFilter)
if err != nil {
handleError(ctx, "reading link", relPath, err, finishedChan)
return nil
}
curFile, hasCurFile := w.CurrentFiler.CurrentFile(relPath)
f = w.updateFileInfo(f, curFile)
l.Debugln(w, "checking:", f)
if hasCurFile {
if curFile.IsEquivalentOptional(f, protocol.FileInfoComparison{
ModTimeWindow: w.ModTimeWindow,
IgnorePerms: w.IgnorePerms,
IgnoreBlocks: true,
IgnoreFlags: w.LocalFlags,
IgnoreOwnership: !w.ScanOwnership,
IgnoreXattrs: !w.ScanXattrs,
}) {
l.Debugln(w, "unchanged:", curFile, info.ModTime().Unix(), info.Mode()&fs.ModePerm)
return nil
}
if curFile.ShouldConflict() {
// The old file was invalid for whatever reason and probably not
// up to date with what was out there in the cluster. Drop all
// others from the version vector to indicate that we haven't
// taken their version into account, and possibly cause a
// conflict.
f.Version = f.Version.DropOthers(w.ShortID)
}
l.Debugln(w, "rescan:", curFile)
}
l.Debugln(w, "symlink:", relPath, f)
select {
case finishedChan <- ScanResult{File: f}:
case <-ctx.Done():
return ctx.Err()
}
return nil
}
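// Toy stand-in (not syncthing's real protocol.Vector) illustrating the
// DropOthers step used in the conflict branches above: keeping only our
// own counter advertises that other devices' versions were not taken
// into account, which lets the receiving side detect a conflict.
type toyVector map[string]uint64

func (v toyVector) dropOthers(self string) toyVector {
	return toyVector{self: v[self]}
}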
// normalizePath returns the normalized relative path (possibly after fixing
// it on disk), or an error if normalization failed or could not be applied.
func (w *walker) normalizePath(path string, info fs.FileInfo) (normPath string, err error) {
if build.IsDarwin {
// Mac OS X file names should always be NFD normalized.
normPath = norm.NFD.String(path)
} else {
// Every other OS in the known universe uses NFC or just plain
// doesn't bother to define an encoding. In our case *we* do care,
// so we enforce NFC regardless.
normPath = norm.NFC.String(path)
}
if path == normPath {
// The file name is already normalized: nothing to do
return path, nil
}
if !w.AutoNormalize {
// We're not authorized to do anything about it, so complain and skip.
return "", errUTF8Normalization
}
// We will attempt to normalize it.
normInfo, err := w.Filesystem.Lstat(normPath)
if fs.IsNotExist(err) {
// Nothing exists with the normalized filename. Good.
if err = w.Filesystem.Rename(path, normPath); err != nil {
return "", err
}
l.Infof(`Normalized UTF8 encoding of file name "%s".`, path)
return normPath, nil
}
if w.Filesystem.SameFile(info, normInfo) {
// With some filesystems (ZFS), if there is an un-normalized path and you ask whether the normalized
// version exists, it responds with true. Therefore we need to check fs.SameFile as well.
// In this case, a call to Rename won't do anything, so we have to rename via a temp file.
// We don't want to use the standard syncthing prefix here, as that will result in the file being ignored
// and eventually deleted by Syncthing if the rename back fails.
tempPath := fs.TempNameWithPrefix(normPath, "")
if err = w.Filesystem.Rename(path, tempPath); err != nil {
return "", err
}
if err = w.Filesystem.Rename(tempPath, normPath); err != nil {
// I don't ever expect this to happen, but if it does, we should probably tell our caller that the normalized
// path is the temp path: that way at least the user's data still gets synced.
l.Warnf(`Error renaming "%s" to "%s" while normalizing UTF8 encoding: %v. You will want to rename this file back manually.`, tempPath, normPath, err)
return tempPath, nil
}
return normPath, nil
}
// There is something already in the way at the normalized
// file name.
return "", errUTF8Conflict
}
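// Small companion check using the same golang.org/x/text/unicode/norm
// package as normalizePath (an illustrative helper, not part of the
// real scanner): whether a name already has the normalization form this
// platform enforces — NFD on macOS, NFC everywhere else.
func isNormalized(name string, darwin bool) bool {
	if darwin {
		return norm.NFD.IsNormalString(name)
	}
	return norm.NFC.IsNormalString(name)
}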
// updateFileInfo updates walker-specific members of protocol.FileInfo that
// do not depend on type, and things that should be preserved from the
// previous version of the FileInfo.
func (w *walker) updateFileInfo(dst, src protocol.FileInfo) protocol.FileInfo {
if dst.Type == protocol.FileInfoTypeFile && build.IsWindows {
// If we have an existing index entry, copy the executable bits
// from there.
dst.Permissions |= (src.Permissions & 0o111)
}
dst.Version = src.Version.Update(w.ShortID)
dst.ModifiedBy = w.ShortID
dst.LocalFlags = w.LocalFlags
// Copy OS data from src to dst, unless it was already set on dst.
dst.Platform.MergeWith(&src.Platform)
return dst
}
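// The 0o111 mask above as a standalone sketch (hypothetical helper):
// copy only the three execute bits — owner, group, other — from the
// previous index entry, since Windows cannot represent them itself.
func preserveExecBits(newPerm, oldPerm uint32) uint32 {
	return newPerm | (oldPerm & 0o111) // e.g. 0o644, 0o755 -> 0o755
}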
func handleError(ctx context.Context, context, path string, err error, finishedChan chan<- ScanResult) {
select {
case finishedChan <- ScanResult{
Err: fmt.Errorf("%s: %w", context, err),
Path: path,
}:
case <-ctx.Done():
}
}
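// The select-on-send idiom used throughout this file, factored into a
// generic helper for illustration (hypothetical; the real code inlines
// it): the send aborts cleanly on context cancellation instead of
// blocking forever on a full or abandoned channel.
func sendCtx[T any](ctx context.Context, ch chan<- T, v T) error {
	select {
	case ch <- v:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}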
func (w *walker) String() string {
return fmt.Sprintf("walker/%s@%p", w.Folder, w)
}
// A byteCounter gets bytes added to it via Update() and then provides the
// Total() and the one-minute moving average Rate() in bytes per second.
type byteCounter struct {
total atomic.Int64
metrics.EWMA
stop chan struct{}
}
func newByteCounter() *byteCounter {
c := &byteCounter{
EWMA: metrics.NewEWMA1(), // a one minute exponentially weighted moving average
stop: make(chan struct{}),
}
go c.ticker()
return c
}
func (c *byteCounter) ticker() {
// The metrics.EWMA expects clock ticks every five seconds in order to
// decay the average properly.
t := time.NewTicker(5 * time.Second)
for {
select {
case <-t.C:
c.Tick()
case <-c.stop:
t.Stop()
return
}
}
}
func (c *byteCounter) Update(bytes int64) {
c.total.Add(bytes)
c.EWMA.Update(bytes)
}
func (c *byteCounter) Total() int64 { return c.total.Load() }
func (c *byteCounter) Close() {
close(c.stop)
}
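// Hypothetical usage sketch for byteCounter: feed it byte counts as
// data is hashed, then read the running total and the one-minute moving
// average. Rate comes from the embedded go-metrics EWMA and only
// becomes meaningful after the 5-second ticker has fired.
func exampleByteCounter() {
	c := newByteCounter()
	defer c.Close()
	c.Update(1 << 20)  // pretend we hashed 1 MiB
	total := c.Total() // 1048576
	rate := c.Rate()   // bytes/second, exponentially decayed
	_, _ = total, rate
}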
// A no-op CurrentFiler
type noCurrentFiler struct{}
func (noCurrentFiler) CurrentFile(_ string) (protocol.FileInfo, bool) {
return protocol.FileInfo{}, false
}
func CreateFileInfo(fi fs.FileInfo, name string, filesystem fs.Filesystem, scanOwnership bool, scanXattrs bool, xattrFilter XattrFilter) (protocol.FileInfo, error) {
f := protocol.FileInfo{Name: name}
if scanOwnership || scanXattrs {
if plat, err := filesystem.PlatformData(name, scanOwnership, scanXattrs, xattrFilter); err == nil {
f.Platform = plat
} else {
return protocol.FileInfo{}, fmt.Errorf("reading platform data: %w", err)
}
}
if ct := fi.InodeChangeTime(); !ct.IsZero() {
f.InodeChangeNs = ct.UnixNano()
} else {
f.InodeChangeNs = 0
}
if fi.IsSymlink() {
f.Type = protocol.FileInfoTypeSymlink
target, err := filesystem.ReadSymlink(name)
if err != nil {
return protocol.FileInfo{}, err
}
f.SymlinkTarget = target
f.NoPermissions = true // Symlinks don't have permissions of their own
return f, nil
}
f.Permissions = uint32(fi.Mode() & fs.ModePerm)
f.ModifiedS = fi.ModTime().Unix()
f.ModifiedNs = fi.ModTime().Nanosecond()
if fi.IsDir() {
f.Type = protocol.FileInfoTypeDirectory
return f, nil
}
f.Size = fi.Size()
f.Type = protocol.FileInfoTypeFile
return f, nil
}
| lib/scanner/walk.go | 1 | https://github.com/syncthing/syncthing/commit/2f3eacdb6c1c33650ccdd91f42e842c116200d92 | [
0.984483540058136,
0.013973185792565346,
0.00016239445540122688,
0.0001722317683743313,
0.11437942832708359
] |
{
"id": 5,
"code_window": [
"\t\t\tfor {\n",
"\t\t\t\tselect {\n",
"\t\t\t\tcase <-done:\n",
"\t\t\t\t\tl.Debugln(w, \"Walk progress done\", w.Folder, w.Subs, w.Matcher)\n",
"\t\t\t\t\tticker.Stop()\n",
"\t\t\t\t\treturn\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\temitProgressEvent()\n"
],
"file_path": "lib/scanner/walk.go",
"type": "add",
"edit_start_line_idx": 176
} | // Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package cli
import (
"encoding/json"
"errors"
"fmt"
"io"
"mime"
"net/http"
"os"
"path/filepath"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/db/backend"
"github.com/syncthing/syncthing/lib/locations"
)
func responseToBArray(response *http.Response) ([]byte, error) {
bytes, err := io.ReadAll(response.Body)
if err != nil {
return nil, err
}
return bytes, response.Body.Close()
}
func emptyPost(url string, apiClientFactory *apiClientFactory) error {
client, err := apiClientFactory.getClient()
if err != nil {
return err
}
_, err = client.Post(url, "")
return err
}
func indexDumpOutputWrapper(apiClientFactory *apiClientFactory) func(url string) error {
return func(url string) error {
return indexDumpOutput(url, apiClientFactory)
}
}
func indexDumpOutput(url string, apiClientFactory *apiClientFactory) error {
client, err := apiClientFactory.getClient()
if err != nil {
return err
}
response, err := client.Get(url)
if errors.Is(err, errNotFound) {
return errors.New("not found (folder/file not in database)")
}
if err != nil {
return err
}
return prettyPrintResponse(response)
}
func saveToFile(url string, apiClientFactory *apiClientFactory) error {
client, err := apiClientFactory.getClient()
if err != nil {
return err
}
response, err := client.Get(url)
if err != nil {
return err
}
_, params, err := mime.ParseMediaType(response.Header.Get("Content-Disposition"))
if err != nil {
return err
}
filename := params["filename"]
if filename == "" {
return errors.New("missing filename in response")
}
bs, err := responseToBArray(response)
if err != nil {
return err
}
f, err := os.Create(filename)
if err != nil {
return err
}
_, err = f.Write(bs)
if err != nil {
_ = f.Close()
return err
}
err = f.Close()
if err != nil {
return err
}
fmt.Println("Wrote results to", filename)
return nil
}
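// Side note on the Content-Disposition handling above: a standalone
// illustration of what mime.ParseMediaType yields for a typical
// download header (hypothetical example values).
func exampleContentDisposition() {
	mediatype, params, err := mime.ParseMediaType(`attachment; filename="report.zip"`)
	if err == nil {
		fmt.Println(mediatype, params["filename"]) // attachment report.zip
	}
}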
func getConfig(c APIClient) (config.Configuration, error) {
cfg := config.Configuration{}
response, err := c.Get("system/config")
if err != nil {
return cfg, err
}
bytes, err := responseToBArray(response)
if err != nil {
return cfg, err
}
err = json.Unmarshal(bytes, &cfg)
if err != nil {
return config.Configuration{}, err
}
return cfg, nil
}
func prettyPrintJSON(data interface{}) error {
enc := json.NewEncoder(os.Stdout)
enc.SetIndent("", " ")
return enc.Encode(data)
}
func prettyPrintResponse(response *http.Response) error {
bytes, err := responseToBArray(response)
if err != nil {
return err
}
var data interface{}
if err := json.Unmarshal(bytes, &data); err != nil {
return err
}
// TODO: Check flag for pretty print format
return prettyPrintJSON(data)
}
func getDB() (backend.Backend, error) {
return backend.OpenLevelDBRO(locations.Get(locations.Database))
}
func nulString(bs []byte) string {
for i := range bs {
if bs[i] == 0 {
return string(bs[:i])
}
}
return string(bs)
}
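// Hypothetical demonstration of nulString above: it targets fixed-size,
// NUL-padded fields (C-style buffers), so anything from the first zero
// byte onward is dropped.
func exampleNulString() string {
	return nulString([]byte{'a', 'b', 0, 'x'}) // "ab"
}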
func normalizePath(path string) string {
return filepath.ToSlash(filepath.Clean(path))
}
| cmd/syncthing/cli/utils.go | 0 | https://github.com/syncthing/syncthing/commit/2f3eacdb6c1c33650ccdd91f42e842c116200d92 | [
0.000204160125576891,
0.0001740057923598215,
0.0001653056388022378,
0.00017255404964089394,
0.000008565164534957148
] |
{
"id": 5,
"code_window": [
"\t\t\tfor {\n",
"\t\t\t\tselect {\n",
"\t\t\t\tcase <-done:\n",
"\t\t\t\t\tl.Debugln(w, \"Walk progress done\", w.Folder, w.Subs, w.Matcher)\n",
"\t\t\t\t\tticker.Stop()\n",
"\t\t\t\t\treturn\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\temitProgressEvent()\n"
],
"file_path": "lib/scanner/walk.go",
"type": "add",
"edit_start_line_idx": 176
} | <configuration version="37">
<folder id="a" label="a" path="a">
<device id="6564BQV-R2WYPMN-5OLXMII-CDJUKFD-BHNNCRA-WLQPAIV-ELSGAD2-RMFBFQU" introducedBy="">
<encryptionPassword></encryptionPassword>
</device>
</folder>
<folder id="b" label="b" path="b">
<device id="6564BQV-R2WYPMN-5OLXMII-CDJUKFD-BHNNCRA-WLQPAIV-ELSGAD2-RMFBFQU" introducedBy="">
<encryptionPassword>a complex password</encryptionPassword>
</device>
</folder>
<device id="6564BQV-R2WYPMN-5OLXMII-CDJUKFD-BHNNCRA-WLQPAIV-ELSGAD2-RMFBFQU" name="untrusted" compression="metadata" introducer="true" skipIntroductionRemovals="false" introducedBy="">
<address>dynamic</address>
<autoAcceptFolders>true</autoAcceptFolders>
<untrusted>true</untrusted>
</device>
</configuration>
| lib/config/testdata/untrustedintroducer.xml | 0 | https://github.com/syncthing/syncthing/commit/2f3eacdb6c1c33650ccdd91f42e842c116200d92 | [
0.00017158544505946338,
0.00016915248124860227,
0.0001667195319896564,
0.00016915248124860227,
0.0000024329565349034965
] |
{
"id": 5,
"code_window": [
"\t\t\tfor {\n",
"\t\t\t\tselect {\n",
"\t\t\t\tcase <-done:\n",
"\t\t\t\t\tl.Debugln(w, \"Walk progress done\", w.Folder, w.Subs, w.Matcher)\n",
"\t\t\t\t\tticker.Stop()\n",
"\t\t\t\t\treturn\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\temitProgressEvent()\n"
],
"file_path": "lib/scanner/walk.go",
"type": "add",
"edit_start_line_idx": 176
} | /*!
Fork Awesome 1.2.0
License - https://forkaweso.me/Fork-Awesome/license
Copyright 2018 Dave Gandy & Fork Awesome
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
.fas,
.fab,
.far {
display: inline-block;
font: normal normal normal 14px/1 ForkAwesome;
font-size: inherit;
text-rendering: auto;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}
.fas.fa-chart-area:before {
content: "\f1fe";
}
.fas.fa-arrows-alt:before {
content: "\f047";
}
.fas.fa-expand-arrows-alt:before {
content: "\f0b2";
}
.fas.fa-arrows-alt-h:before {
content: "\f07e";
}
.fas.fa-arrows-alt-v:before {
content: "\f07d";
}
.fas.fa-calendar-alt:before {
content: "\f073";
}
.fas.fa-circle-notch:before {
content: "\f1ce";
}
.fas.fa-cloud-download-alt:before {
content: "\f0ed";
}
.fas.fa-cloud-upload-alt:before {
content: "\f0ee";
}
.fas.fa-credit-card:before {
content: "\f283";
}
.fas.fa-dollar-sign:before {
content: "\f155";
}
.fas.fa-euro-sign:before {
content: "\f153";
}
.fas.fa-exchange-alt:before {
content: "\f0ec";
}
.fas.fa-external-link-alt:before {
content: "\f08e";
}
.fas.fa-external-link-square-alt:before {
content: "\f14c";
}
.fas.fa-eye-dropper:before {
content: "\f1fb";
}
.fas.fa-pound-sign:before {
content: "\f154";
}
.fas.fa-glass-martini:before {
content: "\f000";
}
.fas.fa-shekel-sign:before {
content: "\f20b";
}
.fas.fa-rupee-sign:before {
content: "\f156";
}
.fas.fa-won-sign:before {
content: "\f159";
}
.fas.fa-level-down-alt:before {
content: "\f149";
}
.fas.fa-level-up-alt:before {
content: "\f148";
}
.fas.fa-chart-line:before {
content: "\f201";
}
.fas.fa-long-arrow-alt-down:before {
content: "\f175";
}
.fas.fa-long-arrow-alt-left:before {
content: "\f177";
}
.fas.fa-long-arrow-alt-right:before {
content: "\f178";
}
.fas.fa-long-arrow-alt-up:before {
content: "\f176";
}
.fas.fa-map-marker-alt:before {
content: "\f041";
}
.fas.fa-mobile-alt:before {
content: "\f10b";
}
.fas.fa-pencil-alt:before {
content: "\f040";
}
.fas.fa-pen-square:before {
content: "\f14b";
}
.fas.fa-chart-pie:before {
content: "\f200";
}
.fas.fa-yen-sign:before {
content: "\f157";
}
.fas.fa-ruble-sign:before {
content: "\f158";
}
.fas.fa-shield-alt:before {
content: "\f132";
}
.fas.fa-sign-in-alt:before {
content: "\f090";
}
.fas.fa-sign-out-alt:before {
content: "\f08b";
}
.fas.fa-sliders-h:before {
content: "\f1de";
}
.fas.fa-tablet-alt:before {
content: "\f10a";
}
.fas.fa-tachometer-alt:before {
content: "\f0e4";
}
.fas.fa-thumbtack:before {
content: "\f08d";
}
.fas.fa-ticket-alt:before {
content: "\f145";
}
.fas.fa-trash-alt:before {
content: "\f1f8";
}
.fas.fa-lira-sign:before {
content: "\f195";
}
.fab.fa-linkedin-in:before {
content: "\fe01";
}
.fab.fa-linkedin:before {
content: "\f08c";
}
.far.fa-address-book:before {
content: "\f2ba";
}
.far.fa-address-card:before {
content: "\f2bc";
}
.far.fa-arrow-alt-circle-down:before {
content: "\f01a";
}
.far.fa-arrow-alt-circle-left:before {
content: "\f190";
}
.far.fa-arrow-alt-circle-right:before {
content: "\f18e";
}
.far.fa-arrow-alt-circle-up:before {
content: "\f01b";
}
.far.fa-bell:before {
content: "\f0f3";
}
.far.fa-bell-slash:before {
content: "\f1f7";
}
.far.fa-bookmark:before {
content: "\f097";
}
.far.fa-building:before {
content: "\f0f7";
}
.far.fa-calendar-check:before {
content: "\f274";
}
.far.fa-calendar-minus:before {
content: "\f272";
}
.far.fa-calendar:before {
content: "\f133";
}
.far.fa-calendar-plus:before {
content: "\f271";
}
.far.fa-calendar-times:before {
content: "\f273";
}
.far.fa-caret-square-down:before {
content: "\f150";
}
.far.fa-caret-square-left:before {
content: "\f191";
}
.far.fa-caret-square-right:before {
content: "\f152";
}
.far.fa-caret-square-up:before {
content: "\f151";
}
.far.fa-check-circle:before {
content: "\f05d";
}
.far.fa-check-square:before {
content: "\f046";
}
.far.fa-circle:before {
content: "\f10c";
}
.far.fa-clock:before {
content: "\f017";
}
.far.fa-comment:before {
content: "\f0e5";
}
.far.fa-comment-dots:before {
content: "\f27b";
}
.far.fa-comments:before {
content: "\f0e6";
}
.far.fa-dot-circle:before {
content: "\f192";
}
.far.fa-id-card:before {
content: "\f2c3";
}
.far.fa-envelope:before {
content: "\f003";
}
.far.fa-envelope-open:before {
content: "\f2b7";
}
.far.fa-file-archive:before {
content: "\f1c6";
}
.far.fa-file-audio:before {
content: "\f1c7";
}
.far.fa-file-code:before {
content: "\f1c9";
}
.far.fa-file-excel:before {
content: "\f1c3";
}
.far.fa-file-image:before {
content: "\f1c5";
}
.far.fa-file-video:before {
content: "\f1c8";
}
.far.fa-copy:before,
.far.fa-file:before {
content: "\f016";
}
.far.fa-file-pdf:before {
content: "\f1c1";
}
.far.fa-file-powerpoint:before {
content: "\f1c4";
}
.far.fa-file-alt:before {
content: "\f0f6";
}
.far.fa-file-word:before {
content: "\f1c2";
}
.far.fa-flag:before {
content: "\f11d";
}
.far.fa-save:before {
content: "\f0c7";
}
.far.fa-folder:before {
content: "\f114";
}
.far.fa-folder-open:before {
content: "\f115";
}
.far.fa-frown:before {
content: "\f119";
}
.far.fa-futbol:before {
content: "\f1e3";
}
.far.fa-hand-rock:before {
content: "\f255";
}
.far.fa-hand-lizard:before {
content: "\f258";
}
.far.fa-hand-point-down:before {
content: "\f0a7";
}
.far.fa-hand-point-left:before {
content: "\f0a5";
}
.far.fa-hand-point-right:before {
content: "\f0a4";
}
.far.fa-hand-point-up:before {
content: "\f0a6";
}
.far.fa-hand-paper:before {
content: "\f256";
}
.far.fa-hand-pointer:before {
content: "\f25a";
}
.far.fa-hand-scissors:before {
content: "\f257";
}
.far.fa-hand-spock:before {
content: "\f259";
}
.far.fa-handshake:before {
content: "\f2b5";
}
.far.fa-hdd:before {
content: "\f0a0";
}
.far.fa-heart:before {
content: "\f08a";
}
.far.fa-hospital:before {
content: "\f0f8";
}
.far.fa-hourglass:before {
content: "\f250";
}
.far.fa-keyboard:before {
content: "\f11c";
}
.far.fa-lemon:before {
content: "\f094";
}
.far.fa-lightbulb:before {
content: "\f0eb";
}
.far.fa-meh:before {
content: "\f11a";
}
.far.fa-minus-square:before {
content: "\f147";
}
.far.fa-money-bill-alt:before {
content: "\f0d6";
}
.far.fa-moon:before {
content: "\f186";
}
.far.fa-newspaper:before {
content: "\f1ea";
}
.far.fa-paper-plane:before {
content: "\f1d9";
}
.far.fa-pause-circle:before {
content: "\f28c";
}
.far.fa-edit:before {
content: "\f044";
}
.far.fa-image:before {
content: "\f03e";
}
.far.fa-play-circle:before {
content: "\f01d";
}
.far.fa-plus-square:before {
content: "\f196";
}
.far.fa-question-circle:before {
content: "\f29c";
}
.far.fa-share-square:before {
content: "\f045";
}
.far.fa-smile:before {
content: "\f118";
}
.far.fa-snowflake:before {
content: "\f2dc";
}
.far.fa-star-half:before {
content: "\f089";
}
.far.fa-star:before {
content: "\f006";
}
.far.fa-sticky-note:before {
content: "\f24a";
}
.far.fa-stop-circle:before {
content: "\f28e";
}
.far.fa-sun:before {
content: "\f185";
}
.far.fa-thumbs-down:before {
content: "\f088";
}
.far.fa-thumbs-up:before {
content: "\f087";
}
.far.fa-times-circle:before {
content: "\f05c";
}
.far.fa-window-close:before {
content: "\f2d4";
}
.far.fa-trash-alt:before {
content: "\f014";
}
.far.fa-user-circle:before {
content: "\f2be";
}
.far.fa-user:before {
content: "\f2c0";
}
| gui/default/vendor/fork-awesome/css/v5-compat.css | 0 | https://github.com/syncthing/syncthing/commit/2f3eacdb6c1c33650ccdd91f42e842c116200d92 | [
0.00017851096345111728,
0.00017649227811489254,
0.0001719961001072079,
0.0001768450892996043,
0.0000013240720591056743
] |
{
"id": 0,
"code_window": [
"\t{Scope: ScopeGlobal, Name: \"innodb_max_dirty_pages_pct\", Value: \"75\"},\n",
"\t{Scope: ScopeGlobal, Name: InnodbFilePerTable, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},\n",
"\t{Scope: ScopeGlobal, Name: InnodbLogCompressedPages, Value: \"1\"},\n",
"\t{Scope: ScopeNone, Name: \"skip_networking\", Value: \"0\"},\n",
"\t{Scope: ScopeGlobal, Name: \"innodb_monitor_reset\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"have_ssl\", Value: \"DISABLED\"},\n",
"\t{Scope: ScopeNone, Name: \"have_openssl\", Value: \"DISABLED\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_ca\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_cert\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_key\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_cipher\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"tls_version\", Value: \"TLSv1,TLSv1.1,TLSv1.2\"},\n",
"\t{Scope: ScopeGlobal, Name: InnodbPrintAllDeadlocks, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/noop.go",
"type": "replace",
"edit_start_line_idx": 114
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package variable
import (
"math"
)
// The following sysVars are noops.
// Some applications will depend on certain variables to be present or settable,
// for example query_cache_time. These are included for MySQL compatibility,
// but changing them has no effect on behavior.
var noopSysVars = []*SysVar{
{Scope: ScopeGlobal, Name: ConnectTimeout, Value: "10", Type: TypeUnsigned, MinValue: 2, MaxValue: secondsPerYear, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: QueryCacheWlockInvalidate, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "sql_buffer_result", Value: BoolOff, IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: MyISAMUseMmap, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: "gtid_mode", Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: FlushTime, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: secondsPerYear, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "performance_schema_max_mutex_classes", Value: "200"},
{Scope: ScopeGlobal | ScopeSession, Name: LowPriorityUpdates, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: SessionTrackGtids, Value: BoolOff, Type: TypeEnum, PossibleValues: []string{BoolOff, "OWN_GTID", "ALL_GTIDS"}},
{Scope: ScopeGlobal | ScopeSession, Name: "ndbinfo_max_rows", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_index_stat_option", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: OldPasswords, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 2, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "innodb_version", Value: "5.6.25"},
{Scope: ScopeGlobal | ScopeSession, Name: BigTables, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "skip_external_locking", Value: "1"},
{Scope: ScopeNone, Name: "innodb_sync_array_size", Value: "1"},
{Scope: ScopeSession, Name: "rand_seed2", Value: ""},
{Scope: ScopeGlobal, Name: ValidatePasswordCheckUserName, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: ValidatePasswordNumberCount, Value: "1", Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true},
{Scope: ScopeSession, Name: "gtid_next", Value: ""},
{Scope: ScopeGlobal, Name: "ndb_show_foreign_key_mock_tables", Value: ""},
{Scope: ScopeNone, Name: "multi_range_count", Value: "256"},
{Scope: ScopeGlobal | ScopeSession, Name: "binlog_error_action", Value: "IGNORE_ERROR"},
{Scope: ScopeGlobal | ScopeSession, Name: "default_storage_engine", Value: "InnoDB"},
{Scope: ScopeNone, Name: "ft_query_expansion_limit", Value: "20"},
{Scope: ScopeGlobal, Name: MaxConnectErrors, Value: "100", Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal, Name: SyncBinlog, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 4294967295, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "max_digest_length", Value: "1024"},
{Scope: ScopeNone, Name: "innodb_force_load_corrupted", Value: "0"},
{Scope: ScopeNone, Name: "performance_schema_max_table_handles", Value: "4000"},
{Scope: ScopeGlobal, Name: InnodbFastShutdown, Value: "1", Type: TypeUnsigned, MinValue: 0, MaxValue: 2, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "ft_max_word_len", Value: "84"},
{Scope: ScopeGlobal, Name: "log_backward_compatible_user_definitions", Value: ""},
{Scope: ScopeNone, Name: "lc_messages_dir", Value: "/usr/local/mysql-5.6.25-osx10.8-x86_64/share/"},
{Scope: ScopeGlobal, Name: "ft_boolean_syntax", Value: "+ -><()~*:\"\"&|"},
{Scope: ScopeGlobal, Name: TableDefinitionCache, Value: "-1", Type: TypeUnsigned, MinValue: 400, MaxValue: 524288, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: SkipNameResolve, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "performance_schema_max_file_handles", Value: "32768"},
{Scope: ScopeSession, Name: "transaction_allow_batching", Value: ""},
{Scope: ScopeNone, Name: "performance_schema_max_statement_classes", Value: "168"},
{Scope: ScopeGlobal, Name: "server_id", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_flushing_avg_loops", Value: "30"},
{Scope: ScopeGlobal | ScopeSession, Name: TmpTableSize, Value: "16777216", Type: TypeUnsigned, MinValue: 1024, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true, IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "innodb_max_purge_lag", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: "preload_buffer_size", Value: "32768"},
{Scope: ScopeGlobal, Name: CheckProxyUsers, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "have_query_cache", Value: "YES"},
{Scope: ScopeGlobal, Name: "innodb_flush_log_at_timeout", Value: "1"},
{Scope: ScopeGlobal, Name: "innodb_max_undo_log_size", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "range_alloc_block_size", Value: "4096", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "have_rtree_keys", Value: "YES"},
{Scope: ScopeGlobal, Name: "innodb_old_blocks_pct", Value: "37"},
{Scope: ScopeGlobal, Name: "innodb_file_format", Value: "Barracuda", Type: TypeEnum, PossibleValues: []string{"Antelope", "Barracuda"}},
{Scope: ScopeGlobal, Name: "innodb_default_row_format", Value: "dynamic", Type: TypeEnum, PossibleValues: []string{"redundant", "compact", "dynamic"}},
{Scope: ScopeGlobal, Name: "innodb_compression_failure_threshold_pct", Value: "5"},
{Scope: ScopeNone, Name: "performance_schema_events_waits_history_long_size", Value: "10000"},
{Scope: ScopeGlobal, Name: "innodb_checksum_algorithm", Value: "innodb"},
{Scope: ScopeNone, Name: "innodb_ft_sort_pll_degree", Value: "2"},
{Scope: ScopeNone, Name: "thread_stack", Value: "262144"},
{Scope: ScopeGlobal, Name: "relay_log_info_repository", Value: "FILE"},
{Scope: ScopeGlobal, Name: SuperReadOnly, Value: "0", Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "max_delayed_threads", Value: "20"},
{Scope: ScopeNone, Name: "protocol_version", Value: "10"},
{Scope: ScopeGlobal | ScopeSession, Name: "new", Value: BoolOff},
{Scope: ScopeGlobal | ScopeSession, Name: "myisam_sort_buffer_size", Value: "8388608"},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace_offset", Value: "-1"},
{Scope: ScopeGlobal, Name: InnodbBufferPoolDumpAtShutdown, Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: SQLNotes, Value: "1"},
{Scope: ScopeGlobal, Name: InnodbCmpPerIndexEnabled, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: "innodb_ft_server_stopword_table", Value: ""},
{Scope: ScopeNone, Name: "performance_schema_max_file_instances", Value: "7693"},
{Scope: ScopeNone, Name: "log_output", Value: "FILE"},
{Scope: ScopeGlobal, Name: "binlog_group_commit_sync_delay", Value: ""},
{Scope: ScopeGlobal, Name: "binlog_group_commit_sync_no_delay_count", Value: ""},
{Scope: ScopeNone, Name: "have_crypt", Value: "YES"},
{Scope: ScopeGlobal, Name: "innodb_log_write_ahead_size", Value: ""},
{Scope: ScopeNone, Name: "innodb_log_group_home_dir", Value: "./"},
{Scope: ScopeNone, Name: "performance_schema_events_statements_history_size", Value: "10"},
{Scope: ScopeGlobal, Name: GeneralLog, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "validate_password_dictionary_file", Value: ""},
{Scope: ScopeGlobal, Name: BinlogOrderCommits, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal, Name: "key_cache_division_limit", Value: "100"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_insert_delayed_threads", Value: "20"},
{Scope: ScopeNone, Name: "performance_schema_session_connect_attrs_size", Value: "512"},
{Scope: ScopeGlobal, Name: "innodb_max_dirty_pages_pct", Value: "75"},
{Scope: ScopeGlobal, Name: InnodbFilePerTable, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: InnodbLogCompressedPages, Value: "1"},
{Scope: ScopeNone, Name: "skip_networking", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_monitor_reset", Value: ""},
{Scope: ScopeNone, Name: "have_ssl", Value: "DISABLED"},
{Scope: ScopeNone, Name: "have_openssl", Value: "DISABLED"},
{Scope: ScopeNone, Name: "ssl_ca", Value: ""},
{Scope: ScopeNone, Name: "ssl_cert", Value: ""},
{Scope: ScopeNone, Name: "ssl_key", Value: ""},
{Scope: ScopeNone, Name: "ssl_cipher", Value: ""},
{Scope: ScopeNone, Name: "tls_version", Value: "TLSv1,TLSv1.1,TLSv1.2"},
{Scope: ScopeGlobal, Name: InnodbPrintAllDeadlocks, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeNone, Name: "innodb_autoinc_lock_mode", Value: "1"},
{Scope: ScopeGlobal, Name: "key_buffer_size", Value: "8388608"},
{Scope: ScopeGlobal, Name: "host_cache_size", Value: "279"},
{Scope: ScopeGlobal, Name: DelayKeyWrite, Value: BoolOn, Type: TypeEnum, PossibleValues: []string{BoolOff, BoolOn, "ALL"}},
{Scope: ScopeNone, Name: "metadata_locks_cache_size", Value: "1024"},
{Scope: ScopeNone, Name: "innodb_force_recovery", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_file_format_max", Value: "Antelope"},
{Scope: ScopeGlobal | ScopeSession, Name: "debug", Value: ""},
{Scope: ScopeGlobal, Name: "log_warnings", Value: "1"},
{Scope: ScopeGlobal, Name: OfflineMode, Value: "0", Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: InnodbStrictMode, Value: "1", Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: "innodb_rollback_segments", Value: "128"},
{Scope: ScopeGlobal | ScopeSession, Name: "join_buffer_size", Value: "262144", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "innodb_mirrored_log_groups", Value: "1"},
{Scope: ScopeGlobal, Name: "max_binlog_size", Value: "1073741824"},
{Scope: ScopeGlobal, Name: "concurrent_insert", Value: "AUTO"},
{Scope: ScopeGlobal, Name: InnodbAdaptiveHashIndex, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: InnodbFtEnableStopword, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: "general_log_file", Value: "/usr/local/mysql/data/localhost.log"},
{Scope: ScopeGlobal | ScopeSession, Name: InnodbSupportXA, Value: "1"},
{Scope: ScopeGlobal, Name: "innodb_compression_level", Value: "6"},
{Scope: ScopeNone, Name: "innodb_file_format_check", Value: "1"},
{Scope: ScopeNone, Name: "myisam_mmap_size", Value: "18446744073709551615"},
{Scope: ScopeNone, Name: "innodb_buffer_pool_instances", Value: "8"},
{Scope: ScopeGlobal | ScopeSession, Name: BlockEncryptionMode, Value: "aes-128-ecb"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_length_for_sort_data", Value: "1024", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "character_set_system", Value: "utf8"},
{Scope: ScopeGlobal, Name: InnodbOptimizeFullTextOnly, Value: "0"},
{Scope: ScopeNone, Name: "character_sets_dir", Value: "/usr/local/mysql-5.6.25-osx10.8-x86_64/share/charsets/"},
{Scope: ScopeGlobal | ScopeSession, Name: QueryCacheType, Value: BoolOff, Type: TypeEnum, PossibleValues: []string{BoolOff, BoolOn, "DEMAND"}},
{Scope: ScopeNone, Name: "innodb_rollback_on_timeout", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: "query_alloc_block_size", Value: "8192"},
{Scope: ScopeGlobal | ScopeSession, Name: InitConnect, Value: ""},
{Scope: ScopeNone, Name: "have_compress", Value: "YES"},
{Scope: ScopeNone, Name: "thread_concurrency", Value: "10"},
{Scope: ScopeGlobal | ScopeSession, Name: "query_prealloc_size", Value: "8192"},
{Scope: ScopeNone, Name: "relay_log_space_limit", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: MaxUserConnections, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 4294967295, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "performance_schema_max_thread_classes", Value: "50"},
{Scope: ScopeGlobal, Name: "innodb_api_trx_level", Value: "0"},
{Scope: ScopeNone, Name: "disconnect_on_expired_password", Value: "1"},
{Scope: ScopeNone, Name: "performance_schema_max_file_classes", Value: "50"},
{Scope: ScopeGlobal, Name: "expire_logs_days", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: BinlogRowQueryLogEvents, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "default_password_lifetime", Value: ""},
{Scope: ScopeNone, Name: "pid_file", Value: "/usr/local/mysql/data/localhost.pid"},
{Scope: ScopeNone, Name: "innodb_undo_tablespaces", Value: "0"},
{Scope: ScopeGlobal, Name: InnodbStatusOutputLocks, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeNone, Name: "performance_schema_accounts_size", Value: "100"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_error_count", Value: "64", IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "max_write_lock_count", Value: "18446744073709551615"},
{Scope: ScopeNone, Name: "performance_schema_max_socket_instances", Value: "322"},
{Scope: ScopeNone, Name: "performance_schema_max_table_instances", Value: "12500"},
{Scope: ScopeGlobal, Name: "innodb_stats_persistent_sample_pages", Value: "20"},
{Scope: ScopeGlobal, Name: "show_compatibility_56", Value: ""},
{Scope: ScopeNone, Name: "innodb_open_files", Value: "2000"},
{Scope: ScopeGlobal, Name: "innodb_spin_wait_delay", Value: "6"},
{Scope: ScopeGlobal, Name: "thread_cache_size", Value: "9"},
{Scope: ScopeGlobal, Name: LogSlowAdminStatements, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "innodb_checksums", Type: TypeBool, Value: BoolOn},
{Scope: ScopeNone, Name: "ft_stopword_file", Value: "(built-in)"},
{Scope: ScopeGlobal, Name: "innodb_max_dirty_pages_pct_lwm", Value: "0"},
{Scope: ScopeGlobal, Name: LogQueriesNotUsingIndexes, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "max_heap_table_size", Value: "16777216", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: "div_precision_increment", Value: "4", IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "innodb_lru_scan_depth", Value: "1024"},
{Scope: ScopeGlobal, Name: "innodb_purge_rseg_truncate_frequency", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: SQLAutoIsNull, Value: BoolOff, Type: TypeBool, IsHintUpdatable: true},
{Scope: ScopeNone, Name: "innodb_api_enable_binlog", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: "innodb_ft_user_stopword_table", Value: ""},
{Scope: ScopeNone, Name: "server_id_bits", Value: "32"},
{Scope: ScopeGlobal, Name: "innodb_log_checksum_algorithm", Value: ""},
{Scope: ScopeNone, Name: "innodb_buffer_pool_load_at_startup", Value: "1"},
{Scope: ScopeGlobal | ScopeSession, Name: "sort_buffer_size", Value: "262144", IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "innodb_flush_neighbors", Value: "1"},
{Scope: ScopeNone, Name: "innodb_use_sys_malloc", Value: "1"},
{Scope: ScopeSession, Name: PluginLoad, Value: ""},
{Scope: ScopeSession, Name: PluginDir, Value: "/data/deploy/plugin"},
{Scope: ScopeNone, Name: "performance_schema_max_socket_classes", Value: "10"},
{Scope: ScopeNone, Name: "performance_schema_max_stage_classes", Value: "150"},
{Scope: ScopeGlobal, Name: "innodb_purge_batch_size", Value: "300"},
{Scope: ScopeNone, Name: "have_profiling", Value: "NO"},
{Scope: ScopeGlobal, Name: InnodbBufferPoolDumpNow, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: RelayLogPurge, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal, Name: "ndb_distribution", Value: ""},
{Scope: ScopeGlobal, Name: "myisam_data_pointer_size", Value: "6"},
{Scope: ScopeGlobal, Name: "ndb_optimization_delay", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_ft_num_word_optimize", Value: "2000"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_join_size", Value: "18446744073709551615", IsHintUpdatable: true},
{Scope: ScopeNone, Name: CoreFile, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "max_seeks_for_key", Value: "18446744073709551615", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "innodb_log_buffer_size", Value: "8388608"},
{Scope: ScopeGlobal, Name: "delayed_insert_timeout", Value: "300"},
{Scope: ScopeGlobal, Name: "max_relay_log_size", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: MaxSortLength, Value: "1024", Type: TypeUnsigned, MinValue: 4, MaxValue: 8388608, AutoConvertOutOfRange: true, IsHintUpdatable: true},
{Scope: ScopeNone, Name: "metadata_locks_hash_instances", Value: "8"},
{Scope: ScopeGlobal, Name: "ndb_eventbuffer_free_percent", Value: ""},
{Scope: ScopeNone, Name: "large_files_support", Value: "1"},
{Scope: ScopeGlobal, Name: "binlog_max_flush_queue_time", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_fill_factor", Value: ""},
{Scope: ScopeGlobal, Name: "log_syslog_facility", Value: ""},
{Scope: ScopeNone, Name: "innodb_ft_min_token_size", Value: "3"},
{Scope: ScopeGlobal | ScopeSession, Name: "transaction_write_set_extraction", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_blob_write_batch_bytes", Value: ""},
{Scope: ScopeGlobal, Name: "automatic_sp_privileges", Value: "1"},
{Scope: ScopeGlobal, Name: "innodb_flush_sync", Value: ""},
{Scope: ScopeNone, Name: "performance_schema_events_statements_history_long_size", Value: "10000"},
{Scope: ScopeGlobal, Name: "innodb_monitor_disable", Value: ""},
{Scope: ScopeNone, Name: "innodb_doublewrite", Value: "1"},
{Scope: ScopeNone, Name: "log_bin_use_v1_row_events", Value: "0"},
{Scope: ScopeSession, Name: "innodb_optimize_point_storage", Value: ""},
{Scope: ScopeNone, Name: "innodb_api_disable_rowlock", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_adaptive_flushing_lwm", Value: "10"},
{Scope: ScopeNone, Name: "innodb_log_files_in_group", Value: "2"},
{Scope: ScopeGlobal, Name: InnodbBufferPoolLoadNow, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeNone, Name: "performance_schema_max_rwlock_classes", Value: "40"},
{Scope: ScopeNone, Name: "binlog_gtid_simple_recovery", Value: "1"},
{Scope: ScopeNone, Name: "performance_schema_digests_size", Value: "10000"},
{Scope: ScopeGlobal | ScopeSession, Name: Profiling, Value: BoolOff, Type: TypeBool},
{Scope: ScopeSession, Name: "rand_seed1", Value: ""},
{Scope: ScopeGlobal, Name: "sha256_password_proxy_users", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: SQLQuoteShowCreate, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "binlogging_impossible_mode", Value: "IGNORE_ERROR"},
{Scope: ScopeGlobal | ScopeSession, Name: QueryCacheSize, Value: "1048576"},
{Scope: ScopeGlobal, Name: "innodb_stats_transient_sample_pages", Value: "8"},
{Scope: ScopeGlobal, Name: InnodbStatsOnMetadata, Value: "0"},
{Scope: ScopeNone, Name: "server_uuid", Value: "00000000-0000-0000-0000-000000000000"},
{Scope: ScopeNone, Name: "open_files_limit", Value: "5000"},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_force_send", Value: ""},
{Scope: ScopeNone, Name: "skip_show_database", Value: "0"},
{Scope: ScopeGlobal, Name: "log_timestamps", Value: ""},
{Scope: ScopeNone, Name: "version_compile_machine", Value: "x86_64"},
{Scope: ScopeGlobal, Name: "event_scheduler", Value: BoolOff},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_deferred_constraints", Value: ""},
{Scope: ScopeGlobal, Name: "log_syslog_include_pid", Value: ""},
{Scope: ScopeSession, Name: "last_insert_id", Value: ""},
{Scope: ScopeNone, Name: "innodb_ft_cache_size", Value: "8000000"},
{Scope: ScopeGlobal, Name: InnodbDisableSortFileCache, Value: "0"},
{Scope: ScopeGlobal, Name: "log_error_verbosity", Value: ""},
{Scope: ScopeNone, Name: "performance_schema_hosts_size", Value: "100"},
{Scope: ScopeGlobal, Name: "innodb_replication_delay", Value: "0"},
{Scope: ScopeGlobal, Name: SlowQueryLog, Value: "0"},
{Scope: ScopeSession, Name: "debug_sync", Value: ""},
{Scope: ScopeGlobal, Name: InnodbStatsAutoRecalc, Value: "1"},
{Scope: ScopeGlobal | ScopeSession, Name: "lc_messages", Value: "en_US"},
{Scope: ScopeGlobal | ScopeSession, Name: "bulk_insert_buffer_size", Value: "8388608", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: BinlogDirectNonTransactionalUpdates, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "innodb_change_buffering", Value: "all"},
{Scope: ScopeGlobal | ScopeSession, Name: SQLBigSelects, Value: BoolOn, Type: TypeBool, IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "innodb_max_purge_lag_delay", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: "session_track_schema", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_io_capacity_max", Value: "2000"},
{Scope: ScopeGlobal, Name: "innodb_autoextend_increment", Value: "64"},
{Scope: ScopeGlobal | ScopeSession, Name: "binlog_format", Value: "STATEMENT"},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace", Value: "enabled=off,one_line=off"},
{Scope: ScopeGlobal | ScopeSession, Name: "read_rnd_buffer_size", Value: "262144", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: NetWriteTimeout, Value: "60"},
{Scope: ScopeGlobal, Name: InnodbBufferPoolLoadAbort, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal | ScopeSession, Name: "transaction_prealloc_size", Value: "4096"},
{Scope: ScopeNone, Name: "performance_schema_setup_objects_size", Value: "100"},
{Scope: ScopeGlobal, Name: "sync_relay_log", Value: "10000"},
{Scope: ScopeGlobal, Name: "innodb_ft_result_cache_limit", Value: "2000000000"},
{Scope: ScopeNone, Name: "innodb_sort_buffer_size", Value: "1048576"},
{Scope: ScopeGlobal, Name: "innodb_ft_enable_diag_print", Type: TypeBool, Value: BoolOff},
{Scope: ScopeNone, Name: "thread_handling", Value: "one-thread-per-connection"},
{Scope: ScopeGlobal, Name: "stored_program_cache", Value: "256"},
{Scope: ScopeNone, Name: "performance_schema_max_mutex_instances", Value: "15906"},
{Scope: ScopeGlobal, Name: "innodb_adaptive_max_sleep_delay", Value: "150000"},
{Scope: ScopeNone, Name: "large_pages", Value: BoolOff},
{Scope: ScopeGlobal | ScopeSession, Name: "session_track_system_variables", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_change_buffer_max_size", Value: "25"},
{Scope: ScopeGlobal, Name: LogBinTrustFunctionCreators, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "innodb_write_io_threads", Value: "4"},
{Scope: ScopeGlobal, Name: "mysql_native_password_proxy_users", Value: ""},
{Scope: ScopeGlobal, Name: serverReadOnly, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "large_page_size", Value: "0"},
{Scope: ScopeNone, Name: "table_open_cache_instances", Value: "1"},
{Scope: ScopeGlobal, Name: InnodbStatsPersistent, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal | ScopeSession, Name: "session_track_state_change", Value: ""},
{Scope: ScopeNone, Name: OptimizerSwitch, Value: "index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,engine_condition_pushdown=on,index_condition_pushdown=on,mrr=on,mrr_cost_based=on,block_nested_loop=on,batched_key_access=off,materialization=on,semijoin=on,loosescan=on,firstmatch=on,subquery_materialization_cost_based=on,use_index_extensions=on", IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "delayed_queue_size", Value: "1000"},
{Scope: ScopeNone, Name: "innodb_read_only", Value: "0"},
{Scope: ScopeNone, Name: "datetime_format", Value: "%Y-%m-%d %H:%i:%s"},
{Scope: ScopeGlobal, Name: "log_syslog", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "transaction_alloc_block_size", Value: "8192"},
{Scope: ScopeGlobal, Name: "innodb_large_prefix", Type: TypeBool, Value: BoolOff},
{Scope: ScopeNone, Name: "performance_schema_max_cond_classes", Value: "80"},
{Scope: ScopeGlobal, Name: "innodb_io_capacity", Value: "200"},
{Scope: ScopeGlobal, Name: "max_binlog_cache_size", Value: "18446744073709547520"},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_index_stat_enable", Value: ""},
{Scope: ScopeGlobal, Name: "executed_gtids_compression_period", Value: ""},
{Scope: ScopeNone, Name: "time_format", Value: "%H:%i:%s"},
{Scope: ScopeGlobal | ScopeSession, Name: OldAlterTable, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "long_query_time", Value: "10.000000"},
{Scope: ScopeNone, Name: "innodb_use_native_aio", Value: "0"},
{Scope: ScopeGlobal, Name: "log_throttle_queries_not_using_indexes", Value: "0"},
{Scope: ScopeNone, Name: "locked_in_memory", Value: "0"},
{Scope: ScopeNone, Name: "innodb_api_enable_mdl", Value: "0"},
{Scope: ScopeGlobal, Name: "binlog_cache_size", Value: "32768"},
{Scope: ScopeGlobal, Name: "innodb_compression_pad_pct_max", Value: "50"},
{Scope: ScopeGlobal, Name: InnodbCommitConcurrency, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 1000, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "ft_min_word_len", Value: "4"},
{Scope: ScopeGlobal, Name: EnforceGtidConsistency, Value: BoolOff, Type: TypeEnum, PossibleValues: []string{BoolOff, BoolOn, "WARN"}},
{Scope: ScopeGlobal, Name: SecureAuth, Value: BoolOn, Type: TypeBool, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if TiDBOptOn(normalizedValue) {
return BoolOn, nil
}
return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs(SecureAuth, originalValue)
}},
{Scope: ScopeNone, Name: "max_tmp_tables", Value: "32"},
{Scope: ScopeGlobal, Name: InnodbRandomReadAhead, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal | ScopeSession, Name: UniqueChecks, Value: BoolOn, Type: TypeBool, IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "internal_tmp_disk_storage_engine", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "myisam_repair_threads", Value: "1"},
{Scope: ScopeGlobal, Name: "ndb_eventbuffer_max_alloc", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_read_ahead_threshold", Value: "56"},
{Scope: ScopeGlobal, Name: "key_cache_block_size", Value: "1024"},
{Scope: ScopeNone, Name: "ndb_recv_thread_cpu_mask", Value: ""},
{Scope: ScopeGlobal, Name: "gtid_purged", Value: ""},
{Scope: ScopeGlobal, Name: "max_binlog_stmt_cache_size", Value: "18446744073709547520"},
{Scope: ScopeGlobal | ScopeSession, Name: "lock_wait_timeout", Value: "31536000"},
{Scope: ScopeGlobal | ScopeSession, Name: "read_buffer_size", Value: "131072", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "innodb_read_io_threads", Value: "4"},
{Scope: ScopeGlobal | ScopeSession, Name: MaxSpRecursionDepth, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 255, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "ignore_builtin_innodb", Value: "0"},
{Scope: ScopeGlobal, Name: "slow_query_log_file", Value: "/usr/local/mysql/data/localhost-slow.log"},
{Scope: ScopeGlobal, Name: "innodb_thread_sleep_delay", Value: "10000"},
{Scope: ScopeNone, Name: "license", Value: "Apache License 2.0"},
{Scope: ScopeGlobal, Name: "innodb_ft_aux_table", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: SQLWarnings, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: KeepFilesOnCreate, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "innodb_data_file_path", Value: "ibdata1:12M:autoextend"},
{Scope: ScopeNone, Name: "performance_schema_setup_actors_size", Value: "100"},
{Scope: ScopeNone, Name: "innodb_additional_mem_pool_size", Value: "8388608"},
{Scope: ScopeNone, Name: "log_error", Value: "/usr/local/mysql/data/localhost.err"},
{Scope: ScopeGlobal, Name: "binlog_stmt_cache_size", Value: "32768"},
{Scope: ScopeNone, Name: "relay_log_info_file", Value: "relay-log.info"},
{Scope: ScopeNone, Name: "innodb_ft_total_cache_size", Value: "640000000"},
{Scope: ScopeNone, Name: "performance_schema_max_rwlock_instances", Value: "9102"},
{Scope: ScopeGlobal, Name: "table_open_cache", Value: "2000"},
{Scope: ScopeNone, Name: "performance_schema_events_stages_history_long_size", Value: "10000"},
{Scope: ScopeSession, Name: "insert_id", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "default_tmp_storage_engine", Value: "InnoDB", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_search_depth", Value: "62", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: "max_points_in_geometry", Value: "65536", IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "innodb_stats_sample_pages", Value: "8"},
{Scope: ScopeGlobal | ScopeSession, Name: "profiling_history_size", Value: "15"},
{Scope: ScopeNone, Name: "have_symlink", Value: "YES"},
{Scope: ScopeGlobal | ScopeSession, Name: "storage_engine", Value: "InnoDB"},
{Scope: ScopeGlobal | ScopeSession, Name: "sql_log_off", Value: "0"},
// In MySQL, the default value of `explicit_defaults_for_timestamp` is `0`.
// But In TiDB, it's set to `1` to be consistent with TiDB timestamp behavior.
// See: https://github.com/pingcap/tidb/pull/6068 for details
{Scope: ScopeNone, Name: "explicit_defaults_for_timestamp", Value: BoolOn, Type: TypeBool},
{Scope: ScopeNone, Name: "performance_schema_events_waits_history_size", Value: "10"},
{Scope: ScopeGlobal, Name: "log_syslog_tag", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_undo_log_truncate", Value: ""},
{Scope: ScopeSession, Name: "innodb_create_intrinsic", Value: ""},
{Scope: ScopeGlobal, Name: "gtid_executed_compression_period", Value: ""},
{Scope: ScopeGlobal, Name: "ndb_log_empty_epochs", Value: ""},
{Scope: ScopeNone, Name: "have_geometry", Value: "YES"},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace_max_mem_size", Value: "16384"},
{Scope: ScopeGlobal | ScopeSession, Name: "net_retry_count", Value: "10"},
{Scope: ScopeSession, Name: "ndb_table_no_logging", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace_features", Value: "greedy_search=on,range_optimizer=on,dynamic_range=on,repeated_subselect=on"},
{Scope: ScopeGlobal, Name: "innodb_flush_log_at_trx_commit", Value: "1"},
{Scope: ScopeGlobal, Name: "rewriter_enabled", Value: ""},
{Scope: ScopeGlobal, Name: "query_cache_min_res_unit", Value: "4096"},
{Scope: ScopeGlobal | ScopeSession, Name: "updatable_views_with_limit", Value: "YES", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_prune_level", Value: "1", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: "completion_type", Value: "NO_CHAIN"},
{Scope: ScopeGlobal, Name: "binlog_checksum", Value: "CRC32"},
{Scope: ScopeNone, Name: "report_port", Value: "3306"},
{Scope: ScopeGlobal | ScopeSession, Name: ShowOldTemporals, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "query_cache_limit", Value: "1048576"},
{Scope: ScopeGlobal, Name: "innodb_buffer_pool_size", Value: "134217728"},
{Scope: ScopeGlobal, Name: InnodbAdaptiveFlushing, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: "innodb_monitor_enable", Value: ""},
{Scope: ScopeNone, Name: "date_format", Value: "%Y-%m-%d"},
{Scope: ScopeGlobal, Name: "innodb_buffer_pool_filename", Value: "ib_buffer_pool"},
{Scope: ScopeGlobal, Name: "slow_launch_time", Value: "2"},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_use_transactions", Value: ""},
{Scope: ScopeNone, Name: "innodb_purge_threads", Value: "1"},
{Scope: ScopeGlobal, Name: "innodb_concurrency_tickets", Value: "5000"},
{Scope: ScopeGlobal, Name: "innodb_monitor_reset_all", Value: ""},
{Scope: ScopeNone, Name: "performance_schema_users_size", Value: "100"},
{Scope: ScopeGlobal, Name: "ndb_log_updated_only", Value: ""},
{Scope: ScopeNone, Name: "basedir", Value: "/usr/local/mysql"},
{Scope: ScopeGlobal, Name: "innodb_old_blocks_time", Value: "1000"},
{Scope: ScopeGlobal, Name: "innodb_stats_method", Value: "nulls_equal"},
{Scope: ScopeGlobal, Name: LocalInFile, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "myisam_stats_method", Value: "nulls_unequal"},
{Scope: ScopeNone, Name: "version_compile_os", Value: "osx10.8"},
{Scope: ScopeNone, Name: "relay_log_recovery", Value: "0"},
{Scope: ScopeNone, Name: "old", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: InnodbTableLocks, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeNone, Name: PerformanceSchema, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "myisam_recover_options", Value: BoolOff},
{Scope: ScopeGlobal | ScopeSession, Name: NetBufferLength, Value: "16384"},
{Scope: ScopeGlobal | ScopeSession, Name: "binlog_row_image", Value: "FULL"},
{Scope: ScopeNone, Name: "innodb_locks_unsafe_for_binlog", Value: "0"},
{Scope: ScopeSession, Name: "rbr_exec_mode", Value: ""},
{Scope: ScopeGlobal, Name: "myisam_max_sort_file_size", Value: "9223372036853727232"},
{Scope: ScopeNone, Name: "back_log", Value: "80"},
{Scope: ScopeSession, Name: "pseudo_thread_id", Value: ""},
{Scope: ScopeNone, Name: "have_dynamic_loading", Value: "YES"},
{Scope: ScopeGlobal, Name: "rewriter_verbose", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_undo_logs", Value: "128"},
{Scope: ScopeNone, Name: "performance_schema_max_cond_instances", Value: "3504"},
{Scope: ScopeGlobal, Name: "delayed_insert_limit", Value: "100"},
{Scope: ScopeGlobal, Name: Flush, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "eq_range_index_dive_limit", Value: "200", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "performance_schema_events_stages_history_size", Value: "10"},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_join_pushdown", Value: ""},
{Scope: ScopeGlobal, Name: "validate_password_special_char_count", Value: "1"},
{Scope: ScopeNone, Name: "performance_schema_max_thread_instances", Value: "402"},
{Scope: ScopeGlobal | ScopeSession, Name: "ndbinfo_show_hidden", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "net_read_timeout", Value: "30"},
{Scope: ScopeNone, Name: "innodb_page_size", Value: "16384"},
{Scope: ScopeNone, Name: "innodb_log_file_size", Value: "50331648"},
{Scope: ScopeGlobal, Name: "sync_relay_log_info", Value: "10000"},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace_limit", Value: "1"},
{Scope: ScopeNone, Name: "innodb_ft_max_token_size", Value: "84"},
{Scope: ScopeGlobal, Name: ValidatePasswordLength, Value: "8", Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal, Name: "ndb_log_binlog_index", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_api_bk_commit_interval", Value: "5"},
{Scope: ScopeNone, Name: "innodb_undo_directory", Value: "."},
{Scope: ScopeNone, Name: "bind_address", Value: "*"},
{Scope: ScopeGlobal, Name: "innodb_sync_spin_loops", Value: "30"},
{Scope: ScopeGlobal | ScopeSession, Name: SQLSafeUpdates, Value: BoolOff, Type: TypeBool, IsHintUpdatable: true},
{Scope: ScopeNone, Name: "tmpdir", Value: "/var/tmp/"},
{Scope: ScopeGlobal, Name: "innodb_thread_concurrency", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_buffer_pool_dump_pct", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "lc_time_names", Value: "en_US"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_statement_time", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: EndMarkersInJSON, Value: BoolOff, Type: TypeBool, IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: AvoidTemporalUpgrade, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "key_cache_age_threshold", Value: "300"},
{Scope: ScopeGlobal, Name: InnodbStatusOutput, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeSession, Name: "identity", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "min_examined_row_limit", Value: "0"},
{Scope: ScopeGlobal, Name: "sync_frm", Type: TypeBool, Value: BoolOn},
{Scope: ScopeGlobal, Name: "innodb_online_alter_log_max_size", Value: "134217728"},
{Scope: ScopeGlobal | ScopeSession, Name: "information_schema_stats_expiry", Value: "86400"},
{Scope: ScopeGlobal, Name: ThreadPoolSize, Value: "16", Type: TypeUnsigned, MinValue: 1, MaxValue: 64, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "lower_case_file_system", Value: "1"},
// for compatibility purposes, we should leave them alone.
// TODO: Follow the Terminology Updates of MySQL once their changes have arrived.
// https://mysqlhighavailability.com/mysql-terminology-updates/
{Scope: ScopeSession, Name: PseudoSlaveMode, Value: "", Type: TypeInt},
{Scope: ScopeGlobal, Name: "slave_pending_jobs_size_max", Value: "16777216"},
{Scope: ScopeGlobal, Name: "slave_transaction_retries", Value: "10"},
{Scope: ScopeGlobal, Name: "slave_checkpoint_period", Value: "300"},
{Scope: ScopeGlobal, Name: MasterVerifyChecksum, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_trace_level", Value: ""},
{Scope: ScopeGlobal, Name: "master_info_repository", Value: "FILE"},
{Scope: ScopeGlobal, Name: "rpl_stop_slave_timeout", Value: "31536000"},
{Scope: ScopeGlobal, Name: "slave_net_timeout", Value: "3600"},
{Scope: ScopeGlobal, Name: "sync_master_info", Value: "10000"},
{Scope: ScopeGlobal, Name: "init_slave", Value: ""},
{Scope: ScopeGlobal, Name: SlaveCompressedProtocol, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_slave_trace_level", Value: ""},
{Scope: ScopeGlobal, Name: LogSlowSlaveStatements, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "slave_checkpoint_group", Value: "512"},
{Scope: ScopeNone, Name: "slave_load_tmpdir", Value: "/var/tmp/"},
{Scope: ScopeGlobal, Name: "slave_parallel_type", Value: ""},
{Scope: ScopeGlobal, Name: "slave_parallel_workers", Value: "0"},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_timeout", Value: "10000", Type: TypeInt},
{Scope: ScopeNone, Name: "slave_skip_errors", Value: BoolOff},
{Scope: ScopeGlobal, Name: "sql_slave_skip_counter", Value: "0"},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_slave_enabled", Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_enabled", Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "slave_preserve_commit_order", Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "slave_exec_mode", Value: "STRICT"},
{Scope: ScopeNone, Name: "log_slave_updates", Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_wait_point", Value: "AFTER_SYNC", Type: TypeEnum, PossibleValues: []string{"AFTER_SYNC", "AFTER_COMMIT"}},
{Scope: ScopeGlobal, Name: "slave_sql_verify_checksum", Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal, Name: "slave_max_allowed_packet", Value: "1073741824"},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_wait_for_slave_count", Value: "1", Type: TypeInt, MinValue: 1, MaxValue: 65535},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_wait_no_slave", Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal, Name: "slave_rows_search_algorithms", Value: "TABLE_SCAN,INDEX_SCAN"},
{Scope: ScopeGlobal, Name: SlaveAllowBatching, Value: BoolOff, Type: TypeBool},
}
| sessionctx/variable/noop.go | 1 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.9415306448936462,
0.021442703902721405,
0.00016812706599012017,
0.0008487201994284987,
0.1303180605173111
] |
{
"id": 0,
"code_window": [
"\t{Scope: ScopeGlobal, Name: \"innodb_max_dirty_pages_pct\", Value: \"75\"},\n",
"\t{Scope: ScopeGlobal, Name: InnodbFilePerTable, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},\n",
"\t{Scope: ScopeGlobal, Name: InnodbLogCompressedPages, Value: \"1\"},\n",
"\t{Scope: ScopeNone, Name: \"skip_networking\", Value: \"0\"},\n",
"\t{Scope: ScopeGlobal, Name: \"innodb_monitor_reset\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"have_ssl\", Value: \"DISABLED\"},\n",
"\t{Scope: ScopeNone, Name: \"have_openssl\", Value: \"DISABLED\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_ca\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_cert\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_key\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_cipher\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"tls_version\", Value: \"TLSv1,TLSv1.1,TLSv1.2\"},\n",
"\t{Scope: ScopeGlobal, Name: InnodbPrintAllDeadlocks, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/noop.go",
"type": "replace",
"edit_start_line_idx": 114
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package unionstore
import (
"bytes"
tidbkv "github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/tikv/kv"
)
// MemdbIterator is an Iterator with KeyFlags related functions.
type MemdbIterator struct {
db *MemDB
curr memdbNodeAddr
start tidbkv.Key
end tidbkv.Key
reverse bool
includeFlags bool
}
// Iter creates an Iterator positioned on the first entry whose key is >= k.
// If no such entry is found, it returns an invalid Iterator with no error.
// It yields only keys that are less than upperBound; a nil upperBound means the iteration is unbounded above.
// The Iterator must be Closed after use.
func (db *MemDB) Iter(k tidbkv.Key, upperBound tidbkv.Key) (tidbkv.Iterator, error) {
i := &MemdbIterator{
db: db,
start: k,
end: upperBound,
}
i.init()
return i, nil
}
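// A minimal usage sketch (hypothetical keys; process is a placeholder, not a
// real helper). Next never fails for a MemdbIterator, so its error can be
// discarded:
//
//	it, _ := db.Iter(tidbkv.Key("a"), tidbkv.Key("z"))
//	for it.Valid() {
//		process(it.Key(), it.Value())
//		_ = it.Next()
//	}
//	it.Close()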
// IterReverse creates a reversed Iterator positioned on the first entry whose key is less than k.
// The returned iterator will iterate from greater keys to smaller keys.
// If k is nil, the returned iterator will be positioned at the last key.
// TODO: Add lower bound limit
func (db *MemDB) IterReverse(k tidbkv.Key) (tidbkv.Iterator, error) {
i := &MemdbIterator{
db: db,
end: k,
reverse: true,
}
i.init()
return i, nil
}
// IterWithFlags returns a MemdbIterator.
func (db *MemDB) IterWithFlags(k tidbkv.Key, upperBound tidbkv.Key) *MemdbIterator {
i := &MemdbIterator{
db: db,
start: k,
end: upperBound,
includeFlags: true,
}
i.init()
return i
}
// IterReverseWithFlags returns a reversed MemdbIterator.
func (db *MemDB) IterReverseWithFlags(k tidbkv.Key) *MemdbIterator {
i := &MemdbIterator{
db: db,
end: k,
reverse: true,
includeFlags: true,
}
i.init()
return i
}
func (i *MemdbIterator) init() {
if i.reverse {
if len(i.end) == 0 {
i.seekToLast()
} else {
i.seek(i.end)
}
} else {
if len(i.start) == 0 {
i.seekToFirst()
} else {
i.seek(i.start)
}
}
if i.isFlagsOnly() && !i.includeFlags {
err := i.Next()
_ = err // memdbIterator will never fail
}
}
// Valid returns true if the current iterator is valid.
func (i *MemdbIterator) Valid() bool {
if !i.reverse {
return !i.curr.isNull() && (i.end == nil || bytes.Compare(i.Key(), i.end) < 0)
}
return !i.curr.isNull()
}
// Flags returns the flags that belong to the current entry.
func (i *MemdbIterator) Flags() kv.KeyFlags {
return i.curr.getKeyFlags()
}
// UpdateFlags applies the given flag operations to the current entry.
func (i *MemdbIterator) UpdateFlags(ops ...kv.FlagsOp) {
origin := i.curr.getKeyFlags()
n := kv.ApplyFlagsOps(origin, ops...)
i.curr.setKeyFlags(n)
}
// HasValue returns false if it is flags only.
func (i *MemdbIterator) HasValue() bool {
return !i.isFlagsOnly()
}
// Key returns current key.
func (i *MemdbIterator) Key() tidbkv.Key {
return i.curr.getKey()
}
// Handle returns MemKeyHandle with the current position.
func (i *MemdbIterator) Handle() MemKeyHandle {
return MemKeyHandle{
idx: uint16(i.curr.addr.idx),
off: i.curr.addr.off,
}
}
// Value returns the value.
func (i *MemdbIterator) Value() []byte {
return i.db.vlog.getValue(i.curr.vptr)
}
// Next moves to the next position.
func (i *MemdbIterator) Next() error {
for {
if i.reverse {
i.curr = i.db.predecessor(i.curr)
} else {
i.curr = i.db.successor(i.curr)
}
// We need to skip nodes that hold only persistent flags and no value.
if i.includeFlags || !i.isFlagsOnly() {
break
}
}
return nil
}
// Close closes the current iterator.
func (i *MemdbIterator) Close() {}
func (i *MemdbIterator) seekToFirst() {
y := memdbNodeAddr{nil, nullAddr}
x := i.db.getNode(i.db.root)
for !x.isNull() {
y = x
x = y.getLeft(i.db)
}
i.curr = y
}
func (i *MemdbIterator) seekToLast() {
y := memdbNodeAddr{nil, nullAddr}
x := i.db.getNode(i.db.root)
for !x.isNull() {
y = x
x = y.getRight(i.db)
}
i.curr = y
}
func (i *MemdbIterator) seek(key tidbkv.Key) {
y := memdbNodeAddr{nil, nullAddr}
x := i.db.getNode(i.db.root)
var cmp int
for !x.isNull() {
y = x
cmp = bytes.Compare(key, y.getKey())
if cmp < 0 {
x = y.getLeft(i.db)
} else if cmp > 0 {
x = y.getRight(i.db)
} else {
break
}
}
if !i.reverse {
if cmp > 0 {
// Move to next
i.curr = i.db.successor(y)
return
}
i.curr = y
return
}
if cmp <= 0 && !y.isNull() {
i.curr = i.db.predecessor(y)
return
}
i.curr = y
}
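// For illustration (a hypothetical tree holding keys b and d): seek("c") with
// reverse == false lands on d (the in-order successor), while with
// reverse == true it lands on b (the predecessor). An exact match is kept
// as-is for forward iteration but skipped for reverse iteration.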
func (i *MemdbIterator) isFlagsOnly() bool {
return !i.curr.isNull() && i.curr.vptr.isNull()
}
| store/tikv/unionstore/memdb_iterator.go | 0 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.00017933601338882,
0.00016870407853275537,
0.00015493981481995434,
0.00016945814422797412,
0.0000049621871767158154
] |
{
"id": 0,
"code_window": [
"\t{Scope: ScopeGlobal, Name: \"innodb_max_dirty_pages_pct\", Value: \"75\"},\n",
"\t{Scope: ScopeGlobal, Name: InnodbFilePerTable, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},\n",
"\t{Scope: ScopeGlobal, Name: InnodbLogCompressedPages, Value: \"1\"},\n",
"\t{Scope: ScopeNone, Name: \"skip_networking\", Value: \"0\"},\n",
"\t{Scope: ScopeGlobal, Name: \"innodb_monitor_reset\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"have_ssl\", Value: \"DISABLED\"},\n",
"\t{Scope: ScopeNone, Name: \"have_openssl\", Value: \"DISABLED\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_ca\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_cert\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_key\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_cipher\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"tls_version\", Value: \"TLSv1,TLSv1.1,TLSv1.2\"},\n",
"\t{Scope: ScopeGlobal, Name: InnodbPrintAllDeadlocks, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/noop.go",
"type": "replace",
"edit_start_line_idx": 114
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package expression
import (
. "github.com/pingcap/check"
"github.com/pingcap/parser/charset"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/collate"
"github.com/pingcap/tidb/util/mock"
)
var _ = SerialSuites(&testCollationSuites{})
type testCollationSuites struct{}
func (s *testCollationSuites) TestCompareString(c *C) {
collate.SetNewCollationEnabledForTest(true)
defer collate.SetNewCollationEnabledForTest(false)
c.Assert(types.CompareString("a", "A", "utf8_general_ci"), Equals, 0)
c.Assert(types.CompareString("À", "A", "utf8_general_ci"), Equals, 0)
c.Assert(types.CompareString("😜", "😃", "utf8_general_ci"), Equals, 0)
c.Assert(types.CompareString("a ", "a ", "utf8_general_ci"), Equals, 0)
c.Assert(types.CompareString("ß", "s", "utf8_general_ci"), Equals, 0)
c.Assert(types.CompareString("ß", "ss", "utf8_general_ci"), Not(Equals), 0)
c.Assert(types.CompareString("a", "A", "utf8_unicode_ci"), Equals, 0)
c.Assert(types.CompareString("À", "A", "utf8_unicode_ci"), Equals, 0)
c.Assert(types.CompareString("😜", "😃", "utf8_unicode_ci"), Equals, 0)
c.Assert(types.CompareString("a ", "a ", "utf8_unicode_ci"), Equals, 0)
c.Assert(types.CompareString("ß", "s", "utf8_unicode_ci"), Not(Equals), 0)
c.Assert(types.CompareString("ß", "ss", "utf8_unicode_ci"), Equals, 0)
c.Assert(types.CompareString("a", "A", "binary"), Not(Equals), 0)
c.Assert(types.CompareString("À", "A", "binary"), Not(Equals), 0)
c.Assert(types.CompareString("😜", "😃", "binary"), Not(Equals), 0)
c.Assert(types.CompareString("a ", "a ", "binary"), Not(Equals), 0)
ctx := mock.NewContext()
ft := types.NewFieldType(mysql.TypeVarString)
col1 := &Column{
RetType: ft,
Index: 0,
}
col2 := &Column{
RetType: ft,
Index: 1,
}
chk := chunk.NewChunkWithCapacity([]*types.FieldType{ft, ft}, 4)
chk.Column(0).AppendString("a")
chk.Column(1).AppendString("A")
chk.Column(0).AppendString("À")
chk.Column(1).AppendString("A")
chk.Column(0).AppendString("😜")
chk.Column(1).AppendString("😃")
chk.Column(0).AppendString("a ")
chk.Column(1).AppendString("a ")
for i := 0; i < 4; i++ {
v, isNull, err := CompareStringWithCollationInfo(ctx, col1, col2, chk.GetRow(i), chk.GetRow(i), "utf8_general_ci")
c.Assert(err, IsNil)
c.Assert(isNull, IsFalse)
c.Assert(v, Equals, int64(0))
}
}
func (s *testCollationSuites) TestDeriveCollationFromExprs(c *C) {
tInt := types.NewFieldType(mysql.TypeLonglong)
tInt.Charset = charset.CharsetBin
ctx := mock.NewContext()
// no string column
chs, coll := DeriveCollationFromExprs(ctx, newColumnWithType(0, tInt), newColumnWithType(0, tInt), newColumnWithType(0, tInt))
c.Assert(chs, Equals, charset.CharsetBin)
c.Assert(coll, Equals, charset.CollationBin)
}
| expression/collation_test.go | 0 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.00017933601338882,
0.00017455843044444919,
0.00017264117195736617,
0.00017410152941010892,
0.0000018944890598504571
] |
{
"id": 0,
"code_window": [
"\t{Scope: ScopeGlobal, Name: \"innodb_max_dirty_pages_pct\", Value: \"75\"},\n",
"\t{Scope: ScopeGlobal, Name: InnodbFilePerTable, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},\n",
"\t{Scope: ScopeGlobal, Name: InnodbLogCompressedPages, Value: \"1\"},\n",
"\t{Scope: ScopeNone, Name: \"skip_networking\", Value: \"0\"},\n",
"\t{Scope: ScopeGlobal, Name: \"innodb_monitor_reset\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"have_ssl\", Value: \"DISABLED\"},\n",
"\t{Scope: ScopeNone, Name: \"have_openssl\", Value: \"DISABLED\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_ca\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_cert\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_key\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_cipher\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"tls_version\", Value: \"TLSv1,TLSv1.1,TLSv1.2\"},\n",
"\t{Scope: ScopeGlobal, Name: InnodbPrintAllDeadlocks, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/noop.go",
"type": "replace",
"edit_start_line_idx": 114
} | // Copyright 2016 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSES/QL-LICENSE file.
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package table
import (
"fmt"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
"github.com/pingcap/parser"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/charset"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
field_types "github.com/pingcap/parser/types"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/types/json"
"github.com/pingcap/tidb/util/hack"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/timeutil"
"go.uber.org/zap"
)
// Column provides meta data describing a table column.
type Column struct {
*model.ColumnInfo
// If this column is a generated column, the expression will be stored here.
GeneratedExpr ast.ExprNode
// If this column has default expr value, this expression will be stored here.
DefaultExpr ast.ExprNode
}
// String implements fmt.Stringer interface.
func (c *Column) String() string {
ans := []string{c.Name.O, types.TypeToStr(c.Tp, c.Charset)}
if mysql.HasAutoIncrementFlag(c.Flag) {
ans = append(ans, "AUTO_INCREMENT")
}
if mysql.HasNotNullFlag(c.Flag) {
ans = append(ans, "NOT NULL")
}
return strings.Join(ans, " ")
}
// ToInfo casts Column to model.ColumnInfo
// NOTE: DONT modify return value.
func (c *Column) ToInfo() *model.ColumnInfo {
return c.ColumnInfo
}
// FindCol finds column in cols by name.
func FindCol(cols []*Column, name string) *Column {
for _, col := range cols {
if strings.EqualFold(col.Name.O, name) {
return col
}
}
return nil
}
// ToColumn converts a *model.ColumnInfo to *Column.
func ToColumn(col *model.ColumnInfo) *Column {
return &Column{
col,
nil,
nil,
}
}
// FindCols finds columns in cols by names.
// If pkIsHandle is false and name is ExtraHandleName, the extra handle column will be added.
// If any columns don't match, return nil and the first missing column's name
func FindCols(cols []*Column, names []string, pkIsHandle bool) ([]*Column, string) {
var rcols []*Column
for _, name := range names {
col := FindCol(cols, name)
if col != nil {
rcols = append(rcols, col)
} else if name == model.ExtraHandleName.L && !pkIsHandle {
col := &Column{}
col.ColumnInfo = model.NewExtraHandleColInfo()
col.ColumnInfo.Offset = len(cols)
rcols = append(rcols, col)
} else {
return nil, name
}
}
return rcols, ""
}
// FindOnUpdateCols finds columns which have OnUpdateNow flag.
func FindOnUpdateCols(cols []*Column) []*Column {
var rcols []*Column
for _, col := range cols {
if mysql.HasOnUpdateNowFlag(col.Flag) {
rcols = append(rcols, col)
}
}
return rcols
}
// truncateTrailingSpaces truncates trailing spaces for a CHAR[(M)] column.
// fix: https://github.com/pingcap/tidb/issues/3660
func truncateTrailingSpaces(v *types.Datum) {
if v.Kind() == types.KindNull {
return
}
b := v.GetBytes()
length := len(b)
for length > 0 && b[length-1] == ' ' {
length--
}
b = b[:length]
str := string(hack.String(b))
v.SetString(str, v.Collation())
}
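// For example (an illustrative sketch, not an exhaustive spec): a Datum
// holding "abc  " for a CHAR(5) column becomes "abc", matching MySQL's rule
// that CHAR values are read back without their trailing pad spaces.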
func handleWrongASCIIValue(ctx sessionctx.Context, col *model.ColumnInfo, casted *types.Datum, str string, i int) (types.Datum, error) {
sc := ctx.GetSessionVars().StmtCtx
err := ErrTruncatedWrongValueForField.FastGen("incorrect ascii value %x(%s) for column %s", casted.GetBytes(), str, col.Name)
logutil.BgLogger().Error("incorrect ASCII value", zap.Uint64("conn", ctx.GetSessionVars().ConnectionID), zap.Error(err))
truncateVal := types.NewStringDatum(str[:i])
err = sc.HandleTruncate(err)
return truncateVal, err
}
func handleWrongUtf8Value(ctx sessionctx.Context, col *model.ColumnInfo, casted *types.Datum, str string, i int) (types.Datum, error) {
sc := ctx.GetSessionVars().StmtCtx
err := ErrTruncatedWrongValueForField.FastGen("incorrect utf8 value %x(%s) for column %s", casted.GetBytes(), str, col.Name)
logutil.BgLogger().Error("incorrect UTF-8 value", zap.Uint64("conn", ctx.GetSessionVars().ConnectionID), zap.Error(err))
// Truncate to valid utf8 string.
truncateVal := types.NewStringDatum(str[:i])
err = sc.HandleTruncate(err)
return truncateVal, err
}
func handleZeroDatetime(ctx sessionctx.Context, col *model.ColumnInfo, casted types.Datum, str string, tmIsInvalid bool) (types.Datum, bool, error) {
sc := ctx.GetSessionVars().StmtCtx
tm := casted.GetMysqlTime()
mode := ctx.GetSessionVars().SQLMode
var (
zeroV types.Time
zeroT string
)
switch col.Tp {
case mysql.TypeDate:
zeroV, zeroT = types.ZeroDate, types.DateStr
case mysql.TypeDatetime:
zeroV, zeroT = types.ZeroDatetime, types.DateTimeStr
case mysql.TypeTimestamp:
zeroV, zeroT = types.ZeroTimestamp, types.TimestampStr
}
// ref https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html#sqlmode_no_zero_date
// if NO_ZERO_DATE is not enabled, '0000-00-00' is permitted and inserts produce no warning
// if NO_ZERO_DATE is enabled, '0000-00-00' is permitted and inserts produce a warning
// If NO_ZERO_DATE mode and strict mode are enabled, '0000-00-00' is not permitted and inserts produce an error, unless IGNORE is given as well. For INSERT IGNORE and UPDATE IGNORE, '0000-00-00' is permitted and inserts produce a warning.
// if NO_ZERO_IN_DATE is not enabled, dates with zero parts are permitted and inserts produce no warning
// if NO_ZERO_IN_DATE is enabled, dates with zero parts are inserted as '0000-00-00' and produce a warning
// If NO_ZERO_IN_DATE mode and strict mode are enabled, dates with zero parts are not permitted and inserts produce an error, unless IGNORE is given as well. For INSERT IGNORE and UPDATE IGNORE, dates with zero parts are inserted as '0000-00-00' and produce a warning.
ignoreErr := sc.DupKeyAsWarning
// in MySQL 8.0, the Timestamp case is different from Datetime/Date, as shown below:
//
// | | NZD | NZD|ST | ELSE | ELSE|ST |
// | ------------ | ----------------- | ------- | ----------------- | -------- |
// | `0000-00-01` | Success + Warning | Error | Success + Warning | Error |
// | `0000-00-00` | Success + Warning | Error | Success | Success |
//
// * **NZD**: NO_ZERO_DATE_MODE
// * **ST**: STRICT_TRANS_TABLES
// * **ELSE**: empty or NO_ZERO_IN_DATE_MODE
if tm.IsZero() && col.Tp == mysql.TypeTimestamp {
innerErr := types.ErrWrongValue.GenWithStackByArgs(zeroT, str)
if mode.HasStrictMode() && !ignoreErr && (tmIsInvalid || mode.HasNoZeroDateMode()) {
return types.NewDatum(zeroV), true, innerErr
}
if tmIsInvalid || mode.HasNoZeroDateMode() {
sc.AppendWarning(innerErr)
}
return types.NewDatum(zeroV), true, nil
} else if tm.IsZero() || tm.InvalidZero() {
if tm.IsZero() {
if !mode.HasNoZeroDateMode() {
return types.NewDatum(zeroV), true, nil
}
} else if tm.InvalidZero() {
if !mode.HasNoZeroInDateMode() {
return casted, true, nil
}
}
innerErr := types.ErrWrongValue.GenWithStackByArgs(zeroT, str)
if mode.HasStrictMode() && !ignoreErr {
return types.NewDatum(zeroV), true, innerErr
}
// TODO: as in MySQL 8.0's implementation, the warning message should be `types.ErrWarnDataOutOfRange`,
// but that error message needs a `rowIdx` argument, which is missing in this context.
// Refactoring this function seems too complicated, so we set the warning message to be the same as the error's.
sc.AppendWarning(innerErr)
return types.NewDatum(zeroV), true, nil
}
return casted, false, nil
}
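// A hedged illustration of the table above, with t as a hypothetical table
// that has a single TIMESTAMP column: under
// sql_mode = 'STRICT_TRANS_TABLES,NO_ZERO_DATE',
//
//	INSERT INTO t VALUES ('0000-00-00');  -- rejected with ErrWrongValue
//
// whereas with NO_ZERO_DATE alone the same insert succeeds and only appends
// a warning.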
// CastValue casts a value based on column type.
// If forceIgnoreTruncate is true, truncated errors will be ignored.
// If returnErr is true, the error produced during conversion is returned directly without being handled here.
// It's safe now and matches the behavior of the SELECT statement.
// Set forceIgnoreTruncate to true only in FillVirtualColumnValue and UnionScanExec.Next().
// If the handling of err is changed later, the behavior of forceIgnoreTruncate also needs to change.
// TODO: change the third arg to TypeField; do not pass ColumnInfo.
func CastValue(ctx sessionctx.Context, val types.Datum, col *model.ColumnInfo, returnErr, forceIgnoreTruncate bool) (casted types.Datum, err error) {
sc := ctx.GetSessionVars().StmtCtx
casted, err = val.ConvertTo(sc, &col.FieldType)
// TODO: make sure all truncate errors are handled by ConvertTo.
if returnErr && err != nil {
return casted, err
}
if err != nil && types.ErrTruncated.Equal(err) && col.Tp != mysql.TypeSet && col.Tp != mysql.TypeEnum {
str, err1 := val.ToString()
if err1 != nil {
logutil.BgLogger().Warn("Datum ToString failed", zap.Stringer("Datum", val), zap.Error(err1))
}
err = types.ErrTruncatedWrongVal.GenWithStackByArgs(col.FieldType.CompactStr(), str)
} else if (sc.InInsertStmt || sc.InUpdateStmt) && !casted.IsNull() &&
(val.Kind() != types.KindMysqlTime || !val.GetMysqlTime().IsZero()) &&
(col.Tp == mysql.TypeDate || col.Tp == mysql.TypeDatetime || col.Tp == mysql.TypeTimestamp) {
if innCasted, exit, innErr := handleZeroDatetime(ctx, col, casted, val.GetString(), types.ErrWrongValue.Equal(err)); exit {
return innCasted, innErr
}
}
err = sc.HandleTruncate(err)
if forceIgnoreTruncate {
err = nil
} else if err != nil {
return casted, err
}
if col.Tp == mysql.TypeString && !types.IsBinaryStr(&col.FieldType) {
truncateTrailingSpaces(&casted)
}
if col.Charset == charset.CharsetASCII {
if ctx.GetSessionVars().SkipASCIICheck {
return casted, nil
}
str := casted.GetString()
for i := 0; i < len(str); i++ {
if str[i] > unicode.MaxASCII {
casted, err = handleWrongASCIIValue(ctx, col, &casted, str, i)
break
}
}
if forceIgnoreTruncate {
err = nil
}
return casted, err
}
if ctx.GetSessionVars().SkipUTF8Check {
return casted, nil
}
if !mysql.IsUTF8Charset(col.Charset) {
return casted, nil
}
str := casted.GetString()
utf8Charset := col.Charset == mysql.UTF8Charset
doMB4CharCheck := utf8Charset && config.GetGlobalConfig().CheckMb4ValueInUTF8
for i, w := 0, 0; i < len(str); i += w {
runeValue, width := utf8.DecodeRuneInString(str[i:])
if runeValue == utf8.RuneError {
if strings.HasPrefix(str[i:], string(utf8.RuneError)) {
w = width
continue
}
casted, err = handleWrongUtf8Value(ctx, col, &casted, str, i)
break
} else if width > 3 && doMB4CharCheck {
// Handle non-BMP characters.
casted, err = handleWrongUtf8Value(ctx, col, &casted, str, i)
break
}
w = width
}
if forceIgnoreTruncate {
err = nil
}
return casted, err
}
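// A sketch of the charset checks above (illustrative values): for an ascii
// column, "héllo" is cut at the first non-ASCII byte and becomes "h", with
// the truncation surfaced as an error or a warning depending on strict mode;
// for a utf8 (not utf8mb4) column with CheckMb4ValueInUTF8 enabled, a 4-byte
// character such as an emoji is handled the same way.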
// ColDesc describes column information like MySQL desc and show columns do.
type ColDesc struct {
Field string
Type string
// Charset is nil if the column doesn't have a charset, or a string indicating the charset name.
Charset interface{}
// Collation is nil if the column doesn't have a collation, or a string indicating the collation name.
Collation interface{}
Null string
Key string
DefaultValue interface{}
Extra string
Privileges string
Comment string
}
const defaultPrivileges = "select,insert,update,references"
// NewColDesc returns a new ColDesc for a column.
func NewColDesc(col *Column) *ColDesc {
// TODO: if we have no primary key and a unique index whose columns are all not null,
// we will set these columns' flag to PriKeyFlag
// see https://dev.mysql.com/doc/refman/5.7/en/show-columns.html
// create table
name := col.Name
nullFlag := "YES"
if mysql.HasNotNullFlag(col.Flag) {
nullFlag = "NO"
}
keyFlag := ""
if mysql.HasPriKeyFlag(col.Flag) {
keyFlag = "PRI"
} else if mysql.HasUniKeyFlag(col.Flag) {
keyFlag = "UNI"
} else if mysql.HasMultipleKeyFlag(col.Flag) {
keyFlag = "MUL"
}
var defaultValue interface{}
if !mysql.HasNoDefaultValueFlag(col.Flag) {
defaultValue = col.GetDefaultValue()
if defaultValStr, ok := defaultValue.(string); ok {
if (col.Tp == mysql.TypeTimestamp || col.Tp == mysql.TypeDatetime) &&
strings.EqualFold(defaultValStr, ast.CurrentTimestamp) &&
col.Decimal > 0 {
defaultValue = fmt.Sprintf("%s(%d)", defaultValStr, col.Decimal)
}
}
}
extra := ""
if mysql.HasAutoIncrementFlag(col.Flag) {
extra = "auto_increment"
} else if mysql.HasOnUpdateNowFlag(col.Flag) {
// in order to match the rules of mysql 8.0.16 version
// see https://github.com/pingcap/tidb/issues/10337
extra = "DEFAULT_GENERATED on update CURRENT_TIMESTAMP" + OptionalFsp(&col.FieldType)
} else if col.IsGenerated() {
if col.GeneratedStored {
extra = "STORED GENERATED"
} else {
extra = "VIRTUAL GENERATED"
}
}
desc := &ColDesc{
Field: name.O,
Type: col.GetTypeDesc(),
Charset: col.Charset,
Collation: col.Collate,
Null: nullFlag,
Key: keyFlag,
DefaultValue: defaultValue,
Extra: extra,
Privileges: defaultPrivileges,
Comment: col.Comment,
}
if !field_types.HasCharset(&col.ColumnInfo.FieldType) {
desc.Charset = nil
desc.Collation = nil
}
return desc
}
// ColDescFieldNames returns the fields name in result set for desc and show columns.
func ColDescFieldNames(full bool) []string {
if full {
return []string{"Field", "Type", "Collation", "Null", "Key", "Default", "Extra", "Privileges", "Comment"}
}
return []string{"Field", "Type", "Null", "Key", "Default", "Extra"}
}
// CheckOnce checks if there are duplicated column names in cols.
func CheckOnce(cols []*Column) error {
m := map[string]struct{}{}
for _, col := range cols {
name := col.Name
_, ok := m[name.L]
if ok {
return errDuplicateColumn.GenWithStackByArgs(name)
}
m[name.L] = struct{}{}
}
return nil
}
// CheckNotNull checks whether a nil value is being set to a column that has the NotNull flag set.
func (c *Column) CheckNotNull(data *types.Datum) error {
if (mysql.HasNotNullFlag(c.Flag) || mysql.HasPreventNullInsertFlag(c.Flag)) && data.IsNull() {
return ErrColumnCantNull.GenWithStackByArgs(c.Name)
}
return nil
}
// HandleBadNull handles the bad null error.
// If BadNullAsWarning is true, it will append the error as a warning, else return the error.
func (c *Column) HandleBadNull(d *types.Datum, sc *stmtctx.StatementContext) error {
if err := c.CheckNotNull(d); err != nil {
if sc.BadNullAsWarning {
sc.AppendWarning(err)
*d = GetZeroValue(c.ToInfo())
return nil
}
return err
}
return nil
}
// IsPKHandleColumn checks if the column is primary key handle column.
func (c *Column) IsPKHandleColumn(tbInfo *model.TableInfo) bool {
return mysql.HasPriKeyFlag(c.Flag) && tbInfo.PKIsHandle
}
// IsCommonHandleColumn checks if the column is common handle column.
func (c *Column) IsCommonHandleColumn(tbInfo *model.TableInfo) bool {
return mysql.HasPriKeyFlag(c.Flag) && tbInfo.IsCommonHandle
}
// CheckNotNull checks whether the row sets a nil value for any column that has the NotNull flag set.
func CheckNotNull(cols []*Column, row []types.Datum) error {
for _, c := range cols {
if err := c.CheckNotNull(&row[c.Offset]); err != nil {
return err
}
}
return nil
}
// GetColOriginDefaultValue gets default value of the column from original default value.
func GetColOriginDefaultValue(ctx sessionctx.Context, col *model.ColumnInfo) (types.Datum, error) {
return getColDefaultValue(ctx, col, col.GetOriginDefaultValue())
}
// GetColDefaultValue gets default value of the column.
func GetColDefaultValue(ctx sessionctx.Context, col *model.ColumnInfo) (types.Datum, error) {
defaultValue := col.GetDefaultValue()
if !col.DefaultIsExpr {
return getColDefaultValue(ctx, col, defaultValue)
}
return getColDefaultExprValue(ctx, col, defaultValue.(string))
}
// EvalColDefaultExpr eval default expr node to explicit default value.
func EvalColDefaultExpr(ctx sessionctx.Context, col *model.ColumnInfo, defaultExpr ast.ExprNode) (types.Datum, error) {
d, err := expression.EvalAstExpr(ctx, defaultExpr)
if err != nil {
return types.Datum{}, err
}
// Check the evaluated data type by cast.
value, err := CastValue(ctx, d, col, false, false)
if err != nil {
return types.Datum{}, err
}
return value, nil
}
func getColDefaultExprValue(ctx sessionctx.Context, col *model.ColumnInfo, defaultValue string) (types.Datum, error) {
var defaultExpr ast.ExprNode
expr := fmt.Sprintf("select %s", defaultValue)
stmts, _, err := parser.New().Parse(expr, "", "")
if err == nil {
defaultExpr = stmts[0].(*ast.SelectStmt).Fields.Fields[0].Expr
}
d, err := expression.EvalAstExpr(ctx, defaultExpr)
if err != nil {
return types.Datum{}, err
}
// Check the evaluated data type by cast.
value, err := CastValue(ctx, d, col, false, false)
if err != nil {
return types.Datum{}, err
}
return value, nil
}
func getColDefaultValue(ctx sessionctx.Context, col *model.ColumnInfo, defaultVal interface{}) (types.Datum, error) {
if defaultVal == nil {
return getColDefaultValueFromNil(ctx, col)
}
if col.Tp != mysql.TypeTimestamp && col.Tp != mysql.TypeDatetime {
value, err := CastValue(ctx, types.NewDatum(defaultVal), col, false, false)
if err != nil {
return types.Datum{}, err
}
return value, nil
}
// Check and get timestamp/datetime default value.
sc := ctx.GetSessionVars().StmtCtx
var needChangeTimeZone bool
// If the column's default value is neither ZeroDatetimeStr nor CurrentTimestamp, we should use the time zone of the default value itself.
if col.Tp == mysql.TypeTimestamp {
if vv, ok := defaultVal.(string); ok && vv != types.ZeroDatetimeStr && !strings.EqualFold(vv, ast.CurrentTimestamp) {
needChangeTimeZone = true
originalTZ := sc.TimeZone
// For col.Version = 0, the timezone information of default value is already lost, so use the system timezone as the default value timezone.
sc.TimeZone = timeutil.SystemLocation()
if col.Version >= model.ColumnInfoVersion1 {
sc.TimeZone = time.UTC
}
defer func() { sc.TimeZone = originalTZ }()
}
}
value, err := expression.GetTimeValue(ctx, defaultVal, col.Tp, int8(col.Decimal))
if err != nil {
return types.Datum{}, errGetDefaultFailed.GenWithStackByArgs(col.Name)
}
// If the column's default value is not ZeroDatetimeStr or CurrentTimestamp, convert the default value to the current session time zone.
if needChangeTimeZone {
t := value.GetMysqlTime()
err = t.ConvertTimeZone(sc.TimeZone, ctx.GetSessionVars().Location())
if err != nil {
return value, err
}
value.SetMysqlTime(t)
}
return value, nil
}
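// A sketch of the time-zone handling above (values are illustrative): a
// TIMESTAMP default stored as "2000-01-01 00:00:00" with
// col.Version >= ColumnInfoVersion1 is interpreted in UTC, then converted to
// the session time zone, so a session running in UTC+8 reads it back as
// "2000-01-01 08:00:00".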
func getColDefaultValueFromNil(ctx sessionctx.Context, col *model.ColumnInfo) (types.Datum, error) {
if !mysql.HasNotNullFlag(col.Flag) {
return types.Datum{}, nil
}
if col.Tp == mysql.TypeEnum {
// For enum type, if no default value and not null is set,
// the default value is the first element of the enum list
defEnum, err := types.ParseEnumValue(col.FieldType.Elems, 1)
if err != nil {
return types.Datum{}, err
}
return types.NewCollateMysqlEnumDatum(defEnum, col.Collate), nil
}
if mysql.HasAutoIncrementFlag(col.Flag) {
// An auto-increment column doesn't have a default value, and we should not return an error.
return GetZeroValue(col), nil
}
vars := ctx.GetSessionVars()
sc := vars.StmtCtx
if !vars.StrictSQLMode {
sc.AppendWarning(ErrNoDefaultValue.FastGenByArgs(col.Name))
return GetZeroValue(col), nil
}
if sc.BadNullAsWarning {
sc.AppendWarning(ErrColumnCantNull.FastGenByArgs(col.Name))
return GetZeroValue(col), nil
}
return types.Datum{}, ErrNoDefaultValue.FastGenByArgs(col.Name)
}
// GetZeroValue gets zero value for given column type.
func GetZeroValue(col *model.ColumnInfo) types.Datum {
var d types.Datum
switch col.Tp {
case mysql.TypeTiny, mysql.TypeInt24, mysql.TypeShort, mysql.TypeLong, mysql.TypeLonglong:
if mysql.HasUnsignedFlag(col.Flag) {
d.SetUint64(0)
} else {
d.SetInt64(0)
}
case mysql.TypeYear:
d.SetInt64(0)
case mysql.TypeFloat:
d.SetFloat32(0)
case mysql.TypeDouble:
d.SetFloat64(0)
case mysql.TypeNewDecimal:
d.SetLength(col.Flen)
d.SetFrac(col.Decimal)
d.SetMysqlDecimal(new(types.MyDecimal))
case mysql.TypeString:
if col.Flen > 0 && col.Charset == charset.CharsetBin {
d.SetBytes(make([]byte, col.Flen))
} else {
d.SetString("", col.Collate)
}
case mysql.TypeVarString, mysql.TypeVarchar, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
d.SetString("", col.Collate)
case mysql.TypeDuration:
d.SetMysqlDuration(types.ZeroDuration)
case mysql.TypeDate:
d.SetMysqlTime(types.ZeroDate)
case mysql.TypeTimestamp:
d.SetMysqlTime(types.ZeroTimestamp)
case mysql.TypeDatetime:
d.SetMysqlTime(types.ZeroDatetime)
case mysql.TypeBit:
d.SetMysqlBit(types.ZeroBinaryLiteral)
case mysql.TypeSet:
d.SetMysqlSet(types.Set{}, col.Collate)
case mysql.TypeEnum:
d.SetMysqlEnum(types.Enum{}, col.Collate)
case mysql.TypeJSON:
d.SetMysqlJSON(json.CreateBinary(nil))
}
return d
}
// OptionalFsp convert a FieldType.Decimal to string.
func OptionalFsp(fieldType *types.FieldType) string {
fsp := fieldType.Decimal
if fsp == 0 {
return ""
}
return "(" + strconv.Itoa(fsp) + ")"
}
| table/column.go | 0 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.0002664311323314905,
0.0001727056223899126,
0.000160730691277422,
0.00017156163812614977,
0.00001200972019432811
] |
{
"id": 1,
"code_window": [
"\t{Scope: ScopeGlobal | ScopeSession, Name: InnodbSupportXA, Value: \"1\"},\n",
"\t{Scope: ScopeGlobal, Name: \"innodb_compression_level\", Value: \"6\"},\n",
"\t{Scope: ScopeNone, Name: \"innodb_file_format_check\", Value: \"1\"},\n",
"\t{Scope: ScopeNone, Name: \"myisam_mmap_size\", Value: \"18446744073709551615\"},\n",
"\t{Scope: ScopeNone, Name: \"innodb_buffer_pool_instances\", Value: \"8\"},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: BlockEncryptionMode, Value: \"aes-128-ecb\"},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: \"max_length_for_sort_data\", Value: \"1024\", IsHintUpdatable: true},\n",
"\t{Scope: ScopeNone, Name: \"character_set_system\", Value: \"utf8\"},\n",
"\t{Scope: ScopeGlobal, Name: InnodbOptimizeFullTextOnly, Value: \"0\"},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/noop.go",
"type": "replace",
"edit_start_line_idx": 146
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package variable
import (
"fmt"
"math"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/cznic/mathutil"
"github.com/pingcap/errors"
"github.com/pingcap/parser/charset"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/collate"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/versioninfo"
atomic2 "go.uber.org/atomic"
)
// ScopeFlag is for system variable whether can be changed in global/session dynamically or not.
type ScopeFlag uint8
// TypeFlag is the SysVar type, which doesn't exactly match MySQL types.
type TypeFlag byte
const (
// ScopeNone means the system variable can not be changed dynamically.
ScopeNone ScopeFlag = 0
// ScopeGlobal means the system variable can be changed globally.
ScopeGlobal ScopeFlag = 1 << 0
// ScopeSession means the system variable can only be changed in current session.
ScopeSession ScopeFlag = 1 << 1
// TypeStr is the default
TypeStr TypeFlag = 0
// TypeBool for boolean
TypeBool TypeFlag = 1
// TypeInt for integer
TypeInt TypeFlag = 2
// TypeEnum for Enum
TypeEnum TypeFlag = 3
// TypeFloat for Double
TypeFloat TypeFlag = 4
// TypeUnsigned for Unsigned integer
TypeUnsigned TypeFlag = 5
// TypeTime for time of day (a TiDB extension)
TypeTime TypeFlag = 6
// TypeDuration for a golang duration (a TiDB extension)
TypeDuration TypeFlag = 7
// BoolOff is the canonical string representation of a boolean false.
BoolOff = "OFF"
// BoolOn is the canonical string representation of a boolean true.
BoolOn = "ON"
// On is the canonical string for ON
On = "ON"
// Off is the canonical string for OFF
Off = "OFF"
// Warn means return warnings
Warn = "WARN"
// IntOnly means enabled for int types only
IntOnly = "INT_ONLY"
)
// SysVar is for system variable.
type SysVar struct {
// Scope is for whether can be changed or not
Scope ScopeFlag
// Name is the variable name.
Name string
// Value is the variable value.
Value string
// Type is the MySQL type (optional)
Type TypeFlag
// MinValue will automatically be validated when specified (optional)
MinValue int64
// MaxValue will automatically be validated when specified (optional)
MaxValue uint64
// AutoConvertNegativeBool applies to boolean types (optional)
AutoConvertNegativeBool bool
// AutoConvertOutOfRange applies to int and unsigned types.
AutoConvertOutOfRange bool
// ReadOnly applies to all types
ReadOnly bool
// PossibleValues applies to ENUM type
PossibleValues []string
// AllowEmpty is a special TiDB behavior which means "read value from config" (do not use)
AllowEmpty bool
// AllowEmptyAll is a special behavior that only applies to TiDBCapturePlanBaseline, TiDBTxnMode (do not use)
AllowEmptyAll bool
// AllowAutoValue means that the special value "-1" is permitted, even when outside of range.
AllowAutoValue bool
// Validation is a callback after the type validation has been performed
Validation func(*SessionVars, string, string, ScopeFlag) (string, error)
// SetSession is called after validation
SetSession func(*SessionVars, string) error
// IsHintUpdatable indicate whether it's updatable via SET_VAR() hint (optional)
IsHintUpdatable bool
}
// SetSessionFromHook calls the SetSession func if it exists.
func (sv *SysVar) SetSessionFromHook(s *SessionVars, val string) error {
if sv.SetSession != nil {
return sv.SetSession(s, val)
}
return nil
}
// ValidateFromType provides automatic validation based on the SysVar's type
func (sv *SysVar) ValidateFromType(vars *SessionVars, value string, scope ScopeFlag) (string, error) {
// Some sysvars are read-only. Attempting to set should always fail.
if sv.ReadOnly || sv.Scope == ScopeNone {
return value, ErrIncorrectScope.GenWithStackByArgs(sv.Name, "read only")
}
// The string "DEFAULT" is a special keyword in MySQL, which restores
// the compiled sysvar value. In which case we can skip further validation.
if strings.EqualFold(value, "DEFAULT") {
return sv.Value, nil
}
// Some sysvars in TiDB have a special behavior where the empty string means
// "use the config file value". This needs to be cleaned up once the behavior
// for instance variables is determined.
if value == "" && ((sv.AllowEmpty && scope == ScopeSession) || sv.AllowEmptyAll) {
return value, nil
}
// Provide validation using the SysVar struct
switch sv.Type {
case TypeUnsigned:
return sv.checkUInt64SystemVar(value, vars)
case TypeInt:
return sv.checkInt64SystemVar(value, vars)
case TypeBool:
return sv.checkBoolSystemVar(value, vars)
case TypeFloat:
return sv.checkFloatSystemVar(value, vars)
case TypeEnum:
return sv.checkEnumSystemVar(value, vars)
case TypeTime:
return sv.checkTimeSystemVar(value, vars)
case TypeDuration:
return sv.checkDurationSystemVar(value, vars)
}
return value, nil // typeString
}
const (
localDayTimeFormat = "15:04"
// FullDayTimeFormat is the full format of analyze start time and end time.
FullDayTimeFormat = "15:04 -0700"
)
func (sv *SysVar) checkTimeSystemVar(value string, vars *SessionVars) (string, error) {
var t time.Time
var err error
if len(value) <= len(localDayTimeFormat) {
t, err = time.ParseInLocation(localDayTimeFormat, value, vars.TimeZone)
} else {
t, err = time.ParseInLocation(FullDayTimeFormat, value, vars.TimeZone)
}
if err != nil {
return "", err
}
return t.Format(FullDayTimeFormat), nil
}
func (sv *SysVar) checkDurationSystemVar(value string, vars *SessionVars) (string, error) {
d, err := time.ParseDuration(value)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
// Check for min/max violations
if int64(d) < sv.MinValue {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if uint64(d) > sv.MaxValue {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
// return a string representation of the duration
return d.String(), nil
}
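// For illustration: time.ParseDuration accepts strings such as "1h30m" or
// "500ms"; the parsed duration is range-checked against MinValue/MaxValue in
// nanoseconds, and the canonical d.String() form (e.g. "1h30m0s") is what
// gets stored.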
func (sv *SysVar) checkUInt64SystemVar(value string, vars *SessionVars) (string, error) {
if sv.AllowAutoValue && value == "-1" {
return value, nil
}
// There are two types of validation behaviors for integer values. The default
// is to return an error saying the value is out of range. For MySQL compatibility, some
// variables prefer to convert the value to the min/max and return a warning.
if !sv.AutoConvertOutOfRange {
return sv.checkUint64SystemVarWithError(value)
}
if len(value) == 0 {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if value[0] == '-' {
_, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(sv.Name, value))
return fmt.Sprintf("%d", sv.MinValue), nil
}
val, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if val < uint64(sv.MinValue) {
vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(sv.Name, value))
return fmt.Sprintf("%d", sv.MinValue), nil
}
if val > sv.MaxValue {
vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(sv.Name, value))
return fmt.Sprintf("%d", sv.MaxValue), nil
}
return value, nil
}
func (sv *SysVar) checkInt64SystemVar(value string, vars *SessionVars) (string, error) {
if sv.AllowAutoValue && value == "-1" {
return value, nil
}
// There are two types of validation behaviors for integer values. The default
// is to return an error saying the value is out of range. For MySQL compatibility, some
// variables prefer to convert the value to the min/max and return a warning.
if !sv.AutoConvertOutOfRange {
return sv.checkInt64SystemVarWithError(value)
}
val, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if val < sv.MinValue {
vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(sv.Name, value))
return fmt.Sprintf("%d", sv.MinValue), nil
}
if val > int64(sv.MaxValue) {
vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(sv.Name, value))
return fmt.Sprintf("%d", sv.MaxValue), nil
}
return value, nil
}
func (sv *SysVar) checkEnumSystemVar(value string, vars *SessionVars) (string, error) {
// The value could be either a string or the ordinal position in the PossibleValues.
// This allows for the behavior 0 = OFF, 1 = ON, 2 = DEMAND etc.
var iStr string
for i, v := range sv.PossibleValues {
iStr = fmt.Sprintf("%d", i)
if strings.EqualFold(value, v) || strings.EqualFold(value, iStr) {
return v, nil
}
}
return value, ErrWrongValueForVar.GenWithStackByArgs(sv.Name, value)
}
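// For example, with PossibleValues = []string{"OFF", "ON", "DEMAND"}, both
// "demand" and "2" normalize to "DEMAND", mirroring MySQL's numeric
// shorthand for enum system variables.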
func (sv *SysVar) checkFloatSystemVar(value string, vars *SessionVars) (string, error) {
if len(value) == 0 {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
val, err := strconv.ParseFloat(value, 64)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if val < float64(sv.MinValue) || val > float64(sv.MaxValue) {
return value, ErrWrongValueForVar.GenWithStackByArgs(sv.Name, value)
}
return value, nil
}
func (sv *SysVar) checkBoolSystemVar(value string, vars *SessionVars) (string, error) {
if strings.EqualFold(value, "ON") {
return BoolOn, nil
} else if strings.EqualFold(value, "OFF") {
return BoolOff, nil
}
val, err := strconv.ParseInt(value, 10, 64)
if err == nil {
// There are two types of conversion rules for integer values.
// The default only allows 0 or 1, but a subset of variables converts any
// negative integer to 1.
if !sv.AutoConvertNegativeBool {
if val == 0 {
return BoolOff, nil
} else if val == 1 {
return BoolOn, nil
}
} else {
if val == 1 || val < 0 {
return BoolOn, nil
} else if val == 0 {
return BoolOff, nil
}
}
}
return value, ErrWrongValueForVar.GenWithStackByArgs(sv.Name, value)
}
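// For example: with AutoConvertNegativeBool set, "-1" normalizes to "ON";
// without it, "-1" is rejected with ErrWrongValueForVar. "on" and "off" are
// accepted case-insensitively in both modes.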
func (sv *SysVar) checkUint64SystemVarWithError(value string) (string, error) {
if len(value) == 0 {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if value[0] == '-' {
// In strict mode this expects the error WrongValue, but in non-strict mode it returns WrongType.
return value, ErrWrongValueForVar.GenWithStackByArgs(sv.Name, value)
}
val, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if val < uint64(sv.MinValue) || val > sv.MaxValue {
return value, ErrWrongValueForVar.GenWithStackByArgs(sv.Name, value)
}
return value, nil
}
func (sv *SysVar) checkInt64SystemVarWithError(value string) (string, error) {
if len(value) == 0 {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
val, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if val < sv.MinValue || val > int64(sv.MaxValue) {
return value, ErrWrongValueForVar.GenWithStackByArgs(sv.Name, value)
}
return value, nil
}
// ValidateFromHook calls the anonymous function on the sysvar if it exists.
func (sv *SysVar) ValidateFromHook(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if sv.Validation != nil {
return sv.Validation(vars, normalizedValue, originalValue, scope)
}
return normalizedValue, nil
}
// GetNativeValType attempts to convert the val to the approx MySQL non-string type
func (sv *SysVar) GetNativeValType(val string) (types.Datum, byte, uint) {
switch sv.Type {
case TypeUnsigned:
u, err := strconv.ParseUint(val, 10, 64)
if err != nil {
u = 0
}
return types.NewUintDatum(u), mysql.TypeLonglong, mysql.UnsignedFlag
case TypeBool:
optVal := int64(0) // OFF
if TiDBOptOn(val) {
optVal = 1
}
return types.NewIntDatum(optVal), mysql.TypeLong, 0
}
return types.NewStringDatum(val), mysql.TypeVarString, 0
}
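// For example: a TypeBool value "ON" becomes an int64 datum 1 typed as
// mysql.TypeLong, a TypeUnsigned value "42" becomes a uint64 datum typed as
// mysql.TypeLonglong, and any other type falls back to a varstring datum.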
var sysVars map[string]*SysVar
var sysVarsLock sync.RWMutex
// RegisterSysVar adds a sysvar to the SysVars list
func RegisterSysVar(sv *SysVar) {
name := strings.ToLower(sv.Name)
sysVarsLock.Lock()
sysVars[name] = sv
sysVarsLock.Unlock()
}
// UnregisterSysVar removes a sysvar from the SysVars list
// currently only used in tests.
func UnregisterSysVar(name string) {
name = strings.ToLower(name)
sysVarsLock.Lock()
delete(sysVars, name)
sysVarsLock.Unlock()
}
// GetSysVar returns sys var info for name as key.
func GetSysVar(name string) *SysVar {
name = strings.ToLower(name)
sysVarsLock.RLock()
defer sysVarsLock.RUnlock()
return sysVars[name]
}
// SetSysVar sets a sysvar. This will not propagate to the cluster, so it should only be
// used for instance scoped AUTO variables such as system_time_zone.
func SetSysVar(name string, value string) {
name = strings.ToLower(name)
sysVarsLock.Lock()
defer sysVarsLock.Unlock()
sysVars[name].Value = value
}
// GetSysVars returns the sysVars list under a RWLock
func GetSysVars() map[string]*SysVar {
sysVarsLock.RLock()
defer sysVarsLock.RUnlock()
return sysVars
}
// PluginVarNames is global plugin var names set.
var PluginVarNames []string
func init() {
sysVars = make(map[string]*SysVar)
for _, v := range defaultSysVars {
RegisterSysVar(v)
}
for _, v := range noopSysVars {
RegisterSysVar(v)
}
initSynonymsSysVariables()
}
// BoolToOnOff returns the string representation of a bool, i.e. "ON/OFF"
func BoolToOnOff(b bool) string {
if b {
return BoolOn
}
return BoolOff
}
func int32ToBoolStr(i int32) string {
if i == 1 {
return BoolOn
}
return BoolOff
}
func checkCharacterValid(normalizedValue string, argName string) (string, error) {
if normalizedValue == "" {
return normalizedValue, errors.Trace(ErrWrongValueForVar.GenWithStackByArgs(argName, "NULL"))
}
cht, _, err := charset.GetCharsetInfo(normalizedValue)
if err != nil {
return normalizedValue, errors.Trace(err)
}
return cht, nil
}
var defaultSysVars = []*SysVar{
{Scope: ScopeGlobal, Name: MaxConnections, Value: "151", Type: TypeUnsigned, MinValue: 1, MaxValue: 100000, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: SQLSelectLimit, Value: "18446744073709551615", Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: DefaultWeekFormat, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 7, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: SQLModeVar, Value: mysql.DefaultSQLMode, IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: MaxExecutionTime, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true, IsHintUpdatable: true, SetSession: func(s *SessionVars, val string) error {
timeoutMS := tidbOptPositiveInt32(val, 0)
s.MaxExecutionTime = uint64(timeoutMS)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: CollationServer, Value: mysql.DefaultCollationName, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if _, err := collate.GetCollationByName(normalizedValue); err != nil {
return normalizedValue, errors.Trace(err)
}
return normalizedValue, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: SQLLogBin, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TimeZone, Value: "SYSTEM", IsHintUpdatable: true, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if strings.EqualFold(normalizedValue, "SYSTEM") {
return "SYSTEM", nil
}
_, err := parseTimeZone(normalizedValue)
return normalizedValue, err
}},
{Scope: ScopeNone, Name: SystemTimeZone, Value: "CST"},
{Scope: ScopeGlobal | ScopeSession, Name: ForeignKeyChecks, Value: BoolOff, Type: TypeBool, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if TiDBOptOn(normalizedValue) {
// TiDB does not yet support foreign keys.
// Return the original value in the warning, so that users are not confused.
vars.StmtCtx.AppendWarning(ErrUnsupportedValueForVar.GenWithStackByArgs(ForeignKeyChecks, originalValue))
return BoolOff, nil
} else if !TiDBOptOn(normalizedValue) {
return BoolOff, nil
}
return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs(ForeignKeyChecks, originalValue)
}},
{Scope: ScopeNone, Name: Hostname, Value: ServerHostname},
{Scope: ScopeSession, Name: Timestamp, Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: CharacterSetFilesystem, Value: "binary", Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
return checkCharacterValid(normalizedValue, CharacterSetFilesystem)
}},
{Scope: ScopeGlobal | ScopeSession, Name: CollationDatabase, Value: mysql.DefaultCollationName, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if _, err := collate.GetCollationByName(normalizedValue); err != nil {
return normalizedValue, errors.Trace(err)
}
return normalizedValue, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: AutoIncrementIncrement, Value: strconv.FormatInt(DefAutoIncrementIncrement, 10), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint16, AutoConvertOutOfRange: true, SetSession: func(s *SessionVars, val string) error {
// AutoIncrementIncrement is valid in [1, 65535].
s.AutoIncrementIncrement = tidbOptPositiveInt32(val, DefAutoIncrementIncrement)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: AutoIncrementOffset, Value: strconv.FormatInt(DefAutoIncrementOffset, 10), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint16, AutoConvertOutOfRange: true, SetSession: func(s *SessionVars, val string) error {
// AutoIncrementOffset is valid in [1, 65535].
s.AutoIncrementOffset = tidbOptPositiveInt32(val, DefAutoIncrementOffset)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: CharacterSetClient, Value: mysql.DefaultCharset, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
return checkCharacterValid(normalizedValue, CharacterSetClient)
}},
{Scope: ScopeNone, Name: Port, Value: "4000", Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint16},
{Scope: ScopeNone, Name: LowerCaseTableNames, Value: "2"},
{Scope: ScopeNone, Name: LogBin, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: CharacterSetResults, Value: mysql.DefaultCharset, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if normalizedValue == "" {
return normalizedValue, nil
}
return checkCharacterValid(normalizedValue, "")
}},
{Scope: ScopeNone, Name: VersionComment, Value: "TiDB Server (Apache License 2.0) " + versioninfo.TiDBEdition + " Edition, MySQL 5.7 compatible"},
{Scope: ScopeGlobal | ScopeSession, Name: TxnIsolation, Value: "REPEATABLE-READ", Type: TypeEnum, PossibleValues: []string{"READ-UNCOMMITTED", "READ-COMMITTED", "REPEATABLE-READ", "SERIALIZABLE"}, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if normalizedValue == "SERIALIZABLE" || normalizedValue == "READ-UNCOMMITTED" {
if skipIsolationLevelCheck, err := GetSessionSystemVar(vars, TiDBSkipIsolationLevelCheck); err != nil {
return normalizedValue, err
} else if !TiDBOptOn(skipIsolationLevelCheck) {
return normalizedValue, ErrUnsupportedIsolationLevel.GenWithStackByArgs(normalizedValue)
}
}
return normalizedValue, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TransactionIsolation, Value: "REPEATABLE-READ", Type: TypeEnum, PossibleValues: []string{"READ-UNCOMMITTED", "READ-COMMITTED", "REPEATABLE-READ", "SERIALIZABLE"}, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if normalizedValue == "SERIALIZABLE" || normalizedValue == "READ-UNCOMMITTED" {
returnErr := ErrUnsupportedIsolationLevel.GenWithStackByArgs(normalizedValue)
if skipIsolationLevelCheck, err := GetSessionSystemVar(vars, TiDBSkipIsolationLevelCheck); err != nil {
return normalizedValue, err
} else if !TiDBOptOn(skipIsolationLevelCheck) {
return normalizedValue, returnErr
}
vars.StmtCtx.AppendWarning(returnErr)
}
return normalizedValue, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: CollationConnection, Value: mysql.DefaultCollationName, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if _, err := collate.GetCollationByName(normalizedValue); err != nil {
return normalizedValue, errors.Trace(err)
}
return normalizedValue, nil
}},
{Scope: ScopeNone, Name: Version, Value: mysql.ServerVersion},
{Scope: ScopeGlobal | ScopeSession, Name: AutoCommit, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: CharsetDatabase, Value: mysql.DefaultCharset, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
return checkCharacterValid(normalizedValue, CharsetDatabase)
}},
{Scope: ScopeGlobal | ScopeSession, Name: TxReadOnly, Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: TransactionReadOnly, Value: "0"},
{Scope: ScopeGlobal, Name: MaxPreparedStmtCount, Value: strconv.FormatInt(DefMaxPreparedStmtCount, 10), Type: TypeInt, MinValue: -1, MaxValue: 1048576, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: DataDir, Value: "/usr/local/mysql/data/"},
{Scope: ScopeGlobal | ScopeSession, Name: WaitTimeout, Value: strconv.FormatInt(DefWaitTimeout, 10), Type: TypeUnsigned, MinValue: 0, MaxValue: secondsPerYear, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: InteractiveTimeout, Value: "28800", Type: TypeUnsigned, MinValue: 1, MaxValue: secondsPerYear, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: InnodbLockWaitTimeout, Value: strconv.FormatInt(DefInnodbLockWaitTimeout, 10), Type: TypeUnsigned, MinValue: 1, MaxValue: 1073741824, AutoConvertOutOfRange: true, SetSession: func(s *SessionVars, val string) error {
lockWaitSec := tidbOptInt64(val, DefInnodbLockWaitTimeout)
s.LockWaitTimeout = lockWaitSec * 1000
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: GroupConcatMaxLen, Value: "1024", AutoConvertOutOfRange: true, IsHintUpdatable: true, Type: TypeUnsigned, MinValue: 4, MaxValue: math.MaxUint64, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
// https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_group_concat_max_len
// Minimum Value 4
// Maximum Value (64-bit platforms) 18446744073709551615
// Maximum Value (32-bit platforms) 4294967295
if mathutil.IntBits == 32 {
if val, err := strconv.ParseUint(normalizedValue, 10, 64); err == nil {
if val > uint64(math.MaxUint32) {
vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(GroupConcatMaxLen, originalValue))
return fmt.Sprintf("%d", math.MaxUint32), nil
}
}
}
return normalizedValue, nil
}},
{Scope: ScopeNone, Name: Socket, Value: "/tmp/myssock"},
{Scope: ScopeGlobal | ScopeSession, Name: CharacterSetConnection, Value: mysql.DefaultCharset, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
return checkCharacterValid(normalizedValue, CharacterSetConnection)
}},
{Scope: ScopeGlobal | ScopeSession, Name: CharacterSetServer, Value: mysql.DefaultCharset, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
return checkCharacterValid(normalizedValue, CharacterSetServer)
}},
{Scope: ScopeGlobal | ScopeSession, Name: MaxAllowedPacket, Value: "67108864", Type: TypeUnsigned, MinValue: 1024, MaxValue: MaxOfMaxAllowedPacket, AutoConvertOutOfRange: true},
{Scope: ScopeSession, Name: WarningCount, Value: "0", ReadOnly: true},
{Scope: ScopeSession, Name: ErrorCount, Value: "0", ReadOnly: true},
{Scope: ScopeGlobal | ScopeSession, Name: WindowingUseHighPrecision, Value: BoolOn, Type: TypeBool, IsHintUpdatable: true, SetSession: func(s *SessionVars, val string) error {
s.WindowingUseHighPrecision = TiDBOptOn(val)
return nil
}},
{Scope: ScopeSession, Name: TiDBTxnScope, Value: func() string {
if isGlobal, _ := config.GetTxnScopeFromConfig(); isGlobal {
return oracle.GlobalTxnScope
}
return oracle.LocalTxnScope
}()},
/* TiDB specific variables */
{Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowMPPExecution, Type: TypeBool, Value: BoolToOnOff(DefTiDBAllowMPPExecution)},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBBCJThresholdCount, Value: strconv.Itoa(DefBroadcastJoinThresholdCount), Type: TypeInt, MinValue: 0, MaxValue: math.MaxInt64, SetSession: func(s *SessionVars, val string) error {
s.BroadcastJoinThresholdCount = tidbOptInt64(val, DefBroadcastJoinThresholdCount)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBBCJThresholdSize, Value: strconv.Itoa(DefBroadcastJoinThresholdSize), Type: TypeInt, MinValue: 0, MaxValue: math.MaxInt64, SetSession: func(s *SessionVars, val string) error {
s.BroadcastJoinThresholdSize = tidbOptInt64(val, DefBroadcastJoinThresholdSize)
return nil
}},
{Scope: ScopeSession, Name: TiDBSnapshot, Value: ""},
{Scope: ScopeSession, Name: TiDBOptAggPushDown, Value: BoolToOnOff(DefOptAggPushDown), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
s.AllowAggPushDown = TiDBOptOn(val)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptBCJ, Value: BoolToOnOff(DefOptBCJ), Type: TypeBool, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if TiDBOptOn(normalizedValue) && vars.AllowBatchCop == 0 {
		return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs("Can't set broadcast join to 1 while tidb_allow_batch_cop is 0; please enable batch cop first.")
}
return normalizedValue, nil
}, SetSession: func(s *SessionVars, val string) error {
s.AllowBCJ = TiDBOptOn(val)
return nil
}},
{Scope: ScopeSession, Name: TiDBOptDistinctAggPushDown, Value: BoolToOnOff(config.GetGlobalConfig().Performance.DistinctAggPushDown), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
s.AllowDistinctAggPushDown = TiDBOptOn(val)
return nil
}},
{Scope: ScopeSession, Name: TiDBOptWriteRowID, Value: BoolToOnOff(DefOptWriteRowID), SetSession: func(s *SessionVars, val string) error {
s.AllowWriteRowID = TiDBOptOn(val)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBBuildStatsConcurrency, Value: strconv.Itoa(DefBuildStatsConcurrency)},
{Scope: ScopeGlobal, Name: TiDBAutoAnalyzeRatio, Value: strconv.FormatFloat(DefAutoAnalyzeRatio, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal, Name: TiDBAutoAnalyzeStartTime, Value: DefAutoAnalyzeStartTime, Type: TypeTime},
{Scope: ScopeGlobal, Name: TiDBAutoAnalyzeEndTime, Value: DefAutoAnalyzeEndTime, Type: TypeTime},
{Scope: ScopeSession, Name: TiDBChecksumTableConcurrency, Value: strconv.Itoa(DefChecksumTableConcurrency)},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBExecutorConcurrency, Value: strconv.Itoa(DefExecutorConcurrency), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBDistSQLScanConcurrency, Value: strconv.Itoa(DefDistSQLScanConcurrency), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptInSubqToJoinAndAgg, Value: BoolToOnOff(DefOptInSubqToJoinAndAgg), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
s.SetAllowInSubqToJoinAndAgg(TiDBOptOn(val))
return nil
}},
{Scope: ScopeSession, Name: TiDBOptPreferRangeScan, Value: BoolToOnOff(DefOptPreferRangeScan), Type: TypeBool, IsHintUpdatable: true, SetSession: func(s *SessionVars, val string) error {
s.SetAllowPreferRangeScan(TiDBOptOn(val))
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptCorrelationThreshold, Value: strconv.FormatFloat(DefOptCorrelationThreshold, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: 1, SetSession: func(s *SessionVars, val string) error {
s.CorrelationThreshold = tidbOptFloat64(val, DefOptCorrelationThreshold)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptCorrelationExpFactor, Value: strconv.Itoa(DefOptCorrelationExpFactor), Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64, SetSession: func(s *SessionVars, val string) error {
s.CorrelationExpFactor = int(tidbOptInt64(val, DefOptCorrelationExpFactor))
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptCPUFactor, Value: strconv.FormatFloat(DefOptCPUFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64, SetSession: func(s *SessionVars, val string) error {
s.CPUFactor = tidbOptFloat64(val, DefOptCPUFactor)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptTiFlashConcurrencyFactor, Value: strconv.FormatFloat(DefOptTiFlashConcurrencyFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64, SetSession: func(s *SessionVars, val string) error {
s.CopTiFlashConcurrencyFactor = tidbOptFloat64(val, DefOptTiFlashConcurrencyFactor)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptCopCPUFactor, Value: strconv.FormatFloat(DefOptCopCPUFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64, SetSession: func(s *SessionVars, val string) error {
s.CopCPUFactor = tidbOptFloat64(val, DefOptCopCPUFactor)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptNetworkFactor, Value: strconv.FormatFloat(DefOptNetworkFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptScanFactor, Value: strconv.FormatFloat(DefOptScanFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptDescScanFactor, Value: strconv.FormatFloat(DefOptDescScanFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptSeekFactor, Value: strconv.FormatFloat(DefOptSeekFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptMemoryFactor, Value: strconv.FormatFloat(DefOptMemoryFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptDiskFactor, Value: strconv.FormatFloat(DefOptDiskFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptConcurrencyFactor, Value: strconv.FormatFloat(DefOptConcurrencyFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBIndexJoinBatchSize, Value: strconv.Itoa(DefIndexJoinBatchSize), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBIndexLookupSize, Value: strconv.Itoa(DefIndexLookupSize), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBIndexLookupConcurrency, Value: strconv.Itoa(DefIndexLookupConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBIndexLookupJoinConcurrency, Value: strconv.Itoa(DefIndexLookupJoinConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBIndexSerialScanConcurrency, Value: strconv.Itoa(DefIndexSerialScanConcurrency), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBSkipUTF8Check, Value: BoolToOnOff(DefSkipUTF8Check), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
s.SkipUTF8Check = TiDBOptOn(val)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBSkipASCIICheck, Value: BoolToOnOff(DefSkipASCIICheck), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
s.SkipASCIICheck = TiDBOptOn(val)
return nil
}},
{Scope: ScopeSession, Name: TiDBBatchInsert, Value: BoolToOnOff(DefBatchInsert), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBBatchDelete, Value: BoolToOnOff(DefBatchDelete), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBBatchCommit, Value: BoolToOnOff(DefBatchCommit), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBDMLBatchSize, Value: strconv.Itoa(DefDMLBatchSize), Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeSession, Name: TiDBCurrentTS, Value: strconv.Itoa(DefCurretTS), ReadOnly: true},
{Scope: ScopeSession, Name: TiDBLastTxnInfo, Value: strconv.Itoa(DefCurretTS), ReadOnly: true},
{Scope: ScopeSession, Name: TiDBLastQueryInfo, Value: strconv.Itoa(DefCurretTS), ReadOnly: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBMaxChunkSize, Value: strconv.Itoa(DefMaxChunkSize), Type: TypeUnsigned, MinValue: maxChunkSizeLowerBound, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowBatchCop, Value: strconv.Itoa(DefTiDBAllowBatchCop), Type: TypeInt, MinValue: 0, MaxValue: 2, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if normalizedValue == "0" && vars.AllowBCJ {
		return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs("Can't set batch cop to 0 while tidb_opt_broadcast_join is 1; please set tidb_opt_broadcast_join to 0 first.")
}
return normalizedValue, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBInitChunkSize, Value: strconv.Itoa(DefInitChunkSize), Type: TypeUnsigned, MinValue: 1, MaxValue: initChunkSizeUpperBound},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableCascadesPlanner, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableIndexMerge, Value: BoolOff, Type: TypeBool},
{Scope: ScopeSession, Name: TIDBMemQuotaQuery, Value: strconv.FormatInt(config.GetGlobalConfig().MemQuotaQuery, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TIDBMemQuotaHashJoin, Value: strconv.FormatInt(DefTiDBMemQuotaHashJoin, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TIDBMemQuotaMergeJoin, Value: strconv.FormatInt(DefTiDBMemQuotaMergeJoin, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TIDBMemQuotaSort, Value: strconv.FormatInt(DefTiDBMemQuotaSort, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TIDBMemQuotaTopn, Value: strconv.FormatInt(DefTiDBMemQuotaTopn, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TIDBMemQuotaIndexLookupReader, Value: strconv.FormatInt(DefTiDBMemQuotaIndexLookupReader, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TIDBMemQuotaIndexLookupJoin, Value: strconv.FormatInt(DefTiDBMemQuotaIndexLookupJoin, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TiDBEnableStreaming, Value: BoolOff, Type: TypeBool},
{Scope: ScopeSession, Name: TiDBEnableChunkRPC, Value: BoolOn, Type: TypeBool},
{Scope: ScopeSession, Name: TxnIsolationOneShot, Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableTablePartition, Value: BoolOn, Type: TypeEnum, PossibleValues: []string{BoolOff, BoolOn, "AUTO"}},
{Scope: ScopeSession, Name: TiDBEnableListTablePartition, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBHashJoinConcurrency, Value: strconv.Itoa(DefTiDBHashJoinConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBProjectionConcurrency, Value: strconv.Itoa(DefTiDBProjectionConcurrency), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBHashAggPartialConcurrency, Value: strconv.Itoa(DefTiDBHashAggPartialConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBHashAggFinalConcurrency, Value: strconv.Itoa(DefTiDBHashAggFinalConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBWindowConcurrency, Value: strconv.Itoa(DefTiDBWindowConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBMergeJoinConcurrency, Value: strconv.Itoa(DefTiDBMergeJoinConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStreamAggConcurrency, Value: strconv.Itoa(DefTiDBStreamAggConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableParallelApply, Value: BoolToOnOff(DefTiDBEnableParallelApply), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBMemQuotaApplyCache, Value: strconv.Itoa(DefTiDBMemQuotaApplyCache)},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBBackoffLockFast, Value: strconv.Itoa(kv.DefBackoffLockFast), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBBackOffWeight, Value: strconv.Itoa(kv.DefBackOffWeight), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBRetryLimit, Value: strconv.Itoa(DefTiDBRetryLimit), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBDisableTxnAutoRetry, Value: BoolToOnOff(DefTiDBDisableTxnAutoRetry), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBConstraintCheckInPlace, Value: BoolToOnOff(DefTiDBConstraintCheckInPlace), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBTxnMode, Value: DefTiDBTxnMode, AllowEmptyAll: true, Type: TypeEnum, PossibleValues: []string{"pessimistic", "optimistic"}},
{Scope: ScopeGlobal, Name: TiDBRowFormatVersion, Value: strconv.Itoa(DefTiDBRowFormatV1), Type: TypeUnsigned, MinValue: 1, MaxValue: 2},
{Scope: ScopeSession, Name: TiDBOptimizerSelectivityLevel, Value: strconv.Itoa(DefTiDBOptimizerSelectivityLevel), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableWindowFunction, Value: BoolToOnOff(DefEnableWindowFunction), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableStrictDoubleTypeCheck, Value: BoolToOnOff(DefEnableStrictDoubleTypeCheck), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableVectorizedExpression, Value: BoolToOnOff(DefEnableVectorizedExpression), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableFastAnalyze, Value: BoolToOnOff(DefTiDBUseFastAnalyze), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBSkipIsolationLevelCheck, Value: BoolToOnOff(DefTiDBSkipIsolationLevelCheck), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableRateLimitAction, Value: BoolToOnOff(DefTiDBEnableRateLimitAction), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowFallbackToTiKV, Value: "", Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if normalizedValue == "" {
return "", nil
}
engines := strings.Split(normalizedValue, ",")
var formatVal string
storeTypes := make(map[kv.StoreType]struct{})
for i, engine := range engines {
engine = strings.TrimSpace(engine)
switch {
case strings.EqualFold(engine, kv.TiFlash.Name()):
if _, ok := storeTypes[kv.TiFlash]; !ok {
if i != 0 {
formatVal += ","
}
formatVal += kv.TiFlash.Name()
storeTypes[kv.TiFlash] = struct{}{}
}
default:
return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs(TiDBAllowFallbackToTiKV, normalizedValue)
}
}
return formatVal, nil
}},
	/* The following variables are defined as session scope but are actually server scope. */
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableDynamicPrivileges, Value: BoolOff, Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
s.EnableDynamicPrivileges = TiDBOptOn(val)
return nil
}},
{Scope: ScopeSession, Name: TiDBGeneralLog, Value: BoolToOnOff(DefTiDBGeneralLog), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBPProfSQLCPU, Value: strconv.Itoa(DefTiDBPProfSQLCPU), Type: TypeInt, MinValue: 0, MaxValue: 1},
{Scope: ScopeSession, Name: TiDBDDLSlowOprThreshold, Value: strconv.Itoa(DefTiDBDDLSlowOprThreshold)},
{Scope: ScopeSession, Name: TiDBConfig, Value: "", ReadOnly: true},
{Scope: ScopeGlobal, Name: TiDBDDLReorgWorkerCount, Value: strconv.Itoa(DefTiDBDDLReorgWorkerCount), Type: TypeUnsigned, MinValue: 1, MaxValue: uint64(maxDDLReorgWorkerCount)},
{Scope: ScopeGlobal, Name: TiDBDDLReorgBatchSize, Value: strconv.Itoa(DefTiDBDDLReorgBatchSize), Type: TypeUnsigned, MinValue: int64(MinDDLReorgBatchSize), MaxValue: uint64(MaxDDLReorgBatchSize), AutoConvertOutOfRange: true},
{Scope: ScopeGlobal, Name: TiDBDDLErrorCountLimit, Value: strconv.Itoa(DefTiDBDDLErrorCountLimit), Type: TypeUnsigned, MinValue: 0, MaxValue: uint64(math.MaxInt64), AutoConvertOutOfRange: true},
{Scope: ScopeSession, Name: TiDBDDLReorgPriority, Value: "PRIORITY_LOW"},
{Scope: ScopeGlobal, Name: TiDBMaxDeltaSchemaCount, Value: strconv.Itoa(DefTiDBMaxDeltaSchemaCount), Type: TypeUnsigned, MinValue: 100, MaxValue: 16384, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableChangeColumnType, Value: BoolToOnOff(DefTiDBChangeColumnType), Type: TypeBool},
{Scope: ScopeGlobal, Name: TiDBEnableChangeMultiSchema, Value: BoolToOnOff(DefTiDBChangeMultiSchema), Type: TypeBool},
{Scope: ScopeGlobal, Name: TiDBEnablePointGetCache, Value: BoolToOnOff(DefTiDBPointGetCache), Type: TypeBool},
{Scope: ScopeGlobal, Name: TiDBEnableAlterPlacement, Value: BoolToOnOff(DefTiDBEnableAlterPlacement), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBForcePriority, Value: mysql.Priority2Str[DefTiDBForcePriority]},
{Scope: ScopeSession, Name: TiDBEnableRadixJoin, Value: BoolToOnOff(DefTiDBUseRadixJoin), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptJoinReorderThreshold, Value: strconv.Itoa(DefTiDBOptJoinReorderThreshold), Type: TypeUnsigned, MinValue: 0, MaxValue: 63},
{Scope: ScopeSession, Name: TiDBSlowQueryFile, Value: ""},
{Scope: ScopeGlobal, Name: TiDBScatterRegion, Value: BoolToOnOff(DefTiDBScatterRegion), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBWaitSplitRegionFinish, Value: BoolToOnOff(DefTiDBWaitSplitRegionFinish), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBWaitSplitRegionTimeout, Value: strconv.Itoa(DefWaitSplitRegionTimeout), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TiDBLowResolutionTSO, Value: BoolOff, Type: TypeBool},
{Scope: ScopeSession, Name: TiDBExpensiveQueryTimeThreshold, Value: strconv.Itoa(DefTiDBExpensiveQueryTimeThreshold), Type: TypeUnsigned, MinValue: int64(MinExpensiveQueryTimeThreshold), MaxValue: uint64(math.MaxInt64), AutoConvertOutOfRange: true},
{Scope: ScopeSession, Name: TiDBMemoryUsageAlarmRatio, Value: strconv.FormatFloat(config.GetGlobalConfig().Performance.MemoryUsageAlarmRatio, 'f', -1, 64), Type: TypeFloat, MinValue: 0.0, MaxValue: 1.0},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableNoopFuncs, Value: BoolToOnOff(DefTiDBEnableNoopFuncs), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBReplicaRead, Value: "leader", Type: TypeEnum, PossibleValues: []string{"leader", "follower", "leader-and-follower"}},
{Scope: ScopeSession, Name: TiDBAllowRemoveAutoInc, Value: BoolToOnOff(DefTiDBAllowRemoveAutoInc), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableStmtSummary, Value: BoolToOnOff(config.GetGlobalConfig().StmtSummary.Enable), Type: TypeBool, AllowEmpty: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStmtSummaryInternalQuery, Value: BoolToOnOff(config.GetGlobalConfig().StmtSummary.EnableInternalQuery), Type: TypeBool, AllowEmpty: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStmtSummaryRefreshInterval, Value: strconv.Itoa(config.GetGlobalConfig().StmtSummary.RefreshInterval), Type: TypeInt, MinValue: 1, MaxValue: uint64(math.MaxInt32), AllowEmpty: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStmtSummaryHistorySize, Value: strconv.Itoa(config.GetGlobalConfig().StmtSummary.HistorySize), Type: TypeInt, MinValue: 0, MaxValue: uint64(math.MaxUint8), AllowEmpty: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStmtSummaryMaxStmtCount, Value: strconv.FormatUint(uint64(config.GetGlobalConfig().StmtSummary.MaxStmtCount), 10), Type: TypeInt, MinValue: 1, MaxValue: uint64(math.MaxInt16), AllowEmpty: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStmtSummaryMaxSQLLength, Value: strconv.FormatUint(uint64(config.GetGlobalConfig().StmtSummary.MaxSQLLength), 10), Type: TypeInt, MinValue: 0, MaxValue: uint64(math.MaxInt32), AllowEmpty: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBCapturePlanBaseline, Value: BoolOff, Type: TypeBool, AllowEmptyAll: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBUsePlanBaselines, Value: BoolToOnOff(DefTiDBUsePlanBaselines), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEvolvePlanBaselines, Value: BoolToOnOff(DefTiDBEvolvePlanBaselines), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableExtendedStats, Value: BoolToOnOff(false), Type: TypeBool},
{Scope: ScopeGlobal, Name: TiDBEvolvePlanTaskMaxTime, Value: strconv.Itoa(DefTiDBEvolvePlanTaskMaxTime), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeGlobal, Name: TiDBEvolvePlanTaskStartTime, Value: DefTiDBEvolvePlanTaskStartTime, Type: TypeTime},
{Scope: ScopeGlobal, Name: TiDBEvolvePlanTaskEndTime, Value: DefTiDBEvolvePlanTaskEndTime, Type: TypeTime},
{Scope: ScopeSession, Name: TiDBIsolationReadEngines, Value: strings.Join(config.GetGlobalConfig().IsolationRead.Engines, ", "), Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
engines := strings.Split(normalizedValue, ",")
var formatVal string
for i, engine := range engines {
engine = strings.TrimSpace(engine)
if i != 0 {
formatVal += ","
}
switch {
case strings.EqualFold(engine, kv.TiKV.Name()):
formatVal += kv.TiKV.Name()
case strings.EqualFold(engine, kv.TiFlash.Name()):
formatVal += kv.TiFlash.Name()
case strings.EqualFold(engine, kv.TiDB.Name()):
formatVal += kv.TiDB.Name()
default:
return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs(TiDBIsolationReadEngines, normalizedValue)
}
}
return formatVal, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStoreLimit, Value: strconv.FormatInt(atomic.LoadInt64(&config.GetGlobalConfig().TiKVClient.StoreLimit), 10), Type: TypeInt, MinValue: 0, MaxValue: uint64(math.MaxInt64), AutoConvertOutOfRange: true},
{Scope: ScopeSession, Name: TiDBMetricSchemaStep, Value: strconv.Itoa(DefTiDBMetricSchemaStep), Type: TypeUnsigned, MinValue: 10, MaxValue: 60 * 60 * 60},
{Scope: ScopeSession, Name: TiDBMetricSchemaRangeDuration, Value: strconv.Itoa(DefTiDBMetricSchemaRangeDuration), Type: TypeUnsigned, MinValue: 10, MaxValue: 60 * 60 * 60},
{Scope: ScopeSession, Name: TiDBSlowLogThreshold, Value: strconv.Itoa(logutil.DefaultSlowThreshold), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TiDBRecordPlanInSlowLog, Value: int32ToBoolStr(logutil.DefaultRecordPlanInSlowLog), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBEnableSlowLog, Value: BoolToOnOff(logutil.DefaultTiDBEnableSlowLog), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBQueryLogMaxLen, Value: strconv.Itoa(logutil.DefaultQueryLogMaxLen), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TiDBCheckMb4ValueInUTF8, Value: BoolToOnOff(config.GetGlobalConfig().CheckMb4ValueInUTF8), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBFoundInPlanCache, Value: BoolToOnOff(DefTiDBFoundInPlanCache), Type: TypeBool, ReadOnly: true},
{Scope: ScopeSession, Name: TiDBFoundInBinding, Value: BoolToOnOff(DefTiDBFoundInBinding), Type: TypeBool, ReadOnly: true},
{Scope: ScopeSession, Name: TiDBEnableCollectExecutionInfo, Value: BoolToOnOff(DefTiDBEnableCollectExecutionInfo), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowAutoRandExplicitInsert, Value: BoolToOnOff(DefTiDBAllowAutoRandExplicitInsert), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableClusteredIndex, Value: IntOnly, Type: TypeEnum, PossibleValues: []string{Off, On, IntOnly, "1", "0"}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBPartitionPruneMode, Value: string(Static), Type: TypeStr, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
mode := PartitionPruneMode(normalizedValue).Update()
if !mode.Valid() {
return normalizedValue, ErrWrongTypeForVar.GenWithStackByArgs(TiDBPartitionPruneMode)
}
return string(mode), nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBSlowLogMasking, Value: BoolToOnOff(DefTiDBRedactLog), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBRedactLog, Value: BoolToOnOff(DefTiDBRedactLog), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBShardAllocateStep, Value: strconv.Itoa(DefTiDBShardAllocateStep), Type: TypeInt, MinValue: 1, MaxValue: uint64(math.MaxInt64), AutoConvertOutOfRange: true},
{Scope: ScopeGlobal, Name: TiDBEnableTelemetry, Value: BoolToOnOff(DefTiDBEnableTelemetry), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableAmendPessimisticTxn, Value: BoolToOnOff(DefTiDBEnableAmendPessimisticTxn), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableAsyncCommit, Value: BoolToOnOff(DefTiDBEnableAsyncCommit), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnable1PC, Value: BoolToOnOff(DefTiDBEnable1PC), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBGuaranteeLinearizability, Value: BoolToOnOff(DefTiDBGuaranteeLinearizability), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBAnalyzeVersion, Value: strconv.Itoa(DefTiDBAnalyzeVersion), Type: TypeInt, MinValue: 1, MaxValue: 2, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
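		// Analyze version 2 is incompatible with query feedback, so while feedback
		// is enabled we keep the caller's current value and surface an error
		// instead of switching versions.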
if normalizedValue == "2" && FeedbackProbability.Load() > 0 {
var original string
var err error
if scope == ScopeGlobal {
original, err = vars.GlobalVarsAccessor.GetGlobalSysVar(TiDBAnalyzeVersion)
if err != nil {
return normalizedValue, nil
}
} else {
original = strconv.Itoa(vars.AnalyzeVersion)
}
			vars.StmtCtx.AppendError(errors.New("variable tidb_analyze_version not updated because analyze version 2 is incompatible with query feedback. Please consider setting feedback-probability to 0.0 in the config file to disable query feedback"))
return original, nil
}
return normalizedValue, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableIndexMergeJoin, Value: BoolToOnOff(DefTiDBEnableIndexMergeJoin), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBTrackAggregateMemoryUsage, Value: BoolToOnOff(DefTiDBTrackAggregateMemoryUsage), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBMultiStatementMode, Value: Off, Type: TypeEnum, PossibleValues: []string{Off, On, Warn}, SetSession: func(s *SessionVars, val string) error {
s.MultiStatementMode = TiDBOptMultiStmt(val)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableExchangePartition, Value: BoolToOnOff(DefTiDBEnableExchangePartition), Type: TypeBool},
/* tikv gc metrics */
{Scope: ScopeGlobal, Name: TiDBGCEnable, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal, Name: TiDBGCRunInterval, Value: "10m0s", Type: TypeDuration, MinValue: int64(time.Minute * 10), MaxValue: math.MaxInt64},
{Scope: ScopeGlobal, Name: TiDBGCLifetime, Value: "10m0s", Type: TypeDuration, MinValue: int64(time.Minute * 10), MaxValue: math.MaxInt64},
{Scope: ScopeGlobal, Name: TiDBGCConcurrency, Value: "-1", Type: TypeInt, MinValue: 1, MaxValue: 128, AllowAutoValue: true},
{Scope: ScopeGlobal, Name: TiDBGCScanLockMode, Value: "PHYSICAL", Type: TypeEnum, PossibleValues: []string{"PHYSICAL", "LEGACY"}},
}
// FeedbackProbability points to the FeedbackProbability in the statistics package.
// It's initialized in init() in feedback.go to break an import cycle.
var FeedbackProbability *atomic2.Float64
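// For illustration only — a hedged sketch (not part of this file) of how
// feedback.go can break the cycle from its init(); the 0.05 default below is
// an assumption made for the example:
//
//	func init() {
//		variable.FeedbackProbability = atomic2.NewFloat64(0.05)
//	}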
// SynonymsSysVariables maps a system variable name to the group of names that are synonyms of it.
var SynonymsSysVariables = map[string][]string{}
func addSynonymsSysVariables(synonyms ...string) {
for _, s := range synonyms {
SynonymsSysVariables[s] = synonyms
}
}
func initSynonymsSysVariables() {
addSynonymsSysVariables(TxnIsolation, TransactionIsolation)
addSynonymsSysVariables(TxReadOnly, TransactionReadOnly)
}
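// Illustrative lookup (hedged): once initSynonymsSysVariables has run, both
// SynonymsSysVariables[TxnIsolation] and SynonymsSysVariables[TransactionIsolation]
// yield {TxnIsolation, TransactionIsolation}, so a SET on either name can be
// fanned out to every synonym in the group.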
// SetNamesVariables lists the system variables affected by SET NAMES statements.
var SetNamesVariables = []string{
CharacterSetClient,
CharacterSetConnection,
CharacterSetResults,
}
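// For example, `SET NAMES utf8mb4` assigns utf8mb4 to all three variables
// above, whereas `SET CHARACTER SET utf8mb4` (see SetCharsetVariables below)
// leaves character_set_connection to follow the database charset instead.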
// SetCharsetVariables lists the system variables affected by SET CHARSET statements.
var SetCharsetVariables = []string{
CharacterSetClient,
CharacterSetResults,
}
const (
// CharacterSetConnection is the name for character_set_connection system variable.
CharacterSetConnection = "character_set_connection"
// CollationConnection is the name for collation_connection system variable.
CollationConnection = "collation_connection"
// CharsetDatabase is the name for character_set_database system variable.
CharsetDatabase = "character_set_database"
// CollationDatabase is the name for collation_database system variable.
CollationDatabase = "collation_database"
// CharacterSetFilesystem is the name for character_set_filesystem system variable.
CharacterSetFilesystem = "character_set_filesystem"
// CharacterSetClient is the name for character_set_client system variable.
CharacterSetClient = "character_set_client"
// CharacterSetSystem is the name for character_set_system system variable.
CharacterSetSystem = "character_set_system"
// GeneralLog is the name for 'general_log' system variable.
GeneralLog = "general_log"
// AvoidTemporalUpgrade is the name for 'avoid_temporal_upgrade' system variable.
AvoidTemporalUpgrade = "avoid_temporal_upgrade"
// MaxPreparedStmtCount is the name for 'max_prepared_stmt_count' system variable.
MaxPreparedStmtCount = "max_prepared_stmt_count"
// BigTables is the name for 'big_tables' system variable.
BigTables = "big_tables"
// CheckProxyUsers is the name for 'check_proxy_users' system variable.
CheckProxyUsers = "check_proxy_users"
// CoreFile is the name for 'core_file' system variable.
CoreFile = "core_file"
// DefaultWeekFormat is the name for 'default_week_format' system variable.
DefaultWeekFormat = "default_week_format"
// GroupConcatMaxLen is the name for 'group_concat_max_len' system variable.
GroupConcatMaxLen = "group_concat_max_len"
// DelayKeyWrite is the name for 'delay_key_write' system variable.
DelayKeyWrite = "delay_key_write"
// EndMarkersInJSON is the name for 'end_markers_in_json' system variable.
EndMarkersInJSON = "end_markers_in_json"
// Hostname is the name for 'hostname' system variable.
Hostname = "hostname"
// InnodbCommitConcurrency is the name for 'innodb_commit_concurrency' system variable.
InnodbCommitConcurrency = "innodb_commit_concurrency"
// InnodbFastShutdown is the name for 'innodb_fast_shutdown' system variable.
InnodbFastShutdown = "innodb_fast_shutdown"
// InnodbLockWaitTimeout is the name for 'innodb_lock_wait_timeout' system variable.
InnodbLockWaitTimeout = "innodb_lock_wait_timeout"
// SQLLogBin is the name for 'sql_log_bin' system variable.
SQLLogBin = "sql_log_bin"
// LogBin is the name for 'log_bin' system variable.
LogBin = "log_bin"
// MaxSortLength is the name for 'max_sort_length' system variable.
MaxSortLength = "max_sort_length"
// MaxSpRecursionDepth is the name for 'max_sp_recursion_depth' system variable.
MaxSpRecursionDepth = "max_sp_recursion_depth"
// MaxUserConnections is the name for 'max_user_connections' system variable.
MaxUserConnections = "max_user_connections"
// OfflineMode is the name for 'offline_mode' system variable.
OfflineMode = "offline_mode"
// InteractiveTimeout is the name for 'interactive_timeout' system variable.
InteractiveTimeout = "interactive_timeout"
// FlushTime is the name for 'flush_time' system variable.
FlushTime = "flush_time"
// PseudoSlaveMode is the name for 'pseudo_slave_mode' system variable.
PseudoSlaveMode = "pseudo_slave_mode"
// LowPriorityUpdates is the name for 'low_priority_updates' system variable.
LowPriorityUpdates = "low_priority_updates"
// LowerCaseTableNames is the name for 'lower_case_table_names' system variable.
LowerCaseTableNames = "lower_case_table_names"
// SessionTrackGtids is the name for 'session_track_gtids' system variable.
SessionTrackGtids = "session_track_gtids"
// OldPasswords is the name for 'old_passwords' system variable.
OldPasswords = "old_passwords"
// MaxConnections is the name for 'max_connections' system variable.
MaxConnections = "max_connections"
// SkipNameResolve is the name for 'skip_name_resolve' system variable.
SkipNameResolve = "skip_name_resolve"
// ForeignKeyChecks is the name for 'foreign_key_checks' system variable.
ForeignKeyChecks = "foreign_key_checks"
// SQLSafeUpdates is the name for 'sql_safe_updates' system variable.
SQLSafeUpdates = "sql_safe_updates"
// WarningCount is the name for 'warning_count' system variable.
WarningCount = "warning_count"
// ErrorCount is the name for 'error_count' system variable.
ErrorCount = "error_count"
// SQLSelectLimit is the name for 'sql_select_limit' system variable.
SQLSelectLimit = "sql_select_limit"
// MaxConnectErrors is the name for 'max_connect_errors' system variable.
MaxConnectErrors = "max_connect_errors"
// TableDefinitionCache is the name for 'table_definition_cache' system variable.
TableDefinitionCache = "table_definition_cache"
// TmpTableSize is the name for 'tmp_table_size' system variable.
TmpTableSize = "tmp_table_size"
// Timestamp is the name for 'timestamp' system variable.
Timestamp = "timestamp"
// ConnectTimeout is the name for 'connect_timeout' system variable.
ConnectTimeout = "connect_timeout"
// SyncBinlog is the name for 'sync_binlog' system variable.
SyncBinlog = "sync_binlog"
// BlockEncryptionMode is the name for 'block_encryption_mode' system variable.
BlockEncryptionMode = "block_encryption_mode"
// WaitTimeout is the name for 'wait_timeout' system variable.
WaitTimeout = "wait_timeout"
// ValidatePasswordNumberCount is the name of 'validate_password_number_count' system variable.
ValidatePasswordNumberCount = "validate_password_number_count"
// ValidatePasswordLength is the name of 'validate_password_length' system variable.
ValidatePasswordLength = "validate_password_length"
// Version is the name of 'version' system variable.
Version = "version"
// VersionComment is the name of 'version_comment' system variable.
VersionComment = "version_comment"
// PluginDir is the name of 'plugin_dir' system variable.
PluginDir = "plugin_dir"
// PluginLoad is the name of 'plugin_load' system variable.
PluginLoad = "plugin_load"
// Port is the name for 'port' system variable.
Port = "port"
// DataDir is the name for 'datadir' system variable.
DataDir = "datadir"
// Profiling is the name for 'Profiling' system variable.
Profiling = "profiling"
// Socket is the name for 'socket' system variable.
Socket = "socket"
// BinlogOrderCommits is the name for 'binlog_order_commits' system variable.
BinlogOrderCommits = "binlog_order_commits"
// MasterVerifyChecksum is the name for 'master_verify_checksum' system variable.
MasterVerifyChecksum = "master_verify_checksum"
// ValidatePasswordCheckUserName is the name for 'validate_password_check_user_name' system variable.
ValidatePasswordCheckUserName = "validate_password_check_user_name"
// SuperReadOnly is the name for 'super_read_only' system variable.
SuperReadOnly = "super_read_only"
// SQLNotes is the name for 'sql_notes' system variable.
SQLNotes = "sql_notes"
// QueryCacheType is the name for 'query_cache_type' system variable.
QueryCacheType = "query_cache_type"
// SlaveCompressedProtocol is the name for 'slave_compressed_protocol' system variable.
SlaveCompressedProtocol = "slave_compressed_protocol"
// BinlogRowQueryLogEvents is the name for 'binlog_rows_query_log_events' system variable.
BinlogRowQueryLogEvents = "binlog_rows_query_log_events"
// LogSlowSlaveStatements is the name for 'log_slow_slave_statements' system variable.
LogSlowSlaveStatements = "log_slow_slave_statements"
// LogSlowAdminStatements is the name for 'log_slow_admin_statements' system variable.
LogSlowAdminStatements = "log_slow_admin_statements"
// LogQueriesNotUsingIndexes is the name for 'log_queries_not_using_indexes' system variable.
LogQueriesNotUsingIndexes = "log_queries_not_using_indexes"
// QueryCacheWlockInvalidate is the name for 'query_cache_wlock_invalidate' system variable.
QueryCacheWlockInvalidate = "query_cache_wlock_invalidate"
// SQLAutoIsNull is the name for 'sql_auto_is_null' system variable.
SQLAutoIsNull = "sql_auto_is_null"
// RelayLogPurge is the name for 'relay_log_purge' system variable.
RelayLogPurge = "relay_log_purge"
// AutomaticSpPrivileges is the name for 'automatic_sp_privileges' system variable.
AutomaticSpPrivileges = "automatic_sp_privileges"
// SQLQuoteShowCreate is the name for 'sql_quote_show_create' system variable.
SQLQuoteShowCreate = "sql_quote_show_create"
// SlowQueryLog is the name for 'slow_query_log' system variable.
SlowQueryLog = "slow_query_log"
// BinlogDirectNonTransactionalUpdates is the name for 'binlog_direct_non_transactional_updates' system variable.
BinlogDirectNonTransactionalUpdates = "binlog_direct_non_transactional_updates"
// SQLBigSelects is the name for 'sql_big_selects' system variable.
SQLBigSelects = "sql_big_selects"
// LogBinTrustFunctionCreators is the name for 'log_bin_trust_function_creators' system variable.
LogBinTrustFunctionCreators = "log_bin_trust_function_creators"
// OldAlterTable is the name for 'old_alter_table' system variable.
OldAlterTable = "old_alter_table"
// EnforceGtidConsistency is the name for 'enforce_gtid_consistency' system variable.
EnforceGtidConsistency = "enforce_gtid_consistency"
// SecureAuth is the name for 'secure_auth' system variable.
SecureAuth = "secure_auth"
// UniqueChecks is the name for 'unique_checks' system variable.
UniqueChecks = "unique_checks"
// SQLWarnings is the name for 'sql_warnings' system variable.
SQLWarnings = "sql_warnings"
// AutoCommit is the name for 'autocommit' system variable.
AutoCommit = "autocommit"
// KeepFilesOnCreate is the name for 'keep_files_on_create' system variable.
KeepFilesOnCreate = "keep_files_on_create"
// ShowOldTemporals is the name for 'show_old_temporals' system variable.
ShowOldTemporals = "show_old_temporals"
// LocalInFile is the name for 'local_infile' system variable.
LocalInFile = "local_infile"
// PerformanceSchema is the name for 'performance_schema' system variable.
PerformanceSchema = "performance_schema"
// Flush is the name for 'flush' system variable.
Flush = "flush"
// SlaveAllowBatching is the name for 'slave_allow_batching' system variable.
SlaveAllowBatching = "slave_allow_batching"
// MyISAMUseMmap is the name for 'myisam_use_mmap' system variable.
MyISAMUseMmap = "myisam_use_mmap"
// InnodbFilePerTable is the name for 'innodb_file_per_table' system variable.
InnodbFilePerTable = "innodb_file_per_table"
// InnodbLogCompressedPages is the name for 'innodb_log_compressed_pages' system variable.
InnodbLogCompressedPages = "innodb_log_compressed_pages"
// InnodbPrintAllDeadlocks is the name for 'innodb_print_all_deadlocks' system variable.
InnodbPrintAllDeadlocks = "innodb_print_all_deadlocks"
// InnodbStrictMode is the name for 'innodb_strict_mode' system variable.
InnodbStrictMode = "innodb_strict_mode"
// InnodbCmpPerIndexEnabled is the name for 'innodb_cmp_per_index_enabled' system variable.
InnodbCmpPerIndexEnabled = "innodb_cmp_per_index_enabled"
// InnodbBufferPoolDumpAtShutdown is the name for 'innodb_buffer_pool_dump_at_shutdown' system variable.
InnodbBufferPoolDumpAtShutdown = "innodb_buffer_pool_dump_at_shutdown"
// InnodbAdaptiveHashIndex is the name for 'innodb_adaptive_hash_index' system variable.
InnodbAdaptiveHashIndex = "innodb_adaptive_hash_index"
// InnodbFtEnableStopword is the name for 'innodb_ft_enable_stopword' system variable.
InnodbFtEnableStopword = "innodb_ft_enable_stopword"
// InnodbSupportXA is the name for 'innodb_support_xa' system variable.
InnodbSupportXA = "innodb_support_xa"
// InnodbOptimizeFullTextOnly is the name for 'innodb_optimize_fulltext_only' system variable.
InnodbOptimizeFullTextOnly = "innodb_optimize_fulltext_only"
// InnodbStatusOutputLocks is the name for 'innodb_status_output_locks' system variable.
InnodbStatusOutputLocks = "innodb_status_output_locks"
// InnodbBufferPoolDumpNow is the name for 'innodb_buffer_pool_dump_now' system variable.
InnodbBufferPoolDumpNow = "innodb_buffer_pool_dump_now"
// InnodbBufferPoolLoadNow is the name for 'innodb_buffer_pool_load_now' system variable.
InnodbBufferPoolLoadNow = "innodb_buffer_pool_load_now"
// InnodbStatsOnMetadata is the name for 'innodb_stats_on_metadata' system variable.
InnodbStatsOnMetadata = "innodb_stats_on_metadata"
// InnodbDisableSortFileCache is the name for 'innodb_disable_sort_file_cache' system variable.
InnodbDisableSortFileCache = "innodb_disable_sort_file_cache"
// InnodbStatsAutoRecalc is the name for 'innodb_stats_auto_recalc' system variable.
InnodbStatsAutoRecalc = "innodb_stats_auto_recalc"
// InnodbBufferPoolLoadAbort is the name for 'innodb_buffer_pool_load_abort' system variable.
InnodbBufferPoolLoadAbort = "innodb_buffer_pool_load_abort"
// InnodbStatsPersistent is the name for 'innodb_stats_persistent' system variable.
InnodbStatsPersistent = "innodb_stats_persistent"
// InnodbRandomReadAhead is the name for 'innodb_random_read_ahead' system variable.
InnodbRandomReadAhead = "innodb_random_read_ahead"
// InnodbAdaptiveFlushing is the name for 'innodb_adaptive_flushing' system variable.
InnodbAdaptiveFlushing = "innodb_adaptive_flushing"
// InnodbTableLocks is the name for 'innodb_table_locks' system variable.
InnodbTableLocks = "innodb_table_locks"
// InnodbStatusOutput is the name for 'innodb_status_output' system variable.
InnodbStatusOutput = "innodb_status_output"
// NetBufferLength is the name for 'net_buffer_length' system variable.
NetBufferLength = "net_buffer_length"
// QueryCacheSize is the name of 'query_cache_size' system variable.
QueryCacheSize = "query_cache_size"
// TxReadOnly is the name of 'tx_read_only' system variable.
TxReadOnly = "tx_read_only"
// TransactionReadOnly is the name of 'transaction_read_only' system variable.
TransactionReadOnly = "transaction_read_only"
// CharacterSetServer is the name of 'character_set_server' system variable.
CharacterSetServer = "character_set_server"
// AutoIncrementIncrement is the name of 'auto_increment_increment' system variable.
AutoIncrementIncrement = "auto_increment_increment"
// AutoIncrementOffset is the name of 'auto_increment_offset' system variable.
AutoIncrementOffset = "auto_increment_offset"
// InitConnect is the name of 'init_connect' system variable.
InitConnect = "init_connect"
// CollationServer is the name of 'collation_server' variable.
CollationServer = "collation_server"
// NetWriteTimeout is the name of 'net_write_timeout' variable.
NetWriteTimeout = "net_write_timeout"
// ThreadPoolSize is the name of 'thread_pool_size' variable.
ThreadPoolSize = "thread_pool_size"
// WindowingUseHighPrecision is the name of 'windowing_use_high_precision' system variable.
WindowingUseHighPrecision = "windowing_use_high_precision"
// OptimizerSwitch is the name of 'optimizer_switch' system variable.
OptimizerSwitch = "optimizer_switch"
// SystemTimeZone is the name of 'system_time_zone' system variable.
SystemTimeZone = "system_time_zone"
)
// GlobalVarAccessor is the interface for accessing global scope system and status variables.
type GlobalVarAccessor interface {
// GetGlobalSysVar gets the global system variable value for name.
GetGlobalSysVar(name string) (string, error)
// SetGlobalSysVar sets the global system variable name to value.
SetGlobalSysVar(name string, value string) error
}
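// What follows is an illustrative sketch, not part of the original interface
// contract: a minimal in-memory GlobalVarAccessor that can serve as a test
// double. The map-backed storage and the error text are assumptions.
type mapGlobalAccessor struct {
	vars map[string]string
}

// GetGlobalSysVar returns the stored value for name, or an error if it is unset.
func (m *mapGlobalAccessor) GetGlobalSysVar(name string) (string, error) {
	if v, ok := m.vars[name]; ok {
		return v, nil
	}
	return "", errors.New("unknown system variable " + name)
}

// SetGlobalSysVar stores value under name unconditionally.
func (m *mapGlobalAccessor) SetGlobalSysVar(name string, value string) error {
	m.vars[name] = value
	return nil
}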
| sessionctx/variable/sysvar.go | 1 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.0310880858451128,
0.0009018514538183808,
0.00016031299310270697,
0.00017294763529207557,
0.003546547843143344
] |
{
"id": 1,
"code_window": [
"\t{Scope: ScopeGlobal | ScopeSession, Name: InnodbSupportXA, Value: \"1\"},\n",
"\t{Scope: ScopeGlobal, Name: \"innodb_compression_level\", Value: \"6\"},\n",
"\t{Scope: ScopeNone, Name: \"innodb_file_format_check\", Value: \"1\"},\n",
"\t{Scope: ScopeNone, Name: \"myisam_mmap_size\", Value: \"18446744073709551615\"},\n",
"\t{Scope: ScopeNone, Name: \"innodb_buffer_pool_instances\", Value: \"8\"},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: BlockEncryptionMode, Value: \"aes-128-ecb\"},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: \"max_length_for_sort_data\", Value: \"1024\", IsHintUpdatable: true},\n",
"\t{Scope: ScopeNone, Name: \"character_set_system\", Value: \"utf8\"},\n",
"\t{Scope: ScopeGlobal, Name: InnodbOptimizeFullTextOnly, Value: \"0\"},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/noop.go",
"type": "replace",
"edit_start_line_idx": 146
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package oracles
import (
"context"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/store/tikv/logutil"
"github.com/pingcap/tidb/store/tikv/metrics"
"github.com/pingcap/tidb/store/tikv/oracle"
pd "github.com/tikv/pd/client"
"go.uber.org/zap"
)
var _ oracle.Oracle = &pdOracle{}
const slowDist = 30 * time.Millisecond
// pdOracle is an Oracle that uses a placement driver client as source.
type pdOracle struct {
c pd.Client
// txn_scope (string) -> lastTSPointer (*uint64)
lastTSMap sync.Map
// txn_scope (string) -> lastArrivalTSPointer (*uint64)
lastArrivalTSMap sync.Map
quit chan struct{}
}
// NewPdOracle creates an Oracle that uses a PD client as its source.
// Refer to https://github.com/tikv/pd/blob/master/client/client.go for more details.
// pdOracle maintains `lastTS`, the last timestamp fetched from the PD server. If
// `GetTimestamp()` is not called within `updateInterval`, the oracle fetches a
// timestamp by itself to keep `lastTS` in step with the PD server.
func NewPdOracle(pdClient pd.Client, updateInterval time.Duration) (oracle.Oracle, error) {
o := &pdOracle{
c: pdClient,
quit: make(chan struct{}),
}
ctx := context.TODO()
go o.updateTS(ctx, updateInterval)
// Initialize the timestamp of the global txnScope by Get.
_, err := o.GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
if err != nil {
o.Close()
return nil, errors.Trace(err)
}
return o, nil
}
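// Hedged usage sketch — the PD endpoint, security options, and update interval
// below are assumptions for the example, not values taken from this file:
//
//	cli, err := pd.NewClient([]string{"127.0.0.1:2379"}, pd.SecurityOption{})
//	if err != nil {
//		// handle error
//	}
//	o, err := NewPdOracle(cli, 2*time.Second)
//	if err != nil {
//		// handle error
//	}
//	defer o.Close()
//	ts, err := o.GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})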
// IsExpired returns whether lockTS+TTL is expired; both are in milliseconds.
// It compares against the cached `lastTS`, so it may temporarily return a
// false negative.
func (o *pdOracle) IsExpired(lockTS, TTL uint64, opt *oracle.Option) bool {
lastTS, exist := o.getLastTS(opt.TxnScope)
if !exist {
return true
}
return oracle.ExtractPhysical(lastTS) >= oracle.ExtractPhysical(lockTS)+int64(TTL)
}
// GetTimestamp gets a new monotonically increasing timestamp.
func (o *pdOracle) GetTimestamp(ctx context.Context, opt *oracle.Option) (uint64, error) {
ts, err := o.getTimestamp(ctx, opt.TxnScope)
if err != nil {
return 0, errors.Trace(err)
}
o.setLastTS(ts, opt.TxnScope)
return ts, nil
}
type tsFuture struct {
pd.TSFuture
o *pdOracle
txnScope string
}
// Wait implements the oracle.Future interface.
func (f *tsFuture) Wait() (uint64, error) {
now := time.Now()
physical, logical, err := f.TSFuture.Wait()
metrics.TiKVTSFutureWaitDuration.Observe(time.Since(now).Seconds())
if err != nil {
return 0, errors.Trace(err)
}
ts := oracle.ComposeTS(physical, logical)
f.o.setLastTS(ts, f.txnScope)
return ts, nil
}
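// GetTimestampAsync sends the TSO request to PD immediately and returns a
// future; deferring Wait lets the RPC overlap with other work.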
func (o *pdOracle) GetTimestampAsync(ctx context.Context, opt *oracle.Option) oracle.Future {
var ts pd.TSFuture
if opt.TxnScope == oracle.GlobalTxnScope || opt.TxnScope == "" {
ts = o.c.GetTSAsync(ctx)
} else {
ts = o.c.GetLocalTSAsync(ctx, opt.TxnScope)
}
return &tsFuture{ts, o, opt.TxnScope}
}
func (o *pdOracle) getTimestamp(ctx context.Context, txnScope string) (uint64, error) {
now := time.Now()
var (
physical, logical int64
err error
)
if txnScope == oracle.GlobalTxnScope || txnScope == "" {
physical, logical, err = o.c.GetTS(ctx)
} else {
physical, logical, err = o.c.GetLocalTS(ctx, txnScope)
}
if err != nil {
return 0, errors.Trace(err)
}
dist := time.Since(now)
if dist > slowDist {
logutil.Logger(ctx).Warn("get timestamp too slow",
zap.Duration("cost time", dist))
}
return oracle.ComposeTS(physical, logical), nil
}
func (o *pdOracle) getArrivalTimestamp() uint64 {
return oracle.ComposeTS(oracle.GetPhysical(time.Now()), 0)
}
func (o *pdOracle) setLastTS(ts uint64, txnScope string) {
if txnScope == "" {
txnScope = oracle.GlobalTxnScope
}
lastTSInterface, ok := o.lastTSMap.Load(txnScope)
if !ok {
lastTSInterface, _ = o.lastTSMap.LoadOrStore(txnScope, new(uint64))
}
lastTSPointer := lastTSInterface.(*uint64)
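	// Advance lastTS monotonically: give up if ts is stale, otherwise retry the
	// CAS until it wins over concurrent writers.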
for {
lastTS := atomic.LoadUint64(lastTSPointer)
if ts <= lastTS {
return
}
if atomic.CompareAndSwapUint64(lastTSPointer, lastTS, ts) {
break
}
}
o.setLastArrivalTS(o.getArrivalTimestamp(), txnScope)
}
func (o *pdOracle) setLastArrivalTS(ts uint64, txnScope string) {
if txnScope == "" {
txnScope = oracle.GlobalTxnScope
}
lastTSInterface, ok := o.lastArrivalTSMap.Load(txnScope)
if !ok {
lastTSInterface, _ = o.lastArrivalTSMap.LoadOrStore(txnScope, new(uint64))
}
lastTSPointer := lastTSInterface.(*uint64)
for {
lastTS := atomic.LoadUint64(lastTSPointer)
if ts <= lastTS {
return
}
if atomic.CompareAndSwapUint64(lastTSPointer, lastTS, ts) {
return
}
}
}
func (o *pdOracle) getLastTS(txnScope string) (uint64, bool) {
if txnScope == "" {
txnScope = oracle.GlobalTxnScope
}
lastTSInterface, ok := o.lastTSMap.Load(txnScope)
if !ok {
return 0, false
}
return atomic.LoadUint64(lastTSInterface.(*uint64)), true
}
func (o *pdOracle) getLastArrivalTS(txnScope string) (uint64, bool) {
if txnScope == "" {
txnScope = oracle.GlobalTxnScope
}
lastArrivalTSInterface, ok := o.lastArrivalTSMap.Load(txnScope)
if !ok {
return 0, false
}
return atomic.LoadUint64(lastArrivalTSInterface.(*uint64)), true
}
func (o *pdOracle) updateTS(ctx context.Context, interval time.Duration) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
// Update the timestamp for each txnScope
o.lastTSMap.Range(func(key, _ interface{}) bool {
txnScope := key.(string)
ts, err := o.getTimestamp(ctx, txnScope)
if err != nil {
logutil.Logger(ctx).Error("updateTS error", zap.String("txnScope", txnScope), zap.Error(err))
return true
}
o.setLastTS(ts, txnScope)
return true
})
case <-o.quit:
return
}
}
}
// UntilExpired implements the oracle.Oracle interface.
func (o *pdOracle) UntilExpired(lockTS uint64, TTL uint64, opt *oracle.Option) int64 {
lastTS, ok := o.getLastTS(opt.TxnScope)
if !ok {
return 0
}
return oracle.ExtractPhysical(lockTS) + int64(TTL) - oracle.ExtractPhysical(lastTS)
}
func (o *pdOracle) Close() {
close(o.quit)
}
// A future that resolves immediately to a low resolution timestamp.
type lowResolutionTsFuture struct {
ts uint64
err error
}
// Wait implements the oracle.Future interface.
func (f lowResolutionTsFuture) Wait() (uint64, error) {
return f.ts, f.err
}
// GetLowResolutionTimestamp returns the last cached timestamp for the txnScope; the cache is refreshed periodically by updateTS.
func (o *pdOracle) GetLowResolutionTimestamp(ctx context.Context, opt *oracle.Option) (uint64, error) {
lastTS, ok := o.getLastTS(opt.TxnScope)
if !ok {
return 0, errors.Errorf("get low resolution timestamp fail, invalid txnScope = %s", opt.TxnScope)
}
return lastTS, nil
}
func (o *pdOracle) GetLowResolutionTimestampAsync(ctx context.Context, opt *oracle.Option) oracle.Future {
lastTS, ok := o.getLastTS(opt.TxnScope)
if !ok {
return lowResolutionTsFuture{
ts: 0,
err: errors.Errorf("get low resolution timestamp async fail, invalid txnScope = %s", opt.TxnScope),
}
}
return lowResolutionTsFuture{
ts: lastTS,
err: nil,
}
}
func (o *pdOracle) getStaleTimestamp(txnScope string, prevSecond uint64) (uint64, error) {
ts, ok := o.getLastTS(txnScope)
if !ok {
return 0, errors.Errorf("get stale timestamp fail, txnScope: %s", txnScope)
}
arrivalTS, ok := o.getLastArrivalTS(txnScope)
if !ok {
return 0, errors.Errorf("get stale arrival timestamp fail, txnScope: %s", txnScope)
}
arrivalTime := oracle.GetTimeFromTS(arrivalTS)
physicalTime := oracle.GetTimeFromTS(ts)
if uint64(physicalTime.Unix()) <= prevSecond {
return 0, errors.Errorf("invalid prevSecond %v", prevSecond)
}
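	// Shift the cached TS back by the gap between its arrival time and
	// (now - prevSecond), approximating the TSO of prevSecond seconds ago
	// without another round trip to PD.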
staleTime := physicalTime.Add(-arrivalTime.Sub(time.Now().Add(-time.Duration(prevSecond) * time.Second)))
return oracle.ComposeTS(oracle.GetPhysical(staleTime), 0), nil
}
// GetStaleTimestamp generates a TSO representing the TSO of prevSecond seconds ago.
func (o *pdOracle) GetStaleTimestamp(ctx context.Context, txnScope string, prevSecond uint64) (ts uint64, err error) {
ts, err = o.getStaleTimestamp(txnScope, prevSecond)
if err != nil {
if !strings.HasPrefix(err.Error(), "invalid prevSecond") {
			// For errors other than an invalid prevSecond, try to fetch a fresh TSO and cache it as the last ts.
_, tErr := o.GetTimestamp(ctx, &oracle.Option{TxnScope: txnScope})
if tErr != nil {
return 0, errors.Trace(tErr)
}
}
return 0, errors.Trace(err)
}
return ts, nil
}
| store/tikv/oracle/oracles/pd.go | 0 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.00017829901480581611,
0.00016931031132116914,
0.0001607196609256789,
0.00016923945804592222,
0.0000037469988001248566
] |
{
"id": 1,
"code_window": [
"\t{Scope: ScopeGlobal | ScopeSession, Name: InnodbSupportXA, Value: \"1\"},\n",
"\t{Scope: ScopeGlobal, Name: \"innodb_compression_level\", Value: \"6\"},\n",
"\t{Scope: ScopeNone, Name: \"innodb_file_format_check\", Value: \"1\"},\n",
"\t{Scope: ScopeNone, Name: \"myisam_mmap_size\", Value: \"18446744073709551615\"},\n",
"\t{Scope: ScopeNone, Name: \"innodb_buffer_pool_instances\", Value: \"8\"},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: BlockEncryptionMode, Value: \"aes-128-ecb\"},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: \"max_length_for_sort_data\", Value: \"1024\", IsHintUpdatable: true},\n",
"\t{Scope: ScopeNone, Name: \"character_set_system\", Value: \"utf8\"},\n",
"\t{Scope: ScopeGlobal, Name: InnodbOptimizeFullTextOnly, Value: \"0\"},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/noop.go",
"type": "replace",
"edit_start_line_idx": 146
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl_test
import (
"context"
"fmt"
"math/rand"
"os"
"sync/atomic"
"testing"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/parser"
"github.com/pingcap/parser/model"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/ddl/testutil"
ddlutil "github.com/pingcap/tidb/ddl/util"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/testkit"
"github.com/pingcap/tidb/util/testleak"
. "github.com/pingcap/tidb/util/testutil"
)
func TestT(t *testing.T) {
CustomVerboseFlag = true
logLevel := os.Getenv("log_level")
err := logutil.InitLogger(logutil.NewLogConfig(logLevel, "", "", logutil.EmptyFileLogConfig, false))
if err != nil {
t.Fatal(err)
}
config.UpdateGlobal(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.SafeWindow = 0
conf.TiKVClient.AsyncCommit.AllowedClockDrift = 0
})
testleak.BeforeTest()
TestingT(t)
testleak.AfterTestT(t)()
}
var _ = SerialSuites(&testFailDBSuite{})
type testFailDBSuite struct {
cluster cluster.Cluster
lease time.Duration
store kv.Storage
dom *domain.Domain
se session.Session
p *parser.Parser
CommonHandleSuite
}
func (s *testFailDBSuite) SetUpSuite(c *C) {
s.lease = 200 * time.Millisecond
ddl.SetWaitTimeWhenErrorOccurred(1 * time.Microsecond)
var err error
s.store, err = mockstore.NewMockStore(
mockstore.WithClusterInspector(func(c cluster.Cluster) {
mockstore.BootstrapWithSingleStore(c)
s.cluster = c
}),
)
c.Assert(err, IsNil)
session.SetSchemaLease(s.lease)
s.dom, err = session.BootstrapSession(s.store)
c.Assert(err, IsNil)
s.se, err = session.CreateSession4Test(s.store)
c.Assert(err, IsNil)
s.p = parser.New()
}
func (s *testFailDBSuite) TearDownSuite(c *C) {
_, err := s.se.Execute(context.Background(), "drop database if exists test_db_state")
c.Assert(err, IsNil)
s.se.Close()
s.dom.Close()
s.store.Close()
}
// TestHalfwayCancelOperations tests that the schema remains correct after operations are cancelled halfway through execution.
func (s *testFailDBSuite) TestHalfwayCancelOperations(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/truncateTableErr", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/truncateTableErr"), IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database cancel_job_db")
tk.MustExec("use cancel_job_db")
// test for truncating table
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(1)")
_, err := tk.Exec("truncate table t")
c.Assert(err, NotNil)
// Make sure that the table's data has not been deleted.
tk.MustQuery("select * from t").Check(testkit.Rows("1"))
	// Execute a DDL statement to reload the schema.
tk.MustExec("alter table t comment 'test1'")
err = s.dom.DDL().GetHook().OnChanged(nil)
c.Assert(err, IsNil)
tk = testkit.NewTestKit(c, s.store)
tk.MustExec("use cancel_job_db")
// Test schema is correct.
tk.MustExec("select * from t")
// test for renaming table
c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/renameTableErr", `return("ty")`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/renameTableErr"), IsNil)
}()
tk.MustExec("create table tx(a int)")
tk.MustExec("insert into tx values(1)")
_, err = tk.Exec("rename table tx to ty")
c.Assert(err, NotNil)
tk.MustExec("create table ty(a int)")
tk.MustExec("insert into ty values(2)")
_, err = tk.Exec("rename table ty to tz, tx to ty")
c.Assert(err, NotNil)
_, err = tk.Exec("select * from tz")
c.Assert(err, NotNil)
_, err = tk.Exec("rename table tx to ty, ty to tz")
c.Assert(err, NotNil)
tk.MustQuery("select * from ty").Check(testkit.Rows("2"))
// Make sure that the table's data has not been deleted.
tk.MustQuery("select * from tx").Check(testkit.Rows("1"))
	// Execute a DDL statement to reload the schema.
tk.MustExec("alter table tx comment 'tx'")
err = s.dom.DDL().GetHook().OnChanged(nil)
c.Assert(err, IsNil)
tk = testkit.NewTestKit(c, s.store)
tk.MustExec("use cancel_job_db")
tk.MustExec("select * from tx")
// test for exchanging partition
c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/exchangePartitionErr", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/exchangePartitionErr"), IsNil)
}()
tk.MustExec("create table pt(a int) partition by hash (a) partitions 2")
tk.MustExec("insert into pt values(1), (3), (5)")
tk.MustExec("create table nt(a int)")
tk.MustExec("insert into nt values(7)")
tk.MustExec("set @@tidb_enable_exchange_partition=1")
defer tk.MustExec("set @@tidb_enable_exchange_partition=0")
_, err = tk.Exec("alter table pt exchange partition p1 with table nt")
c.Assert(err, NotNil)
tk.MustQuery("select * from pt").Check(testkit.Rows("1", "3", "5"))
tk.MustQuery("select * from nt").Check(testkit.Rows("7"))
	// Execute a DDL statement to reload the schema.
tk.MustExec("alter table pt comment 'pt'")
err = s.dom.DDL().GetHook().OnChanged(nil)
c.Assert(err, IsNil)
tk = testkit.NewTestKit(c, s.store)
tk.MustExec("use cancel_job_db")
// Test schema is correct.
tk.MustExec("select * from pt")
// clean up
tk.MustExec("drop database cancel_job_db")
}
// TestInitializeOffsetAndState tests the case where the column's offset and state are not initialized
// in ddl_api.go when performing a 'modify column' operation.
func (s *testFailDBSuite) TestInitializeOffsetAndState(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t(a int, b int, c int)")
defer tk.MustExec("drop table t")
c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/uninitializedOffsetAndState", `return(true)`), IsNil)
tk.MustExec("ALTER TABLE t MODIFY COLUMN b int FIRST;")
c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/uninitializedOffsetAndState"), IsNil)
}
func (s *testFailDBSuite) TestUpdateHandleFailed(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/errorUpdateReorgHandle", `1*return`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/errorUpdateReorgHandle"), IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_handle_failed")
defer tk.MustExec("drop database test_handle_failed")
tk.MustExec("use test_handle_failed")
tk.MustExec("create table t(a int primary key, b int)")
tk.MustExec("insert into t values(-1, 1)")
tk.MustExec("alter table t add index idx_b(b)")
result := tk.MustQuery("select count(*) from t use index(idx_b)")
result.Check(testkit.Rows("1"))
tk.MustExec("admin check index t idx_b")
}
func (s *testFailDBSuite) TestAddIndexFailed(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/mockBackfillRunErr", `1*return`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/mockBackfillRunErr"), IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_add_index_failed")
defer tk.MustExec("drop database test_add_index_failed")
tk.MustExec("use test_add_index_failed")
tk.MustExec("create table t(a bigint PRIMARY KEY, b int)")
for i := 0; i < 1000; i++ {
tk.MustExec(fmt.Sprintf("insert into t values(%v, %v)", i, i))
}
// Get table ID for split.
dom := domain.GetDomain(tk.Se)
is := dom.InfoSchema()
tbl, err := is.TableByName(model.NewCIStr("test_add_index_failed"), model.NewCIStr("t"))
c.Assert(err, IsNil)
tblID := tbl.Meta().ID
// Split the table.
s.cluster.SplitTable(tblID, 100)
tk.MustExec("alter table t add index idx_b(b)")
tk.MustExec("admin check index t idx_b")
tk.MustExec("admin check table t")
}
// TestFailSchemaSyncer tests that once the schema syncer stops, DML execution
// is prohibited until the syncer is restarted by loadSchemaInLoop.
func (s *testFailDBSuite) TestFailSchemaSyncer(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
defer tk.MustExec("drop table if exists t")
originalRetryTimes := domain.SchemaOutOfDateRetryTimes
domain.SchemaOutOfDateRetryTimes = 1
defer func() {
domain.SchemaOutOfDateRetryTimes = originalRetryTimes
}()
c.Assert(s.dom.SchemaValidator.IsStarted(), IsTrue)
mockSyncer, ok := s.dom.DDL().SchemaSyncer().(*ddl.MockSchemaSyncer)
c.Assert(ok, IsTrue)
	// Make reload fail.
c.Assert(failpoint.Enable("github.com/pingcap/tidb/domain/ErrorMockReloadFailed", `return(true)`), IsNil)
mockSyncer.CloseSession()
	// Wait until the schemaValidator is stopped.
for i := 0; i < 50; i++ {
if !s.dom.SchemaValidator.IsStarted() {
break
}
time.Sleep(20 * time.Millisecond)
}
c.Assert(s.dom.SchemaValidator.IsStarted(), IsFalse)
_, err := tk.Exec("insert into t values(1)")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[domain:8027]Information schema is out of date: schema failed to update in 1 lease, please make sure TiDB can connect to TiKV")
c.Assert(failpoint.Disable("github.com/pingcap/tidb/domain/ErrorMockReloadFailed"), IsNil)
	// Wait until the schemaValidator is started.
for i := 0; i < 50; i++ {
if s.dom.SchemaValidator.IsStarted() {
break
}
time.Sleep(100 * time.Millisecond)
}
c.Assert(s.dom.SchemaValidator.IsStarted(), IsTrue)
_, err = tk.Exec("insert into t values(1)")
c.Assert(err, IsNil)
}
func (s *testFailDBSuite) TestGenGlobalIDFail(c *C) {
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/mockGenGlobalIDFail"), IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists gen_global_id_fail")
tk.MustExec("use gen_global_id_fail")
sql1 := "create table t1(a bigint PRIMARY KEY, b int)"
sql2 := `create table t2(a bigint PRIMARY KEY, b int) partition by range (a) (
partition p0 values less than (3440),
partition p1 values less than (61440),
partition p2 values less than (122880),
partition p3 values less than maxvalue)`
sql3 := `truncate table t1`
sql4 := `truncate table t2`
testcases := []struct {
sql string
table string
mockErr bool
}{
{sql1, "t1", true},
{sql2, "t2", true},
{sql1, "t1", false},
{sql2, "t2", false},
{sql3, "t1", true},
{sql4, "t2", true},
{sql3, "t1", false},
{sql4, "t2", false},
}
for idx, test := range testcases {
if test.mockErr {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/mockGenGlobalIDFail", `return(true)`), IsNil)
_, err := tk.Exec(test.sql)
			c.Assert(err, NotNil, Commentf("the %dth test case '%s' failed", idx, test.sql))
} else {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/mockGenGlobalIDFail", `return(false)`), IsNil)
tk.MustExec(test.sql)
tk.MustExec(fmt.Sprintf("insert into %s values (%d, 42)", test.table, rand.Intn(65536)))
tk.MustExec(fmt.Sprintf("admin check table %s", test.table))
}
}
tk.MustExec("admin check table t1")
tk.MustExec("admin check table t2")
}
func batchInsert(tk *testkit.TestKit, tbl string, start, end int) {
dml := fmt.Sprintf("insert into %s values", tbl)
for i := start; i < end; i++ {
dml += fmt.Sprintf("(%d, %d, %d)", i, i, i)
if i != end-1 {
dml += ","
}
}
tk.MustExec(dml)
}
func (s *testFailDBSuite) TestAddIndexWorkerNum(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_db")
tk.MustExec("use test_db")
tk.MustExec("drop table if exists test_add_index")
if s.IsCommonHandle {
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("create table test_add_index (c1 bigint, c2 bigint, c3 bigint, primary key(c1, c3))")
} else {
tk.MustExec("create table test_add_index (c1 bigint, c2 bigint, c3 bigint, primary key(c1))")
}
done := make(chan error, 1)
start := -10
// first add some rows
for i := start; i < 4090; i += 100 {
batchInsert(tk, "test_add_index", i, i+100)
}
is := s.dom.InfoSchema()
schemaName := model.NewCIStr("test_db")
tableName := model.NewCIStr("test_add_index")
tbl, err := is.TableByName(schemaName, tableName)
c.Assert(err, IsNil)
splitCount := 100
// Split table to multi region.
s.cluster.SplitTable(tbl.Meta().ID, splitCount)
err = ddlutil.LoadDDLReorgVars(tk.Se)
c.Assert(err, IsNil)
originDDLAddIndexWorkerCnt := variable.GetDDLReorgWorkerCounter()
lastSetWorkerCnt := originDDLAddIndexWorkerCnt
	// Publish the worker count atomically; it is read concurrently by the backfill workers.
	atomic.StoreInt32(&ddl.TestCheckWorkerNumber, lastSetWorkerCnt)
defer tk.MustExec(fmt.Sprintf("set @@global.tidb_ddl_reorg_worker_cnt=%d", originDDLAddIndexWorkerCnt))
if !s.IsCommonHandle { // only enable failpoint once
c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/checkBackfillWorkerNum", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/checkBackfillWorkerNum"), IsNil)
}()
}
testutil.SessionExecInGoroutine(c, s.store, "create index c3_index on test_add_index (c3)", done)
checkNum := 0
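	// Each time the checkBackfillWorkerNum failpoint signals on
	// TestCheckWorkerNumCh, pick a new random worker count and publish it
	// for the backfill workers to verify.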
LOOP:
for {
select {
case err = <-done:
if err == nil {
break LOOP
}
c.Assert(err, IsNil, Commentf("err:%v", errors.ErrorStack(err)))
case <-ddl.TestCheckWorkerNumCh:
lastSetWorkerCnt = int32(rand.Intn(8) + 8)
tk.MustExec(fmt.Sprintf("set @@global.tidb_ddl_reorg_worker_cnt=%d", lastSetWorkerCnt))
atomic.StoreInt32(&ddl.TestCheckWorkerNumber, lastSetWorkerCnt)
checkNum++
}
}
c.Assert(checkNum, Greater, 5)
tk.MustExec("admin check table test_add_index")
tk.MustExec("drop table test_add_index")
s.RerunWithCommonHandleEnabled(c, s.TestAddIndexWorkerNum)
}
// TestRunDDLJobPanic tests that the panic is recovered when running a DDL job panics.
func (s *testFailDBSuite) TestRunDDLJobPanic(c *C) {
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/mockPanicInRunDDLJob"), IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/mockPanicInRunDDLJob", `1*panic("panic test")`), IsNil)
_, err := tk.Exec("create table t(c1 int, c2 int)")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[ddl:8214]Cancelled DDL job")
}
func (s *testFailDBSuite) TestPartitionAddIndexGC(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`create table partition_add_idx (
id int not null,
hired date not null
)
partition by range( year(hired) ) (
partition p1 values less than (1991),
partition p5 values less than (2008),
partition p7 values less than (2018)
);`)
tk.MustExec("insert into partition_add_idx values(1, '2010-01-01'), (2, '1990-01-01'), (3, '2001-01-01')")
c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/mockUpdateCachedSafePoint", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/mockUpdateCachedSafePoint"), IsNil)
}()
tk.MustExec("alter table partition_add_idx add index idx (id, hired)")
}
func (s *testFailDBSuite) TestModifyColumn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t;")
enableChangeColumnType := tk.Se.GetSessionVars().EnableChangeColumnType
tk.Se.GetSessionVars().EnableChangeColumnType = true
defer func() {
tk.Se.GetSessionVars().EnableChangeColumnType = enableChangeColumnType
}()
tk.MustExec("create table t (a int not null default 1, b int default 2, c int not null default 0, primary key(c), index idx(b), index idx1(a), index idx2(b, c))")
tk.MustExec("insert into t values(1, 2, 3), (11, 22, 33)")
_, err := tk.Exec("alter table t change column c cc mediumint")
c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported modify column: tidb_enable_change_column_type is true and this column has primary key flag")
tk.MustExec("alter table t change column b bb mediumint first")
dom := domain.GetDomain(tk.Se)
is := dom.InfoSchema()
tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
cols := tbl.Meta().Columns
colsStr := ""
idxsStr := ""
for _, col := range cols {
colsStr += col.Name.L + " "
}
for _, idx := range tbl.Meta().Indices {
idxsStr += idx.Name.L + " "
}
c.Assert(len(cols), Equals, 3)
c.Assert(len(tbl.Meta().Indices), Equals, 3)
tk.MustQuery("select * from t").Check(testkit.Rows("2 1 3", "22 11 33"))
tk.MustQuery("show create table t").Check(testkit.Rows("t CREATE TABLE `t` (\n" +
" `bb` mediumint(9) DEFAULT NULL,\n" +
" `a` int(11) NOT NULL DEFAULT '1',\n" +
" `c` int(11) NOT NULL DEFAULT '0',\n" +
" PRIMARY KEY (`c`) /*T![clustered_index] CLUSTERED */,\n" +
" KEY `idx` (`bb`),\n" +
" KEY `idx1` (`a`),\n" +
" KEY `idx2` (`bb`,`c`)\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("admin check table t")
tk.MustExec("insert into t values(111, 222, 333)")
_, err = tk.Exec("alter table t change column a aa tinyint after c")
c.Assert(err.Error(), Equals, "[types:1690]constant 222 overflows tinyint")
tk.MustExec("alter table t change column a aa mediumint after c")
tk.MustQuery("show create table t").Check(testkit.Rows("t CREATE TABLE `t` (\n" +
" `bb` mediumint(9) DEFAULT NULL,\n" +
" `c` int(11) NOT NULL DEFAULT '0',\n" +
" `aa` mediumint(9) DEFAULT NULL,\n" +
" PRIMARY KEY (`c`) /*T![clustered_index] CLUSTERED */,\n" +
" KEY `idx` (`bb`),\n" +
" KEY `idx1` (`aa`),\n" +
" KEY `idx2` (`bb`,`c`)\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustQuery("select * from t").Check(testkit.Rows("2 3 1", "22 33 11", "111 333 222"))
tk.MustExec("admin check table t")
	// Test unsupported statements.
tk.MustExec("create table t1(a int) partition by hash (a) partitions 2")
_, err = tk.Exec("alter table t1 modify column a mediumint")
c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported modify column: tidb_enable_change_column_type is true, table is partition table")
tk.MustExec("create table t2(id int, a int, b int generated always as (abs(a)) virtual, c int generated always as (a+1) stored)")
_, err = tk.Exec("alter table t2 modify column b mediumint")
c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported modify column: tidb_enable_change_column_type is true, newCol IsGenerated false, oldCol IsGenerated true")
_, err = tk.Exec("alter table t2 modify column c mediumint")
c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported modify column: tidb_enable_change_column_type is true, newCol IsGenerated false, oldCol IsGenerated true")
_, err = tk.Exec("alter table t2 modify column a mediumint generated always as(id+1) stored")
c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported modify column: tidb_enable_change_column_type is true, newCol IsGenerated true, oldCol IsGenerated false")
// Test multiple rows of data.
tk.MustExec("create table t3(a int not null default 1, b int default 2, c int not null default 0, primary key(c), index idx(b), index idx1(a), index idx2(b, c))")
// Add some discrete rows.
maxBatch := 20
batchCnt := 100
// Make sure there are no duplicate keys.
defaultBatchSize := variable.DefTiDBDDLReorgBatchSize * variable.DefTiDBDDLReorgWorkerCount
base := defaultBatchSize * 20
for i := 1; i < batchCnt; i++ {
n := base + i*defaultBatchSize + i
for j := 0; j < rand.Intn(maxBatch); j++ {
n += j
sql := fmt.Sprintf("insert into t3 values (%d, %d, %d)", n, n, n)
tk.MustExec(sql)
}
}
tk.MustExec("alter table t3 modify column a mediumint")
tk.MustExec("admin check table t")
// Test PointGet.
tk.MustExec("create table t4(a bigint, b int, unique index idx(a));")
tk.MustExec("insert into t4 values (1,1),(2,2),(3,3),(4,4),(5,5);")
tk.MustExec("alter table t4 modify a bigint unsigned;")
tk.MustQuery("select * from t4 where a=1;").Check(testkit.Rows("1 1"))
// Test changing null to not null.
tk.MustExec("create table t5(a bigint, b int, unique index idx(a));")
tk.MustExec("insert into t5 values (1,1),(2,2),(3,3),(4,4),(5,5);")
tk.MustExec("alter table t5 modify a int not null;")
tk.MustExec("drop table t, t1, t2, t3, t4, t5")
}
func (s *testFailDBSuite) TestPartitionAddPanic(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test;`)
tk.MustExec(`drop table if exists t;`)
tk.MustExec(`create table t (a int) partition by range(a) (partition p0 values less than (10));`)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/CheckPartitionByRangeErr", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/CheckPartitionByRangeErr"), IsNil)
}()
_, err := tk.Exec(`alter table t add partition (partition p1 values less than (20));`)
c.Assert(err, NotNil)
result := tk.MustQuery("show create table t").Rows()[0][1]
c.Assert(result, Matches, `(?s).*PARTITION .p0. VALUES LESS THAN \(10\).*`)
c.Assert(result, Not(Matches), `(?s).*PARTITION .p0. VALUES LESS THAN \(20\).*`)
}
| ddl/failtest/fail_db_test.go | 0 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.00017853325698524714,
0.00017155593377538025,
0.00016339427384082228,
0.0001722165907267481,
0.000003010232376254862
] |
{
"id": 1,
"code_window": [
"\t{Scope: ScopeGlobal | ScopeSession, Name: InnodbSupportXA, Value: \"1\"},\n",
"\t{Scope: ScopeGlobal, Name: \"innodb_compression_level\", Value: \"6\"},\n",
"\t{Scope: ScopeNone, Name: \"innodb_file_format_check\", Value: \"1\"},\n",
"\t{Scope: ScopeNone, Name: \"myisam_mmap_size\", Value: \"18446744073709551615\"},\n",
"\t{Scope: ScopeNone, Name: \"innodb_buffer_pool_instances\", Value: \"8\"},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: BlockEncryptionMode, Value: \"aes-128-ecb\"},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: \"max_length_for_sort_data\", Value: \"1024\", IsHintUpdatable: true},\n",
"\t{Scope: ScopeNone, Name: \"character_set_system\", Value: \"utf8\"},\n",
"\t{Scope: ScopeGlobal, Name: InnodbOptimizeFullTextOnly, Value: \"0\"},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/noop.go",
"type": "replace",
"edit_start_line_idx": 146
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package oracles
import (
"sync/atomic"
"time"
"github.com/pingcap/tidb/store/tikv/oracle"
)
// SetOracleHookCurrentTime exports localOracle's time hook to test.
func SetOracleHookCurrentTime(oc oracle.Oracle, t time.Time) {
switch o := oc.(type) {
case *localOracle:
if o.hook == nil {
o.hook = &struct {
currentTime time.Time
}{}
}
o.hook.currentTime = t
}
}
// ClearOracleHook exports localOracle's clear hook method
func ClearOracleHook(oc oracle.Oracle) {
switch o := oc.(type) {
case *localOracle:
o.hook = nil
}
}
// NewEmptyPDOracle exports pdOracle struct to test
func NewEmptyPDOracle() oracle.Oracle {
return &pdOracle{}
}
// SetEmptyPDOracleLastTs exports PD oracle's global last ts to test.
func SetEmptyPDOracleLastTs(oc oracle.Oracle, ts uint64) {
switch o := oc.(type) {
case *pdOracle:
lastTSInterface, _ := o.lastTSMap.LoadOrStore(oracle.GlobalTxnScope, new(uint64))
lastTSPointer := lastTSInterface.(*uint64)
atomic.StoreUint64(lastTSPointer, ts)
lasTSArrivalInterface, _ := o.lastArrivalTSMap.LoadOrStore(oracle.GlobalTxnScope, new(uint64))
lasTSArrivalPointer := lasTSArrivalInterface.(*uint64)
atomic.StoreUint64(lasTSArrivalPointer, uint64(time.Now().Unix()*1000))
}
setEmptyPDOracleLastArrivalTs(oc, ts)
}
// setEmptyPDOracleLastArrivalTs exports PD oracle's global last ts to test.
func setEmptyPDOracleLastArrivalTs(oc oracle.Oracle, ts uint64) {
switch o := oc.(type) {
case *pdOracle:
o.setLastArrivalTS(ts, oracle.GlobalTxnScope)
}
}
| store/tikv/oracle/oracles/export_test.go | 0 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.00017820621724240482,
0.00017169911006931216,
0.00016787974163889885,
0.00017103513528127223,
0.000003442057050051517
] |
{
"id": 2,
"code_window": [
"\t{Scope: ScopeGlobal, Name: \"log_timestamps\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"version_compile_machine\", Value: \"x86_64\"},\n",
"\t{Scope: ScopeGlobal, Name: \"event_scheduler\", Value: BoolOff},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: \"ndb_deferred_constraints\", Value: \"\"},\n",
"\t{Scope: ScopeGlobal, Name: \"log_syslog_include_pid\", Value: \"\"},\n",
"\t{Scope: ScopeSession, Name: \"last_insert_id\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"innodb_ft_cache_size\", Value: \"8000000\"},\n",
"\t{Scope: ScopeGlobal, Name: InnodbDisableSortFileCache, Value: \"0\"},\n",
"\t{Scope: ScopeGlobal, Name: \"log_error_verbosity\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"performance_schema_hosts_size\", Value: \"100\"},\n",
"\t{Scope: ScopeGlobal, Name: \"innodb_replication_delay\", Value: \"0\"},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/noop.go",
"type": "replace",
"edit_start_line_idx": 257
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package variable
import (
"math"
)
// The following sysVars are noops.
// Some applications will depend on certain variables to be present or settable,
// for example query_cache_time. These are included for MySQL compatibility,
// but changing them has no effect on behavior.
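// For example, "SET GLOBAL query_cache_type = ON" succeeds so legacy clients
// and ORMs keep working, but TiDB never consults the stored value.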
var noopSysVars = []*SysVar{
{Scope: ScopeGlobal, Name: ConnectTimeout, Value: "10", Type: TypeUnsigned, MinValue: 2, MaxValue: secondsPerYear, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: QueryCacheWlockInvalidate, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "sql_buffer_result", Value: BoolOff, IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: MyISAMUseMmap, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: "gtid_mode", Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: FlushTime, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: secondsPerYear, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "performance_schema_max_mutex_classes", Value: "200"},
{Scope: ScopeGlobal | ScopeSession, Name: LowPriorityUpdates, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: SessionTrackGtids, Value: BoolOff, Type: TypeEnum, PossibleValues: []string{BoolOff, "OWN_GTID", "ALL_GTIDS"}},
{Scope: ScopeGlobal | ScopeSession, Name: "ndbinfo_max_rows", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_index_stat_option", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: OldPasswords, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 2, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "innodb_version", Value: "5.6.25"},
{Scope: ScopeGlobal | ScopeSession, Name: BigTables, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "skip_external_locking", Value: "1"},
{Scope: ScopeNone, Name: "innodb_sync_array_size", Value: "1"},
{Scope: ScopeSession, Name: "rand_seed2", Value: ""},
{Scope: ScopeGlobal, Name: ValidatePasswordCheckUserName, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: ValidatePasswordNumberCount, Value: "1", Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true},
{Scope: ScopeSession, Name: "gtid_next", Value: ""},
{Scope: ScopeGlobal, Name: "ndb_show_foreign_key_mock_tables", Value: ""},
{Scope: ScopeNone, Name: "multi_range_count", Value: "256"},
{Scope: ScopeGlobal | ScopeSession, Name: "binlog_error_action", Value: "IGNORE_ERROR"},
{Scope: ScopeGlobal | ScopeSession, Name: "default_storage_engine", Value: "InnoDB"},
{Scope: ScopeNone, Name: "ft_query_expansion_limit", Value: "20"},
{Scope: ScopeGlobal, Name: MaxConnectErrors, Value: "100", Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal, Name: SyncBinlog, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 4294967295, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "max_digest_length", Value: "1024"},
{Scope: ScopeNone, Name: "innodb_force_load_corrupted", Value: "0"},
{Scope: ScopeNone, Name: "performance_schema_max_table_handles", Value: "4000"},
{Scope: ScopeGlobal, Name: InnodbFastShutdown, Value: "1", Type: TypeUnsigned, MinValue: 0, MaxValue: 2, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "ft_max_word_len", Value: "84"},
{Scope: ScopeGlobal, Name: "log_backward_compatible_user_definitions", Value: ""},
{Scope: ScopeNone, Name: "lc_messages_dir", Value: "/usr/local/mysql-5.6.25-osx10.8-x86_64/share/"},
{Scope: ScopeGlobal, Name: "ft_boolean_syntax", Value: "+ -><()~*:\"\"&|"},
{Scope: ScopeGlobal, Name: TableDefinitionCache, Value: "-1", Type: TypeUnsigned, MinValue: 400, MaxValue: 524288, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: SkipNameResolve, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "performance_schema_max_file_handles", Value: "32768"},
{Scope: ScopeSession, Name: "transaction_allow_batching", Value: ""},
{Scope: ScopeNone, Name: "performance_schema_max_statement_classes", Value: "168"},
{Scope: ScopeGlobal, Name: "server_id", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_flushing_avg_loops", Value: "30"},
{Scope: ScopeGlobal | ScopeSession, Name: TmpTableSize, Value: "16777216", Type: TypeUnsigned, MinValue: 1024, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true, IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "innodb_max_purge_lag", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: "preload_buffer_size", Value: "32768"},
{Scope: ScopeGlobal, Name: CheckProxyUsers, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "have_query_cache", Value: "YES"},
{Scope: ScopeGlobal, Name: "innodb_flush_log_at_timeout", Value: "1"},
{Scope: ScopeGlobal, Name: "innodb_max_undo_log_size", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "range_alloc_block_size", Value: "4096", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "have_rtree_keys", Value: "YES"},
{Scope: ScopeGlobal, Name: "innodb_old_blocks_pct", Value: "37"},
{Scope: ScopeGlobal, Name: "innodb_file_format", Value: "Barracuda", Type: TypeEnum, PossibleValues: []string{"Antelope", "Barracuda"}},
{Scope: ScopeGlobal, Name: "innodb_default_row_format", Value: "dynamic", Type: TypeEnum, PossibleValues: []string{"redundant", "compact", "dynamic"}},
{Scope: ScopeGlobal, Name: "innodb_compression_failure_threshold_pct", Value: "5"},
{Scope: ScopeNone, Name: "performance_schema_events_waits_history_long_size", Value: "10000"},
{Scope: ScopeGlobal, Name: "innodb_checksum_algorithm", Value: "innodb"},
{Scope: ScopeNone, Name: "innodb_ft_sort_pll_degree", Value: "2"},
{Scope: ScopeNone, Name: "thread_stack", Value: "262144"},
{Scope: ScopeGlobal, Name: "relay_log_info_repository", Value: "FILE"},
{Scope: ScopeGlobal, Name: SuperReadOnly, Value: "0", Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "max_delayed_threads", Value: "20"},
{Scope: ScopeNone, Name: "protocol_version", Value: "10"},
{Scope: ScopeGlobal | ScopeSession, Name: "new", Value: BoolOff},
{Scope: ScopeGlobal | ScopeSession, Name: "myisam_sort_buffer_size", Value: "8388608"},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace_offset", Value: "-1"},
{Scope: ScopeGlobal, Name: InnodbBufferPoolDumpAtShutdown, Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: SQLNotes, Value: "1"},
{Scope: ScopeGlobal, Name: InnodbCmpPerIndexEnabled, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: "innodb_ft_server_stopword_table", Value: ""},
{Scope: ScopeNone, Name: "performance_schema_max_file_instances", Value: "7693"},
{Scope: ScopeNone, Name: "log_output", Value: "FILE"},
{Scope: ScopeGlobal, Name: "binlog_group_commit_sync_delay", Value: ""},
{Scope: ScopeGlobal, Name: "binlog_group_commit_sync_no_delay_count", Value: ""},
{Scope: ScopeNone, Name: "have_crypt", Value: "YES"},
{Scope: ScopeGlobal, Name: "innodb_log_write_ahead_size", Value: ""},
{Scope: ScopeNone, Name: "innodb_log_group_home_dir", Value: "./"},
{Scope: ScopeNone, Name: "performance_schema_events_statements_history_size", Value: "10"},
{Scope: ScopeGlobal, Name: GeneralLog, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "validate_password_dictionary_file", Value: ""},
{Scope: ScopeGlobal, Name: BinlogOrderCommits, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal, Name: "key_cache_division_limit", Value: "100"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_insert_delayed_threads", Value: "20"},
{Scope: ScopeNone, Name: "performance_schema_session_connect_attrs_size", Value: "512"},
{Scope: ScopeGlobal, Name: "innodb_max_dirty_pages_pct", Value: "75"},
{Scope: ScopeGlobal, Name: InnodbFilePerTable, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: InnodbLogCompressedPages, Value: "1"},
{Scope: ScopeNone, Name: "skip_networking", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_monitor_reset", Value: ""},
{Scope: ScopeNone, Name: "have_ssl", Value: "DISABLED"},
{Scope: ScopeNone, Name: "have_openssl", Value: "DISABLED"},
{Scope: ScopeNone, Name: "ssl_ca", Value: ""},
{Scope: ScopeNone, Name: "ssl_cert", Value: ""},
{Scope: ScopeNone, Name: "ssl_key", Value: ""},
{Scope: ScopeNone, Name: "ssl_cipher", Value: ""},
{Scope: ScopeNone, Name: "tls_version", Value: "TLSv1,TLSv1.1,TLSv1.2"},
{Scope: ScopeGlobal, Name: InnodbPrintAllDeadlocks, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeNone, Name: "innodb_autoinc_lock_mode", Value: "1"},
{Scope: ScopeGlobal, Name: "key_buffer_size", Value: "8388608"},
{Scope: ScopeGlobal, Name: "host_cache_size", Value: "279"},
{Scope: ScopeGlobal, Name: DelayKeyWrite, Value: BoolOn, Type: TypeEnum, PossibleValues: []string{BoolOff, BoolOn, "ALL"}},
{Scope: ScopeNone, Name: "metadata_locks_cache_size", Value: "1024"},
{Scope: ScopeNone, Name: "innodb_force_recovery", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_file_format_max", Value: "Antelope"},
{Scope: ScopeGlobal | ScopeSession, Name: "debug", Value: ""},
{Scope: ScopeGlobal, Name: "log_warnings", Value: "1"},
{Scope: ScopeGlobal, Name: OfflineMode, Value: "0", Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: InnodbStrictMode, Value: "1", Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: "innodb_rollback_segments", Value: "128"},
{Scope: ScopeGlobal | ScopeSession, Name: "join_buffer_size", Value: "262144", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "innodb_mirrored_log_groups", Value: "1"},
{Scope: ScopeGlobal, Name: "max_binlog_size", Value: "1073741824"},
{Scope: ScopeGlobal, Name: "concurrent_insert", Value: "AUTO"},
{Scope: ScopeGlobal, Name: InnodbAdaptiveHashIndex, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: InnodbFtEnableStopword, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: "general_log_file", Value: "/usr/local/mysql/data/localhost.log"},
{Scope: ScopeGlobal | ScopeSession, Name: InnodbSupportXA, Value: "1"},
{Scope: ScopeGlobal, Name: "innodb_compression_level", Value: "6"},
{Scope: ScopeNone, Name: "innodb_file_format_check", Value: "1"},
{Scope: ScopeNone, Name: "myisam_mmap_size", Value: "18446744073709551615"},
{Scope: ScopeNone, Name: "innodb_buffer_pool_instances", Value: "8"},
{Scope: ScopeGlobal | ScopeSession, Name: BlockEncryptionMode, Value: "aes-128-ecb"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_length_for_sort_data", Value: "1024", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "character_set_system", Value: "utf8"},
{Scope: ScopeGlobal, Name: InnodbOptimizeFullTextOnly, Value: "0"},
{Scope: ScopeNone, Name: "character_sets_dir", Value: "/usr/local/mysql-5.6.25-osx10.8-x86_64/share/charsets/"},
{Scope: ScopeGlobal | ScopeSession, Name: QueryCacheType, Value: BoolOff, Type: TypeEnum, PossibleValues: []string{BoolOff, BoolOn, "DEMAND"}},
{Scope: ScopeNone, Name: "innodb_rollback_on_timeout", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: "query_alloc_block_size", Value: "8192"},
{Scope: ScopeGlobal | ScopeSession, Name: InitConnect, Value: ""},
{Scope: ScopeNone, Name: "have_compress", Value: "YES"},
{Scope: ScopeNone, Name: "thread_concurrency", Value: "10"},
{Scope: ScopeGlobal | ScopeSession, Name: "query_prealloc_size", Value: "8192"},
{Scope: ScopeNone, Name: "relay_log_space_limit", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: MaxUserConnections, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 4294967295, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "performance_schema_max_thread_classes", Value: "50"},
{Scope: ScopeGlobal, Name: "innodb_api_trx_level", Value: "0"},
{Scope: ScopeNone, Name: "disconnect_on_expired_password", Value: "1"},
{Scope: ScopeNone, Name: "performance_schema_max_file_classes", Value: "50"},
{Scope: ScopeGlobal, Name: "expire_logs_days", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: BinlogRowQueryLogEvents, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "default_password_lifetime", Value: ""},
{Scope: ScopeNone, Name: "pid_file", Value: "/usr/local/mysql/data/localhost.pid"},
{Scope: ScopeNone, Name: "innodb_undo_tablespaces", Value: "0"},
{Scope: ScopeGlobal, Name: InnodbStatusOutputLocks, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeNone, Name: "performance_schema_accounts_size", Value: "100"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_error_count", Value: "64", IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "max_write_lock_count", Value: "18446744073709551615"},
{Scope: ScopeNone, Name: "performance_schema_max_socket_instances", Value: "322"},
{Scope: ScopeNone, Name: "performance_schema_max_table_instances", Value: "12500"},
{Scope: ScopeGlobal, Name: "innodb_stats_persistent_sample_pages", Value: "20"},
{Scope: ScopeGlobal, Name: "show_compatibility_56", Value: ""},
{Scope: ScopeNone, Name: "innodb_open_files", Value: "2000"},
{Scope: ScopeGlobal, Name: "innodb_spin_wait_delay", Value: "6"},
{Scope: ScopeGlobal, Name: "thread_cache_size", Value: "9"},
{Scope: ScopeGlobal, Name: LogSlowAdminStatements, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "innodb_checksums", Type: TypeBool, Value: BoolOn},
{Scope: ScopeNone, Name: "ft_stopword_file", Value: "(built-in)"},
{Scope: ScopeGlobal, Name: "innodb_max_dirty_pages_pct_lwm", Value: "0"},
{Scope: ScopeGlobal, Name: LogQueriesNotUsingIndexes, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "max_heap_table_size", Value: "16777216", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: "div_precision_increment", Value: "4", IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "innodb_lru_scan_depth", Value: "1024"},
{Scope: ScopeGlobal, Name: "innodb_purge_rseg_truncate_frequency", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: SQLAutoIsNull, Value: BoolOff, Type: TypeBool, IsHintUpdatable: true},
{Scope: ScopeNone, Name: "innodb_api_enable_binlog", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: "innodb_ft_user_stopword_table", Value: ""},
{Scope: ScopeNone, Name: "server_id_bits", Value: "32"},
{Scope: ScopeGlobal, Name: "innodb_log_checksum_algorithm", Value: ""},
{Scope: ScopeNone, Name: "innodb_buffer_pool_load_at_startup", Value: "1"},
{Scope: ScopeGlobal | ScopeSession, Name: "sort_buffer_size", Value: "262144", IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "innodb_flush_neighbors", Value: "1"},
{Scope: ScopeNone, Name: "innodb_use_sys_malloc", Value: "1"},
{Scope: ScopeSession, Name: PluginLoad, Value: ""},
{Scope: ScopeSession, Name: PluginDir, Value: "/data/deploy/plugin"},
{Scope: ScopeNone, Name: "performance_schema_max_socket_classes", Value: "10"},
{Scope: ScopeNone, Name: "performance_schema_max_stage_classes", Value: "150"},
{Scope: ScopeGlobal, Name: "innodb_purge_batch_size", Value: "300"},
{Scope: ScopeNone, Name: "have_profiling", Value: "NO"},
{Scope: ScopeGlobal, Name: InnodbBufferPoolDumpNow, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: RelayLogPurge, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal, Name: "ndb_distribution", Value: ""},
{Scope: ScopeGlobal, Name: "myisam_data_pointer_size", Value: "6"},
{Scope: ScopeGlobal, Name: "ndb_optimization_delay", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_ft_num_word_optimize", Value: "2000"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_join_size", Value: "18446744073709551615", IsHintUpdatable: true},
{Scope: ScopeNone, Name: CoreFile, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "max_seeks_for_key", Value: "18446744073709551615", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "innodb_log_buffer_size", Value: "8388608"},
{Scope: ScopeGlobal, Name: "delayed_insert_timeout", Value: "300"},
{Scope: ScopeGlobal, Name: "max_relay_log_size", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: MaxSortLength, Value: "1024", Type: TypeUnsigned, MinValue: 4, MaxValue: 8388608, AutoConvertOutOfRange: true, IsHintUpdatable: true},
{Scope: ScopeNone, Name: "metadata_locks_hash_instances", Value: "8"},
{Scope: ScopeGlobal, Name: "ndb_eventbuffer_free_percent", Value: ""},
{Scope: ScopeNone, Name: "large_files_support", Value: "1"},
{Scope: ScopeGlobal, Name: "binlog_max_flush_queue_time", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_fill_factor", Value: ""},
{Scope: ScopeGlobal, Name: "log_syslog_facility", Value: ""},
{Scope: ScopeNone, Name: "innodb_ft_min_token_size", Value: "3"},
{Scope: ScopeGlobal | ScopeSession, Name: "transaction_write_set_extraction", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_blob_write_batch_bytes", Value: ""},
{Scope: ScopeGlobal, Name: "automatic_sp_privileges", Value: "1"},
{Scope: ScopeGlobal, Name: "innodb_flush_sync", Value: ""},
{Scope: ScopeNone, Name: "performance_schema_events_statements_history_long_size", Value: "10000"},
{Scope: ScopeGlobal, Name: "innodb_monitor_disable", Value: ""},
{Scope: ScopeNone, Name: "innodb_doublewrite", Value: "1"},
{Scope: ScopeNone, Name: "log_bin_use_v1_row_events", Value: "0"},
{Scope: ScopeSession, Name: "innodb_optimize_point_storage", Value: ""},
{Scope: ScopeNone, Name: "innodb_api_disable_rowlock", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_adaptive_flushing_lwm", Value: "10"},
{Scope: ScopeNone, Name: "innodb_log_files_in_group", Value: "2"},
{Scope: ScopeGlobal, Name: InnodbBufferPoolLoadNow, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeNone, Name: "performance_schema_max_rwlock_classes", Value: "40"},
{Scope: ScopeNone, Name: "binlog_gtid_simple_recovery", Value: "1"},
{Scope: ScopeNone, Name: "performance_schema_digests_size", Value: "10000"},
{Scope: ScopeGlobal | ScopeSession, Name: Profiling, Value: BoolOff, Type: TypeBool},
{Scope: ScopeSession, Name: "rand_seed1", Value: ""},
{Scope: ScopeGlobal, Name: "sha256_password_proxy_users", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: SQLQuoteShowCreate, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "binlogging_impossible_mode", Value: "IGNORE_ERROR"},
{Scope: ScopeGlobal | ScopeSession, Name: QueryCacheSize, Value: "1048576"},
{Scope: ScopeGlobal, Name: "innodb_stats_transient_sample_pages", Value: "8"},
{Scope: ScopeGlobal, Name: InnodbStatsOnMetadata, Value: "0"},
{Scope: ScopeNone, Name: "server_uuid", Value: "00000000-0000-0000-0000-000000000000"},
{Scope: ScopeNone, Name: "open_files_limit", Value: "5000"},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_force_send", Value: ""},
{Scope: ScopeNone, Name: "skip_show_database", Value: "0"},
{Scope: ScopeGlobal, Name: "log_timestamps", Value: ""},
{Scope: ScopeNone, Name: "version_compile_machine", Value: "x86_64"},
{Scope: ScopeGlobal, Name: "event_scheduler", Value: BoolOff},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_deferred_constraints", Value: ""},
{Scope: ScopeGlobal, Name: "log_syslog_include_pid", Value: ""},
{Scope: ScopeSession, Name: "last_insert_id", Value: ""},
{Scope: ScopeNone, Name: "innodb_ft_cache_size", Value: "8000000"},
{Scope: ScopeGlobal, Name: InnodbDisableSortFileCache, Value: "0"},
{Scope: ScopeGlobal, Name: "log_error_verbosity", Value: ""},
{Scope: ScopeNone, Name: "performance_schema_hosts_size", Value: "100"},
{Scope: ScopeGlobal, Name: "innodb_replication_delay", Value: "0"},
{Scope: ScopeGlobal, Name: SlowQueryLog, Value: "0"},
{Scope: ScopeSession, Name: "debug_sync", Value: ""},
{Scope: ScopeGlobal, Name: InnodbStatsAutoRecalc, Value: "1"},
{Scope: ScopeGlobal | ScopeSession, Name: "lc_messages", Value: "en_US"},
{Scope: ScopeGlobal | ScopeSession, Name: "bulk_insert_buffer_size", Value: "8388608", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: BinlogDirectNonTransactionalUpdates, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "innodb_change_buffering", Value: "all"},
{Scope: ScopeGlobal | ScopeSession, Name: SQLBigSelects, Value: BoolOn, Type: TypeBool, IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "innodb_max_purge_lag_delay", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: "session_track_schema", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_io_capacity_max", Value: "2000"},
{Scope: ScopeGlobal, Name: "innodb_autoextend_increment", Value: "64"},
{Scope: ScopeGlobal | ScopeSession, Name: "binlog_format", Value: "STATEMENT"},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace", Value: "enabled=off,one_line=off"},
{Scope: ScopeGlobal | ScopeSession, Name: "read_rnd_buffer_size", Value: "262144", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: NetWriteTimeout, Value: "60"},
{Scope: ScopeGlobal, Name: InnodbBufferPoolLoadAbort, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal | ScopeSession, Name: "transaction_prealloc_size", Value: "4096"},
{Scope: ScopeNone, Name: "performance_schema_setup_objects_size", Value: "100"},
{Scope: ScopeGlobal, Name: "sync_relay_log", Value: "10000"},
{Scope: ScopeGlobal, Name: "innodb_ft_result_cache_limit", Value: "2000000000"},
{Scope: ScopeNone, Name: "innodb_sort_buffer_size", Value: "1048576"},
{Scope: ScopeGlobal, Name: "innodb_ft_enable_diag_print", Type: TypeBool, Value: BoolOff},
{Scope: ScopeNone, Name: "thread_handling", Value: "one-thread-per-connection"},
{Scope: ScopeGlobal, Name: "stored_program_cache", Value: "256"},
{Scope: ScopeNone, Name: "performance_schema_max_mutex_instances", Value: "15906"},
{Scope: ScopeGlobal, Name: "innodb_adaptive_max_sleep_delay", Value: "150000"},
{Scope: ScopeNone, Name: "large_pages", Value: BoolOff},
{Scope: ScopeGlobal | ScopeSession, Name: "session_track_system_variables", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_change_buffer_max_size", Value: "25"},
{Scope: ScopeGlobal, Name: LogBinTrustFunctionCreators, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "innodb_write_io_threads", Value: "4"},
{Scope: ScopeGlobal, Name: "mysql_native_password_proxy_users", Value: ""},
{Scope: ScopeGlobal, Name: serverReadOnly, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "large_page_size", Value: "0"},
{Scope: ScopeNone, Name: "table_open_cache_instances", Value: "1"},
{Scope: ScopeGlobal, Name: InnodbStatsPersistent, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal | ScopeSession, Name: "session_track_state_change", Value: ""},
{Scope: ScopeNone, Name: OptimizerSwitch, Value: "index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,engine_condition_pushdown=on,index_condition_pushdown=on,mrr=on,mrr_cost_based=on,block_nested_loop=on,batched_key_access=off,materialization=on,semijoin=on,loosescan=on,firstmatch=on,subquery_materialization_cost_based=on,use_index_extensions=on", IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "delayed_queue_size", Value: "1000"},
{Scope: ScopeNone, Name: "innodb_read_only", Value: "0"},
{Scope: ScopeNone, Name: "datetime_format", Value: "%Y-%m-%d %H:%i:%s"},
{Scope: ScopeGlobal, Name: "log_syslog", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "transaction_alloc_block_size", Value: "8192"},
{Scope: ScopeGlobal, Name: "innodb_large_prefix", Type: TypeBool, Value: BoolOff},
{Scope: ScopeNone, Name: "performance_schema_max_cond_classes", Value: "80"},
{Scope: ScopeGlobal, Name: "innodb_io_capacity", Value: "200"},
{Scope: ScopeGlobal, Name: "max_binlog_cache_size", Value: "18446744073709547520"},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_index_stat_enable", Value: ""},
{Scope: ScopeGlobal, Name: "executed_gtids_compression_period", Value: ""},
{Scope: ScopeNone, Name: "time_format", Value: "%H:%i:%s"},
{Scope: ScopeGlobal | ScopeSession, Name: OldAlterTable, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "long_query_time", Value: "10.000000"},
{Scope: ScopeNone, Name: "innodb_use_native_aio", Value: "0"},
{Scope: ScopeGlobal, Name: "log_throttle_queries_not_using_indexes", Value: "0"},
{Scope: ScopeNone, Name: "locked_in_memory", Value: "0"},
{Scope: ScopeNone, Name: "innodb_api_enable_mdl", Value: "0"},
{Scope: ScopeGlobal, Name: "binlog_cache_size", Value: "32768"},
{Scope: ScopeGlobal, Name: "innodb_compression_pad_pct_max", Value: "50"},
{Scope: ScopeGlobal, Name: InnodbCommitConcurrency, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 1000, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "ft_min_word_len", Value: "4"},
{Scope: ScopeGlobal, Name: EnforceGtidConsistency, Value: BoolOff, Type: TypeEnum, PossibleValues: []string{BoolOff, BoolOn, "WARN"}},
{Scope: ScopeGlobal, Name: SecureAuth, Value: BoolOn, Type: TypeBool, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if TiDBOptOn(normalizedValue) {
return BoolOn, nil
}
return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs(SecureAuth, originalValue)
}},
{Scope: ScopeNone, Name: "max_tmp_tables", Value: "32"},
{Scope: ScopeGlobal, Name: InnodbRandomReadAhead, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal | ScopeSession, Name: UniqueChecks, Value: BoolOn, Type: TypeBool, IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "internal_tmp_disk_storage_engine", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "myisam_repair_threads", Value: "1"},
{Scope: ScopeGlobal, Name: "ndb_eventbuffer_max_alloc", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_read_ahead_threshold", Value: "56"},
{Scope: ScopeGlobal, Name: "key_cache_block_size", Value: "1024"},
{Scope: ScopeNone, Name: "ndb_recv_thread_cpu_mask", Value: ""},
{Scope: ScopeGlobal, Name: "gtid_purged", Value: ""},
{Scope: ScopeGlobal, Name: "max_binlog_stmt_cache_size", Value: "18446744073709547520"},
{Scope: ScopeGlobal | ScopeSession, Name: "lock_wait_timeout", Value: "31536000"},
{Scope: ScopeGlobal | ScopeSession, Name: "read_buffer_size", Value: "131072", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "innodb_read_io_threads", Value: "4"},
{Scope: ScopeGlobal | ScopeSession, Name: MaxSpRecursionDepth, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 255, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "ignore_builtin_innodb", Value: "0"},
{Scope: ScopeGlobal, Name: "slow_query_log_file", Value: "/usr/local/mysql/data/localhost-slow.log"},
{Scope: ScopeGlobal, Name: "innodb_thread_sleep_delay", Value: "10000"},
{Scope: ScopeNone, Name: "license", Value: "Apache License 2.0"},
{Scope: ScopeGlobal, Name: "innodb_ft_aux_table", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: SQLWarnings, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: KeepFilesOnCreate, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "innodb_data_file_path", Value: "ibdata1:12M:autoextend"},
{Scope: ScopeNone, Name: "performance_schema_setup_actors_size", Value: "100"},
{Scope: ScopeNone, Name: "innodb_additional_mem_pool_size", Value: "8388608"},
{Scope: ScopeNone, Name: "log_error", Value: "/usr/local/mysql/data/localhost.err"},
{Scope: ScopeGlobal, Name: "binlog_stmt_cache_size", Value: "32768"},
{Scope: ScopeNone, Name: "relay_log_info_file", Value: "relay-log.info"},
{Scope: ScopeNone, Name: "innodb_ft_total_cache_size", Value: "640000000"},
{Scope: ScopeNone, Name: "performance_schema_max_rwlock_instances", Value: "9102"},
{Scope: ScopeGlobal, Name: "table_open_cache", Value: "2000"},
{Scope: ScopeNone, Name: "performance_schema_events_stages_history_long_size", Value: "10000"},
{Scope: ScopeSession, Name: "insert_id", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "default_tmp_storage_engine", Value: "InnoDB", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_search_depth", Value: "62", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: "max_points_in_geometry", Value: "65536", IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "innodb_stats_sample_pages", Value: "8"},
{Scope: ScopeGlobal | ScopeSession, Name: "profiling_history_size", Value: "15"},
{Scope: ScopeNone, Name: "have_symlink", Value: "YES"},
{Scope: ScopeGlobal | ScopeSession, Name: "storage_engine", Value: "InnoDB"},
{Scope: ScopeGlobal | ScopeSession, Name: "sql_log_off", Value: "0"},
// In MySQL, the default value of `explicit_defaults_for_timestamp` is `0`.
	// But in TiDB, it's set to `1` to be consistent with TiDB timestamp behavior.
// See: https://github.com/pingcap/tidb/pull/6068 for details
{Scope: ScopeNone, Name: "explicit_defaults_for_timestamp", Value: BoolOn, Type: TypeBool},
{Scope: ScopeNone, Name: "performance_schema_events_waits_history_size", Value: "10"},
{Scope: ScopeGlobal, Name: "log_syslog_tag", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_undo_log_truncate", Value: ""},
{Scope: ScopeSession, Name: "innodb_create_intrinsic", Value: ""},
{Scope: ScopeGlobal, Name: "gtid_executed_compression_period", Value: ""},
{Scope: ScopeGlobal, Name: "ndb_log_empty_epochs", Value: ""},
{Scope: ScopeNone, Name: "have_geometry", Value: "YES"},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace_max_mem_size", Value: "16384"},
{Scope: ScopeGlobal | ScopeSession, Name: "net_retry_count", Value: "10"},
{Scope: ScopeSession, Name: "ndb_table_no_logging", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace_features", Value: "greedy_search=on,range_optimizer=on,dynamic_range=on,repeated_subselect=on"},
{Scope: ScopeGlobal, Name: "innodb_flush_log_at_trx_commit", Value: "1"},
{Scope: ScopeGlobal, Name: "rewriter_enabled", Value: ""},
{Scope: ScopeGlobal, Name: "query_cache_min_res_unit", Value: "4096"},
{Scope: ScopeGlobal | ScopeSession, Name: "updatable_views_with_limit", Value: "YES", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_prune_level", Value: "1", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: "completion_type", Value: "NO_CHAIN"},
{Scope: ScopeGlobal, Name: "binlog_checksum", Value: "CRC32"},
{Scope: ScopeNone, Name: "report_port", Value: "3306"},
{Scope: ScopeGlobal | ScopeSession, Name: ShowOldTemporals, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "query_cache_limit", Value: "1048576"},
{Scope: ScopeGlobal, Name: "innodb_buffer_pool_size", Value: "134217728"},
{Scope: ScopeGlobal, Name: InnodbAdaptiveFlushing, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: "innodb_monitor_enable", Value: ""},
{Scope: ScopeNone, Name: "date_format", Value: "%Y-%m-%d"},
{Scope: ScopeGlobal, Name: "innodb_buffer_pool_filename", Value: "ib_buffer_pool"},
{Scope: ScopeGlobal, Name: "slow_launch_time", Value: "2"},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_use_transactions", Value: ""},
{Scope: ScopeNone, Name: "innodb_purge_threads", Value: "1"},
{Scope: ScopeGlobal, Name: "innodb_concurrency_tickets", Value: "5000"},
{Scope: ScopeGlobal, Name: "innodb_monitor_reset_all", Value: ""},
{Scope: ScopeNone, Name: "performance_schema_users_size", Value: "100"},
{Scope: ScopeGlobal, Name: "ndb_log_updated_only", Value: ""},
{Scope: ScopeNone, Name: "basedir", Value: "/usr/local/mysql"},
{Scope: ScopeGlobal, Name: "innodb_old_blocks_time", Value: "1000"},
{Scope: ScopeGlobal, Name: "innodb_stats_method", Value: "nulls_equal"},
{Scope: ScopeGlobal, Name: LocalInFile, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "myisam_stats_method", Value: "nulls_unequal"},
{Scope: ScopeNone, Name: "version_compile_os", Value: "osx10.8"},
{Scope: ScopeNone, Name: "relay_log_recovery", Value: "0"},
{Scope: ScopeNone, Name: "old", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: InnodbTableLocks, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeNone, Name: PerformanceSchema, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "myisam_recover_options", Value: BoolOff},
{Scope: ScopeGlobal | ScopeSession, Name: NetBufferLength, Value: "16384"},
{Scope: ScopeGlobal | ScopeSession, Name: "binlog_row_image", Value: "FULL"},
{Scope: ScopeNone, Name: "innodb_locks_unsafe_for_binlog", Value: "0"},
{Scope: ScopeSession, Name: "rbr_exec_mode", Value: ""},
{Scope: ScopeGlobal, Name: "myisam_max_sort_file_size", Value: "9223372036853727232"},
{Scope: ScopeNone, Name: "back_log", Value: "80"},
{Scope: ScopeSession, Name: "pseudo_thread_id", Value: ""},
{Scope: ScopeNone, Name: "have_dynamic_loading", Value: "YES"},
{Scope: ScopeGlobal, Name: "rewriter_verbose", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_undo_logs", Value: "128"},
{Scope: ScopeNone, Name: "performance_schema_max_cond_instances", Value: "3504"},
{Scope: ScopeGlobal, Name: "delayed_insert_limit", Value: "100"},
{Scope: ScopeGlobal, Name: Flush, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "eq_range_index_dive_limit", Value: "200", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "performance_schema_events_stages_history_size", Value: "10"},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_join_pushdown", Value: ""},
{Scope: ScopeGlobal, Name: "validate_password_special_char_count", Value: "1"},
{Scope: ScopeNone, Name: "performance_schema_max_thread_instances", Value: "402"},
{Scope: ScopeGlobal | ScopeSession, Name: "ndbinfo_show_hidden", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "net_read_timeout", Value: "30"},
{Scope: ScopeNone, Name: "innodb_page_size", Value: "16384"},
{Scope: ScopeNone, Name: "innodb_log_file_size", Value: "50331648"},
{Scope: ScopeGlobal, Name: "sync_relay_log_info", Value: "10000"},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace_limit", Value: "1"},
{Scope: ScopeNone, Name: "innodb_ft_max_token_size", Value: "84"},
{Scope: ScopeGlobal, Name: ValidatePasswordLength, Value: "8", Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal, Name: "ndb_log_binlog_index", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_api_bk_commit_interval", Value: "5"},
{Scope: ScopeNone, Name: "innodb_undo_directory", Value: "."},
{Scope: ScopeNone, Name: "bind_address", Value: "*"},
{Scope: ScopeGlobal, Name: "innodb_sync_spin_loops", Value: "30"},
{Scope: ScopeGlobal | ScopeSession, Name: SQLSafeUpdates, Value: BoolOff, Type: TypeBool, IsHintUpdatable: true},
{Scope: ScopeNone, Name: "tmpdir", Value: "/var/tmp/"},
{Scope: ScopeGlobal, Name: "innodb_thread_concurrency", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_buffer_pool_dump_pct", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "lc_time_names", Value: "en_US"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_statement_time", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: EndMarkersInJSON, Value: BoolOff, Type: TypeBool, IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: AvoidTemporalUpgrade, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "key_cache_age_threshold", Value: "300"},
{Scope: ScopeGlobal, Name: InnodbStatusOutput, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeSession, Name: "identity", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "min_examined_row_limit", Value: "0"},
{Scope: ScopeGlobal, Name: "sync_frm", Type: TypeBool, Value: BoolOn},
{Scope: ScopeGlobal, Name: "innodb_online_alter_log_max_size", Value: "134217728"},
{Scope: ScopeGlobal | ScopeSession, Name: "information_schema_stats_expiry", Value: "86400"},
{Scope: ScopeGlobal, Name: ThreadPoolSize, Value: "16", Type: TypeUnsigned, MinValue: 1, MaxValue: 64, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "lower_case_file_system", Value: "1"},
	// For compatibility purposes, we should leave these alone.
	// TODO: Follow the Terminology Updates of MySQL once their changes arrive.
// https://mysqlhighavailability.com/mysql-terminology-updates/
{Scope: ScopeSession, Name: PseudoSlaveMode, Value: "", Type: TypeInt},
{Scope: ScopeGlobal, Name: "slave_pending_jobs_size_max", Value: "16777216"},
{Scope: ScopeGlobal, Name: "slave_transaction_retries", Value: "10"},
{Scope: ScopeGlobal, Name: "slave_checkpoint_period", Value: "300"},
{Scope: ScopeGlobal, Name: MasterVerifyChecksum, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_trace_level", Value: ""},
{Scope: ScopeGlobal, Name: "master_info_repository", Value: "FILE"},
{Scope: ScopeGlobal, Name: "rpl_stop_slave_timeout", Value: "31536000"},
{Scope: ScopeGlobal, Name: "slave_net_timeout", Value: "3600"},
{Scope: ScopeGlobal, Name: "sync_master_info", Value: "10000"},
{Scope: ScopeGlobal, Name: "init_slave", Value: ""},
{Scope: ScopeGlobal, Name: SlaveCompressedProtocol, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_slave_trace_level", Value: ""},
{Scope: ScopeGlobal, Name: LogSlowSlaveStatements, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "slave_checkpoint_group", Value: "512"},
{Scope: ScopeNone, Name: "slave_load_tmpdir", Value: "/var/tmp/"},
{Scope: ScopeGlobal, Name: "slave_parallel_type", Value: ""},
{Scope: ScopeGlobal, Name: "slave_parallel_workers", Value: "0"},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_timeout", Value: "10000", Type: TypeInt},
{Scope: ScopeNone, Name: "slave_skip_errors", Value: BoolOff},
{Scope: ScopeGlobal, Name: "sql_slave_skip_counter", Value: "0"},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_slave_enabled", Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_enabled", Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "slave_preserve_commit_order", Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "slave_exec_mode", Value: "STRICT"},
{Scope: ScopeNone, Name: "log_slave_updates", Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_wait_point", Value: "AFTER_SYNC", Type: TypeEnum, PossibleValues: []string{"AFTER_SYNC", "AFTER_COMMIT"}},
{Scope: ScopeGlobal, Name: "slave_sql_verify_checksum", Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal, Name: "slave_max_allowed_packet", Value: "1073741824"},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_wait_for_slave_count", Value: "1", Type: TypeInt, MinValue: 1, MaxValue: 65535},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_wait_no_slave", Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal, Name: "slave_rows_search_algorithms", Value: "TABLE_SCAN,INDEX_SCAN"},
{Scope: ScopeGlobal, Name: SlaveAllowBatching, Value: BoolOff, Type: TypeBool},
}
| sessionctx/variable/noop.go | 1 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.991651713848114,
0.02574177086353302,
0.00017368550470564514,
0.001796458731405437,
0.13757169246673584
] |
{
"id": 2,
"code_window": [
"\t{Scope: ScopeGlobal, Name: \"log_timestamps\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"version_compile_machine\", Value: \"x86_64\"},\n",
"\t{Scope: ScopeGlobal, Name: \"event_scheduler\", Value: BoolOff},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: \"ndb_deferred_constraints\", Value: \"\"},\n",
"\t{Scope: ScopeGlobal, Name: \"log_syslog_include_pid\", Value: \"\"},\n",
"\t{Scope: ScopeSession, Name: \"last_insert_id\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"innodb_ft_cache_size\", Value: \"8000000\"},\n",
"\t{Scope: ScopeGlobal, Name: InnodbDisableSortFileCache, Value: \"0\"},\n",
"\t{Scope: ScopeGlobal, Name: \"log_error_verbosity\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"performance_schema_hosts_size\", Value: \"100\"},\n",
"\t{Scope: ScopeGlobal, Name: \"innodb_replication_delay\", Value: \"0\"},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/noop.go",
"type": "replace",
"edit_start_line_idx": 257
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package util
// RateLimit wraps a fixed-size channel to control concurrency.
type RateLimit struct {
capacity int
token chan struct{}
}
// NewRateLimit creates a limit controller with capacity n.
func NewRateLimit(n int) *RateLimit {
return &RateLimit{
capacity: n,
token: make(chan struct{}, n),
}
}
// GetToken acquires a token.
func (r *RateLimit) GetToken(done <-chan struct{}) (exit bool) {
select {
case <-done:
return true
case r.token <- struct{}{}:
return false
}
}
// PutToken puts a token back.
func (r *RateLimit) PutToken() {
select {
case <-r.token:
default:
panic("put a redundant token")
}
}
// GetCapacity returns the token capacity.
func (r *RateLimit) GetCapacity() int {
return r.capacity
}
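
// Example (an illustrative sketch only; tasks, Task, and handle are
// hypothetical names, not part of this package): bound worker concurrency
// by acquiring a token before spawning each goroutine and returning it
// when the work finishes.
//
//	limiter := NewRateLimit(4)
//	done := make(chan struct{})
//	var wg sync.WaitGroup
//	for _, task := range tasks {
//		if exit := limiter.GetToken(done); exit {
//			break // done was closed; stop scheduling work
//		}
//		wg.Add(1)
//		go func(t Task) {
//			defer wg.Done()
//			defer limiter.PutToken()
//			handle(t)
//		}(task)
//	}
//	wg.Wait()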
| store/tikv/util/rate_limit.go | 0 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.00017766955716069788,
0.00017124997975770384,
0.0001663661969359964,
0.00017045935965143144,
0.000003482454530967516
] |
{
"id": 2,
"code_window": [
"\t{Scope: ScopeGlobal, Name: \"log_timestamps\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"version_compile_machine\", Value: \"x86_64\"},\n",
"\t{Scope: ScopeGlobal, Name: \"event_scheduler\", Value: BoolOff},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: \"ndb_deferred_constraints\", Value: \"\"},\n",
"\t{Scope: ScopeGlobal, Name: \"log_syslog_include_pid\", Value: \"\"},\n",
"\t{Scope: ScopeSession, Name: \"last_insert_id\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"innodb_ft_cache_size\", Value: \"8000000\"},\n",
"\t{Scope: ScopeGlobal, Name: InnodbDisableSortFileCache, Value: \"0\"},\n",
"\t{Scope: ScopeGlobal, Name: \"log_error_verbosity\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"performance_schema_hosts_size\", Value: \"100\"},\n",
"\t{Scope: ScopeGlobal, Name: \"innodb_replication_delay\", Value: \"0\"},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/noop.go",
"type": "replace",
"edit_start_line_idx": 257
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
mysql "github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/util/dbterror"
)
// error definitions.
var (
ErrUnsupportedType = dbterror.ClassOptimizer.NewStd(mysql.ErrUnsupportedType)
ErrAnalyzeMissIndex = dbterror.ClassOptimizer.NewStd(mysql.ErrAnalyzeMissIndex)
ErrWrongParamCount = dbterror.ClassOptimizer.NewStd(mysql.ErrWrongParamCount)
ErrSchemaChanged = dbterror.ClassOptimizer.NewStd(mysql.ErrSchemaChanged)
ErrTablenameNotAllowedHere = dbterror.ClassOptimizer.NewStd(mysql.ErrTablenameNotAllowedHere)
ErrNotSupportedYet = dbterror.ClassOptimizer.NewStd(mysql.ErrNotSupportedYet)
ErrWrongUsage = dbterror.ClassOptimizer.NewStd(mysql.ErrWrongUsage)
ErrUnknown = dbterror.ClassOptimizer.NewStd(mysql.ErrUnknown)
ErrUnknownTable = dbterror.ClassOptimizer.NewStd(mysql.ErrUnknownTable)
ErrNoSuchTable = dbterror.ClassOptimizer.NewStd(mysql.ErrNoSuchTable)
ErrViewRecursive = dbterror.ClassOptimizer.NewStd(mysql.ErrViewRecursive)
ErrWrongArguments = dbterror.ClassOptimizer.NewStd(mysql.ErrWrongArguments)
ErrWrongNumberOfColumnsInSelect = dbterror.ClassOptimizer.NewStd(mysql.ErrWrongNumberOfColumnsInSelect)
ErrBadGeneratedColumn = dbterror.ClassOptimizer.NewStd(mysql.ErrBadGeneratedColumn)
ErrFieldNotInGroupBy = dbterror.ClassOptimizer.NewStd(mysql.ErrFieldNotInGroupBy)
ErrAggregateOrderNonAggQuery = dbterror.ClassOptimizer.NewStd(mysql.ErrAggregateOrderNonAggQuery)
ErrFieldInOrderNotSelect = dbterror.ClassOptimizer.NewStd(mysql.ErrFieldInOrderNotSelect)
ErrAggregateInOrderNotSelect = dbterror.ClassOptimizer.NewStd(mysql.ErrAggregateInOrderNotSelect)
ErrBadTable = dbterror.ClassOptimizer.NewStd(mysql.ErrBadTable)
ErrKeyDoesNotExist = dbterror.ClassOptimizer.NewStd(mysql.ErrKeyDoesNotExist)
ErrOperandColumns = dbterror.ClassOptimizer.NewStd(mysql.ErrOperandColumns)
ErrInvalidGroupFuncUse = dbterror.ClassOptimizer.NewStd(mysql.ErrInvalidGroupFuncUse)
ErrIllegalReference = dbterror.ClassOptimizer.NewStd(mysql.ErrIllegalReference)
ErrNoDB = dbterror.ClassOptimizer.NewStd(mysql.ErrNoDB)
ErrUnknownExplainFormat = dbterror.ClassOptimizer.NewStd(mysql.ErrUnknownExplainFormat)
ErrWrongGroupField = dbterror.ClassOptimizer.NewStd(mysql.ErrWrongGroupField)
ErrDupFieldName = dbterror.ClassOptimizer.NewStd(mysql.ErrDupFieldName)
ErrNonUpdatableTable = dbterror.ClassOptimizer.NewStd(mysql.ErrNonUpdatableTable)
ErrMultiUpdateKeyConflict = dbterror.ClassOptimizer.NewStd(mysql.ErrMultiUpdateKeyConflict)
ErrInternal = dbterror.ClassOptimizer.NewStd(mysql.ErrInternal)
ErrNonUniqTable = dbterror.ClassOptimizer.NewStd(mysql.ErrNonuniqTable)
ErrWindowInvalidWindowFuncUse = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowInvalidWindowFuncUse)
ErrWindowInvalidWindowFuncAliasUse = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowInvalidWindowFuncAliasUse)
ErrWindowNoSuchWindow = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowNoSuchWindow)
ErrWindowCircularityInWindowGraph = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowCircularityInWindowGraph)
ErrWindowNoChildPartitioning = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowNoChildPartitioning)
ErrWindowNoInherentFrame = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowNoInherentFrame)
ErrWindowNoRedefineOrderBy = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowNoRedefineOrderBy)
ErrWindowDuplicateName = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowDuplicateName)
ErrPartitionClauseOnNonpartitioned = dbterror.ClassOptimizer.NewStd(mysql.ErrPartitionClauseOnNonpartitioned)
ErrWindowFrameStartIllegal = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowFrameStartIllegal)
ErrWindowFrameEndIllegal = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowFrameEndIllegal)
ErrWindowFrameIllegal = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowFrameIllegal)
ErrWindowRangeFrameOrderType = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowRangeFrameOrderType)
ErrWindowRangeFrameTemporalType = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowRangeFrameTemporalType)
ErrWindowRangeFrameNumericType = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowRangeFrameNumericType)
ErrWindowRangeBoundNotConstant = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowRangeBoundNotConstant)
ErrWindowRowsIntervalUse = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowRowsIntervalUse)
ErrWindowFunctionIgnoresFrame = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowFunctionIgnoresFrame)
ErrUnsupportedOnGeneratedColumn = dbterror.ClassOptimizer.NewStd(mysql.ErrUnsupportedOnGeneratedColumn)
ErrPrivilegeCheckFail = dbterror.ClassOptimizer.NewStd(mysql.ErrPrivilegeCheckFail)
ErrInvalidWildCard = dbterror.ClassOptimizer.NewStd(mysql.ErrInvalidWildCard)
ErrMixOfGroupFuncAndFields = dbterror.ClassOptimizer.NewStd(mysql.ErrMixOfGroupFuncAndFieldsIncompatible)
errTooBigPrecision = dbterror.ClassExpression.NewStd(mysql.ErrTooBigPrecision)
ErrDBaccessDenied = dbterror.ClassOptimizer.NewStd(mysql.ErrDBaccessDenied)
ErrTableaccessDenied = dbterror.ClassOptimizer.NewStd(mysql.ErrTableaccessDenied)
ErrSpecificAccessDenied = dbterror.ClassOptimizer.NewStd(mysql.ErrSpecificAccessDenied)
ErrViewNoExplain = dbterror.ClassOptimizer.NewStd(mysql.ErrViewNoExplain)
ErrWrongValueCountOnRow = dbterror.ClassOptimizer.NewStd(mysql.ErrWrongValueCountOnRow)
ErrViewInvalid = dbterror.ClassOptimizer.NewStd(mysql.ErrViewInvalid)
ErrNoSuchThread = dbterror.ClassOptimizer.NewStd(mysql.ErrNoSuchThread)
ErrUnknownColumn = dbterror.ClassOptimizer.NewStd(mysql.ErrBadField)
ErrCartesianProductUnsupported = dbterror.ClassOptimizer.NewStd(mysql.ErrCartesianProductUnsupported)
ErrStmtNotFound = dbterror.ClassOptimizer.NewStd(mysql.ErrPreparedStmtNotFound)
ErrAmbiguous = dbterror.ClassOptimizer.NewStd(mysql.ErrNonUniq)
ErrUnresolvedHintName = dbterror.ClassOptimizer.NewStd(mysql.ErrUnresolvedHintName)
ErrNotHintUpdatable = dbterror.ClassOptimizer.NewStd(mysql.ErrNotHintUpdatable)
ErrWarnConflictingHint = dbterror.ClassOptimizer.NewStd(mysql.ErrWarnConflictingHint)
// Since we cannot know if user logged in with a password, use message of ErrAccessDeniedNoPassword instead
ErrAccessDenied = dbterror.ClassOptimizer.NewStdErr(mysql.ErrAccessDenied, mysql.MySQLErrName[mysql.ErrAccessDeniedNoPassword])
ErrBadNull = dbterror.ClassOptimizer.NewStd(mysql.ErrBadNull)
)
| planner/core/errors.go | 0 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.00017803160881157964,
0.00016723305452615023,
0.00016205028805416077,
0.00016555580077692866,
0.000005024010533816181
] |
{
"id": 2,
"code_window": [
"\t{Scope: ScopeGlobal, Name: \"log_timestamps\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"version_compile_machine\", Value: \"x86_64\"},\n",
"\t{Scope: ScopeGlobal, Name: \"event_scheduler\", Value: BoolOff},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: \"ndb_deferred_constraints\", Value: \"\"},\n",
"\t{Scope: ScopeGlobal, Name: \"log_syslog_include_pid\", Value: \"\"},\n",
"\t{Scope: ScopeSession, Name: \"last_insert_id\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"innodb_ft_cache_size\", Value: \"8000000\"},\n",
"\t{Scope: ScopeGlobal, Name: InnodbDisableSortFileCache, Value: \"0\"},\n",
"\t{Scope: ScopeGlobal, Name: \"log_error_verbosity\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"performance_schema_hosts_size\", Value: \"100\"},\n",
"\t{Scope: ScopeGlobal, Name: \"innodb_replication_delay\", Value: \"0\"},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/noop.go",
"type": "replace",
"edit_start_line_idx": 257
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor_test
import (
"fmt"
"sync"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/util/testkit"
)
type testBatchPointGetSuite struct {
store kv.Storage
dom *domain.Domain
}
func newStoreWithBootstrap() (kv.Storage, *domain.Domain, error) {
store, err := mockstore.NewMockStore()
if err != nil {
return nil, nil, errors.Trace(err)
}
session.SetSchemaLease(0)
session.DisableStats4Test()
dom, err := session.BootstrapSession(store)
if err != nil {
return nil, nil, err
}
return store, dom, errors.Trace(err)
}
func (s *testBatchPointGetSuite) SetUpSuite(c *C) {
store, dom, err := newStoreWithBootstrap()
c.Assert(err, IsNil)
s.store = store
s.dom = dom
}
func (s *testBatchPointGetSuite) TearDownSuite(c *C) {
s.dom.Close()
s.store.Close()
}
func (s *testBatchPointGetSuite) TestBatchPointGetExec(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key auto_increment not null, b int, c int, unique key idx_abc(a, b, c))")
tk.MustExec("insert into t values(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 5)")
tk.MustQuery("select * from t").Check(testkit.Rows(
"1 1 1",
"2 2 2",
"3 3 3",
"4 4 5",
))
tk.MustQuery("select a, b, c from t where (a, b, c) in ((1, 1, 1), (1, 1, 1), (1, 1, 1))").Check(testkit.Rows(
"1 1 1",
))
tk.MustQuery("select a, b, c from t where (a, b, c) in ((1, 1, 1), (2, 2, 2), (1, 1, 1))").Check(testkit.Rows(
"1 1 1",
"2 2 2",
))
tk.MustQuery("select a, b, c from t where (a, b, c) in ((1, 1, 1), (2, 2, 2), (100, 1, 1))").Check(testkit.Rows(
"1 1 1",
"2 2 2",
))
tk.MustQuery("select a, b, c from t where (a, b, c) in ((1, 1, 1), (2, 2, 2), (100, 1, 1), (4, 4, 5))").Check(testkit.Rows(
"1 1 1",
"2 2 2",
"4 4 5",
))
tk.MustQuery("select * from t where a in (1, 2, 4, 1, 2)").Check(testkit.Rows(
"1 1 1",
"2 2 2",
"4 4 5",
))
tk.MustQuery("select * from t where a in (1, 2, 4, 1, 2, 100)").Check(testkit.Rows(
"1 1 1",
"2 2 2",
"4 4 5",
))
tk.MustQuery("select a from t where a in (1, 2, 4, 1, 2, 100)").Check(testkit.Rows(
"1",
"2",
"4",
))
}
func (s *testBatchPointGetSuite) TestBatchPointGetInTxn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (id int primary key auto_increment, name varchar(30))")
	// Covers the fix for a bug where BatchPointGetExec didn't consider membuffer data in a transaction.
tk.MustExec("begin")
tk.MustExec("insert into t values (4, 'name')")
tk.MustQuery("select * from t where id in (4)").Check(testkit.Rows("4 name"))
tk.MustQuery("select * from t where id in (4) for update").Check(testkit.Rows("4 name"))
tk.MustExec("rollback")
tk.MustExec("begin pessimistic")
tk.MustExec("insert into t values (4, 'name')")
tk.MustQuery("select * from t where id in (4)").Check(testkit.Rows("4 name"))
tk.MustQuery("select * from t where id in (4) for update").Check(testkit.Rows("4 name"))
tk.MustExec("rollback")
tk.MustExec("create table s (a int, b int, c int, primary key (a, b))")
tk.MustExec("insert s values (1, 1, 1), (3, 3, 3), (5, 5, 5)")
tk.MustExec("begin pessimistic")
tk.MustExec("update s set c = 10 where a = 3")
tk.MustQuery("select * from s where (a, b) in ((1, 1), (2, 2), (3, 3)) for update").Check(testkit.Rows("1 1 1", "3 3 10"))
tk.MustExec("rollback")
}
func (s *testBatchPointGetSuite) TestBatchPointGetCache(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table customers (id int primary key, token varchar(255) unique)")
tk.MustExec("INSERT INTO test.customers (id, token) VALUES (28, '07j')")
tk.MustExec("INSERT INTO test.customers (id, token) VALUES (29, '03j')")
tk.MustExec("BEGIN")
tk.MustQuery("SELECT id, token FROM test.customers WHERE id IN (28)")
tk.MustQuery("SELECT id, token FROM test.customers WHERE id IN (28, 29);").Check(testkit.Rows("28 07j", "29 03j"))
}
func (s *testBatchPointGetSuite) TestIssue18843(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t18843 ( id bigint(10) primary key, f varchar(191) default null, unique key `idx_f` (`f`))")
tk.MustExec("insert into t18843 values (1, '')")
tk.MustQuery("select * from t18843 where f in (null)").Check(testkit.Rows())
tk.MustExec("insert into t18843 values (2, null)")
tk.MustQuery("select * from t18843 where f in (null)").Check(testkit.Rows())
tk.MustQuery("select * from t18843 where f is null").Check(testkit.Rows("2 <nil>"))
}
func (s *testBatchPointGetSuite) TestBatchPointGetUnsignedHandleWithSort(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2 (id bigint(20) unsigned, primary key(id))")
tk.MustExec("insert into t2 values (8738875760185212610)")
tk.MustExec("insert into t2 values (9814441339970117597)")
tk.MustExec("insert into t2 values (1)")
tk.MustQuery("select id from t2 where id in (8738875760185212610, 1, 9814441339970117597) order by id").Check(testkit.Rows("1", "8738875760185212610", "9814441339970117597"))
tk.MustQuery("select id from t2 where id in (8738875760185212610, 1, 9814441339970117597) order by id desc").Check(testkit.Rows("9814441339970117597", "8738875760185212610", "1"))
}
func (s *testBatchPointGetSuite) TestBatchPointGetLockExistKey(c *C) {
var wg sync.WaitGroup
errCh := make(chan error)
testLock := func(rc bool, key string, tableName string) {
doneCh := make(chan struct{}, 1)
tk1, tk2 := testkit.NewTestKit(c, s.store), testkit.NewTestKit(c, s.store)
errCh <- tk1.ExecToErr("use test")
errCh <- tk2.ExecToErr("use test")
tk1.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly
errCh <- tk1.ExecToErr(fmt.Sprintf("drop table if exists %s", tableName))
errCh <- tk1.ExecToErr(fmt.Sprintf("create table %s(id int, v int, k int, %s key0(id, v))", tableName, key))
errCh <- tk1.ExecToErr(fmt.Sprintf("insert into %s values(1, 1, 1), (2, 2, 2)", tableName))
if rc {
errCh <- tk1.ExecToErr("set tx_isolation = 'READ-COMMITTED'")
errCh <- tk2.ExecToErr("set tx_isolation = 'READ-COMMITTED'")
}
errCh <- tk1.ExecToErr("begin pessimistic")
errCh <- tk2.ExecToErr("begin pessimistic")
// select for update
if !rc {
			// lock existing keys only under repeatable read
errCh <- tk1.ExecToErr(fmt.Sprintf("select * from %s where (id, v) in ((1, 1), (2, 2)) for update", tableName))
} else {
			// read committed will not lock non-existent keys
errCh <- tk1.ExecToErr(fmt.Sprintf("select * from %s where (id, v) in ((1, 1), (2, 2), (3, 3)) for update", tableName))
}
errCh <- tk2.ExecToErr(fmt.Sprintf("insert into %s values(3, 3, 3)", tableName))
go func() {
errCh <- tk2.ExecToErr(fmt.Sprintf("insert into %s values(1, 1, 10)", tableName))
doneCh <- struct{}{}
}()
time.Sleep(150 * time.Millisecond)
errCh <- tk1.ExecToErr(fmt.Sprintf("update %s set v = 2 where id = 1 and v = 1", tableName))
errCh <- tk1.ExecToErr("commit")
<-doneCh
errCh <- tk2.ExecToErr("commit")
tk1.MustQuery(fmt.Sprintf("select * from %s", tableName)).Check(testkit.Rows(
"1 2 1",
"2 2 2",
"3 3 3",
"1 1 10",
))
// update
errCh <- tk1.ExecToErr("begin pessimistic")
errCh <- tk2.ExecToErr("begin pessimistic")
if !rc {
			// lock existing keys only under repeatable read
errCh <- tk1.ExecToErr(fmt.Sprintf("update %s set v = v + 1 where (id, v) in ((2, 2), (3, 3))", tableName))
} else {
			// read committed will not lock non-existent keys
errCh <- tk1.ExecToErr(fmt.Sprintf("update %s set v = v + 1 where (id, v) in ((2, 2), (3, 3), (4, 4))", tableName))
}
errCh <- tk2.ExecToErr(fmt.Sprintf("insert into %s values(4, 4, 4)", tableName))
go func() {
errCh <- tk2.ExecToErr(fmt.Sprintf("insert into %s values(3, 3, 30)", tableName))
doneCh <- struct{}{}
}()
time.Sleep(150 * time.Millisecond)
errCh <- tk1.ExecToErr("commit")
<-doneCh
errCh <- tk2.ExecToErr("commit")
tk1.MustQuery(fmt.Sprintf("select * from %s", tableName)).Check(testkit.Rows(
"1 2 1",
"2 3 2",
"3 4 3",
"1 1 10",
"4 4 4",
"3 3 30",
))
// delete
errCh <- tk1.ExecToErr("begin pessimistic")
errCh <- tk2.ExecToErr("begin pessimistic")
if !rc {
			// lock existing keys only under repeatable read
errCh <- tk1.ExecToErr(fmt.Sprintf("delete from %s where (id, v) in ((3, 4), (4, 4))", tableName))
} else {
			// read committed will not lock non-existent keys
errCh <- tk1.ExecToErr(fmt.Sprintf("delete from %s where (id, v) in ((3, 4), (4, 4), (5, 5))", tableName))
}
errCh <- tk2.ExecToErr(fmt.Sprintf("insert into %s values(5, 5, 5)", tableName))
go func() {
			errCh <- tk2.ExecToErr(fmt.Sprintf("insert into %s values(4, 4, 40)", tableName))
doneCh <- struct{}{}
}()
time.Sleep(150 * time.Millisecond)
errCh <- tk1.ExecToErr("commit")
<-doneCh
errCh <- tk2.ExecToErr("commit")
tk1.MustQuery(fmt.Sprintf("select * from %s", tableName)).Check(testkit.Rows(
"1 2 1",
"2 3 2",
"1 1 10",
"3 3 30",
"5 5 5",
"4 4 40",
))
wg.Done()
}
for i, one := range []struct {
rc bool
key string
}{
{rc: false, key: "primary key"},
{rc: false, key: "unique key"},
{rc: true, key: "primary key"},
{rc: true, key: "unique key"},
} {
wg.Add(1)
tableName := fmt.Sprintf("t_%d", i)
go testLock(one.rc, one.key, tableName)
}
	// should also work for a common handle in a clustered index
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id varchar(40) primary key)")
tk.MustExec("insert into t values('1'), ('2')")
tk.MustExec("set tx_isolation = 'READ-COMMITTED'")
tk.MustExec("begin pessimistic")
tk.MustExec("select * from t where id in('1', '2') for update")
tk.MustExec("commit")
go func() {
wg.Wait()
close(errCh)
}()
for err := range errCh {
c.Assert(err, IsNil)
}
}
| executor/batch_point_get_test.go | 0 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.0001777549769030884,
0.00017057904915418476,
0.0001622446288820356,
0.00017167073383461684,
0.0000033109113246609922
] |
{
"id": 3,
"code_window": [
"\t{Scope: ScopeNone, Name: \"ignore_builtin_innodb\", Value: \"0\"},\n",
"\t{Scope: ScopeGlobal, Name: \"slow_query_log_file\", Value: \"/usr/local/mysql/data/localhost-slow.log\"},\n",
"\t{Scope: ScopeGlobal, Name: \"innodb_thread_sleep_delay\", Value: \"10000\"},\n",
"\t{Scope: ScopeNone, Name: \"license\", Value: \"Apache License 2.0\"},\n",
"\t{Scope: ScopeGlobal, Name: \"innodb_ft_aux_table\", Value: \"\"},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: SQLWarnings, Value: BoolOff, Type: TypeBool},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: KeepFilesOnCreate, Value: BoolOff, Type: TypeBool},\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/noop.go",
"type": "replace",
"edit_start_line_idx": 349
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package variable
import (
"fmt"
"math"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/cznic/mathutil"
"github.com/pingcap/errors"
"github.com/pingcap/parser/charset"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/collate"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/versioninfo"
atomic2 "go.uber.org/atomic"
)
// ScopeFlag is for system variable whether can be changed in global/session dynamically or not.
type ScopeFlag uint8
// TypeFlag is the SysVar type, which doesn't exactly match MySQL types.
type TypeFlag byte
const (
// ScopeNone means the system variable can not be changed dynamically.
ScopeNone ScopeFlag = 0
// ScopeGlobal means the system variable can be changed globally.
ScopeGlobal ScopeFlag = 1 << 0
// ScopeSession means the system variable can only be changed in current session.
ScopeSession ScopeFlag = 1 << 1
// TypeStr is the default
TypeStr TypeFlag = 0
// TypeBool for boolean
TypeBool TypeFlag = 1
// TypeInt for integer
TypeInt TypeFlag = 2
// TypeEnum for Enum
TypeEnum TypeFlag = 3
// TypeFloat for Double
TypeFloat TypeFlag = 4
// TypeUnsigned for Unsigned integer
TypeUnsigned TypeFlag = 5
// TypeTime for time of day (a TiDB extension)
TypeTime TypeFlag = 6
// TypeDuration for a golang duration (a TiDB extension)
TypeDuration TypeFlag = 7
// BoolOff is the canonical string representation of a boolean false.
BoolOff = "OFF"
// BoolOn is the canonical string representation of a boolean true.
BoolOn = "ON"
// On is the canonical string for ON
On = "ON"
// Off is the canonical string for OFF
Off = "OFF"
// Warn means return warnings
Warn = "WARN"
// IntOnly means enable for int type
IntOnly = "INT_ONLY"
)
// SysVar is for system variable.
type SysVar struct {
// Scope is for whether can be changed or not
Scope ScopeFlag
// Name is the variable name.
Name string
// Value is the variable value.
Value string
// Type is the MySQL type (optional)
Type TypeFlag
// MinValue will automatically be validated when specified (optional)
MinValue int64
// MaxValue will automatically be validated when specified (optional)
MaxValue uint64
// AutoConvertNegativeBool applies to boolean types (optional)
AutoConvertNegativeBool bool
// AutoConvertOutOfRange applies to int and unsigned types.
AutoConvertOutOfRange bool
// ReadOnly applies to all types
ReadOnly bool
// PossibleValues applies to ENUM type
PossibleValues []string
// AllowEmpty is a special TiDB behavior which means "read value from config" (do not use)
AllowEmpty bool
// AllowEmptyAll is a special behavior that only applies to TiDBCapturePlanBaseline, TiDBTxnMode (do not use)
AllowEmptyAll bool
// AllowAutoValue means that the special value "-1" is permitted, even when outside of range.
AllowAutoValue bool
// Validation is a callback after the type validation has been performed
Validation func(*SessionVars, string, string, ScopeFlag) (string, error)
// SetSession is called after validation
SetSession func(*SessionVars, string) error
// IsHintUpdatable indicate whether it's updatable via SET_VAR() hint (optional)
IsHintUpdatable bool
}
// SetSessionFromHook calls the SetSession func if it exists.
func (sv *SysVar) SetSessionFromHook(s *SessionVars, val string) error {
if sv.SetSession != nil {
return sv.SetSession(s, val)
}
return nil
}
// ValidateFromType provides automatic validation based on the SysVar's type
func (sv *SysVar) ValidateFromType(vars *SessionVars, value string, scope ScopeFlag) (string, error) {
// Some sysvars are read-only. Attempting to set should always fail.
if sv.ReadOnly || sv.Scope == ScopeNone {
return value, ErrIncorrectScope.GenWithStackByArgs(sv.Name, "read only")
}
// The string "DEFAULT" is a special keyword in MySQL, which restores
// the compiled sysvar value. In which case we can skip further validation.
if strings.EqualFold(value, "DEFAULT") {
return sv.Value, nil
}
// Some sysvars in TiDB have a special behavior where the empty string means
// "use the config file value". This needs to be cleaned up once the behavior
// for instance variables is determined.
if value == "" && ((sv.AllowEmpty && scope == ScopeSession) || sv.AllowEmptyAll) {
return value, nil
}
// Provide validation using the SysVar struct
switch sv.Type {
case TypeUnsigned:
return sv.checkUInt64SystemVar(value, vars)
case TypeInt:
return sv.checkInt64SystemVar(value, vars)
case TypeBool:
return sv.checkBoolSystemVar(value, vars)
case TypeFloat:
return sv.checkFloatSystemVar(value, vars)
case TypeEnum:
return sv.checkEnumSystemVar(value, vars)
case TypeTime:
return sv.checkTimeSystemVar(value, vars)
case TypeDuration:
return sv.checkDurationSystemVar(value, vars)
}
return value, nil // typeString
}
const (
localDayTimeFormat = "15:04"
// FullDayTimeFormat is the full format of analyze start time and end time.
FullDayTimeFormat = "15:04 -0700"
)
func (sv *SysVar) checkTimeSystemVar(value string, vars *SessionVars) (string, error) {
var t time.Time
var err error
if len(value) <= len(localDayTimeFormat) {
t, err = time.ParseInLocation(localDayTimeFormat, value, vars.TimeZone)
} else {
t, err = time.ParseInLocation(FullDayTimeFormat, value, vars.TimeZone)
}
if err != nil {
return "", err
}
return t.Format(FullDayTimeFormat), nil
}
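
// For example, both "03:00" (interpreted in the session time zone) and
// "03:00 -0700" are accepted; the result is always echoed back in the full
// "15:04 -0700" layout.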
func (sv *SysVar) checkDurationSystemVar(value string, vars *SessionVars) (string, error) {
d, err := time.ParseDuration(value)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
// Check for min/max violations
if int64(d) < sv.MinValue {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if uint64(d) > sv.MaxValue {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
// return a string representation of the duration
return d.String(), nil
}
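
// For example, "1h30m" parses via time.ParseDuration and is normalized to
// the canonical string "1h30m0s"; durations outside [MinValue, MaxValue]
// are rejected with ErrWrongTypeForVar.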
func (sv *SysVar) checkUInt64SystemVar(value string, vars *SessionVars) (string, error) {
if sv.AllowAutoValue && value == "-1" {
return value, nil
}
// There are two types of validation behaviors for integer values. The default
	// is to return an error saying the value is out of range. For MySQL compatibility, some
	// variables prefer to convert the value to the min/max and return a warning.
if !sv.AutoConvertOutOfRange {
return sv.checkUint64SystemVarWithError(value)
}
if len(value) == 0 {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if value[0] == '-' {
_, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(sv.Name, value))
return fmt.Sprintf("%d", sv.MinValue), nil
}
val, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if val < uint64(sv.MinValue) {
vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(sv.Name, value))
return fmt.Sprintf("%d", sv.MinValue), nil
}
if val > sv.MaxValue {
vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(sv.Name, value))
return fmt.Sprintf("%d", sv.MaxValue), nil
}
return value, nil
}
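
// For example, a sysvar declared with MinValue: 1, MaxValue: 100000 and
// AutoConvertOutOfRange: true (such as max_connections in defaultSysVars
// below) turns the input "0" into "1" and "200000" into "100000",
// appending a truncation warning in both cases.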
func (sv *SysVar) checkInt64SystemVar(value string, vars *SessionVars) (string, error) {
if sv.AllowAutoValue && value == "-1" {
return value, nil
}
// There are two types of validation behaviors for integer values. The default
	// is to return an error saying the value is out of range. For MySQL compatibility, some
	// variables prefer to convert the value to the min/max and return a warning.
if !sv.AutoConvertOutOfRange {
return sv.checkInt64SystemVarWithError(value)
}
val, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if val < sv.MinValue {
vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(sv.Name, value))
return fmt.Sprintf("%d", sv.MinValue), nil
}
if val > int64(sv.MaxValue) {
vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(sv.Name, value))
return fmt.Sprintf("%d", sv.MaxValue), nil
}
return value, nil
}
func (sv *SysVar) checkEnumSystemVar(value string, vars *SessionVars) (string, error) {
// The value could be either a string or the ordinal position in the PossibleValues.
// This allows for the behavior 0 = OFF, 1 = ON, 2 = DEMAND etc.
var iStr string
for i, v := range sv.PossibleValues {
iStr = fmt.Sprintf("%d", i)
if strings.EqualFold(value, v) || strings.EqualFold(value, iStr) {
return v, nil
}
}
return value, ErrWrongValueForVar.GenWithStackByArgs(sv.Name, value)
}
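
// For example, with PossibleValues: []string{"OFF", "ON", "DEMAND"}, the
// inputs "demand" and "2" both normalize to "DEMAND".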
func (sv *SysVar) checkFloatSystemVar(value string, vars *SessionVars) (string, error) {
if len(value) == 0 {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
val, err := strconv.ParseFloat(value, 64)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if val < float64(sv.MinValue) || val > float64(sv.MaxValue) {
return value, ErrWrongValueForVar.GenWithStackByArgs(sv.Name, value)
}
return value, nil
}
func (sv *SysVar) checkBoolSystemVar(value string, vars *SessionVars) (string, error) {
if strings.EqualFold(value, "ON") {
return BoolOn, nil
} else if strings.EqualFold(value, "OFF") {
return BoolOff, nil
}
val, err := strconv.ParseInt(value, 10, 64)
if err == nil {
// There are two types of conversion rules for integer values.
		// The default only allows 0 or 1, but a subset of variables converts any
		// negative integer to 1.
if !sv.AutoConvertNegativeBool {
if val == 0 {
return BoolOff, nil
} else if val == 1 {
return BoolOn, nil
}
} else {
if val == 1 || val < 0 {
return BoolOn, nil
} else if val == 0 {
return BoolOff, nil
}
}
}
return value, ErrWrongValueForVar.GenWithStackByArgs(sv.Name, value)
}
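
// For example, without AutoConvertNegativeBool only "0", "1" and the
// ON/OFF spellings are accepted, while with it any negative integer such
// as "-1" also normalizes to ON.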
func (sv *SysVar) checkUint64SystemVarWithError(value string) (string, error) {
if len(value) == 0 {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if value[0] == '-' {
		// in strict mode it expects the error WrongValue, but in non-strict mode it returns WrongType
return value, ErrWrongValueForVar.GenWithStackByArgs(sv.Name, value)
}
val, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if val < uint64(sv.MinValue) || val > sv.MaxValue {
return value, ErrWrongValueForVar.GenWithStackByArgs(sv.Name, value)
}
return value, nil
}
func (sv *SysVar) checkInt64SystemVarWithError(value string) (string, error) {
if len(value) == 0 {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
val, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if val < sv.MinValue || val > int64(sv.MaxValue) {
return value, ErrWrongValueForVar.GenWithStackByArgs(sv.Name, value)
}
return value, nil
}
// ValidateFromHook calls the anonymous function on the sysvar if it exists.
func (sv *SysVar) ValidateFromHook(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if sv.Validation != nil {
return sv.Validation(vars, normalizedValue, originalValue, scope)
}
return normalizedValue, nil
}
// GetNativeValType attempts to convert the val to the approx MySQL non-string type
func (sv *SysVar) GetNativeValType(val string) (types.Datum, byte, uint) {
switch sv.Type {
case TypeUnsigned:
u, err := strconv.ParseUint(val, 10, 64)
if err != nil {
u = 0
}
return types.NewUintDatum(u), mysql.TypeLonglong, mysql.UnsignedFlag
case TypeBool:
optVal := int64(0) // OFF
if TiDBOptOn(val) {
optVal = 1
}
return types.NewIntDatum(optVal), mysql.TypeLong, 0
}
return types.NewStringDatum(val), mysql.TypeVarString, 0
}
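
// For example, "ON" for a TypeBool sysvar maps to the int datum 1 with
// type mysql.TypeLong, and a non-numeric value for a TypeUnsigned sysvar
// falls back to the uint datum 0.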
var sysVars map[string]*SysVar
var sysVarsLock sync.RWMutex
// RegisterSysVar adds a sysvar to the SysVars list
func RegisterSysVar(sv *SysVar) {
name := strings.ToLower(sv.Name)
sysVarsLock.Lock()
sysVars[name] = sv
sysVarsLock.Unlock()
}
// UnregisterSysVar removes a sysvar from the SysVars list
// currently only used in tests.
func UnregisterSysVar(name string) {
name = strings.ToLower(name)
sysVarsLock.Lock()
delete(sysVars, name)
sysVarsLock.Unlock()
}
// GetSysVar returns sys var info for name as key.
func GetSysVar(name string) *SysVar {
name = strings.ToLower(name)
sysVarsLock.RLock()
defer sysVarsLock.RUnlock()
return sysVars[name]
}
// SetSysVar sets a sysvar. This will not propagate to the cluster, so it should only be
// used for instance scoped AUTO variables such as system_time_zone.
func SetSysVar(name string, value string) {
name = strings.ToLower(name)
sysVarsLock.Lock()
defer sysVarsLock.Unlock()
sysVars[name].Value = value
}
// GetSysVars returns the sysVars list under a RWLock
func GetSysVars() map[string]*SysVar {
sysVarsLock.RLock()
defer sysVarsLock.RUnlock()
return sysVars
}
// PluginVarNames is global plugin var names set.
var PluginVarNames []string
func init() {
sysVars = make(map[string]*SysVar)
for _, v := range defaultSysVars {
RegisterSysVar(v)
}
for _, v := range noopSysVars {
RegisterSysVar(v)
}
initSynonymsSysVariables()
}
// BoolToOnOff returns the string representation of a bool, i.e. "ON/OFF"
func BoolToOnOff(b bool) string {
if b {
return BoolOn
}
return BoolOff
}
func int32ToBoolStr(i int32) string {
if i == 1 {
return BoolOn
}
return BoolOff
}
func checkCharacterValid(normalizedValue string, argName string) (string, error) {
if normalizedValue == "" {
return normalizedValue, errors.Trace(ErrWrongValueForVar.GenWithStackByArgs(argName, "NULL"))
}
cht, _, err := charset.GetCharsetInfo(normalizedValue)
if err != nil {
return normalizedValue, errors.Trace(err)
}
return cht, nil
}
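
// For example, "utf8mb4" resolves through charset.GetCharsetInfo to the
// canonical charset name, while the empty string is rejected as NULL with
// ErrWrongValueForVar.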
var defaultSysVars = []*SysVar{
{Scope: ScopeGlobal, Name: MaxConnections, Value: "151", Type: TypeUnsigned, MinValue: 1, MaxValue: 100000, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: SQLSelectLimit, Value: "18446744073709551615", Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: DefaultWeekFormat, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 7, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: SQLModeVar, Value: mysql.DefaultSQLMode, IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: MaxExecutionTime, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true, IsHintUpdatable: true, SetSession: func(s *SessionVars, val string) error {
timeoutMS := tidbOptPositiveInt32(val, 0)
s.MaxExecutionTime = uint64(timeoutMS)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: CollationServer, Value: mysql.DefaultCollationName, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if _, err := collate.GetCollationByName(normalizedValue); err != nil {
return normalizedValue, errors.Trace(err)
}
return normalizedValue, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: SQLLogBin, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TimeZone, Value: "SYSTEM", IsHintUpdatable: true, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if strings.EqualFold(normalizedValue, "SYSTEM") {
return "SYSTEM", nil
}
_, err := parseTimeZone(normalizedValue)
return normalizedValue, err
}},
{Scope: ScopeNone, Name: SystemTimeZone, Value: "CST"},
{Scope: ScopeGlobal | ScopeSession, Name: ForeignKeyChecks, Value: BoolOff, Type: TypeBool, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if TiDBOptOn(normalizedValue) {
// TiDB does not yet support foreign keys.
// Return the original value in the warning, so that users are not confused.
vars.StmtCtx.AppendWarning(ErrUnsupportedValueForVar.GenWithStackByArgs(ForeignKeyChecks, originalValue))
return BoolOff, nil
} else if !TiDBOptOn(normalizedValue) {
return BoolOff, nil
}
return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs(ForeignKeyChecks, originalValue)
}},
{Scope: ScopeNone, Name: Hostname, Value: ServerHostname},
{Scope: ScopeSession, Name: Timestamp, Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: CharacterSetFilesystem, Value: "binary", Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
return checkCharacterValid(normalizedValue, CharacterSetFilesystem)
}},
{Scope: ScopeGlobal | ScopeSession, Name: CollationDatabase, Value: mysql.DefaultCollationName, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if _, err := collate.GetCollationByName(normalizedValue); err != nil {
return normalizedValue, errors.Trace(err)
}
return normalizedValue, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: AutoIncrementIncrement, Value: strconv.FormatInt(DefAutoIncrementIncrement, 10), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint16, AutoConvertOutOfRange: true, SetSession: func(s *SessionVars, val string) error {
// AutoIncrementIncrement is valid in [1, 65535].
s.AutoIncrementIncrement = tidbOptPositiveInt32(val, DefAutoIncrementIncrement)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: AutoIncrementOffset, Value: strconv.FormatInt(DefAutoIncrementOffset, 10), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint16, AutoConvertOutOfRange: true, SetSession: func(s *SessionVars, val string) error {
// AutoIncrementOffset is valid in [1, 65535].
s.AutoIncrementOffset = tidbOptPositiveInt32(val, DefAutoIncrementOffset)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: CharacterSetClient, Value: mysql.DefaultCharset, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
return checkCharacterValid(normalizedValue, CharacterSetClient)
}},
{Scope: ScopeNone, Name: Port, Value: "4000", Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint16},
{Scope: ScopeNone, Name: LowerCaseTableNames, Value: "2"},
{Scope: ScopeNone, Name: LogBin, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: CharacterSetResults, Value: mysql.DefaultCharset, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if normalizedValue == "" {
return normalizedValue, nil
}
return checkCharacterValid(normalizedValue, "")
}},
{Scope: ScopeNone, Name: VersionComment, Value: "TiDB Server (Apache License 2.0) " + versioninfo.TiDBEdition + " Edition, MySQL 5.7 compatible"},
{Scope: ScopeGlobal | ScopeSession, Name: TxnIsolation, Value: "REPEATABLE-READ", Type: TypeEnum, PossibleValues: []string{"READ-UNCOMMITTED", "READ-COMMITTED", "REPEATABLE-READ", "SERIALIZABLE"}, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if normalizedValue == "SERIALIZABLE" || normalizedValue == "READ-UNCOMMITTED" {
if skipIsolationLevelCheck, err := GetSessionSystemVar(vars, TiDBSkipIsolationLevelCheck); err != nil {
return normalizedValue, err
} else if !TiDBOptOn(skipIsolationLevelCheck) {
return normalizedValue, ErrUnsupportedIsolationLevel.GenWithStackByArgs(normalizedValue)
}
}
return normalizedValue, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TransactionIsolation, Value: "REPEATABLE-READ", Type: TypeEnum, PossibleValues: []string{"READ-UNCOMMITTED", "READ-COMMITTED", "REPEATABLE-READ", "SERIALIZABLE"}, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if normalizedValue == "SERIALIZABLE" || normalizedValue == "READ-UNCOMMITTED" {
returnErr := ErrUnsupportedIsolationLevel.GenWithStackByArgs(normalizedValue)
if skipIsolationLevelCheck, err := GetSessionSystemVar(vars, TiDBSkipIsolationLevelCheck); err != nil {
return normalizedValue, err
} else if !TiDBOptOn(skipIsolationLevelCheck) {
return normalizedValue, returnErr
}
vars.StmtCtx.AppendWarning(returnErr)
}
return normalizedValue, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: CollationConnection, Value: mysql.DefaultCollationName, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if _, err := collate.GetCollationByName(normalizedValue); err != nil {
return normalizedValue, errors.Trace(err)
}
return normalizedValue, nil
}},
{Scope: ScopeNone, Name: Version, Value: mysql.ServerVersion},
{Scope: ScopeGlobal | ScopeSession, Name: AutoCommit, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: CharsetDatabase, Value: mysql.DefaultCharset, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
return checkCharacterValid(normalizedValue, CharsetDatabase)
}},
{Scope: ScopeGlobal | ScopeSession, Name: TxReadOnly, Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: TransactionReadOnly, Value: "0"},
{Scope: ScopeGlobal, Name: MaxPreparedStmtCount, Value: strconv.FormatInt(DefMaxPreparedStmtCount, 10), Type: TypeInt, MinValue: -1, MaxValue: 1048576, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: DataDir, Value: "/usr/local/mysql/data/"},
{Scope: ScopeGlobal | ScopeSession, Name: WaitTimeout, Value: strconv.FormatInt(DefWaitTimeout, 10), Type: TypeUnsigned, MinValue: 0, MaxValue: secondsPerYear, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: InteractiveTimeout, Value: "28800", Type: TypeUnsigned, MinValue: 1, MaxValue: secondsPerYear, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: InnodbLockWaitTimeout, Value: strconv.FormatInt(DefInnodbLockWaitTimeout, 10), Type: TypeUnsigned, MinValue: 1, MaxValue: 1073741824, AutoConvertOutOfRange: true, SetSession: func(s *SessionVars, val string) error {
lockWaitSec := tidbOptInt64(val, DefInnodbLockWaitTimeout)
s.LockWaitTimeout = lockWaitSec * 1000
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: GroupConcatMaxLen, Value: "1024", AutoConvertOutOfRange: true, IsHintUpdatable: true, Type: TypeUnsigned, MinValue: 4, MaxValue: math.MaxUint64, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
// https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_group_concat_max_len
// Minimum Value 4
// Maximum Value (64-bit platforms) 18446744073709551615
// Maximum Value (32-bit platforms) 4294967295
if mathutil.IntBits == 32 {
if val, err := strconv.ParseUint(normalizedValue, 10, 64); err == nil {
if val > uint64(math.MaxUint32) {
vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(GroupConcatMaxLen, originalValue))
return fmt.Sprintf("%d", math.MaxUint32), nil
}
}
}
return normalizedValue, nil
}},
{Scope: ScopeNone, Name: Socket, Value: "/tmp/mysql.sock"},
{Scope: ScopeGlobal | ScopeSession, Name: CharacterSetConnection, Value: mysql.DefaultCharset, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
return checkCharacterValid(normalizedValue, CharacterSetConnection)
}},
{Scope: ScopeGlobal | ScopeSession, Name: CharacterSetServer, Value: mysql.DefaultCharset, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
return checkCharacterValid(normalizedValue, CharacterSetServer)
}},
{Scope: ScopeGlobal | ScopeSession, Name: MaxAllowedPacket, Value: "67108864", Type: TypeUnsigned, MinValue: 1024, MaxValue: MaxOfMaxAllowedPacket, AutoConvertOutOfRange: true},
{Scope: ScopeSession, Name: WarningCount, Value: "0", ReadOnly: true},
{Scope: ScopeSession, Name: ErrorCount, Value: "0", ReadOnly: true},
{Scope: ScopeGlobal | ScopeSession, Name: WindowingUseHighPrecision, Value: BoolOn, Type: TypeBool, IsHintUpdatable: true, SetSession: func(s *SessionVars, val string) error {
s.WindowingUseHighPrecision = TiDBOptOn(val)
return nil
}},
{Scope: ScopeSession, Name: TiDBTxnScope, Value: func() string {
if isGlobal, _ := config.GetTxnScopeFromConfig(); isGlobal {
return oracle.GlobalTxnScope
}
return oracle.LocalTxnScope
}()},
/* TiDB specific variables */
{Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowMPPExecution, Type: TypeBool, Value: BoolToOnOff(DefTiDBAllowMPPExecution)},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBBCJThresholdCount, Value: strconv.Itoa(DefBroadcastJoinThresholdCount), Type: TypeInt, MinValue: 0, MaxValue: math.MaxInt64, SetSession: func(s *SessionVars, val string) error {
s.BroadcastJoinThresholdCount = tidbOptInt64(val, DefBroadcastJoinThresholdCount)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBBCJThresholdSize, Value: strconv.Itoa(DefBroadcastJoinThresholdSize), Type: TypeInt, MinValue: 0, MaxValue: math.MaxInt64, SetSession: func(s *SessionVars, val string) error {
s.BroadcastJoinThresholdSize = tidbOptInt64(val, DefBroadcastJoinThresholdSize)
return nil
}},
{Scope: ScopeSession, Name: TiDBSnapshot, Value: ""},
{Scope: ScopeSession, Name: TiDBOptAggPushDown, Value: BoolToOnOff(DefOptAggPushDown), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
s.AllowAggPushDown = TiDBOptOn(val)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptBCJ, Value: BoolToOnOff(DefOptBCJ), Type: TypeBool, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if TiDBOptOn(normalizedValue) && vars.AllowBatchCop == 0 {
return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs("Can't set Broadcast Join to 1 but tidb_allow_batch_cop is 0, please active batch cop at first.")
}
return normalizedValue, nil
}, SetSession: func(s *SessionVars, val string) error {
s.AllowBCJ = TiDBOptOn(val)
return nil
}},
{Scope: ScopeSession, Name: TiDBOptDistinctAggPushDown, Value: BoolToOnOff(config.GetGlobalConfig().Performance.DistinctAggPushDown), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
s.AllowDistinctAggPushDown = TiDBOptOn(val)
return nil
}},
{Scope: ScopeSession, Name: TiDBOptWriteRowID, Value: BoolToOnOff(DefOptWriteRowID), SetSession: func(s *SessionVars, val string) error {
s.AllowWriteRowID = TiDBOptOn(val)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBBuildStatsConcurrency, Value: strconv.Itoa(DefBuildStatsConcurrency)},
{Scope: ScopeGlobal, Name: TiDBAutoAnalyzeRatio, Value: strconv.FormatFloat(DefAutoAnalyzeRatio, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal, Name: TiDBAutoAnalyzeStartTime, Value: DefAutoAnalyzeStartTime, Type: TypeTime},
{Scope: ScopeGlobal, Name: TiDBAutoAnalyzeEndTime, Value: DefAutoAnalyzeEndTime, Type: TypeTime},
{Scope: ScopeSession, Name: TiDBChecksumTableConcurrency, Value: strconv.Itoa(DefChecksumTableConcurrency)},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBExecutorConcurrency, Value: strconv.Itoa(DefExecutorConcurrency), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBDistSQLScanConcurrency, Value: strconv.Itoa(DefDistSQLScanConcurrency), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptInSubqToJoinAndAgg, Value: BoolToOnOff(DefOptInSubqToJoinAndAgg), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
s.SetAllowInSubqToJoinAndAgg(TiDBOptOn(val))
return nil
}},
{Scope: ScopeSession, Name: TiDBOptPreferRangeScan, Value: BoolToOnOff(DefOptPreferRangeScan), Type: TypeBool, IsHintUpdatable: true, SetSession: func(s *SessionVars, val string) error {
s.SetAllowPreferRangeScan(TiDBOptOn(val))
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptCorrelationThreshold, Value: strconv.FormatFloat(DefOptCorrelationThreshold, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: 1, SetSession: func(s *SessionVars, val string) error {
s.CorrelationThreshold = tidbOptFloat64(val, DefOptCorrelationThreshold)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptCorrelationExpFactor, Value: strconv.Itoa(DefOptCorrelationExpFactor), Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64, SetSession: func(s *SessionVars, val string) error {
s.CorrelationExpFactor = int(tidbOptInt64(val, DefOptCorrelationExpFactor))
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptCPUFactor, Value: strconv.FormatFloat(DefOptCPUFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64, SetSession: func(s *SessionVars, val string) error {
s.CPUFactor = tidbOptFloat64(val, DefOptCPUFactor)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptTiFlashConcurrencyFactor, Value: strconv.FormatFloat(DefOptTiFlashConcurrencyFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64, SetSession: func(s *SessionVars, val string) error {
s.CopTiFlashConcurrencyFactor = tidbOptFloat64(val, DefOptTiFlashConcurrencyFactor)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptCopCPUFactor, Value: strconv.FormatFloat(DefOptCopCPUFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64, SetSession: func(s *SessionVars, val string) error {
s.CopCPUFactor = tidbOptFloat64(val, DefOptCopCPUFactor)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptNetworkFactor, Value: strconv.FormatFloat(DefOptNetworkFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptScanFactor, Value: strconv.FormatFloat(DefOptScanFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptDescScanFactor, Value: strconv.FormatFloat(DefOptDescScanFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptSeekFactor, Value: strconv.FormatFloat(DefOptSeekFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptMemoryFactor, Value: strconv.FormatFloat(DefOptMemoryFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptDiskFactor, Value: strconv.FormatFloat(DefOptDiskFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptConcurrencyFactor, Value: strconv.FormatFloat(DefOptConcurrencyFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBIndexJoinBatchSize, Value: strconv.Itoa(DefIndexJoinBatchSize), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBIndexLookupSize, Value: strconv.Itoa(DefIndexLookupSize), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBIndexLookupConcurrency, Value: strconv.Itoa(DefIndexLookupConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBIndexLookupJoinConcurrency, Value: strconv.Itoa(DefIndexLookupJoinConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBIndexSerialScanConcurrency, Value: strconv.Itoa(DefIndexSerialScanConcurrency), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBSkipUTF8Check, Value: BoolToOnOff(DefSkipUTF8Check), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
s.SkipUTF8Check = TiDBOptOn(val)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBSkipASCIICheck, Value: BoolToOnOff(DefSkipASCIICheck), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
s.SkipASCIICheck = TiDBOptOn(val)
return nil
}},
{Scope: ScopeSession, Name: TiDBBatchInsert, Value: BoolToOnOff(DefBatchInsert), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBBatchDelete, Value: BoolToOnOff(DefBatchDelete), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBBatchCommit, Value: BoolToOnOff(DefBatchCommit), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBDMLBatchSize, Value: strconv.Itoa(DefDMLBatchSize), Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeSession, Name: TiDBCurrentTS, Value: strconv.Itoa(DefCurretTS), ReadOnly: true},
{Scope: ScopeSession, Name: TiDBLastTxnInfo, Value: strconv.Itoa(DefCurretTS), ReadOnly: true},
{Scope: ScopeSession, Name: TiDBLastQueryInfo, Value: strconv.Itoa(DefCurretTS), ReadOnly: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBMaxChunkSize, Value: strconv.Itoa(DefMaxChunkSize), Type: TypeUnsigned, MinValue: maxChunkSizeLowerBound, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowBatchCop, Value: strconv.Itoa(DefTiDBAllowBatchCop), Type: TypeInt, MinValue: 0, MaxValue: 2, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if normalizedValue == "0" && vars.AllowBCJ {
return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs("Can't set batch cop 0 but tidb_opt_broadcast_join is 1, please set tidb_opt_broadcast_join 0 at first")
}
return normalizedValue, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBInitChunkSize, Value: strconv.Itoa(DefInitChunkSize), Type: TypeUnsigned, MinValue: 1, MaxValue: initChunkSizeUpperBound},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableCascadesPlanner, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableIndexMerge, Value: BoolOff, Type: TypeBool},
{Scope: ScopeSession, Name: TIDBMemQuotaQuery, Value: strconv.FormatInt(config.GetGlobalConfig().MemQuotaQuery, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TIDBMemQuotaHashJoin, Value: strconv.FormatInt(DefTiDBMemQuotaHashJoin, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TIDBMemQuotaMergeJoin, Value: strconv.FormatInt(DefTiDBMemQuotaMergeJoin, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TIDBMemQuotaSort, Value: strconv.FormatInt(DefTiDBMemQuotaSort, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TIDBMemQuotaTopn, Value: strconv.FormatInt(DefTiDBMemQuotaTopn, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TIDBMemQuotaIndexLookupReader, Value: strconv.FormatInt(DefTiDBMemQuotaIndexLookupReader, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TIDBMemQuotaIndexLookupJoin, Value: strconv.FormatInt(DefTiDBMemQuotaIndexLookupJoin, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TiDBEnableStreaming, Value: BoolOff, Type: TypeBool},
{Scope: ScopeSession, Name: TiDBEnableChunkRPC, Value: BoolOn, Type: TypeBool},
{Scope: ScopeSession, Name: TxnIsolationOneShot, Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableTablePartition, Value: BoolOn, Type: TypeEnum, PossibleValues: []string{BoolOff, BoolOn, "AUTO"}},
{Scope: ScopeSession, Name: TiDBEnableListTablePartition, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBHashJoinConcurrency, Value: strconv.Itoa(DefTiDBHashJoinConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBProjectionConcurrency, Value: strconv.Itoa(DefTiDBProjectionConcurrency), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBHashAggPartialConcurrency, Value: strconv.Itoa(DefTiDBHashAggPartialConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBHashAggFinalConcurrency, Value: strconv.Itoa(DefTiDBHashAggFinalConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBWindowConcurrency, Value: strconv.Itoa(DefTiDBWindowConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBMergeJoinConcurrency, Value: strconv.Itoa(DefTiDBMergeJoinConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStreamAggConcurrency, Value: strconv.Itoa(DefTiDBStreamAggConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableParallelApply, Value: BoolToOnOff(DefTiDBEnableParallelApply), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBMemQuotaApplyCache, Value: strconv.Itoa(DefTiDBMemQuotaApplyCache)},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBBackoffLockFast, Value: strconv.Itoa(kv.DefBackoffLockFast), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBBackOffWeight, Value: strconv.Itoa(kv.DefBackOffWeight), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBRetryLimit, Value: strconv.Itoa(DefTiDBRetryLimit), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBDisableTxnAutoRetry, Value: BoolToOnOff(DefTiDBDisableTxnAutoRetry), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBConstraintCheckInPlace, Value: BoolToOnOff(DefTiDBConstraintCheckInPlace), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBTxnMode, Value: DefTiDBTxnMode, AllowEmptyAll: true, Type: TypeEnum, PossibleValues: []string{"pessimistic", "optimistic"}},
{Scope: ScopeGlobal, Name: TiDBRowFormatVersion, Value: strconv.Itoa(DefTiDBRowFormatV1), Type: TypeUnsigned, MinValue: 1, MaxValue: 2},
{Scope: ScopeSession, Name: TiDBOptimizerSelectivityLevel, Value: strconv.Itoa(DefTiDBOptimizerSelectivityLevel), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableWindowFunction, Value: BoolToOnOff(DefEnableWindowFunction), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableStrictDoubleTypeCheck, Value: BoolToOnOff(DefEnableStrictDoubleTypeCheck), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableVectorizedExpression, Value: BoolToOnOff(DefEnableVectorizedExpression), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableFastAnalyze, Value: BoolToOnOff(DefTiDBUseFastAnalyze), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBSkipIsolationLevelCheck, Value: BoolToOnOff(DefTiDBSkipIsolationLevelCheck), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableRateLimitAction, Value: BoolToOnOff(DefTiDBEnableRateLimitAction), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowFallbackToTiKV, Value: "", Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if normalizedValue == "" {
return "", nil
}
engines := strings.Split(normalizedValue, ",")
var formatVal string
storeTypes := make(map[kv.StoreType]struct{})
for i, engine := range engines {
engine = strings.TrimSpace(engine)
switch {
case strings.EqualFold(engine, kv.TiFlash.Name()):
if _, ok := storeTypes[kv.TiFlash]; !ok {
if i != 0 {
formatVal += ","
}
formatVal += kv.TiFlash.Name()
storeTypes[kv.TiFlash] = struct{}{}
}
default:
return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs(TiDBAllowFallbackToTiKV, normalizedValue)
}
}
return formatVal, nil
}},
/* The following variable is defined as session scope but is actually server scope. */
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableDynamicPrivileges, Value: BoolOff, Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
s.EnableDynamicPrivileges = TiDBOptOn(val)
return nil
}},
{Scope: ScopeSession, Name: TiDBGeneralLog, Value: BoolToOnOff(DefTiDBGeneralLog), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBPProfSQLCPU, Value: strconv.Itoa(DefTiDBPProfSQLCPU), Type: TypeInt, MinValue: 0, MaxValue: 1},
{Scope: ScopeSession, Name: TiDBDDLSlowOprThreshold, Value: strconv.Itoa(DefTiDBDDLSlowOprThreshold)},
{Scope: ScopeSession, Name: TiDBConfig, Value: "", ReadOnly: true},
{Scope: ScopeGlobal, Name: TiDBDDLReorgWorkerCount, Value: strconv.Itoa(DefTiDBDDLReorgWorkerCount), Type: TypeUnsigned, MinValue: 1, MaxValue: uint64(maxDDLReorgWorkerCount)},
{Scope: ScopeGlobal, Name: TiDBDDLReorgBatchSize, Value: strconv.Itoa(DefTiDBDDLReorgBatchSize), Type: TypeUnsigned, MinValue: int64(MinDDLReorgBatchSize), MaxValue: uint64(MaxDDLReorgBatchSize), AutoConvertOutOfRange: true},
{Scope: ScopeGlobal, Name: TiDBDDLErrorCountLimit, Value: strconv.Itoa(DefTiDBDDLErrorCountLimit), Type: TypeUnsigned, MinValue: 0, MaxValue: uint64(math.MaxInt64), AutoConvertOutOfRange: true},
{Scope: ScopeSession, Name: TiDBDDLReorgPriority, Value: "PRIORITY_LOW"},
{Scope: ScopeGlobal, Name: TiDBMaxDeltaSchemaCount, Value: strconv.Itoa(DefTiDBMaxDeltaSchemaCount), Type: TypeUnsigned, MinValue: 100, MaxValue: 16384, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableChangeColumnType, Value: BoolToOnOff(DefTiDBChangeColumnType), Type: TypeBool},
{Scope: ScopeGlobal, Name: TiDBEnableChangeMultiSchema, Value: BoolToOnOff(DefTiDBChangeMultiSchema), Type: TypeBool},
{Scope: ScopeGlobal, Name: TiDBEnablePointGetCache, Value: BoolToOnOff(DefTiDBPointGetCache), Type: TypeBool},
{Scope: ScopeGlobal, Name: TiDBEnableAlterPlacement, Value: BoolToOnOff(DefTiDBEnableAlterPlacement), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBForcePriority, Value: mysql.Priority2Str[DefTiDBForcePriority]},
{Scope: ScopeSession, Name: TiDBEnableRadixJoin, Value: BoolToOnOff(DefTiDBUseRadixJoin), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptJoinReorderThreshold, Value: strconv.Itoa(DefTiDBOptJoinReorderThreshold), Type: TypeUnsigned, MinValue: 0, MaxValue: 63},
{Scope: ScopeSession, Name: TiDBSlowQueryFile, Value: ""},
{Scope: ScopeGlobal, Name: TiDBScatterRegion, Value: BoolToOnOff(DefTiDBScatterRegion), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBWaitSplitRegionFinish, Value: BoolToOnOff(DefTiDBWaitSplitRegionFinish), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBWaitSplitRegionTimeout, Value: strconv.Itoa(DefWaitSplitRegionTimeout), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TiDBLowResolutionTSO, Value: BoolOff, Type: TypeBool},
{Scope: ScopeSession, Name: TiDBExpensiveQueryTimeThreshold, Value: strconv.Itoa(DefTiDBExpensiveQueryTimeThreshold), Type: TypeUnsigned, MinValue: int64(MinExpensiveQueryTimeThreshold), MaxValue: uint64(math.MaxInt64), AutoConvertOutOfRange: true},
{Scope: ScopeSession, Name: TiDBMemoryUsageAlarmRatio, Value: strconv.FormatFloat(config.GetGlobalConfig().Performance.MemoryUsageAlarmRatio, 'f', -1, 64), Type: TypeFloat, MinValue: 0.0, MaxValue: 1.0},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableNoopFuncs, Value: BoolToOnOff(DefTiDBEnableNoopFuncs), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBReplicaRead, Value: "leader", Type: TypeEnum, PossibleValues: []string{"leader", "follower", "leader-and-follower"}},
{Scope: ScopeSession, Name: TiDBAllowRemoveAutoInc, Value: BoolToOnOff(DefTiDBAllowRemoveAutoInc), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableStmtSummary, Value: BoolToOnOff(config.GetGlobalConfig().StmtSummary.Enable), Type: TypeBool, AllowEmpty: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStmtSummaryInternalQuery, Value: BoolToOnOff(config.GetGlobalConfig().StmtSummary.EnableInternalQuery), Type: TypeBool, AllowEmpty: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStmtSummaryRefreshInterval, Value: strconv.Itoa(config.GetGlobalConfig().StmtSummary.RefreshInterval), Type: TypeInt, MinValue: 1, MaxValue: uint64(math.MaxInt32), AllowEmpty: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStmtSummaryHistorySize, Value: strconv.Itoa(config.GetGlobalConfig().StmtSummary.HistorySize), Type: TypeInt, MinValue: 0, MaxValue: uint64(math.MaxUint8), AllowEmpty: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStmtSummaryMaxStmtCount, Value: strconv.FormatUint(uint64(config.GetGlobalConfig().StmtSummary.MaxStmtCount), 10), Type: TypeInt, MinValue: 1, MaxValue: uint64(math.MaxInt16), AllowEmpty: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStmtSummaryMaxSQLLength, Value: strconv.FormatUint(uint64(config.GetGlobalConfig().StmtSummary.MaxSQLLength), 10), Type: TypeInt, MinValue: 0, MaxValue: uint64(math.MaxInt32), AllowEmpty: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBCapturePlanBaseline, Value: BoolOff, Type: TypeBool, AllowEmptyAll: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBUsePlanBaselines, Value: BoolToOnOff(DefTiDBUsePlanBaselines), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEvolvePlanBaselines, Value: BoolToOnOff(DefTiDBEvolvePlanBaselines), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableExtendedStats, Value: BoolToOnOff(false), Type: TypeBool},
{Scope: ScopeGlobal, Name: TiDBEvolvePlanTaskMaxTime, Value: strconv.Itoa(DefTiDBEvolvePlanTaskMaxTime), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeGlobal, Name: TiDBEvolvePlanTaskStartTime, Value: DefTiDBEvolvePlanTaskStartTime, Type: TypeTime},
{Scope: ScopeGlobal, Name: TiDBEvolvePlanTaskEndTime, Value: DefTiDBEvolvePlanTaskEndTime, Type: TypeTime},
{Scope: ScopeSession, Name: TiDBIsolationReadEngines, Value: strings.Join(config.GetGlobalConfig().IsolationRead.Engines, ", "), Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
engines := strings.Split(normalizedValue, ",")
var formatVal string
for i, engine := range engines {
engine = strings.TrimSpace(engine)
if i != 0 {
formatVal += ","
}
switch {
case strings.EqualFold(engine, kv.TiKV.Name()):
formatVal += kv.TiKV.Name()
case strings.EqualFold(engine, kv.TiFlash.Name()):
formatVal += kv.TiFlash.Name()
case strings.EqualFold(engine, kv.TiDB.Name()):
formatVal += kv.TiDB.Name()
default:
return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs(TiDBIsolationReadEngines, normalizedValue)
}
}
return formatVal, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStoreLimit, Value: strconv.FormatInt(atomic.LoadInt64(&config.GetGlobalConfig().TiKVClient.StoreLimit), 10), Type: TypeInt, MinValue: 0, MaxValue: uint64(math.MaxInt64), AutoConvertOutOfRange: true},
{Scope: ScopeSession, Name: TiDBMetricSchemaStep, Value: strconv.Itoa(DefTiDBMetricSchemaStep), Type: TypeUnsigned, MinValue: 10, MaxValue: 60 * 60 * 60},
{Scope: ScopeSession, Name: TiDBMetricSchemaRangeDuration, Value: strconv.Itoa(DefTiDBMetricSchemaRangeDuration), Type: TypeUnsigned, MinValue: 10, MaxValue: 60 * 60 * 60},
{Scope: ScopeSession, Name: TiDBSlowLogThreshold, Value: strconv.Itoa(logutil.DefaultSlowThreshold), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TiDBRecordPlanInSlowLog, Value: int32ToBoolStr(logutil.DefaultRecordPlanInSlowLog), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBEnableSlowLog, Value: BoolToOnOff(logutil.DefaultTiDBEnableSlowLog), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBQueryLogMaxLen, Value: strconv.Itoa(logutil.DefaultQueryLogMaxLen), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TiDBCheckMb4ValueInUTF8, Value: BoolToOnOff(config.GetGlobalConfig().CheckMb4ValueInUTF8), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBFoundInPlanCache, Value: BoolToOnOff(DefTiDBFoundInPlanCache), Type: TypeBool, ReadOnly: true},
{Scope: ScopeSession, Name: TiDBFoundInBinding, Value: BoolToOnOff(DefTiDBFoundInBinding), Type: TypeBool, ReadOnly: true},
{Scope: ScopeSession, Name: TiDBEnableCollectExecutionInfo, Value: BoolToOnOff(DefTiDBEnableCollectExecutionInfo), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowAutoRandExplicitInsert, Value: BoolToOnOff(DefTiDBAllowAutoRandExplicitInsert), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableClusteredIndex, Value: IntOnly, Type: TypeEnum, PossibleValues: []string{Off, On, IntOnly, "1", "0"}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBPartitionPruneMode, Value: string(Static), Type: TypeStr, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
mode := PartitionPruneMode(normalizedValue).Update()
if !mode.Valid() {
return normalizedValue, ErrWrongTypeForVar.GenWithStackByArgs(TiDBPartitionPruneMode)
}
return string(mode), nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBSlowLogMasking, Value: BoolToOnOff(DefTiDBRedactLog), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBRedactLog, Value: BoolToOnOff(DefTiDBRedactLog), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBShardAllocateStep, Value: strconv.Itoa(DefTiDBShardAllocateStep), Type: TypeInt, MinValue: 1, MaxValue: uint64(math.MaxInt64), AutoConvertOutOfRange: true},
{Scope: ScopeGlobal, Name: TiDBEnableTelemetry, Value: BoolToOnOff(DefTiDBEnableTelemetry), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableAmendPessimisticTxn, Value: BoolToOnOff(DefTiDBEnableAmendPessimisticTxn), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableAsyncCommit, Value: BoolToOnOff(DefTiDBEnableAsyncCommit), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnable1PC, Value: BoolToOnOff(DefTiDBEnable1PC), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBGuaranteeLinearizability, Value: BoolToOnOff(DefTiDBGuaranteeLinearizability), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBAnalyzeVersion, Value: strconv.Itoa(DefTiDBAnalyzeVersion), Type: TypeInt, MinValue: 1, MaxValue: 2, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if normalizedValue == "2" && FeedbackProbability.Load() > 0 {
var original string
var err error
if scope == ScopeGlobal {
original, err = vars.GlobalVarsAccessor.GetGlobalSysVar(TiDBAnalyzeVersion)
if err != nil {
return normalizedValue, nil
}
} else {
original = strconv.Itoa(vars.AnalyzeVersion)
}
vars.StmtCtx.AppendError(errors.New("variable tidb_analyze_version not updated because analyze version 2 is incompatible with query feedback. Please consider setting feedback-probability to 0.0 in config file to disable query feedback"))
return original, nil
}
return normalizedValue, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableIndexMergeJoin, Value: BoolToOnOff(DefTiDBEnableIndexMergeJoin), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBTrackAggregateMemoryUsage, Value: BoolToOnOff(DefTiDBTrackAggregateMemoryUsage), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBMultiStatementMode, Value: Off, Type: TypeEnum, PossibleValues: []string{Off, On, Warn}, SetSession: func(s *SessionVars, val string) error {
s.MultiStatementMode = TiDBOptMultiStmt(val)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableExchangePartition, Value: BoolToOnOff(DefTiDBEnableExchangePartition), Type: TypeBool},
/* tikv gc metrics */
{Scope: ScopeGlobal, Name: TiDBGCEnable, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal, Name: TiDBGCRunInterval, Value: "10m0s", Type: TypeDuration, MinValue: int64(time.Minute * 10), MaxValue: math.MaxInt64},
{Scope: ScopeGlobal, Name: TiDBGCLifetime, Value: "10m0s", Type: TypeDuration, MinValue: int64(time.Minute * 10), MaxValue: math.MaxInt64},
{Scope: ScopeGlobal, Name: TiDBGCConcurrency, Value: "-1", Type: TypeInt, MinValue: 1, MaxValue: 128, AllowAutoValue: true},
{Scope: ScopeGlobal, Name: TiDBGCScanLockMode, Value: "PHYSICAL", Type: TypeEnum, PossibleValues: []string{"PHYSICAL", "LEGACY"}},
}
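// Illustrative only (hypothetical, simplified; not part of the original file):
// a Validation hook like the ones above can be exercised directly through the
// exported SysVar fields, though real assignments go through the SET-variable
// code path. `vars` stands for some *SessionVars in scope:
//
//	sv := GetSysVar(TiDBPartitionPruneMode)
//	if sv != nil && sv.Validation != nil {
//		normalized, err := sv.Validation(vars, "static", "static", ScopeSession)
//		// normalized == "static", err == nil; an unrecognized mode would
//		// instead return ErrWrongTypeForVar, as defined above.
//	}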
// FeedbackProbability points to the FeedbackProbability in statistics package.
// It's initialized in init() in feedback.go to avoid an import cycle.
var FeedbackProbability *atomic2.Float64
// SynonymsSysVariables is synonyms of system variables.
var SynonymsSysVariables = map[string][]string{}
func addSynonymsSysVariables(synonyms ...string) {
for _, s := range synonyms {
SynonymsSysVariables[s] = synonyms
}
}
func initSynonymsSysVariables() {
addSynonymsSysVariables(TxnIsolation, TransactionIsolation)
addSynonymsSysVariables(TxReadOnly, TransactionReadOnly)
}
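// Illustrative only (hypothetical helper, not part of the original file): once
// initSynonymsSysVariables has run, every member of a synonym group maps to the
// whole group, so either spelling resolves to the same set:
//
//	func synonymsOf(name string) []string {
//		if group, ok := SynonymsSysVariables[name]; ok {
//			return group // e.g. tx_isolation and transaction_isolation share one group
//		}
//		return []string{name}
//	}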
// SetNamesVariables is the system variable names related to set names statements.
var SetNamesVariables = []string{
CharacterSetClient,
CharacterSetConnection,
CharacterSetResults,
}
// SetCharsetVariables is the system variable names related to set charset statements.
var SetCharsetVariables = []string{
CharacterSetClient,
CharacterSetResults,
}
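// For example, `SET NAMES utf8mb4` assigns utf8mb4 to all three variables in
// SetNamesVariables, while `SET CHARACTER SET utf8mb4` assigns it only to the
// two variables in SetCharsetVariables; character_set_connection is then
// derived from character_set_database, following MySQL semantics.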
const (
// CharacterSetConnection is the name for character_set_connection system variable.
CharacterSetConnection = "character_set_connection"
// CollationConnection is the name for collation_connection system variable.
CollationConnection = "collation_connection"
// CharsetDatabase is the name for character_set_database system variable.
CharsetDatabase = "character_set_database"
// CollationDatabase is the name for collation_database system variable.
CollationDatabase = "collation_database"
// CharacterSetFilesystem is the name for character_set_filesystem system variable.
CharacterSetFilesystem = "character_set_filesystem"
// CharacterSetClient is the name for character_set_client system variable.
CharacterSetClient = "character_set_client"
// CharacterSetSystem is the name for character_set_system system variable.
CharacterSetSystem = "character_set_system"
// GeneralLog is the name for 'general_log' system variable.
GeneralLog = "general_log"
// AvoidTemporalUpgrade is the name for 'avoid_temporal_upgrade' system variable.
AvoidTemporalUpgrade = "avoid_temporal_upgrade"
// MaxPreparedStmtCount is the name for 'max_prepared_stmt_count' system variable.
MaxPreparedStmtCount = "max_prepared_stmt_count"
// BigTables is the name for 'big_tables' system variable.
BigTables = "big_tables"
// CheckProxyUsers is the name for 'check_proxy_users' system variable.
CheckProxyUsers = "check_proxy_users"
// CoreFile is the name for 'core_file' system variable.
CoreFile = "core_file"
// DefaultWeekFormat is the name for 'default_week_format' system variable.
DefaultWeekFormat = "default_week_format"
// GroupConcatMaxLen is the name for 'group_concat_max_len' system variable.
GroupConcatMaxLen = "group_concat_max_len"
// DelayKeyWrite is the name for 'delay_key_write' system variable.
DelayKeyWrite = "delay_key_write"
// EndMarkersInJSON is the name for 'end_markers_in_json' system variable.
EndMarkersInJSON = "end_markers_in_json"
// Hostname is the name for 'hostname' system variable.
Hostname = "hostname"
// InnodbCommitConcurrency is the name for 'innodb_commit_concurrency' system variable.
InnodbCommitConcurrency = "innodb_commit_concurrency"
// InnodbFastShutdown is the name for 'innodb_fast_shutdown' system variable.
InnodbFastShutdown = "innodb_fast_shutdown"
// InnodbLockWaitTimeout is the name for 'innodb_lock_wait_timeout' system variable.
InnodbLockWaitTimeout = "innodb_lock_wait_timeout"
// SQLLogBin is the name for 'sql_log_bin' system variable.
SQLLogBin = "sql_log_bin"
// LogBin is the name for 'log_bin' system variable.
LogBin = "log_bin"
// MaxSortLength is the name for 'max_sort_length' system variable.
MaxSortLength = "max_sort_length"
// MaxSpRecursionDepth is the name for 'max_sp_recursion_depth' system variable.
MaxSpRecursionDepth = "max_sp_recursion_depth"
// MaxUserConnections is the name for 'max_user_connections' system variable.
MaxUserConnections = "max_user_connections"
// OfflineMode is the name for 'offline_mode' system variable.
OfflineMode = "offline_mode"
// InteractiveTimeout is the name for 'interactive_timeout' system variable.
InteractiveTimeout = "interactive_timeout"
// FlushTime is the name for 'flush_time' system variable.
FlushTime = "flush_time"
// PseudoSlaveMode is the name for 'pseudo_slave_mode' system variable.
PseudoSlaveMode = "pseudo_slave_mode"
// LowPriorityUpdates is the name for 'low_priority_updates' system variable.
LowPriorityUpdates = "low_priority_updates"
// LowerCaseTableNames is the name for 'lower_case_table_names' system variable.
LowerCaseTableNames = "lower_case_table_names"
// SessionTrackGtids is the name for 'session_track_gtids' system variable.
SessionTrackGtids = "session_track_gtids"
// OldPasswords is the name for 'old_passwords' system variable.
OldPasswords = "old_passwords"
// MaxConnections is the name for 'max_connections' system variable.
MaxConnections = "max_connections"
// SkipNameResolve is the name for 'skip_name_resolve' system variable.
SkipNameResolve = "skip_name_resolve"
// ForeignKeyChecks is the name for 'foreign_key_checks' system variable.
ForeignKeyChecks = "foreign_key_checks"
// SQLSafeUpdates is the name for 'sql_safe_updates' system variable.
SQLSafeUpdates = "sql_safe_updates"
// WarningCount is the name for 'warning_count' system variable.
WarningCount = "warning_count"
// ErrorCount is the name for 'error_count' system variable.
ErrorCount = "error_count"
// SQLSelectLimit is the name for 'sql_select_limit' system variable.
SQLSelectLimit = "sql_select_limit"
// MaxConnectErrors is the name for 'max_connect_errors' system variable.
MaxConnectErrors = "max_connect_errors"
// TableDefinitionCache is the name for 'table_definition_cache' system variable.
TableDefinitionCache = "table_definition_cache"
// TmpTableSize is the name for 'tmp_table_size' system variable.
TmpTableSize = "tmp_table_size"
// Timestamp is the name for 'timestamp' system variable.
Timestamp = "timestamp"
// ConnectTimeout is the name for 'connect_timeout' system variable.
ConnectTimeout = "connect_timeout"
// SyncBinlog is the name for 'sync_binlog' system variable.
SyncBinlog = "sync_binlog"
// BlockEncryptionMode is the name for 'block_encryption_mode' system variable.
BlockEncryptionMode = "block_encryption_mode"
// WaitTimeout is the name for 'wait_timeout' system variable.
WaitTimeout = "wait_timeout"
// ValidatePasswordNumberCount is the name of 'validate_password_number_count' system variable.
ValidatePasswordNumberCount = "validate_password_number_count"
// ValidatePasswordLength is the name of 'validate_password_length' system variable.
ValidatePasswordLength = "validate_password_length"
// Version is the name of 'version' system variable.
Version = "version"
// VersionComment is the name of 'version_comment' system variable.
VersionComment = "version_comment"
// PluginDir is the name of 'plugin_dir' system variable.
PluginDir = "plugin_dir"
// PluginLoad is the name of 'plugin_load' system variable.
PluginLoad = "plugin_load"
// Port is the name for 'port' system variable.
Port = "port"
// DataDir is the name for 'datadir' system variable.
DataDir = "datadir"
// Profiling is the name for 'profiling' system variable.
Profiling = "profiling"
// Socket is the name for 'socket' system variable.
Socket = "socket"
// BinlogOrderCommits is the name for 'binlog_order_commits' system variable.
BinlogOrderCommits = "binlog_order_commits"
// MasterVerifyChecksum is the name for 'master_verify_checksum' system variable.
MasterVerifyChecksum = "master_verify_checksum"
// ValidatePasswordCheckUserName is the name for 'validate_password_check_user_name' system variable.
ValidatePasswordCheckUserName = "validate_password_check_user_name"
// SuperReadOnly is the name for 'super_read_only' system variable.
SuperReadOnly = "super_read_only"
// SQLNotes is the name for 'sql_notes' system variable.
SQLNotes = "sql_notes"
// QueryCacheType is the name for 'query_cache_type' system variable.
QueryCacheType = "query_cache_type"
// SlaveCompressedProtocol is the name for 'slave_compressed_protocol' system variable.
SlaveCompressedProtocol = "slave_compressed_protocol"
// BinlogRowQueryLogEvents is the name for 'binlog_rows_query_log_events' system variable.
BinlogRowQueryLogEvents = "binlog_rows_query_log_events"
// LogSlowSlaveStatements is the name for 'log_slow_slave_statements' system variable.
LogSlowSlaveStatements = "log_slow_slave_statements"
// LogSlowAdminStatements is the name for 'log_slow_admin_statements' system variable.
LogSlowAdminStatements = "log_slow_admin_statements"
// LogQueriesNotUsingIndexes is the name for 'log_queries_not_using_indexes' system variable.
LogQueriesNotUsingIndexes = "log_queries_not_using_indexes"
// QueryCacheWlockInvalidate is the name for 'query_cache_wlock_invalidate' system variable.
QueryCacheWlockInvalidate = "query_cache_wlock_invalidate"
// SQLAutoIsNull is the name for 'sql_auto_is_null' system variable.
SQLAutoIsNull = "sql_auto_is_null"
// RelayLogPurge is the name for 'relay_log_purge' system variable.
RelayLogPurge = "relay_log_purge"
// AutomaticSpPrivileges is the name for 'automatic_sp_privileges' system variable.
AutomaticSpPrivileges = "automatic_sp_privileges"
// SQLQuoteShowCreate is the name for 'sql_quote_show_create' system variable.
SQLQuoteShowCreate = "sql_quote_show_create"
// SlowQueryLog is the name for 'slow_query_log' system variable.
SlowQueryLog = "slow_query_log"
// BinlogDirectNonTransactionalUpdates is the name for 'binlog_direct_non_transactional_updates' system variable.
BinlogDirectNonTransactionalUpdates = "binlog_direct_non_transactional_updates"
// SQLBigSelects is the name for 'sql_big_selects' system variable.
SQLBigSelects = "sql_big_selects"
// LogBinTrustFunctionCreators is the name for 'log_bin_trust_function_creators' system variable.
LogBinTrustFunctionCreators = "log_bin_trust_function_creators"
// OldAlterTable is the name for 'old_alter_table' system variable.
OldAlterTable = "old_alter_table"
// EnforceGtidConsistency is the name for 'enforce_gtid_consistency' system variable.
EnforceGtidConsistency = "enforce_gtid_consistency"
// SecureAuth is the name for 'secure_auth' system variable.
SecureAuth = "secure_auth"
// UniqueChecks is the name for 'unique_checks' system variable.
UniqueChecks = "unique_checks"
// SQLWarnings is the name for 'sql_warnings' system variable.
SQLWarnings = "sql_warnings"
// AutoCommit is the name for 'autocommit' system variable.
AutoCommit = "autocommit"
// KeepFilesOnCreate is the name for 'keep_files_on_create' system variable.
KeepFilesOnCreate = "keep_files_on_create"
// ShowOldTemporals is the name for 'show_old_temporals' system variable.
ShowOldTemporals = "show_old_temporals"
// LocalInFile is the name for 'local_infile' system variable.
LocalInFile = "local_infile"
// PerformanceSchema is the name for 'performance_schema' system variable.
PerformanceSchema = "performance_schema"
// Flush is the name for 'flush' system variable.
Flush = "flush"
// SlaveAllowBatching is the name for 'slave_allow_batching' system variable.
SlaveAllowBatching = "slave_allow_batching"
// MyISAMUseMmap is the name for 'myisam_use_mmap' system variable.
MyISAMUseMmap = "myisam_use_mmap"
// InnodbFilePerTable is the name for 'innodb_file_per_table' system variable.
InnodbFilePerTable = "innodb_file_per_table"
// InnodbLogCompressedPages is the name for 'innodb_log_compressed_pages' system variable.
InnodbLogCompressedPages = "innodb_log_compressed_pages"
// InnodbPrintAllDeadlocks is the name for 'innodb_print_all_deadlocks' system variable.
InnodbPrintAllDeadlocks = "innodb_print_all_deadlocks"
// InnodbStrictMode is the name for 'innodb_strict_mode' system variable.
InnodbStrictMode = "innodb_strict_mode"
// InnodbCmpPerIndexEnabled is the name for 'innodb_cmp_per_index_enabled' system variable.
InnodbCmpPerIndexEnabled = "innodb_cmp_per_index_enabled"
// InnodbBufferPoolDumpAtShutdown is the name for 'innodb_buffer_pool_dump_at_shutdown' system variable.
InnodbBufferPoolDumpAtShutdown = "innodb_buffer_pool_dump_at_shutdown"
// InnodbAdaptiveHashIndex is the name for 'innodb_adaptive_hash_index' system variable.
InnodbAdaptiveHashIndex = "innodb_adaptive_hash_index"
// InnodbFtEnableStopword is the name for 'innodb_ft_enable_stopword' system variable.
InnodbFtEnableStopword = "innodb_ft_enable_stopword"
// InnodbSupportXA is the name for 'innodb_support_xa' system variable.
InnodbSupportXA = "innodb_support_xa"
// InnodbOptimizeFullTextOnly is the name for 'innodb_optimize_fulltext_only' system variable.
InnodbOptimizeFullTextOnly = "innodb_optimize_fulltext_only"
// InnodbStatusOutputLocks is the name for 'innodb_status_output_locks' system variable.
InnodbStatusOutputLocks = "innodb_status_output_locks"
// InnodbBufferPoolDumpNow is the name for 'innodb_buffer_pool_dump_now' system variable.
InnodbBufferPoolDumpNow = "innodb_buffer_pool_dump_now"
// InnodbBufferPoolLoadNow is the name for 'innodb_buffer_pool_load_now' system variable.
InnodbBufferPoolLoadNow = "innodb_buffer_pool_load_now"
// InnodbStatsOnMetadata is the name for 'innodb_stats_on_metadata' system variable.
InnodbStatsOnMetadata = "innodb_stats_on_metadata"
// InnodbDisableSortFileCache is the name for 'innodb_disable_sort_file_cache' system variable.
InnodbDisableSortFileCache = "innodb_disable_sort_file_cache"
// InnodbStatsAutoRecalc is the name for 'innodb_stats_auto_recalc' system variable.
InnodbStatsAutoRecalc = "innodb_stats_auto_recalc"
// InnodbBufferPoolLoadAbort is the name for 'innodb_buffer_pool_load_abort' system variable.
InnodbBufferPoolLoadAbort = "innodb_buffer_pool_load_abort"
// InnodbStatsPersistent is the name for 'innodb_stats_persistent' system variable.
InnodbStatsPersistent = "innodb_stats_persistent"
// InnodbRandomReadAhead is the name for 'innodb_random_read_ahead' system variable.
InnodbRandomReadAhead = "innodb_random_read_ahead"
// InnodbAdaptiveFlushing is the name for 'innodb_adaptive_flushing' system variable.
InnodbAdaptiveFlushing = "innodb_adaptive_flushing"
// InnodbTableLocks is the name for 'innodb_table_locks' system variable.
InnodbTableLocks = "innodb_table_locks"
// InnodbStatusOutput is the name for 'innodb_status_output' system variable.
InnodbStatusOutput = "innodb_status_output"
// NetBufferLength is the name for 'net_buffer_length' system variable.
NetBufferLength = "net_buffer_length"
// QueryCacheSize is the name of 'query_cache_size' system variable.
QueryCacheSize = "query_cache_size"
// TxReadOnly is the name of 'tx_read_only' system variable.
TxReadOnly = "tx_read_only"
// TransactionReadOnly is the name of 'transaction_read_only' system variable.
TransactionReadOnly = "transaction_read_only"
// CharacterSetServer is the name of 'character_set_server' system variable.
CharacterSetServer = "character_set_server"
// AutoIncrementIncrement is the name of 'auto_increment_increment' system variable.
AutoIncrementIncrement = "auto_increment_increment"
// AutoIncrementOffset is the name of 'auto_increment_offset' system variable.
AutoIncrementOffset = "auto_increment_offset"
// InitConnect is the name of 'init_connect' system variable.
InitConnect = "init_connect"
// CollationServer is the name of 'collation_server' variable.
CollationServer = "collation_server"
// NetWriteTimeout is the name of 'net_write_timeout' variable.
NetWriteTimeout = "net_write_timeout"
// ThreadPoolSize is the name of 'thread_pool_size' variable.
ThreadPoolSize = "thread_pool_size"
// WindowingUseHighPrecision is the name of 'windowing_use_high_precision' system variable.
WindowingUseHighPrecision = "windowing_use_high_precision"
// OptimizerSwitch is the name of 'optimizer_switch' system variable.
OptimizerSwitch = "optimizer_switch"
// SystemTimeZone is the name of 'system_time_zone' system variable.
SystemTimeZone = "system_time_zone"
)
// GlobalVarAccessor is the interface for accessing global scope system and status variables.
type GlobalVarAccessor interface {
// GetGlobalSysVar gets the global system variable value for name.
GetGlobalSysVar(name string) (string, error)
// SetGlobalSysVar sets the global system variable name to value.
SetGlobalSysVar(name string, value string) error
}
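// A minimal in-memory sketch of the interface (illustrative only; this type
// does not exist in TiDB, where real accessors are backed by the session and
// domain layers):
//
//	type mapGlobalAccessor struct{ vals map[string]string }
//
//	func (a mapGlobalAccessor) GetGlobalSysVar(name string) (string, error) {
//		if v, ok := a.vals[name]; ok {
//			return v, nil
//		}
//		return "", ErrUnknownSystemVar.GenWithStackByArgs(name)
//	}
//
//	func (a mapGlobalAccessor) SetGlobalSysVar(name, value string) error {
//		a.vals[name] = value
//		return nil
//	}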
| sessionctx/variable/sysvar.go | 1 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.0038960434030741453,
0.00041962371324189007,
0.00016250551561824977,
0.00017539068358018994,
0.0006750528700649738
] |
{
"id": 3,
"code_window": [
"\t{Scope: ScopeNone, Name: \"ignore_builtin_innodb\", Value: \"0\"},\n",
"\t{Scope: ScopeGlobal, Name: \"slow_query_log_file\", Value: \"/usr/local/mysql/data/localhost-slow.log\"},\n",
"\t{Scope: ScopeGlobal, Name: \"innodb_thread_sleep_delay\", Value: \"10000\"},\n",
"\t{Scope: ScopeNone, Name: \"license\", Value: \"Apache License 2.0\"},\n",
"\t{Scope: ScopeGlobal, Name: \"innodb_ft_aux_table\", Value: \"\"},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: SQLWarnings, Value: BoolOff, Type: TypeBool},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: KeepFilesOnCreate, Value: BoolOff, Type: TypeBool},\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/noop.go",
"type": "replace",
"edit_start_line_idx": 349
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package unistore
import (
"context"
"fmt"
"testing"
. "github.com/pingcap/check"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
)
func TestT(t *testing.T) {
TestingT(t)
}
type testSuite struct{}
func (ts testSuite) SetUpSuite(c *C) {}
func (ts testSuite) TearDownSuite(c *C) {}
var _ = Suite(testSuite{})
func (ts testSuite) TestRawHandler(c *C) {
h := newRawHandler()
ctx := context.Background()
keys := make([][]byte, 10)
vals := make([][]byte, 10)
for i := 0; i < 10; i++ {
keys[i] = []byte(fmt.Sprintf("key%d", i))
vals[i] = []byte(fmt.Sprintf("val%d", i))
}
putResp, _ := h.RawPut(ctx, &kvrpcpb.RawPutRequest{Key: keys[0], Value: vals[0]})
c.Assert(putResp, NotNil)
getResp, _ := h.RawGet(ctx, &kvrpcpb.RawGetRequest{Key: keys[0]})
c.Assert(getResp, NotNil)
c.Assert(getResp.Value, BytesEquals, vals[0])
delResp, _ := h.RawDelete(ctx, &kvrpcpb.RawDeleteRequest{Key: keys[0]})
c.Assert(delResp, NotNil)
batchPutReq := &kvrpcpb.RawBatchPutRequest{Pairs: []*kvrpcpb.KvPair{
{Key: keys[1], Value: vals[1]},
{Key: keys[3], Value: vals[3]},
{Key: keys[5], Value: vals[5]},
}}
batchPutResp, _ := h.RawBatchPut(ctx, batchPutReq)
c.Assert(batchPutResp, NotNil)
batchGetResp, _ := h.RawBatchGet(ctx, &kvrpcpb.RawBatchGetRequest{Keys: [][]byte{keys[1], keys[3], keys[5]}})
c.Assert(batchGetResp, NotNil)
c.Assert(batchGetResp.Pairs, DeepEquals, batchPutReq.Pairs)
batchDelResp, _ := h.RawBatchDelete(ctx, &kvrpcpb.RawBatchDeleteRequest{Keys: [][]byte{keys[1], keys[3], keys[5]}})
c.Assert(batchDelResp, NotNil)
batchPutReq.Pairs = []*kvrpcpb.KvPair{
{Key: keys[6], Value: vals[6]},
{Key: keys[7], Value: vals[7]},
{Key: keys[8], Value: vals[8]},
}
batchPutResp, _ = h.RawBatchPut(ctx, batchPutReq)
c.Assert(batchPutResp, NotNil)
scanReq := &kvrpcpb.RawScanRequest{StartKey: keys[0], EndKey: keys[9], Limit: 2}
scanResp, _ := h.RawScan(ctx, scanReq)
c.Assert(scanResp, NotNil)
c.Assert(scanResp.Kvs, HasLen, 2)
c.Assert(batchPutReq.Pairs[:2], DeepEquals, scanResp.Kvs)
delRangeResp, _ := h.RawDeleteRange(ctx, &kvrpcpb.RawDeleteRangeRequest{StartKey: keys[0], EndKey: keys[9]})
c.Assert(delRangeResp, NotNil)
scanResp, _ = h.RawScan(ctx, scanReq)
c.Assert(scanResp.Kvs, HasLen, 0)
}
| store/mockstore/unistore/raw_handler_test.go | 0 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.00017422706878278404,
0.00017145562742371112,
0.00016698123363312334,
0.00017184593889396638,
0.000002134167289113975
] |
{
"id": 3,
"code_window": [
"\t{Scope: ScopeNone, Name: \"ignore_builtin_innodb\", Value: \"0\"},\n",
"\t{Scope: ScopeGlobal, Name: \"slow_query_log_file\", Value: \"/usr/local/mysql/data/localhost-slow.log\"},\n",
"\t{Scope: ScopeGlobal, Name: \"innodb_thread_sleep_delay\", Value: \"10000\"},\n",
"\t{Scope: ScopeNone, Name: \"license\", Value: \"Apache License 2.0\"},\n",
"\t{Scope: ScopeGlobal, Name: \"innodb_ft_aux_table\", Value: \"\"},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: SQLWarnings, Value: BoolOff, Type: TypeBool},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: KeepFilesOnCreate, Value: BoolOff, Type: TypeBool},\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/noop.go",
"type": "replace",
"edit_start_line_idx": 349
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
// Package tikv provides tcp connection to kvserver.
package tikv
import (
"context"
"math"
"runtime/trace"
"sync"
"sync/atomic"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/tikvpb"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/store/tikv/config"
"github.com/pingcap/tidb/store/tikv/logutil"
"github.com/pingcap/tidb/store/tikv/metrics"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/metadata"
)
type batchCommandsEntry struct {
ctx context.Context
req *tikvpb.BatchCommandsRequest_Request
res chan *tikvpb.BatchCommandsResponse_Response
// forwardedHost is the address of a store which will handle the request.
// It's different from the address the request is sent to.
forwardedHost string
// canceled indicated the request is canceled or not.
canceled int32
err error
}
func (b *batchCommandsEntry) isCanceled() bool {
return atomic.LoadInt32(&b.canceled) == 1
}
func (b *batchCommandsEntry) error(err error) {
b.err = err
close(b.res)
}
// batchCommandsBuilder collects a batch of `batchCommandsEntry`s to build
// `BatchCommandsRequest`s.
type batchCommandsBuilder struct {
// Each BatchCommandsRequest_Request sent to a store has a unique identity to
// distinguish its response.
idAlloc uint64
entries []*batchCommandsEntry
requests []*tikvpb.BatchCommandsRequest_Request
requestIDs []uint64
// In most cases, there isn't any forwardingReq.
forwardingReqs map[string]*tikvpb.BatchCommandsRequest
}
func (b *batchCommandsBuilder) len() int {
return len(b.entries)
}
func (b *batchCommandsBuilder) push(entry *batchCommandsEntry) {
b.entries = append(b.entries, entry)
}
// build builds BatchCommandsRequests and calls collect() for each valid entry.
// The first return value is the request that doesn't need forwarding.
// The second is a map that maps forwarded hosts to requests.
func (b *batchCommandsBuilder) build(
collect func(id uint64, e *batchCommandsEntry),
) (*tikvpb.BatchCommandsRequest, map[string]*tikvpb.BatchCommandsRequest) {
for _, e := range b.entries {
if e.isCanceled() {
continue
}
if collect != nil {
collect(b.idAlloc, e)
}
if e.forwardedHost == "" {
b.requestIDs = append(b.requestIDs, b.idAlloc)
b.requests = append(b.requests, e.req)
} else {
batchReq, ok := b.forwardingReqs[e.forwardedHost]
if !ok {
batchReq = &tikvpb.BatchCommandsRequest{}
b.forwardingReqs[e.forwardedHost] = batchReq
}
batchReq.RequestIds = append(batchReq.RequestIds, b.idAlloc)
batchReq.Requests = append(batchReq.Requests, e.req)
}
b.idAlloc++
}
var req *tikvpb.BatchCommandsRequest
if len(b.requests) > 0 {
req = &tikvpb.BatchCommandsRequest{
Requests: b.requests,
RequestIds: b.requestIDs,
}
}
return req, b.forwardingReqs
}
func (b *batchCommandsBuilder) cancel(e error) {
for _, entry := range b.entries {
entry.error(e)
}
}
// reset resets the builder to the initial state.
// Should call it before collecting a new batch.
func (b *batchCommandsBuilder) reset() {
// NOTE: We can't simply set entries = entries[:0] here.
// The data in the cap part of the slice would reference the prewrite keys whose
// underlying memory is borrowed from memdb. The references prevent the GC from
// releasing the memdb, leading to serious memory leaks in the large-transaction case.
for i := 0; i < len(b.entries); i++ {
b.entries[i] = nil
}
b.entries = b.entries[:0]
for i := 0; i < len(b.requests); i++ {
b.requests[i] = nil
}
b.requests = b.requests[:0]
b.requestIDs = b.requestIDs[:0]
for k := range b.forwardingReqs {
delete(b.forwardingReqs, k)
}
}
func newBatchCommandsBuilder(maxBatchSize uint) *batchCommandsBuilder {
return &batchCommandsBuilder{
idAlloc: 0,
entries: make([]*batchCommandsEntry, 0, maxBatchSize),
requests: make([]*tikvpb.BatchCommandsRequest_Request, 0, maxBatchSize),
requestIDs: make([]uint64, 0, maxBatchSize),
forwardingReqs: make(map[string]*tikvpb.BatchCommandsRequest),
}
}
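// Illustrative life cycle of the builder (hypothetical, simplified from
// batchSendLoop below): entries are pushed as they arrive, built into one
// request plus per-forwarded-host requests, and the builder is reset before reuse:
//
//	b := newBatchCommandsBuilder(128)
//	b.push(entry)
//	req, forwarded := b.build(func(id uint64, e *batchCommandsEntry) {
//		// record id -> e so a response carrying this id can be matched back
//	})
//	_ = req       // sent on the store's main stream
//	_ = forwarded // sent on the per-host forwarding streams
//	b.reset()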
type batchConn struct {
// An atomic flag indicates whether the batch is idle or not.
// 0 for busy, others for idle.
idle uint32
// batchCommandsCh used for batch commands.
batchCommandsCh chan *batchCommandsEntry
batchCommandsClients []*batchCommandsClient
tikvTransportLayerLoad uint64
closed chan struct{}
reqBuilder *batchCommandsBuilder
// Notify rpcClient to check the idle flag
idleNotify *uint32
idleDetect *time.Timer
pendingRequests prometheus.Observer
batchSize prometheus.Observer
index uint32
}
func newBatchConn(connCount, maxBatchSize uint, idleNotify *uint32) *batchConn {
return &batchConn{
batchCommandsCh: make(chan *batchCommandsEntry, maxBatchSize),
batchCommandsClients: make([]*batchCommandsClient, 0, connCount),
tikvTransportLayerLoad: 0,
closed: make(chan struct{}),
reqBuilder: newBatchCommandsBuilder(maxBatchSize),
idleNotify: idleNotify,
idleDetect: time.NewTimer(idleTimeout),
}
}
func (a *batchConn) isIdle() bool {
return atomic.LoadUint32(&a.idle) != 0
}
// fetchAllPendingRequests fetches all pending requests from the channel.
func (a *batchConn) fetchAllPendingRequests(
maxBatchSize int,
) time.Time {
// Block on the first element.
var headEntry *batchCommandsEntry
select {
case headEntry = <-a.batchCommandsCh:
if !a.idleDetect.Stop() {
<-a.idleDetect.C
}
a.idleDetect.Reset(idleTimeout)
case <-a.idleDetect.C:
a.idleDetect.Reset(idleTimeout)
atomic.AddUint32(&a.idle, 1)
atomic.CompareAndSwapUint32(a.idleNotify, 0, 1)
// This batchConn is to be recycled
return time.Now()
case <-a.closed:
return time.Now()
}
if headEntry == nil {
return time.Now()
}
ts := time.Now()
a.reqBuilder.push(headEntry)
// This loop tries its best to collect more requests.
for a.reqBuilder.len() < maxBatchSize {
select {
case entry := <-a.batchCommandsCh:
if entry == nil {
return ts
}
a.reqBuilder.push(entry)
default:
return ts
}
}
return ts
}
// fetchMorePendingRequests fetches more pending requests from the channel.
func (a *batchConn) fetchMorePendingRequests(
maxBatchSize int,
batchWaitSize int,
maxWaitTime time.Duration,
) {
// Try to collect `batchWaitSize` requests, or wait `maxWaitTime`.
after := time.NewTimer(maxWaitTime)
for a.reqBuilder.len() < batchWaitSize {
select {
case entry := <-a.batchCommandsCh:
if entry == nil {
return
}
a.reqBuilder.push(entry)
case <-after.C:
return
}
}
after.Stop()
// Do an additional non-blocking try. Here we test the length against `maxBatchSize` instead
// of `batchWaitSize` because fetching as many requests as possible is necessary so that
// we can adjust `batchWaitSize` dynamically.
for a.reqBuilder.len() < maxBatchSize {
select {
case entry := <-a.batchCommandsCh:
if entry == nil {
return
}
a.reqBuilder.push(entry)
default:
return
}
}
}
const idleTimeout = 3 * time.Minute
func (a *batchConn) batchSendLoop(cfg config.TiKVClient) {
defer func() {
if r := recover(); r != nil {
metrics.TiKVPanicCounter.WithLabelValues(metrics.LabelBatchSendLoop).Inc()
logutil.BgLogger().Error("batchSendLoop",
zap.Reflect("r", r),
zap.Stack("stack"))
logutil.BgLogger().Info("restart batchSendLoop")
go a.batchSendLoop(cfg)
}
}()
bestBatchWaitSize := cfg.BatchWaitSize
for {
a.reqBuilder.reset()
start := a.fetchAllPendingRequests(int(cfg.MaxBatchSize))
a.pendingRequests.Observe(float64(len(a.batchCommandsCh)))
a.batchSize.Observe(float64(a.reqBuilder.len()))
// curl -XPUT -d 'return(true)' http://0.0.0.0:10080/fail/github.com/pingcap/tidb/store/tikv/mockBlockOnBatchClient
failpoint.Inject("mockBlockOnBatchClient", func(val failpoint.Value) {
if val.(bool) {
time.Sleep(1 * time.Hour)
}
})
if a.reqBuilder.len() < int(cfg.MaxBatchSize) && cfg.MaxBatchWaitTime > 0 {
// If the target TiKV is overloaded, wait a while to collect more requests.
if atomic.LoadUint64(&a.tikvTransportLayerLoad) >= uint64(cfg.OverloadThreshold) {
metrics.TiKVBatchWaitOverLoad.Inc()
a.fetchMorePendingRequests(int(cfg.MaxBatchSize), int(bestBatchWaitSize), cfg.MaxBatchWaitTime)
}
}
length := a.reqBuilder.len()
if uint(length) == 0 {
// The batch command channel is closed.
return
} else if uint(length) < bestBatchWaitSize && bestBatchWaitSize > 1 {
// Waits too long to collect requests, reduce the target batch size.
bestBatchWaitSize--
} else if uint(length) > bestBatchWaitSize+4 && bestBatchWaitSize < cfg.MaxBatchSize {
bestBatchWaitSize++
}
a.getClientAndSend()
metrics.TiKVBatchSendLatency.Observe(float64(time.Since(start)))
}
}
func (a *batchConn) getClientAndSend() {
// Choose a connection by round-robin.
var (
cli *batchCommandsClient
target string
)
for i := 0; i < len(a.batchCommandsClients); i++ {
a.index = (a.index + 1) % uint32(len(a.batchCommandsClients))
target = a.batchCommandsClients[a.index].target
// The lock protects the batchCommandsClient from being closed while it's in use.
if a.batchCommandsClients[a.index].tryLockForSend() {
cli = a.batchCommandsClients[a.index]
break
}
}
if cli == nil {
logutil.BgLogger().Warn("no available connections", zap.String("target", target))
metrics.TiKVNoAvailableConnectionCounter.Inc()
// Please ensure the error is handled in region cache correctly.
a.reqBuilder.cancel(errors.New("no available connections"))
return
}
defer cli.unlockForSend()
req, forwardingReqs := a.reqBuilder.build(func(id uint64, e *batchCommandsEntry) {
cli.batched.Store(id, e)
if trace.IsEnabled() {
trace.Log(e.ctx, "rpc", "send")
}
})
if req != nil {
cli.send("", req)
}
for forwardedHost, req := range forwardingReqs {
cli.send(forwardedHost, req)
}
}
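// tryLock coordinates senders with stream re-creation: sends are rejected while the client's streams are being re-created.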
type tryLock struct {
*sync.Cond
reCreating bool
}
func (l *tryLock) tryLockForSend() bool {
l.L.Lock()
if l.reCreating {
l.L.Unlock()
return false
}
return true
}
func (l *tryLock) unlockForSend() {
l.L.Unlock()
}
func (l *tryLock) lockForRecreate() {
l.L.Lock()
for l.reCreating {
l.Wait()
}
l.reCreating = true
l.L.Unlock()
}
func (l *tryLock) unlockForRecreate() {
l.L.Lock()
l.reCreating = false
l.Broadcast()
l.L.Unlock()
}
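// batchCommandsStream is a BatchCommands stream bound to one forwarded host; an empty forwardedHost means no forwarding.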
type batchCommandsStream struct {
tikvpb.Tikv_BatchCommandsClient
forwardedHost string
}
func (s *batchCommandsStream) recv() (resp *tikvpb.BatchCommandsResponse, err error) {
defer func() {
if r := recover(); r != nil {
metrics.TiKVPanicCounter.WithLabelValues(metrics.LabelBatchRecvLoop).Inc()
logutil.BgLogger().Error("batchCommandsClient.recv panic",
zap.Reflect("r", r),
zap.Stack("stack"))
err = errors.SuspendStack(errors.New("batch conn recv panicked"))
}
}()
failpoint.Inject("gotErrorInRecvLoop", func(_ failpoint.Value) (resp *tikvpb.BatchCommandsResponse, err error) {
err = errors.New("injected error in batchRecvLoop")
return
})
// When `conn.Close()` is called, `client.Recv()` will return an error.
resp, err = s.Recv()
return
}
// recreate creates a new BatchCommands stream. The conn should be ready for work.
func (s *batchCommandsStream) recreate(conn *grpc.ClientConn) error {
tikvClient := tikvpb.NewTikvClient(conn)
ctx := context.TODO()
// Set metadata for forwarding stream.
if s.forwardedHost != "" {
ctx = metadata.AppendToOutgoingContext(ctx, forwardMetadataKey, s.forwardedHost)
}
streamClient, err := tikvClient.BatchCommands(ctx)
if err != nil {
return errors.Trace(err)
}
s.Tikv_BatchCommandsClient = streamClient
return nil
}
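// batchCommandsClient maintains one gRPC connection to a store and the batch-commands streams running on it.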
type batchCommandsClient struct {
// The target host.
target string
conn *grpc.ClientConn
// client and forwardedClients are protected by tryLock.
//
// client is the stream that needn't forwarding.
client *batchCommandsStream
// TiDB uses [gRPC-metadata](https://github.com/grpc/grpc-go/blob/master/Documentation/grpc-metadata.md) to
// indicate a request needs forwarding. gRPC doesn't support setting a metadata for each request in a stream,
// so we need to create a stream for each forwarded host.
//
// forwardedClients are clients that need forwarding. It's a map that maps forwarded hosts to streams.
forwardedClients map[string]*batchCommandsStream
batched sync.Map
tikvClientCfg config.TiKVClient
tikvLoad *uint64
dialTimeout time.Duration
// Increased in each reconnection.
// It's used to prevent the connection from reconnecting multiple times
// due to one failure because there may be more than one `batchRecvLoop`.
epoch uint64
// closed indicates the batch client is closed explicitly or not.
closed int32
// tryLock protects client when re-create the streaming.
tryLock
}
func (c *batchCommandsClient) isStopped() bool {
return atomic.LoadInt32(&c.closed) != 0
}
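// send lazily initializes the stream for forwardedHost and sends the batch request; any error fails all pending requests.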
func (c *batchCommandsClient) send(forwardedHost string, req *tikvpb.BatchCommandsRequest) {
err := c.initBatchClient(forwardedHost)
if err != nil {
logutil.BgLogger().Warn(
"init create streaming fail",
zap.String("target", c.target),
zap.String("forwardedHost", forwardedHost),
zap.Error(err),
)
c.failPendingRequests(err)
return
}
client := c.client
if forwardedHost != "" {
client = c.forwardedClients[forwardedHost]
}
if err := client.Send(req); err != nil {
logutil.BgLogger().Info(
"sending batch commands meets error",
zap.String("target", c.target),
zap.String("forwardedHost", forwardedHost),
zap.Uint64s("requestIDs", req.RequestIds),
zap.Error(err),
)
c.failPendingRequests(err)
}
}
// `failPendingRequests` must be called in locked contexts in order to avoid double closing channels.
func (c *batchCommandsClient) failPendingRequests(err error) {
failpoint.Inject("panicInFailPendingRequests", nil)
c.batched.Range(func(key, value interface{}) bool {
id, _ := key.(uint64)
entry, _ := value.(*batchCommandsEntry)
c.batched.Delete(id)
entry.error(err)
return true
})
}
func (c *batchCommandsClient) waitConnReady() (err error) {
if c.conn.GetState() == connectivity.Ready {
return
}
start := time.Now()
defer func() {
metrics.TiKVBatchClientWaitEstablish.Observe(time.Since(start).Seconds())
}()
dialCtx, cancel := context.WithTimeout(context.Background(), c.dialTimeout)
for {
s := c.conn.GetState()
if s == connectivity.Ready {
cancel()
break
}
if !c.conn.WaitForStateChange(dialCtx, s) {
cancel()
err = dialCtx.Err()
return
}
}
return
}
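// recreateStreamingClientOnce waits for the gRPC connection to become ready, then re-creates the stream once.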
func (c *batchCommandsClient) recreateStreamingClientOnce(streamClient *batchCommandsStream) error {
err := c.waitConnReady()
// Re-establish an application-layer stream. The TCP layer is handled by gRPC.
if err == nil {
err := streamClient.recreate(c.conn)
if err == nil {
logutil.BgLogger().Info(
"batchRecvLoop re-create streaming success",
zap.String("target", c.target),
zap.String("forwardedHost", streamClient.forwardedHost),
)
return nil
}
}
logutil.BgLogger().Info(
"batchRecvLoop re-create streaming fail",
zap.String("target", c.target),
zap.String("forwardedHost", streamClient.forwardedHost),
zap.Error(err),
)
return err
}
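// batchRecvLoop receives responses from one stream, dispatches them to the matching pending entries, and re-creates the stream on errors.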
func (c *batchCommandsClient) batchRecvLoop(cfg config.TiKVClient, tikvTransportLayerLoad *uint64, streamClient *batchCommandsStream) {
defer func() {
if r := recover(); r != nil {
metrics.TiKVPanicCounter.WithLabelValues(metrics.LabelBatchRecvLoop).Inc()
logutil.BgLogger().Error("batchRecvLoop",
zap.Reflect("r", r),
zap.Stack("stack"))
logutil.BgLogger().Info("restart batchRecvLoop")
go c.batchRecvLoop(cfg, tikvTransportLayerLoad, streamClient)
}
}()
epoch := atomic.LoadUint64(&c.epoch)
for {
resp, err := streamClient.recv()
if err != nil {
if c.isStopped() {
return
}
logutil.BgLogger().Info(
"batchRecvLoop fails when receiving, needs to reconnect",
zap.String("target", c.target),
zap.String("forwardedHost", streamClient.forwardedHost),
zap.Error(err),
)
now := time.Now()
if stopped := c.recreateStreamingClient(err, streamClient, &epoch); stopped {
return
}
metrics.TiKVBatchClientUnavailable.Observe(time.Since(now).Seconds())
continue
}
responses := resp.GetResponses()
for i, requestID := range resp.GetRequestIds() {
value, ok := c.batched.Load(requestID)
if !ok {
// This may be caused by batchCommandsClient#send hitting an ambiguous error: the request has been sent to TiKV but an error is still reported.
// TiKV then sends the response back through the stream and it reaches here.
logutil.BgLogger().Warn("batchRecvLoop receives outdated response", zap.Uint64("requestID", requestID), zap.String("forwardedHost", streamClient.forwardedHost))
continue
}
entry := value.(*batchCommandsEntry)
if trace.IsEnabled() {
trace.Log(entry.ctx, "rpc", "received")
}
logutil.Eventf(entry.ctx, "receive %T response with other %d batched requests from %s", responses[i].GetCmd(), len(responses), c.target)
if atomic.LoadInt32(&entry.canceled) == 0 {
// Put the response only if the request is not canceled.
entry.res <- responses[i]
}
c.batched.Delete(requestID)
}
transportLayerLoad := resp.GetTransportLayerLoad()
if transportLayerLoad > 0.0 && cfg.MaxBatchWaitTime > 0 {
// We need to consider TiKV load only if batch-wait strategy is enabled.
atomic.StoreUint64(tikvTransportLayerLoad, transportLayerLoad)
}
}
}
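// recreateStreamingClient re-creates a broken stream. It returns true if the client is stopped and the caller should exit.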
func (c *batchCommandsClient) recreateStreamingClient(err error, streamClient *batchCommandsStream, epoch *uint64) (stopped bool) {
// Forbids the batchSendLoop using the old client and
// blocks other streams trying to recreate.
c.lockForRecreate()
defer c.unlockForRecreate()
// Each batchCommandsStream has a batchRecvLoop. There is only one stream waiting for
// the connection to become ready in each epoch, to prevent the connection from reconnecting
// multiple times due to one failure.
//
// Check it in the locked scope to prevent the stream that gets the token from
// reconnecting belatedly, i.e.
// goroutine 1 | goroutine 2
// CAS success |
// | CAS failure
// | lockForRecreate
// | recreate error
// | unlockForRecreate
// lockForRecreate |
// waitConnReady |
// recreate |
// unlockForRecreate |
waitConnReady := atomic.CompareAndSwapUint64(&c.epoch, *epoch, *epoch+1)
if !waitConnReady {
*epoch = atomic.LoadUint64(&c.epoch)
if err := streamClient.recreate(c.conn); err != nil {
logutil.BgLogger().Info(
"batchRecvLoop re-create streaming fail",
zap.String("target", c.target),
zap.String("forwardedHost", streamClient.forwardedHost),
zap.Error(err),
)
}
return c.isStopped()
}
*epoch++
c.failPendingRequests(err) // fail all pending requests.
b := NewBackofferWithVars(context.Background(), math.MaxInt32, nil)
for { // try to re-create the streaming in the loop.
if c.isStopped() {
return true
}
err1 := c.recreateStreamingClientOnce(streamClient)
if err1 == nil {
break
}
err2 := b.Backoff(BoTiKVRPC, err1)
// As timeout is set to math.MaxInt32, err2 should always be nil.
// This line is added to make the 'make errcheck' pass.
terror.Log(err2)
}
return false
}
func (c *batchCommandsClient) newBatchStream(forwardedHost string) (*batchCommandsStream, error) {
batchStream := &batchCommandsStream{forwardedHost: forwardedHost}
if err := batchStream.recreate(c.conn); err != nil {
return nil, errors.Trace(err)
}
return batchStream, nil
}
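// initBatchClient creates the stream for forwardedHost on first use and starts a batchRecvLoop for it.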
func (c *batchCommandsClient) initBatchClient(forwardedHost string) error {
if forwardedHost == "" && c.client != nil {
return nil
}
if _, ok := c.forwardedClients[forwardedHost]; ok {
return nil
}
if err := c.waitConnReady(); err != nil {
return err
}
streamClient, err := c.newBatchStream(forwardedHost)
if err != nil {
return errors.Trace(err)
}
if forwardedHost == "" {
c.client = streamClient
} else {
c.forwardedClients[forwardedHost] = streamClient
}
go c.batchRecvLoop(c.tikvClientCfg, c.tikvLoad, streamClient)
return nil
}
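// Close marks all batch clients closed and signals the send loop to stop fetching requests.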
func (a *batchConn) Close() {
// Close all batchRecvLoop.
for _, c := range a.batchCommandsClients {
// After connections are closed, `batchRecvLoop`s will check the flag.
atomic.StoreInt32(&c.closed, 1)
}
// Don't close(batchCommandsCh) because when Close() is called, someone may be
// calling SendRequest and writing to batchCommandsCh; if we closed it here, the
// writing goroutine would panic.
close(a.closed)
}
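// sendBatchRequest pushes one request into the batch channel and waits for its response, honoring both ctx cancellation and the timeout.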
func sendBatchRequest(
ctx context.Context,
addr string,
forwardedHost string,
batchConn *batchConn,
req *tikvpb.BatchCommandsRequest_Request,
timeout time.Duration,
) (*tikvrpc.Response, error) {
entry := &batchCommandsEntry{
ctx: ctx,
req: req,
res: make(chan *tikvpb.BatchCommandsResponse_Response, 1),
forwardedHost: forwardedHost,
canceled: 0,
err: nil,
}
timer := time.NewTimer(timeout)
defer timer.Stop()
start := time.Now()
select {
case batchConn.batchCommandsCh <- entry:
case <-ctx.Done():
logutil.BgLogger().Warn("send request is cancelled",
zap.String("to", addr), zap.String("cause", ctx.Err().Error()))
return nil, errors.Trace(ctx.Err())
case <-timer.C:
return nil, errors.SuspendStack(errors.Annotate(context.DeadlineExceeded, "wait sendLoop"))
}
metrics.TiKVBatchWaitDuration.Observe(float64(time.Since(start)))
select {
case res, ok := <-entry.res:
if !ok {
return nil, errors.Trace(entry.err)
}
return tikvrpc.FromBatchCommandsResponse(res)
case <-ctx.Done():
atomic.StoreInt32(&entry.canceled, 1)
logutil.BgLogger().Warn("wait response is cancelled",
zap.String("to", addr), zap.String("cause", ctx.Err().Error()))
return nil, errors.Trace(ctx.Err())
case <-timer.C:
return nil, errors.SuspendStack(errors.Annotate(context.DeadlineExceeded, "wait recvLoop"))
}
}
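// recycleIdleConnArray closes and removes the connections that have been marked idle.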
func (c *RPCClient) recycleIdleConnArray() {
var addrs []string
c.RLock()
for _, conn := range c.conns {
if conn.batchConn != nil && conn.isIdle() {
addrs = append(addrs, conn.target)
}
}
c.RUnlock()
for _, addr := range addrs {
c.Lock()
conn, ok := c.conns[addr]
if ok {
delete(c.conns, addr)
logutil.BgLogger().Info("recycle idle connection",
zap.String("target", addr))
}
c.Unlock()
if conn != nil {
conn.Close()
}
}
}
| store/tikv/client_batch.go | 0 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.00042527419282123446,
0.00017359331832267344,
0.0001589188614161685,
0.00017047341680154204,
0.00002917963320214767
] |
{
"id": 3,
"code_window": [
"\t{Scope: ScopeNone, Name: \"ignore_builtin_innodb\", Value: \"0\"},\n",
"\t{Scope: ScopeGlobal, Name: \"slow_query_log_file\", Value: \"/usr/local/mysql/data/localhost-slow.log\"},\n",
"\t{Scope: ScopeGlobal, Name: \"innodb_thread_sleep_delay\", Value: \"10000\"},\n",
"\t{Scope: ScopeNone, Name: \"license\", Value: \"Apache License 2.0\"},\n",
"\t{Scope: ScopeGlobal, Name: \"innodb_ft_aux_table\", Value: \"\"},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: SQLWarnings, Value: BoolOff, Type: TypeBool},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: KeepFilesOnCreate, Value: BoolOff, Type: TypeBool},\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/noop.go",
"type": "replace",
"edit_start_line_idx": 349
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package statistics
import (
"bytes"
"encoding/gob"
"math"
"math/rand"
"sort"
"time"
"github.com/cznic/mathutil"
"github.com/pingcap/errors"
"github.com/pingcap/log"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/ranger"
"go.uber.org/atomic"
"go.uber.org/zap"
)
// Feedback represents the total scan count in range [lower, upper).
type Feedback struct {
Lower *types.Datum
Upper *types.Datum
Count int64
Repeat int64
Ndv int64
}
// QueryFeedback is used to represent the query feedback info. It contains the query's scan ranges and number of rows
// in each range.
type QueryFeedback struct {
PhysicalID int64
Hist *Histogram
Tp int
Feedback []Feedback
Expected int64 // Expected is the expected scan count of the corresponding query.
actual int64 // actual is the actual scan count of the corresponding query.
Valid bool // Valid represents whether this query feedback is still valid.
desc bool // desc indicates whether the corresponding query is a descending scan.
}
// NewQueryFeedback returns a new query feedback.
func NewQueryFeedback(physicalID int64, hist *Histogram, expected int64, desc bool) *QueryFeedback {
if hist != nil && hist.Len() == 0 {
hist = nil
}
tp := PkType
if hist != nil && hist.IsIndexHist() {
tp = IndexType
}
return &QueryFeedback{
PhysicalID: physicalID,
Valid: true,
Tp: tp,
Hist: hist,
Expected: expected,
desc: desc,
}
}
// QueryFeedbackKey is the key for a group of feedbacks on the same index/column.
type QueryFeedbackKey struct {
PhysicalID int64
HistID int64
Tp int
}
// QueryFeedbackMap is the collection of feedbacks.
type QueryFeedbackMap struct {
Size int
Feedbacks map[QueryFeedbackKey][]*QueryFeedback
}
// NewQueryFeedbackMap builds a feedback collection.
func NewQueryFeedbackMap() *QueryFeedbackMap {
return &QueryFeedbackMap{Feedbacks: make(map[QueryFeedbackKey][]*QueryFeedback)}
}
// Append adds a feedback into map.
func (m *QueryFeedbackMap) Append(q *QueryFeedback) {
k := QueryFeedbackKey{
PhysicalID: q.PhysicalID,
HistID: q.Hist.ID,
Tp: q.Tp,
}
m.append(k, []*QueryFeedback{q})
return
}
// MaxQueryFeedbackCount is the max number of feedbacks that are cached in memory.
var MaxQueryFeedbackCount = atomic.NewInt64(1 << 9)
func (m *QueryFeedbackMap) append(k QueryFeedbackKey, qs []*QueryFeedback) bool {
remained := MaxQueryFeedbackCount.Load() - int64(m.Size)
if remained <= 0 {
return false
}
s, ok := m.Feedbacks[k]
if !ok || s == nil {
s = make([]*QueryFeedback, 0, 8)
}
l := mathutil.MinInt64(int64(len(qs)), remained)
s = append(s, qs[:l]...)
m.Feedbacks[k] = s
m.Size = m.Size + int(l)
return true
}
// Merge combines 2 collections of feedbacks.
func (m *QueryFeedbackMap) Merge(r *QueryFeedbackMap) {
for k, qs := range r.Feedbacks {
if !m.append(k, qs) {
break
}
}
return
}
var (
// MaxNumberOfRanges is the max number of ranges before split to collect feedback.
MaxNumberOfRanges = 20
// FeedbackProbability is the probability to collect the feedback.
FeedbackProbability = atomic.NewFloat64(0)
)
func init() {
// This is for solving an import cycle.
// We need to read the value of FeedbackProbability when setting the variable tidb_analyze_version in the sessionctx/variable package,
// but the statistics package already imports sessionctx/variable here.
variable.FeedbackProbability = FeedbackProbability
}
// CalcErrorRate calculates the error rate of the current QueryFeedback.
func (q *QueryFeedback) CalcErrorRate() float64 {
expected := float64(q.Expected)
if q.actual == 0 {
if expected == 0 {
return 0
}
return 1
}
return math.Abs(expected-float64(q.actual)) / float64(q.actual)
}
// CollectFeedback decides whether to collect the feedback. It returns false when:
// 1: the feedback is not generated by a select query;
// 2: the histogram is nil or has no buckets;
// 3: the number of scan ranges exceeds the limit because it may affect the performance;
// 4: it does not pass the probabilistic sampler.
func CollectFeedback(sc *stmtctx.StatementContext, q *QueryFeedback, numOfRanges int) bool {
if !sc.InSelectStmt {
return false
}
if q.Hist == nil || q.Hist.Len() == 0 {
return false
}
if numOfRanges > MaxNumberOfRanges || rand.Float64() > FeedbackProbability.Load() {
return false
}
return true
}
// DecodeToRanges decodes the feedback to ranges.
func (q *QueryFeedback) DecodeToRanges(isIndex bool) ([]*ranger.Range, error) {
ranges := make([]*ranger.Range, 0, len(q.Feedback))
for _, val := range q.Feedback {
low, high := *val.Lower, *val.Upper
var lowVal, highVal []types.Datum
if isIndex {
var err error
// As we do not know the origin length, just use a custom value here.
lowVal, _, err = codec.DecodeRange(low.GetBytes(), 4, nil, nil)
if err != nil {
return nil, errors.Trace(err)
}
highVal, _, err = codec.DecodeRange(high.GetBytes(), 4, nil, nil)
if err != nil {
return nil, errors.Trace(err)
}
} else {
_, lowInt, err := codec.DecodeInt(val.Lower.GetBytes())
if err != nil {
return nil, errors.Trace(err)
}
_, highInt, err := codec.DecodeInt(val.Upper.GetBytes())
if err != nil {
return nil, errors.Trace(err)
}
lowVal = []types.Datum{types.NewIntDatum(lowInt)}
highVal = []types.Datum{types.NewIntDatum(highInt)}
}
ranges = append(ranges, &(ranger.Range{
LowVal: lowVal,
HighVal: highVal,
HighExclude: true,
}))
}
return ranges, nil
}
// DecodeIntValues is called when the current Feedback stores encoded int values.
func (q *QueryFeedback) DecodeIntValues() *QueryFeedback {
nq := &QueryFeedback{}
nq.Feedback = make([]Feedback, 0, len(q.Feedback))
for _, fb := range q.Feedback {
_, lowInt, err := codec.DecodeInt(fb.Lower.GetBytes())
if err != nil {
logutil.BgLogger().Debug("decode feedback lower bound value to integer failed", zap.Binary("value", fb.Lower.GetBytes()), zap.Error(err))
continue
}
_, highInt, err := codec.DecodeInt(fb.Upper.GetBytes())
if err != nil {
logutil.BgLogger().Debug("decode feedback upper bound value to integer failed", zap.Binary("value", fb.Upper.GetBytes()), zap.Error(err))
continue
}
low, high := types.NewIntDatum(lowInt), types.NewIntDatum(highInt)
nq.Feedback = append(nq.Feedback, Feedback{Lower: &low, Upper: &high, Count: fb.Count})
}
return nq
}
// StoreRanges stores the ranges for update.
func (q *QueryFeedback) StoreRanges(ranges []*ranger.Range) {
q.Feedback = make([]Feedback, 0, len(ranges))
for _, ran := range ranges {
q.Feedback = append(q.Feedback, Feedback{&ran.LowVal[0], &ran.HighVal[0], 0, 0, 0})
}
}
// Invalidate is used to invalidate the query feedback.
func (q *QueryFeedback) Invalidate() {
q.Feedback = nil
q.Hist = nil
q.Valid = false
q.actual = -1
}
// Actual gets the actual row count.
func (q *QueryFeedback) Actual() int64 {
if !q.Valid {
return -1
}
return q.actual
}
// Update updates the query feedback. `startKey` is the start scan key of the partial result, used to find
// the range for update. `counts` is the scan counts of each range, used to update the feedback count info.
func (q *QueryFeedback) Update(startKey kv.Key, counts, ndvs []int64) {
// Older versions do not have the counts info.
if len(counts) == 0 {
q.Invalidate()
return
}
sum := int64(0)
for _, count := range counts {
sum += count
}
metrics.DistSQLScanKeysPartialHistogram.Observe(float64(sum))
q.actual += sum
if !q.Valid || q.Hist == nil {
return
}
if q.Tp == IndexType {
startKey = tablecodec.CutIndexPrefix(startKey)
} else {
startKey = tablecodec.CutRowKeyPrefix(startKey)
}
// Find the range that startKey falls in.
idx := sort.Search(len(q.Feedback), func(i int) bool {
return bytes.Compare(q.Feedback[i].Lower.GetBytes(), startKey) > 0
})
idx--
if idx < 0 {
return
}
// If desc is true, the counts are reversed, so here we need to reverse them back.
if q.desc {
for i := 0; i < len(counts)/2; i++ {
j := len(counts) - i - 1
counts[i], counts[j] = counts[j], counts[i]
ndvs[i], ndvs[j] = ndvs[j], ndvs[i]
}
}
// Update the feedback count info.
for i, count := range counts {
if i+idx >= len(q.Feedback) {
q.Invalidate()
break
}
q.Feedback[i+idx].Count += count
q.Feedback[i+idx].Ndv += ndvs[i]
}
}
// NonOverlappedFeedbacks extracts a set of feedbacks which are not overlapped with each other.
func NonOverlappedFeedbacks(sc *stmtctx.StatementContext, fbs []Feedback) ([]Feedback, bool) {
// Sort feedbacks by end point and start point incrementally, then pick every feedback that is not overlapped
// with the previous chosen feedbacks.
var existsErr bool
sort.Slice(fbs, func(i, j int) bool {
res, err := fbs[i].Upper.CompareDatum(sc, fbs[j].Upper)
if err != nil {
existsErr = true
}
if existsErr || res != 0 {
return res < 0
}
res, err = fbs[i].Lower.CompareDatum(sc, fbs[j].Lower)
if err != nil {
existsErr = true
}
return res < 0
})
if existsErr {
return fbs, false
}
resFBs := make([]Feedback, 0, len(fbs))
previousEnd := &types.Datum{}
for _, fb := range fbs {
res, err := previousEnd.CompareDatum(sc, fb.Lower)
if err != nil {
return fbs, false
}
if res <= 0 {
resFBs = append(resFBs, fb)
previousEnd = fb.Upper
}
}
return resFBs, true
}
// BucketFeedback stands for all the feedback for a bucket.
type BucketFeedback struct {
feedback []Feedback // All the feedback info in the same bucket.
lower *types.Datum // The lower bound of the new bucket.
upper *types.Datum // The upper bound of the new bucket.
}
// outOfRange checks if the `val` is between `min` and `max`.
func outOfRange(sc *stmtctx.StatementContext, min, max, val *types.Datum) (int, error) {
result, err := val.CompareDatum(sc, min)
if err != nil {
return 0, err
}
if result < 0 {
return result, nil
}
result, err = val.CompareDatum(sc, max)
if err != nil {
return 0, err
}
if result > 0 {
return result, nil
}
return 0, nil
}
// adjustFeedbackBoundaries adjust the feedback boundaries according to the `min` and `max`.
// If the feedback has no intersection with `min` and `max`, we could just skip this feedback.
func (f *Feedback) adjustFeedbackBoundaries(sc *stmtctx.StatementContext, min, max *types.Datum) (bool, error) {
result, err := outOfRange(sc, min, max, f.Lower)
if err != nil {
return false, err
}
if result > 0 {
return true, nil
}
if result < 0 {
f.Lower = min
}
result, err = outOfRange(sc, min, max, f.Upper)
if err != nil {
return false, err
}
if result < 0 {
return true, nil
}
if result > 0 {
f.Upper = max
}
return false, nil
}
// buildBucketFeedback builds the feedback for each bucket from the histogram feedback.
func buildBucketFeedback(h *Histogram, feedback *QueryFeedback) (map[int]*BucketFeedback, int) {
bktID2FB := make(map[int]*BucketFeedback)
if len(feedback.Feedback) == 0 {
return bktID2FB, 0
}
total := 0
sc := &stmtctx.StatementContext{TimeZone: time.UTC}
min, max := types.GetMinValue(h.Tp), types.GetMaxValue(h.Tp)
for _, fb := range feedback.Feedback {
skip, err := fb.adjustFeedbackBoundaries(sc, &min, &max)
if err != nil {
logutil.BgLogger().Debug("adjust feedback boundaries failed", zap.Error(err))
continue
}
if skip {
continue
}
idx := h.Bounds.UpperBound(0, fb.Lower)
bktIdx := 0
// The last bucket also stores the feedback that falls outside the upper bound.
if idx >= h.Bounds.NumRows()-1 {
bktIdx = h.Len() - 1
} else if h.Len() == 1 {
bktIdx = 0
} else {
if idx == 0 {
bktIdx = 0
} else {
bktIdx = (idx - 1) / 2
}
// Make sure that this feedback lies within the bucket.
if chunk.Compare(h.Bounds.GetRow(2*(bktIdx+1)), 0, fb.Upper) < 0 {
continue
}
}
total++
bkt := bktID2FB[bktIdx]
if bkt == nil {
bkt = &BucketFeedback{lower: h.GetLower(bktIdx), upper: h.GetUpper(bktIdx)}
bktID2FB[bktIdx] = bkt
}
bkt.feedback = append(bkt.feedback, fb)
// Update the bound if necessary.
res, err := bkt.lower.CompareDatum(nil, fb.Lower)
if err != nil {
logutil.BgLogger().Debug("compare datum failed", zap.Any("value1", bkt.lower), zap.Any("value2", fb.Lower), zap.Error(err))
continue
}
if res > 0 {
bkt.lower = fb.Lower
}
res, err = bkt.upper.CompareDatum(nil, fb.Upper)
if err != nil {
logutil.BgLogger().Debug("compare datum failed", zap.Any("value1", bkt.upper), zap.Any("value2", fb.Upper), zap.Error(err))
continue
}
if res < 0 {
bkt.upper = fb.Upper
}
}
return bktID2FB, total
}
// getBoundaries gets the new boundaries after split.
func (b *BucketFeedback) getBoundaries(num int) []types.Datum {
// Get all the possible new boundaries.
vals := make([]types.Datum, 0, len(b.feedback)*2+2)
for _, fb := range b.feedback {
vals = append(vals, *fb.Lower, *fb.Upper)
}
vals = append(vals, *b.lower)
err := types.SortDatums(nil, vals)
if err != nil {
logutil.BgLogger().Debug("sort datums failed", zap.Error(err))
return []types.Datum{*b.lower, *b.upper}
}
total, interval := 0, len(vals)/num
// Pick values per `interval`.
for i := 0; i < len(vals); i, total = i+interval, total+1 {
vals[total] = vals[i]
}
// Append the upper bound.
vals[total] = *b.upper
vals = vals[:total+1]
total = 1
// Erase the repeat values.
for i := 1; i < len(vals); i++ {
cmp, err := vals[total-1].CompareDatum(nil, &vals[i])
if err != nil {
logutil.BgLogger().Debug("compare datum failed", zap.Any("value1", vals[total-1]), zap.Any("value2", vals[i]), zap.Error(err))
continue
}
if cmp == 0 {
continue
}
vals[total] = vals[i]
total++
}
return vals[:total]
}
// There are only two types of datum in bucket: one is `Blob`, which is for index; the other one
// is `Int`, which is for primary key.
type bucket = Feedback
// splitBucket first splits this "BucketFeedback" into "newNumBkts" new buckets,
// calculates the count for each new bucket, and merges any new bucket whose count
// is smaller than "minBucketFraction*totalCount" with the next new bucket,
// up to the last new bucket.
func (b *BucketFeedback) splitBucket(newNumBkts int, totalCount float64, originBucketCount float64, originalNdv int64) []bucket {
// Split the bucket.
bounds := b.getBoundaries(newNumBkts + 1)
bkts := make([]bucket, 0, len(bounds)-1)
sc := &stmtctx.StatementContext{TimeZone: time.UTC}
for i := 1; i < len(bounds); i++ {
newBkt := bucket{&bounds[i-1], bounds[i].Clone(), 0, 0, 0}
// get bucket count
_, ratio := getOverlapFraction(Feedback{b.lower, b.upper, int64(originBucketCount), 0, 0}, newBkt)
countInNewBkt := originBucketCount * ratio
ndvInNewBkt := int64(float64(originalNdv) * ratio)
countInNewBkt, ndvInNewBkt = b.refineBucketCount(sc, newBkt, countInNewBkt, ndvInNewBkt)
// do not split if the count of result bucket is too small.
if countInNewBkt < minBucketFraction*totalCount {
bounds[i] = bounds[i-1]
continue
}
newBkt.Count = int64(countInNewBkt)
newBkt.Ndv = ndvInNewBkt
bkts = append(bkts, newBkt)
// To guarantee that each bucket's range will not overlap.
setNextValue(&bounds[i])
}
return bkts
}
// getOverlapFraction gets the overlap fraction of feedback and bucket range. In order to get the bucket count, it also
// returns the ratio between bucket fraction and feedback fraction.
func getOverlapFraction(fb Feedback, bkt bucket) (float64, float64) {
datums := make([]types.Datum, 0, 4)
datums = append(datums, *fb.Lower, *fb.Upper)
datums = append(datums, *bkt.Lower, *bkt.Upper)
err := types.SortDatums(nil, datums)
if err != nil {
return 0, 0
}
minValue, maxValue := &datums[0], &datums[3]
fbLower := calcFraction4Datums(minValue, maxValue, fb.Lower)
fbUpper := calcFraction4Datums(minValue, maxValue, fb.Upper)
bktLower := calcFraction4Datums(minValue, maxValue, bkt.Lower)
bktUpper := calcFraction4Datums(minValue, maxValue, bkt.Upper)
ratio := (bktUpper - bktLower) / (fbUpper - fbLower)
// full overlap
if fbLower <= bktLower && bktUpper <= fbUpper {
return bktUpper - bktLower, ratio
}
if bktLower <= fbLower && fbUpper <= bktUpper {
return fbUpper - fbLower, ratio
}
// partial overlap
overlap := math.Min(bktUpper-fbLower, fbUpper-bktLower)
return overlap, ratio
}
// mergeFullyContainedFeedback merges the max fraction of non-overlapped feedbacks that are fully contained in the bucket.
func (b *BucketFeedback) mergeFullyContainedFeedback(sc *stmtctx.StatementContext, bkt bucket) (float64, float64, int64, bool) {
feedbacks := make([]Feedback, 0, len(b.feedback))
// Get all the fully contained feedbacks.
for _, fb := range b.feedback {
res, err := outOfRange(sc, bkt.Lower, bkt.Upper, fb.Lower)
if res != 0 || err != nil {
return 0, 0, 0, false
}
res, err = outOfRange(sc, bkt.Lower, bkt.Upper, fb.Upper)
if res != 0 || err != nil {
return 0, 0, 0, false
}
feedbacks = append(feedbacks, fb)
}
if len(feedbacks) == 0 {
return 0, 0, 0, false
}
sortedFBs, ok := NonOverlappedFeedbacks(sc, feedbacks)
if !ok {
return 0, 0, 0, false
}
var (
sumFraction, sumCount float64
ndv int64
)
for _, fb := range sortedFBs {
fraction, _ := getOverlapFraction(fb, bkt)
sumFraction += fraction
sumCount += float64(fb.Count)
ndv += fb.Ndv
}
return sumFraction, sumCount, ndv, true
}
// refineBucketCount refines the newly split bucket count. It uses the feedback that overlaps most
// with the bucket to get the bucket count.
func (b *BucketFeedback) refineBucketCount(sc *stmtctx.StatementContext, bkt bucket, defaultCount float64, defaultNdv int64) (float64, int64) {
bestFraction := minBucketFraction
count := defaultCount
ndv := defaultNdv
sumFraction, sumCount, sumNdv, ok := b.mergeFullyContainedFeedback(sc, bkt)
if ok && sumFraction > bestFraction {
bestFraction = sumFraction
count = sumCount / sumFraction
ndv = int64(float64(sumNdv) / sumFraction)
}
for _, fb := range b.feedback {
fraction, ratio := getOverlapFraction(fb, bkt)
// choose the max overlap fraction
if fraction > bestFraction {
bestFraction = fraction
count = float64(fb.Count) * ratio
ndv = int64(float64(fb.Ndv) * ratio)
}
}
return count, ndv
}
const (
defaultSplitCount = 10
splitPerFeedback = 10
)
// getSplitCount gets the split count for the histogram. It is based on the intuition that:
// 1: If we have more remaining unused buckets, we can split more.
// 2: We cannot split too aggressively, thus we make it split once every `splitPerFeedback` feedbacks.
func getSplitCount(numFeedbacks, remainBuckets int) int {
// Split more if have more buckets available.
splitCount := mathutil.Max(remainBuckets, defaultSplitCount)
return mathutil.Min(splitCount, numFeedbacks/splitPerFeedback)
}
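// bucketScore records the cost of merging the bucket with the given id into its previous bucket.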
type bucketScore struct {
id int
score float64
}
type bucketScores []bucketScore
func (bs bucketScores) Len() int { return len(bs) }
func (bs bucketScores) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] }
func (bs bucketScores) Less(i, j int) bool { return bs[i].score < bs[j].score }
const (
// To avoid the histogram becoming too imbalanced, we constrain the count of a bucket to the range
// [minBucketFraction * totalCount, maxBucketFraction * totalCount].
minBucketFraction = 1 / 10000.0
maxBucketFraction = 1 / 10.0
)
// getBucketScore gets the score for merge this bucket with previous one.
// TODO: We also need to consider the bucket hit count.
func getBucketScore(bkts []bucket, totalCount float64, id int) bucketScore {
preCount, count := float64(bkts[id-1].Count), float64(bkts[id].Count)
// do not merge if the result bucket is too large
if (preCount + count) > maxBucketFraction*totalCount {
return bucketScore{id, math.MaxFloat64}
}
// Merge them if the result bucket is already too small.
if (preCount + count) < minBucketFraction*totalCount {
return bucketScore{id, 0}
}
low, mid, high := bkts[id-1].Lower, bkts[id-1].Upper, bkts[id].Upper
// If we choose to merge, err is the absolute estimate error for the previous bucket.
err := calcFraction4Datums(low, high, mid)*(preCount+count) - preCount
return bucketScore{id, math.Abs(err / (preCount + count))}
}
// defaultBucketCount is the number of buckets a column histogram has.
var defaultBucketCount = 256
func mergeBuckets(bkts []bucket, isNewBuckets []bool, totalCount float64) []bucket {
mergeCount := len(bkts) - defaultBucketCount
if mergeCount <= 0 {
return bkts
}
bs := make(bucketScores, 0, len(bkts))
for i := 1; i < len(bkts); i++ {
// Do not merge the newly created buckets.
if !isNewBuckets[i] && !isNewBuckets[i-1] {
bs = append(bs, getBucketScore(bkts, totalCount, i))
}
}
sort.Sort(bs)
ids := make([]int, 0, mergeCount)
for i := 0; i < mergeCount; i++ {
ids = append(ids, bs[i].id)
}
sort.Ints(ids)
idCursor, bktCursor := 0, 0
for i := range bkts {
// Merge this bucket with last one.
if idCursor < mergeCount && ids[idCursor] == i {
bkts[bktCursor-1].Upper = bkts[i].Upper
bkts[bktCursor-1].Count += bkts[i].Count
bkts[bktCursor-1].Repeat = bkts[i].Repeat
bkts[bktCursor-1].Ndv += bkts[i].Ndv
idCursor++
} else {
bkts[bktCursor] = bkts[i]
bktCursor++
}
}
bkts = bkts[:bktCursor]
return bkts
}
// splitBuckets split the histogram buckets according to the feedback.
func splitBuckets(h *Histogram, feedback *QueryFeedback) ([]bucket, []bool, int64) {
bktID2FB, numTotalFBs := buildBucketFeedback(h, feedback)
buckets := make([]bucket, 0, h.Len())
isNewBuckets := make([]bool, 0, h.Len())
splitCount := getSplitCount(numTotalFBs, defaultBucketCount-h.Len())
for i := 0; i < h.Len(); i++ {
bktFB, ok := bktID2FB[i]
// No feedback, just use the original one.
if !ok {
buckets = append(buckets, bucket{h.GetLower(i), h.GetUpper(i), h.bucketCount(i), h.Buckets[i].Repeat, h.Buckets[i].NDV})
isNewBuckets = append(isNewBuckets, false)
continue
}
// Distribute the total split count to bucket based on number of bucket feedback.
newBktNums := splitCount * len(bktFB.feedback) / numTotalFBs
bkts := bktFB.splitBucket(newBktNums, h.TotalRowCount(), float64(h.bucketCount(i)), h.Buckets[i].NDV)
buckets = append(buckets, bkts...)
if len(bkts) == 1 {
isNewBuckets = append(isNewBuckets, false)
} else {
for i := 0; i < len(bkts); i++ {
isNewBuckets = append(isNewBuckets, true)
}
}
}
totCount := int64(0)
for _, bkt := range buckets {
totCount += bkt.Count
}
return buckets, isNewBuckets, totCount
}
// UpdateHistogram updates the histogram according to the feedback buckets.
func UpdateHistogram(h *Histogram, feedback *QueryFeedback, statsVer int) *Histogram {
if statsVer < Version2 {
// For stats versions that don't maintain the bucket NDV yet, reset the NDV.
for i := range feedback.Feedback {
feedback.Feedback[i].Ndv = 0
}
}
buckets, isNewBuckets, totalCount := splitBuckets(h, feedback)
buckets = mergeBuckets(buckets, isNewBuckets, float64(totalCount))
hist := buildNewHistogram(h, buckets)
// Update the NDV of primary key column.
if feedback.Tp == PkType {
hist.NDV = int64(hist.TotalRowCount())
// If we maintain the bucket NDV, we can also update the total NDV.
} else if feedback.Tp == IndexType && statsVer == 2 {
totNdv := int64(0)
for _, bkt := range buckets {
totNdv += bkt.Ndv
}
hist.NDV = totNdv
}
return hist
}
// UpdateCMSketchAndTopN updates the CMSketch and TopN by feedback.
func UpdateCMSketchAndTopN(c *CMSketch, t *TopN, eqFeedbacks []Feedback) (*CMSketch, *TopN) {
if c == nil || len(eqFeedbacks) == 0 {
return c, t
}
newCMSketch := c.Copy()
newTopN := t.Copy()
for _, fb := range eqFeedbacks {
updateValueBytes(newCMSketch, newTopN, fb.Lower.GetBytes(), uint64(fb.Count))
}
return newCMSketch, newTopN
}
func buildNewHistogram(h *Histogram, buckets []bucket) *Histogram {
hist := NewHistogram(h.ID, h.NDV, h.NullCount, h.LastUpdateVersion, h.Tp, len(buckets), h.TotColSize)
preCount := int64(0)
for _, bkt := range buckets {
hist.AppendBucketWithNDV(bkt.Lower, bkt.Upper, bkt.Count+preCount, bkt.Repeat, bkt.Ndv)
preCount += bkt.Count
}
return hist
}
// queryFeedback is used to serialize the QueryFeedback.
type queryFeedback struct {
IntRanges []int64
// HashValues is the murmur hash values for each index point.
// Note that index points will be stored in `IndexPoints`, we keep it here only for compatibility.
HashValues []uint64
IndexRanges [][]byte
// IndexPoints stores the value of each equal condition.
IndexPoints [][]byte
// Counts is the number of scan keys in each range. It first stores the counts for `IntRanges`, `IndexRanges` or `ColumnRanges`.
// After that, it stores the counts for `HashValues` or `IndexPoints`.
Counts []int64
ColumnRanges [][]byte
Ndvs []int64
}
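// encodePKFeedback encodes the feedback on an integer primary key, skipping point queries.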
func encodePKFeedback(q *QueryFeedback) (*queryFeedback, error) {
pb := &queryFeedback{}
for _, fb := range q.Feedback {
// There is no need to update the point queries.
if bytes.Compare(kv.Key(fb.Lower.GetBytes()).PrefixNext(), fb.Upper.GetBytes()) >= 0 {
continue
}
_, low, err := codec.DecodeInt(fb.Lower.GetBytes())
if err != nil {
return nil, errors.Trace(err)
}
_, high, err := codec.DecodeInt(fb.Upper.GetBytes())
if err != nil {
return nil, errors.Trace(err)
}
pb.IntRanges = append(pb.IntRanges, low, high)
pb.Counts = append(pb.Counts, fb.Count)
pb.Ndvs = append(pb.Ndvs, fb.Ndv)
}
return pb, nil
}
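// encodeIndexFeedback encodes index feedback, separating point queries from range queries.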
func encodeIndexFeedback(q *QueryFeedback) *queryFeedback {
pb := &queryFeedback{}
var pointCounts []int64
for _, fb := range q.Feedback {
if bytes.Compare(kv.Key(fb.Lower.GetBytes()).PrefixNext(), fb.Upper.GetBytes()) >= 0 {
pb.IndexPoints = append(pb.IndexPoints, fb.Lower.GetBytes())
pointCounts = append(pointCounts, fb.Count)
pb.Ndvs = append(pb.Ndvs, fb.Ndv)
} else {
pb.IndexRanges = append(pb.IndexRanges, fb.Lower.GetBytes(), fb.Upper.GetBytes())
pb.Counts = append(pb.Counts, fb.Count)
pb.Ndvs = append(pb.Ndvs, fb.Ndv)
}
}
pb.Counts = append(pb.Counts, pointCounts...)
return pb
}
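// encodeColumnFeedback encodes column feedback with the bounds encoded by codec.EncodeKey.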
func encodeColumnFeedback(q *QueryFeedback) (*queryFeedback, error) {
pb := &queryFeedback{}
sc := stmtctx.StatementContext{TimeZone: time.UTC}
for _, fb := range q.Feedback {
lowerBytes, err := codec.EncodeKey(&sc, nil, *fb.Lower)
if err != nil {
return nil, errors.Trace(err)
}
upperBytes, err := codec.EncodeKey(&sc, nil, *fb.Upper)
if err != nil {
return nil, errors.Trace(err)
}
pb.ColumnRanges = append(pb.ColumnRanges, lowerBytes, upperBytes)
pb.Counts = append(pb.Counts, fb.Count)
}
return pb, nil
}
// EncodeFeedback encodes the given feedback to byte slice.
func EncodeFeedback(q *QueryFeedback) ([]byte, error) {
var pb *queryFeedback
var err error
switch q.Tp {
case PkType:
pb, err = encodePKFeedback(q)
case IndexType:
pb = encodeIndexFeedback(q)
case ColType:
pb, err = encodeColumnFeedback(q)
}
if err != nil {
return nil, errors.Trace(err)
}
var buf bytes.Buffer
enc := gob.NewEncoder(&buf)
err = enc.Encode(pb)
return buf.Bytes(), errors.Trace(err)
}
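// decodeFeedbackForIndex decodes index feedback; point feedback directly updates the CM Sketch and TopN.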
func decodeFeedbackForIndex(q *QueryFeedback, pb *queryFeedback, c *CMSketch, t *TopN) {
q.Tp = IndexType
// decode the index range feedback
for i := 0; i < len(pb.IndexRanges); i += 2 {
lower, upper := types.NewBytesDatum(pb.IndexRanges[i]), types.NewBytesDatum(pb.IndexRanges[i+1])
q.Feedback = append(q.Feedback, Feedback{&lower, &upper, pb.Counts[i/2], 0, pb.Ndvs[i/2]})
}
if c != nil {
// decode the index point feedback, just set value count in CM Sketch
start := len(pb.IndexRanges) / 2
if len(pb.HashValues) > 0 {
for i := 0; i < len(pb.HashValues); i += 2 {
c.setValue(pb.HashValues[i], pb.HashValues[i+1], uint64(pb.Counts[start+i/2]))
}
return
}
for i := 0; i < len(pb.IndexPoints); i++ {
updateValueBytes(c, t, pb.IndexPoints[i], uint64(pb.Counts[start+i]))
}
}
}
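// decodeFeedbackForPK decodes the feedback ranges on an integer primary key.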
func decodeFeedbackForPK(q *QueryFeedback, pb *queryFeedback, isUnsigned bool) {
q.Tp = PkType
// decode feedback for primary key
for i := 0; i < len(pb.IntRanges); i += 2 {
var lower, upper types.Datum
if isUnsigned {
lower.SetUint64(uint64(pb.IntRanges[i]))
upper.SetUint64(uint64(pb.IntRanges[i+1]))
} else {
lower.SetInt64(pb.IntRanges[i])
upper.SetInt64(pb.IntRanges[i+1])
}
q.Feedback = append(q.Feedback, Feedback{&lower, &upper, pb.Counts[i/2], 0, pb.Ndvs[i/2]})
}
}
// ConvertDatumsType converts the datums type to `ft`.
func ConvertDatumsType(vals []types.Datum, ft *types.FieldType, loc *time.Location) error {
for i, val := range vals {
if val.Kind() == types.KindMinNotNull || val.Kind() == types.KindMaxValue {
continue
}
newVal, err := tablecodec.UnflattenDatums([]types.Datum{val}, []*types.FieldType{ft}, loc)
if err != nil {
return err
}
vals[i] = newVal[0]
}
return nil
}
func decodeColumnBounds(data []byte, ft *types.FieldType) ([]types.Datum, error) {
vals, _, err := codec.DecodeRange(data, 1, nil, nil)
if err != nil {
return nil, err
}
err = ConvertDatumsType(vals, ft, time.UTC)
return vals, err
}
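// decodeFeedbackForColumn decodes column feedback and converts the bounds back to the column's field type.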
func decodeFeedbackForColumn(q *QueryFeedback, pb *queryFeedback, ft *types.FieldType) error {
q.Tp = ColType
for i := 0; i < len(pb.ColumnRanges); i += 2 {
low, err := decodeColumnBounds(pb.ColumnRanges[i], ft)
if err != nil {
return err
}
high, err := decodeColumnBounds(pb.ColumnRanges[i+1], ft)
if err != nil {
return err
}
q.Feedback = append(q.Feedback, Feedback{&low[0], &high[0], pb.Counts[i/2], 0, 0})
}
return nil
}
// DecodeFeedback decodes a byte slice to feedback.
func DecodeFeedback(val []byte, q *QueryFeedback, c *CMSketch, t *TopN, ft *types.FieldType) error {
buf := bytes.NewBuffer(val)
dec := gob.NewDecoder(buf)
pb := &queryFeedback{}
err := dec.Decode(pb)
if err != nil {
return errors.Trace(err)
}
if len(pb.IndexRanges) > 0 || len(pb.HashValues) > 0 || len(pb.IndexPoints) > 0 {
decodeFeedbackForIndex(q, pb, c, t)
} else if len(pb.IntRanges) > 0 {
decodeFeedbackForPK(q, pb, mysql.HasUnsignedFlag(ft.Flag))
} else {
err = decodeFeedbackForColumn(q, pb, ft)
}
return err
}
// SplitFeedbackByQueryType splits the feedbacks into equality feedbacks and range feedbacks.
func SplitFeedbackByQueryType(feedbacks []Feedback) ([]Feedback, []Feedback) {
var eqFB, ranFB []Feedback
for _, fb := range feedbacks {
// Use `>=` here because sometimes the lower is equal to upper.
if bytes.Compare(kv.Key(fb.Lower.GetBytes()).PrefixNext(), fb.Upper.GetBytes()) >= 0 {
eqFB = append(eqFB, fb)
} else {
ranFB = append(ranFB, fb)
}
}
return eqFB, ranFB
}
// CleanRangeFeedbackByTopN removes the feedback ranges that overlap the TopN, so the part covered by the TopN will not be updated.
func CleanRangeFeedbackByTopN(feedbacks []Feedback, topN *TopN) []Feedback {
for i := len(feedbacks) - 1; i >= 0; i-- {
lIdx, lMatch := topN.LowerBound(feedbacks[i].Lower.GetBytes())
rIdx, _ := topN.LowerBound(feedbacks[i].Upper.GetBytes())
// If LowerBound returns the same result for the range's upper bound and lower bound and the lower one isn't matched,
// no top-n value overlaps the feedback's range.
if lIdx == rIdx && !lMatch {
continue
}
feedbacks = append(feedbacks[:i], feedbacks[i+1:]...)
}
return feedbacks
}
// setNextValue sets the next value for the given datum. For types like float,
// we do not set it because the type is not discrete and it does not matter much when estimating the scalar info.
func setNextValue(d *types.Datum) {
switch d.Kind() {
case types.KindBytes, types.KindString:
// Here is the encoded value instead of string value, so SetBytes is enough.
d.SetBytes(kv.Key(d.GetBytes()).PrefixNext())
case types.KindInt64:
d.SetInt64(d.GetInt64() + 1)
case types.KindUint64:
d.SetUint64(d.GetUint64() + 1)
case types.KindMysqlDuration:
duration := d.GetMysqlDuration()
duration.Duration = duration.Duration + 1
d.SetMysqlDuration(duration)
case types.KindMysqlTime:
t := d.GetMysqlTime()
sc := &stmtctx.StatementContext{TimeZone: types.BoundTimezone}
if _, err := t.Add(sc, types.Duration{Duration: 1, Fsp: 0}); err != nil {
log.Error(errors.ErrorStack(err))
}
d.SetMysqlTime(t)
}
}
// SupportColumnType checks if the type of the column can be updated by feedback.
func SupportColumnType(ft *types.FieldType) bool {
switch ft.Tp {
case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeFloat,
mysql.TypeDouble, mysql.TypeString, mysql.TypeVarString, mysql.TypeVarchar, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob,
mysql.TypeNewDecimal, mysql.TypeDuration, mysql.TypeDate, mysql.TypeDatetime, mysql.TypeTimestamp:
return true
}
return false
}
| statistics/feedback.go | 0 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.0006408118060790002,
0.00017660246521700174,
0.00016091194993350655,
0.0001713781093712896,
0.00004777696085511707
] |
{
"id": 4,
"code_window": [
"\t{Scope: ScopeGlobal | ScopeSession, Name: WindowingUseHighPrecision, Value: BoolOn, Type: TypeBool, IsHintUpdatable: true, SetSession: func(s *SessionVars, val string) error {\n",
"\t\ts.WindowingUseHighPrecision = TiDBOptOn(val)\n",
"\t\treturn nil\n",
"\t}},\n",
"\t{Scope: ScopeSession, Name: TiDBTxnScope, Value: func() string {\n",
"\t\tif isGlobal, _ := config.GetTxnScopeFromConfig(); isGlobal {\n",
"\t\t\treturn oracle.GlobalTxnScope\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t{Scope: ScopeNone, Name: \"license\", Value: \"Apache License 2.0\"},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: BlockEncryptionMode, Value: \"aes-128-ecb\"},\n",
"\t{Scope: ScopeSession, Name: \"last_insert_id\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"have_ssl\", Value: \"DISABLED\"},\n",
"\t{Scope: ScopeNone, Name: \"have_openssl\", Value: \"DISABLED\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_ca\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_cert\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_key\", Value: \"\"},\n",
"\n",
"\t/* TiDB specific variables */\n"
],
"file_path": "sessionctx/variable/sysvar.go",
"type": "add",
"edit_start_line_idx": 605
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package variable
import (
"fmt"
"math"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/cznic/mathutil"
"github.com/pingcap/errors"
"github.com/pingcap/parser/charset"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/collate"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/versioninfo"
atomic2 "go.uber.org/atomic"
)
// ScopeFlag is for system variable whether can be changed in global/session dynamically or not.
type ScopeFlag uint8
// TypeFlag is the SysVar type, which doesn't exactly match MySQL types.
type TypeFlag byte
const (
// ScopeNone means the system variable can not be changed dynamically.
ScopeNone ScopeFlag = 0
// ScopeGlobal means the system variable can be changed globally.
ScopeGlobal ScopeFlag = 1 << 0
// ScopeSession means the system variable can only be changed in current session.
ScopeSession ScopeFlag = 1 << 1
// TypeStr is the default
TypeStr TypeFlag = 0
// TypeBool for boolean
TypeBool TypeFlag = 1
// TypeInt for integer
TypeInt TypeFlag = 2
// TypeEnum for Enum
TypeEnum TypeFlag = 3
// TypeFloat for Double
TypeFloat TypeFlag = 4
// TypeUnsigned for Unsigned integer
TypeUnsigned TypeFlag = 5
// TypeTime for time of day (a TiDB extension)
TypeTime TypeFlag = 6
// TypeDuration for a golang duration (a TiDB extension)
TypeDuration TypeFlag = 7
// BoolOff is the canonical string representation of a boolean false.
BoolOff = "OFF"
// BoolOn is the canonical string representation of a boolean true.
BoolOn = "ON"
// On is the canonical string for ON
On = "ON"
// Off is the canonical string for OFF
Off = "OFF"
// Warn means return warnings
Warn = "WARN"
// IntOnly means enable for int type
IntOnly = "INT_ONLY"
)
// SysVar is for system variable.
type SysVar struct {
// Scope is for whether can be changed or not
Scope ScopeFlag
// Name is the variable name.
Name string
// Value is the variable value.
Value string
// Type is the MySQL type (optional)
Type TypeFlag
// MinValue will automatically be validated when specified (optional)
MinValue int64
// MaxValue will automatically be validated when specified (optional)
MaxValue uint64
// AutoConvertNegativeBool applies to boolean types (optional)
AutoConvertNegativeBool bool
// AutoConvertOutOfRange applies to int and unsigned types.
AutoConvertOutOfRange bool
// ReadOnly applies to all types
ReadOnly bool
// PossibleValues applies to ENUM type
PossibleValues []string
// AllowEmpty is a special TiDB behavior which means "read value from config" (do not use)
AllowEmpty bool
// AllowEmptyAll is a special behavior that only applies to TiDBCapturePlanBaseline, TiDBTxnMode (do not use)
AllowEmptyAll bool
// AllowAutoValue means that the special value "-1" is permitted, even when outside of range.
AllowAutoValue bool
// Validation is a callback after the type validation has been performed
Validation func(*SessionVars, string, string, ScopeFlag) (string, error)
// SetSession is called after validation
SetSession func(*SessionVars, string) error
// IsHintUpdatable indicate whether it's updatable via SET_VAR() hint (optional)
IsHintUpdatable bool
}
// SetSessionFromHook calls the SetSession func if it exists.
func (sv *SysVar) SetSessionFromHook(s *SessionVars, val string) error {
if sv.SetSession != nil {
return sv.SetSession(s, val)
}
return nil
}
// ValidateFromType provides automatic validation based on the SysVar's type
func (sv *SysVar) ValidateFromType(vars *SessionVars, value string, scope ScopeFlag) (string, error) {
// Some sysvars are read-only. Attempting to set should always fail.
if sv.ReadOnly || sv.Scope == ScopeNone {
return value, ErrIncorrectScope.GenWithStackByArgs(sv.Name, "read only")
}
// The string "DEFAULT" is a special keyword in MySQL, which restores
// the compiled sysvar value. In which case we can skip further validation.
if strings.EqualFold(value, "DEFAULT") {
return sv.Value, nil
}
// Some sysvars in TiDB have a special behavior where the empty string means
// "use the config file value". This needs to be cleaned up once the behavior
// for instance variables is determined.
if value == "" && ((sv.AllowEmpty && scope == ScopeSession) || sv.AllowEmptyAll) {
return value, nil
}
// Provide validation using the SysVar struct
switch sv.Type {
case TypeUnsigned:
return sv.checkUInt64SystemVar(value, vars)
case TypeInt:
return sv.checkInt64SystemVar(value, vars)
case TypeBool:
return sv.checkBoolSystemVar(value, vars)
case TypeFloat:
return sv.checkFloatSystemVar(value, vars)
case TypeEnum:
return sv.checkEnumSystemVar(value, vars)
case TypeTime:
return sv.checkTimeSystemVar(value, vars)
case TypeDuration:
return sv.checkDurationSystemVar(value, vars)
}
return value, nil // typeString
}
const (
localDayTimeFormat = "15:04"
// FullDayTimeFormat is the full format of analyze start time and end time.
FullDayTimeFormat = "15:04 -0700"
)
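// checkTimeSystemVar parses a time-of-day value in either the local or the full day-time format and normalizes it to FullDayTimeFormat.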
func (sv *SysVar) checkTimeSystemVar(value string, vars *SessionVars) (string, error) {
var t time.Time
var err error
if len(value) <= len(localDayTimeFormat) {
t, err = time.ParseInLocation(localDayTimeFormat, value, vars.TimeZone)
} else {
t, err = time.ParseInLocation(FullDayTimeFormat, value, vars.TimeZone)
}
if err != nil {
return "", err
}
return t.Format(FullDayTimeFormat), nil
}
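// checkDurationSystemVar parses a duration value and validates it against the variable's min/max bounds.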
func (sv *SysVar) checkDurationSystemVar(value string, vars *SessionVars) (string, error) {
d, err := time.ParseDuration(value)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
// Check for min/max violations
if int64(d) < sv.MinValue {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if uint64(d) > sv.MaxValue {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
// return a string representation of the duration
return d.String(), nil
}
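// Illustrative behavior: TypeDuration values round-trip through
// time.ParseDuration, so a valid "10m" normalizes to "10m0s", and anything
// time.ParseDuration rejects (e.g. "10x") yields ErrWrongTypeForVar.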
func (sv *SysVar) checkUInt64SystemVar(value string, vars *SessionVars) (string, error) {
if sv.AllowAutoValue && value == "-1" {
return value, nil
}
// There are two types of validation behaviors for integer values. The default
// is to return an error saying the value is out of range. For MySQL compatibility, some
// variables prefer to convert the value to the min/max and return a warning.
if !sv.AutoConvertOutOfRange {
return sv.checkUint64SystemVarWithError(value)
}
if len(value) == 0 {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if value[0] == '-' {
_, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(sv.Name, value))
return fmt.Sprintf("%d", sv.MinValue), nil
}
val, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if val < uint64(sv.MinValue) {
vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(sv.Name, value))
return fmt.Sprintf("%d", sv.MinValue), nil
}
if val > sv.MaxValue {
vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(sv.Name, value))
return fmt.Sprintf("%d", sv.MaxValue), nil
}
return value, nil
}
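// Illustrative behavior, assuming MinValue: 1, MaxValue: 100000 and
// AutoConvertOutOfRange: true (as declared for max_connections below):
//
//	sv.checkUInt64SystemVar("0", vars)      // -> "1", truncation warning appended
//	sv.checkUInt64SystemVar("999999", vars) // -> "100000", truncation warning appended
//	sv.checkUInt64SystemVar("abc", vars)    // -> ErrWrongTypeForVar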
func (sv *SysVar) checkInt64SystemVar(value string, vars *SessionVars) (string, error) {
if sv.AllowAutoValue && value == "-1" {
return value, nil
}
// There are two types of validation behaviors for integer values. The default
// is to return an error saying the value is out of range. For MySQL compatibility, some
// variables prefer to convert the value to the min/max and return a warning.
if !sv.AutoConvertOutOfRange {
return sv.checkInt64SystemVarWithError(value)
}
val, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if val < sv.MinValue {
vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(sv.Name, value))
return fmt.Sprintf("%d", sv.MinValue), nil
}
if val > int64(sv.MaxValue) {
vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(sv.Name, value))
return fmt.Sprintf("%d", sv.MaxValue), nil
}
return value, nil
}
func (sv *SysVar) checkEnumSystemVar(value string, vars *SessionVars) (string, error) {
// The value could be either a string or the ordinal position in the PossibleValues.
// This allows for the behavior 0 = OFF, 1 = ON, 2 = DEMAND etc.
var iStr string
for i, v := range sv.PossibleValues {
iStr = fmt.Sprintf("%d", i)
if strings.EqualFold(value, v) || strings.EqualFold(value, iStr) {
return v, nil
}
}
return value, ErrWrongValueForVar.GenWithStackByArgs(sv.Name, value)
}
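// Illustrative behavior for PossibleValues: []string{"OFF", "ON", "DEMAND"}:
// "on", "ON" and "1" all normalize to "ON", "2" normalizes to "DEMAND", and
// any other input yields ErrWrongValueForVar.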
func (sv *SysVar) checkFloatSystemVar(value string, vars *SessionVars) (string, error) {
if len(value) == 0 {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
val, err := strconv.ParseFloat(value, 64)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if val < float64(sv.MinValue) || val > float64(sv.MaxValue) {
return value, ErrWrongValueForVar.GenWithStackByArgs(sv.Name, value)
}
return value, nil
}
func (sv *SysVar) checkBoolSystemVar(value string, vars *SessionVars) (string, error) {
if strings.EqualFold(value, "ON") {
return BoolOn, nil
} else if strings.EqualFold(value, "OFF") {
return BoolOff, nil
}
val, err := strconv.ParseInt(value, 10, 64)
if err == nil {
// There are two types of conversion rules for integer values.
		// The default only allows 0 or 1, but a subset of variables convert
		// any negative integer to 1.
if !sv.AutoConvertNegativeBool {
if val == 0 {
return BoolOff, nil
} else if val == 1 {
return BoolOn, nil
}
} else {
if val == 1 || val < 0 {
return BoolOn, nil
} else if val == 0 {
return BoolOff, nil
}
}
}
return value, ErrWrongValueForVar.GenWithStackByArgs(sv.Name, value)
}
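// Illustrative behavior: "ON"/"1" normalize to BoolOn and "OFF"/"0" to
// BoolOff for every TypeBool variable; with AutoConvertNegativeBool, a
// negative integer such as "-1" also maps to BoolOn instead of returning
// ErrWrongValueForVar.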
func (sv *SysVar) checkUint64SystemVarWithError(value string) (string, error) {
if len(value) == 0 {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if value[0] == '-' {
		// In strict mode MySQL expects ErrWrongValue here, but in non-strict mode it returns ErrWrongType.
return value, ErrWrongValueForVar.GenWithStackByArgs(sv.Name, value)
}
val, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if val < uint64(sv.MinValue) || val > sv.MaxValue {
return value, ErrWrongValueForVar.GenWithStackByArgs(sv.Name, value)
}
return value, nil
}
func (sv *SysVar) checkInt64SystemVarWithError(value string) (string, error) {
if len(value) == 0 {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
val, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return value, ErrWrongTypeForVar.GenWithStackByArgs(sv.Name)
}
if val < sv.MinValue || val > int64(sv.MaxValue) {
return value, ErrWrongValueForVar.GenWithStackByArgs(sv.Name, value)
}
return value, nil
}
// ValidateFromHook calls the anonymous function on the sysvar if it exists.
func (sv *SysVar) ValidateFromHook(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if sv.Validation != nil {
return sv.Validation(vars, normalizedValue, originalValue, scope)
}
return normalizedValue, nil
}
// GetNativeValType attempts to convert val to the approximate MySQL non-string type
func (sv *SysVar) GetNativeValType(val string) (types.Datum, byte, uint) {
switch sv.Type {
case TypeUnsigned:
u, err := strconv.ParseUint(val, 10, 64)
if err != nil {
u = 0
}
return types.NewUintDatum(u), mysql.TypeLonglong, mysql.UnsignedFlag
case TypeBool:
optVal := int64(0) // OFF
if TiDBOptOn(val) {
optVal = 1
}
return types.NewIntDatum(optVal), mysql.TypeLong, 0
}
return types.NewStringDatum(val), mysql.TypeVarString, 0
}
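// Illustrative sketch of converting a validated value back into a native
// datum; error handling is elided.
//
//	sv := GetSysVar(MaxConnections)         // TypeUnsigned
//	d, tp, fl := sv.GetNativeValType("151") // uint datum, mysql.TypeLonglong, mysql.UnsignedFlag
//	_, _, _ = d, tp, fl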
var sysVars map[string]*SysVar
var sysVarsLock sync.RWMutex
// RegisterSysVar adds a sysvar to the SysVars list
func RegisterSysVar(sv *SysVar) {
name := strings.ToLower(sv.Name)
sysVarsLock.Lock()
sysVars[name] = sv
sysVarsLock.Unlock()
}
// UnregisterSysVar removes a sysvar from the SysVars list
// currently only used in tests.
func UnregisterSysVar(name string) {
name = strings.ToLower(name)
sysVarsLock.Lock()
delete(sysVars, name)
sysVarsLock.Unlock()
}
// GetSysVar returns sys var info for name as key.
func GetSysVar(name string) *SysVar {
name = strings.ToLower(name)
sysVarsLock.RLock()
defer sysVarsLock.RUnlock()
return sysVars[name]
}
// SetSysVar sets a sysvar. This will not propagate to the cluster, so it should only be
// used for instance scoped AUTO variables such as system_time_zone.
func SetSysVar(name string, value string) {
name = strings.ToLower(name)
sysVarsLock.Lock()
defer sysVarsLock.Unlock()
sysVars[name].Value = value
}
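// Illustrative sketch of the registry lifecycle; "my_plugin_var" is a
// hypothetical plugin-provided name.
//
//	RegisterSysVar(&SysVar{Scope: ScopeGlobal, Name: "my_plugin_var", Value: BoolOff, Type: TypeBool})
//	sv := GetSysVar("MY_PLUGIN_VAR") // lookups are case-insensitive
//	UnregisterSysVar("my_plugin_var")
//	_ = sv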
// GetSysVars returns the sysVars list under a RWLock
func GetSysVars() map[string]*SysVar {
sysVarsLock.RLock()
defer sysVarsLock.RUnlock()
return sysVars
}
// PluginVarNames is the global set of plugin variable names.
var PluginVarNames []string
func init() {
sysVars = make(map[string]*SysVar)
for _, v := range defaultSysVars {
RegisterSysVar(v)
}
for _, v := range noopSysVars {
RegisterSysVar(v)
}
initSynonymsSysVariables()
}
// BoolToOnOff returns the string representation of a bool, i.e. "ON/OFF"
func BoolToOnOff(b bool) string {
if b {
return BoolOn
}
return BoolOff
}
func int32ToBoolStr(i int32) string {
if i == 1 {
return BoolOn
}
return BoolOff
}
func checkCharacterValid(normalizedValue string, argName string) (string, error) {
if normalizedValue == "" {
return normalizedValue, errors.Trace(ErrWrongValueForVar.GenWithStackByArgs(argName, "NULL"))
}
cht, _, err := charset.GetCharsetInfo(normalizedValue)
if err != nil {
return normalizedValue, errors.Trace(err)
}
return cht, nil
}
var defaultSysVars = []*SysVar{
{Scope: ScopeGlobal, Name: MaxConnections, Value: "151", Type: TypeUnsigned, MinValue: 1, MaxValue: 100000, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: SQLSelectLimit, Value: "18446744073709551615", Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: DefaultWeekFormat, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 7, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: SQLModeVar, Value: mysql.DefaultSQLMode, IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: MaxExecutionTime, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true, IsHintUpdatable: true, SetSession: func(s *SessionVars, val string) error {
timeoutMS := tidbOptPositiveInt32(val, 0)
s.MaxExecutionTime = uint64(timeoutMS)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: CollationServer, Value: mysql.DefaultCollationName, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if _, err := collate.GetCollationByName(normalizedValue); err != nil {
return normalizedValue, errors.Trace(err)
}
return normalizedValue, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: SQLLogBin, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TimeZone, Value: "SYSTEM", IsHintUpdatable: true, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if strings.EqualFold(normalizedValue, "SYSTEM") {
return "SYSTEM", nil
}
_, err := parseTimeZone(normalizedValue)
return normalizedValue, err
}},
{Scope: ScopeNone, Name: SystemTimeZone, Value: "CST"},
{Scope: ScopeGlobal | ScopeSession, Name: ForeignKeyChecks, Value: BoolOff, Type: TypeBool, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if TiDBOptOn(normalizedValue) {
// TiDB does not yet support foreign keys.
// Return the original value in the warning, so that users are not confused.
vars.StmtCtx.AppendWarning(ErrUnsupportedValueForVar.GenWithStackByArgs(ForeignKeyChecks, originalValue))
return BoolOff, nil
} else if !TiDBOptOn(normalizedValue) {
return BoolOff, nil
}
return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs(ForeignKeyChecks, originalValue)
}},
{Scope: ScopeNone, Name: Hostname, Value: ServerHostname},
{Scope: ScopeSession, Name: Timestamp, Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: CharacterSetFilesystem, Value: "binary", Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
return checkCharacterValid(normalizedValue, CharacterSetFilesystem)
}},
{Scope: ScopeGlobal | ScopeSession, Name: CollationDatabase, Value: mysql.DefaultCollationName, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if _, err := collate.GetCollationByName(normalizedValue); err != nil {
return normalizedValue, errors.Trace(err)
}
return normalizedValue, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: AutoIncrementIncrement, Value: strconv.FormatInt(DefAutoIncrementIncrement, 10), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint16, AutoConvertOutOfRange: true, SetSession: func(s *SessionVars, val string) error {
// AutoIncrementIncrement is valid in [1, 65535].
s.AutoIncrementIncrement = tidbOptPositiveInt32(val, DefAutoIncrementIncrement)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: AutoIncrementOffset, Value: strconv.FormatInt(DefAutoIncrementOffset, 10), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint16, AutoConvertOutOfRange: true, SetSession: func(s *SessionVars, val string) error {
// AutoIncrementOffset is valid in [1, 65535].
s.AutoIncrementOffset = tidbOptPositiveInt32(val, DefAutoIncrementOffset)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: CharacterSetClient, Value: mysql.DefaultCharset, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
return checkCharacterValid(normalizedValue, CharacterSetClient)
}},
{Scope: ScopeNone, Name: Port, Value: "4000", Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint16},
{Scope: ScopeNone, Name: LowerCaseTableNames, Value: "2"},
{Scope: ScopeNone, Name: LogBin, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: CharacterSetResults, Value: mysql.DefaultCharset, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if normalizedValue == "" {
return normalizedValue, nil
}
return checkCharacterValid(normalizedValue, "")
}},
{Scope: ScopeNone, Name: VersionComment, Value: "TiDB Server (Apache License 2.0) " + versioninfo.TiDBEdition + " Edition, MySQL 5.7 compatible"},
{Scope: ScopeGlobal | ScopeSession, Name: TxnIsolation, Value: "REPEATABLE-READ", Type: TypeEnum, PossibleValues: []string{"READ-UNCOMMITTED", "READ-COMMITTED", "REPEATABLE-READ", "SERIALIZABLE"}, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if normalizedValue == "SERIALIZABLE" || normalizedValue == "READ-UNCOMMITTED" {
if skipIsolationLevelCheck, err := GetSessionSystemVar(vars, TiDBSkipIsolationLevelCheck); err != nil {
return normalizedValue, err
} else if !TiDBOptOn(skipIsolationLevelCheck) {
return normalizedValue, ErrUnsupportedIsolationLevel.GenWithStackByArgs(normalizedValue)
}
}
return normalizedValue, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TransactionIsolation, Value: "REPEATABLE-READ", Type: TypeEnum, PossibleValues: []string{"READ-UNCOMMITTED", "READ-COMMITTED", "REPEATABLE-READ", "SERIALIZABLE"}, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if normalizedValue == "SERIALIZABLE" || normalizedValue == "READ-UNCOMMITTED" {
returnErr := ErrUnsupportedIsolationLevel.GenWithStackByArgs(normalizedValue)
if skipIsolationLevelCheck, err := GetSessionSystemVar(vars, TiDBSkipIsolationLevelCheck); err != nil {
return normalizedValue, err
} else if !TiDBOptOn(skipIsolationLevelCheck) {
return normalizedValue, returnErr
}
vars.StmtCtx.AppendWarning(returnErr)
}
return normalizedValue, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: CollationConnection, Value: mysql.DefaultCollationName, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if _, err := collate.GetCollationByName(normalizedValue); err != nil {
return normalizedValue, errors.Trace(err)
}
return normalizedValue, nil
}},
{Scope: ScopeNone, Name: Version, Value: mysql.ServerVersion},
{Scope: ScopeGlobal | ScopeSession, Name: AutoCommit, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: CharsetDatabase, Value: mysql.DefaultCharset, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
return checkCharacterValid(normalizedValue, CharsetDatabase)
}},
{Scope: ScopeGlobal | ScopeSession, Name: TxReadOnly, Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: TransactionReadOnly, Value: "0"},
{Scope: ScopeGlobal, Name: MaxPreparedStmtCount, Value: strconv.FormatInt(DefMaxPreparedStmtCount, 10), Type: TypeInt, MinValue: -1, MaxValue: 1048576, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: DataDir, Value: "/usr/local/mysql/data/"},
{Scope: ScopeGlobal | ScopeSession, Name: WaitTimeout, Value: strconv.FormatInt(DefWaitTimeout, 10), Type: TypeUnsigned, MinValue: 0, MaxValue: secondsPerYear, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: InteractiveTimeout, Value: "28800", Type: TypeUnsigned, MinValue: 1, MaxValue: secondsPerYear, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: InnodbLockWaitTimeout, Value: strconv.FormatInt(DefInnodbLockWaitTimeout, 10), Type: TypeUnsigned, MinValue: 1, MaxValue: 1073741824, AutoConvertOutOfRange: true, SetSession: func(s *SessionVars, val string) error {
lockWaitSec := tidbOptInt64(val, DefInnodbLockWaitTimeout)
s.LockWaitTimeout = lockWaitSec * 1000
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: GroupConcatMaxLen, Value: "1024", AutoConvertOutOfRange: true, IsHintUpdatable: true, Type: TypeUnsigned, MinValue: 4, MaxValue: math.MaxUint64, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
// https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_group_concat_max_len
// Minimum Value 4
// Maximum Value (64-bit platforms) 18446744073709551615
// Maximum Value (32-bit platforms) 4294967295
if mathutil.IntBits == 32 {
if val, err := strconv.ParseUint(normalizedValue, 10, 64); err == nil {
if val > uint64(math.MaxUint32) {
vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(GroupConcatMaxLen, originalValue))
return fmt.Sprintf("%d", math.MaxUint32), nil
}
}
}
return normalizedValue, nil
}},
{Scope: ScopeNone, Name: Socket, Value: "/tmp/myssock"},
{Scope: ScopeGlobal | ScopeSession, Name: CharacterSetConnection, Value: mysql.DefaultCharset, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
return checkCharacterValid(normalizedValue, CharacterSetConnection)
}},
{Scope: ScopeGlobal | ScopeSession, Name: CharacterSetServer, Value: mysql.DefaultCharset, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
return checkCharacterValid(normalizedValue, CharacterSetServer)
}},
{Scope: ScopeGlobal | ScopeSession, Name: MaxAllowedPacket, Value: "67108864", Type: TypeUnsigned, MinValue: 1024, MaxValue: MaxOfMaxAllowedPacket, AutoConvertOutOfRange: true},
{Scope: ScopeSession, Name: WarningCount, Value: "0", ReadOnly: true},
{Scope: ScopeSession, Name: ErrorCount, Value: "0", ReadOnly: true},
{Scope: ScopeGlobal | ScopeSession, Name: WindowingUseHighPrecision, Value: BoolOn, Type: TypeBool, IsHintUpdatable: true, SetSession: func(s *SessionVars, val string) error {
s.WindowingUseHighPrecision = TiDBOptOn(val)
return nil
}},
{Scope: ScopeSession, Name: TiDBTxnScope, Value: func() string {
if isGlobal, _ := config.GetTxnScopeFromConfig(); isGlobal {
return oracle.GlobalTxnScope
}
return oracle.LocalTxnScope
}()},
/* TiDB specific variables */
{Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowMPPExecution, Type: TypeBool, Value: BoolToOnOff(DefTiDBAllowMPPExecution)},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBBCJThresholdCount, Value: strconv.Itoa(DefBroadcastJoinThresholdCount), Type: TypeInt, MinValue: 0, MaxValue: math.MaxInt64, SetSession: func(s *SessionVars, val string) error {
s.BroadcastJoinThresholdCount = tidbOptInt64(val, DefBroadcastJoinThresholdCount)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBBCJThresholdSize, Value: strconv.Itoa(DefBroadcastJoinThresholdSize), Type: TypeInt, MinValue: 0, MaxValue: math.MaxInt64, SetSession: func(s *SessionVars, val string) error {
s.BroadcastJoinThresholdSize = tidbOptInt64(val, DefBroadcastJoinThresholdSize)
return nil
}},
{Scope: ScopeSession, Name: TiDBSnapshot, Value: ""},
{Scope: ScopeSession, Name: TiDBOptAggPushDown, Value: BoolToOnOff(DefOptAggPushDown), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
s.AllowAggPushDown = TiDBOptOn(val)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptBCJ, Value: BoolToOnOff(DefOptBCJ), Type: TypeBool, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if TiDBOptOn(normalizedValue) && vars.AllowBatchCop == 0 {
		return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs("Can't set Broadcast Join to 1 while tidb_allow_batch_cop is 0, please activate batch cop first.")
}
return normalizedValue, nil
}, SetSession: func(s *SessionVars, val string) error {
s.AllowBCJ = TiDBOptOn(val)
return nil
}},
{Scope: ScopeSession, Name: TiDBOptDistinctAggPushDown, Value: BoolToOnOff(config.GetGlobalConfig().Performance.DistinctAggPushDown), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
s.AllowDistinctAggPushDown = TiDBOptOn(val)
return nil
}},
{Scope: ScopeSession, Name: TiDBOptWriteRowID, Value: BoolToOnOff(DefOptWriteRowID), SetSession: func(s *SessionVars, val string) error {
s.AllowWriteRowID = TiDBOptOn(val)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBBuildStatsConcurrency, Value: strconv.Itoa(DefBuildStatsConcurrency)},
{Scope: ScopeGlobal, Name: TiDBAutoAnalyzeRatio, Value: strconv.FormatFloat(DefAutoAnalyzeRatio, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal, Name: TiDBAutoAnalyzeStartTime, Value: DefAutoAnalyzeStartTime, Type: TypeTime},
{Scope: ScopeGlobal, Name: TiDBAutoAnalyzeEndTime, Value: DefAutoAnalyzeEndTime, Type: TypeTime},
{Scope: ScopeSession, Name: TiDBChecksumTableConcurrency, Value: strconv.Itoa(DefChecksumTableConcurrency)},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBExecutorConcurrency, Value: strconv.Itoa(DefExecutorConcurrency), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBDistSQLScanConcurrency, Value: strconv.Itoa(DefDistSQLScanConcurrency), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptInSubqToJoinAndAgg, Value: BoolToOnOff(DefOptInSubqToJoinAndAgg), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
s.SetAllowInSubqToJoinAndAgg(TiDBOptOn(val))
return nil
}},
{Scope: ScopeSession, Name: TiDBOptPreferRangeScan, Value: BoolToOnOff(DefOptPreferRangeScan), Type: TypeBool, IsHintUpdatable: true, SetSession: func(s *SessionVars, val string) error {
s.SetAllowPreferRangeScan(TiDBOptOn(val))
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptCorrelationThreshold, Value: strconv.FormatFloat(DefOptCorrelationThreshold, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: 1, SetSession: func(s *SessionVars, val string) error {
s.CorrelationThreshold = tidbOptFloat64(val, DefOptCorrelationThreshold)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptCorrelationExpFactor, Value: strconv.Itoa(DefOptCorrelationExpFactor), Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64, SetSession: func(s *SessionVars, val string) error {
s.CorrelationExpFactor = int(tidbOptInt64(val, DefOptCorrelationExpFactor))
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptCPUFactor, Value: strconv.FormatFloat(DefOptCPUFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64, SetSession: func(s *SessionVars, val string) error {
s.CPUFactor = tidbOptFloat64(val, DefOptCPUFactor)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptTiFlashConcurrencyFactor, Value: strconv.FormatFloat(DefOptTiFlashConcurrencyFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64, SetSession: func(s *SessionVars, val string) error {
s.CopTiFlashConcurrencyFactor = tidbOptFloat64(val, DefOptTiFlashConcurrencyFactor)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptCopCPUFactor, Value: strconv.FormatFloat(DefOptCopCPUFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64, SetSession: func(s *SessionVars, val string) error {
s.CopCPUFactor = tidbOptFloat64(val, DefOptCopCPUFactor)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptNetworkFactor, Value: strconv.FormatFloat(DefOptNetworkFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptScanFactor, Value: strconv.FormatFloat(DefOptScanFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptDescScanFactor, Value: strconv.FormatFloat(DefOptDescScanFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptSeekFactor, Value: strconv.FormatFloat(DefOptSeekFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptMemoryFactor, Value: strconv.FormatFloat(DefOptMemoryFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptDiskFactor, Value: strconv.FormatFloat(DefOptDiskFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptConcurrencyFactor, Value: strconv.FormatFloat(DefOptConcurrencyFactor, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBIndexJoinBatchSize, Value: strconv.Itoa(DefIndexJoinBatchSize), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBIndexLookupSize, Value: strconv.Itoa(DefIndexLookupSize), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBIndexLookupConcurrency, Value: strconv.Itoa(DefIndexLookupConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBIndexLookupJoinConcurrency, Value: strconv.Itoa(DefIndexLookupJoinConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBIndexSerialScanConcurrency, Value: strconv.Itoa(DefIndexSerialScanConcurrency), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBSkipUTF8Check, Value: BoolToOnOff(DefSkipUTF8Check), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
s.SkipUTF8Check = TiDBOptOn(val)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBSkipASCIICheck, Value: BoolToOnOff(DefSkipASCIICheck), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
s.SkipASCIICheck = TiDBOptOn(val)
return nil
}},
{Scope: ScopeSession, Name: TiDBBatchInsert, Value: BoolToOnOff(DefBatchInsert), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBBatchDelete, Value: BoolToOnOff(DefBatchDelete), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBBatchCommit, Value: BoolToOnOff(DefBatchCommit), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBDMLBatchSize, Value: strconv.Itoa(DefDMLBatchSize), Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64},
{Scope: ScopeSession, Name: TiDBCurrentTS, Value: strconv.Itoa(DefCurretTS), ReadOnly: true},
{Scope: ScopeSession, Name: TiDBLastTxnInfo, Value: strconv.Itoa(DefCurretTS), ReadOnly: true},
{Scope: ScopeSession, Name: TiDBLastQueryInfo, Value: strconv.Itoa(DefCurretTS), ReadOnly: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBMaxChunkSize, Value: strconv.Itoa(DefMaxChunkSize), Type: TypeUnsigned, MinValue: maxChunkSizeLowerBound, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowBatchCop, Value: strconv.Itoa(DefTiDBAllowBatchCop), Type: TypeInt, MinValue: 0, MaxValue: 2, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if normalizedValue == "0" && vars.AllowBCJ {
		return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs("Can't set batch cop to 0 while tidb_opt_broadcast_join is 1, please set tidb_opt_broadcast_join to 0 first")
}
return normalizedValue, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBInitChunkSize, Value: strconv.Itoa(DefInitChunkSize), Type: TypeUnsigned, MinValue: 1, MaxValue: initChunkSizeUpperBound},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableCascadesPlanner, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableIndexMerge, Value: BoolOff, Type: TypeBool},
{Scope: ScopeSession, Name: TIDBMemQuotaQuery, Value: strconv.FormatInt(config.GetGlobalConfig().MemQuotaQuery, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TIDBMemQuotaHashJoin, Value: strconv.FormatInt(DefTiDBMemQuotaHashJoin, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TIDBMemQuotaMergeJoin, Value: strconv.FormatInt(DefTiDBMemQuotaMergeJoin, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TIDBMemQuotaSort, Value: strconv.FormatInt(DefTiDBMemQuotaSort, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TIDBMemQuotaTopn, Value: strconv.FormatInt(DefTiDBMemQuotaTopn, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TIDBMemQuotaIndexLookupReader, Value: strconv.FormatInt(DefTiDBMemQuotaIndexLookupReader, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TIDBMemQuotaIndexLookupJoin, Value: strconv.FormatInt(DefTiDBMemQuotaIndexLookupJoin, 10), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TiDBEnableStreaming, Value: BoolOff, Type: TypeBool},
{Scope: ScopeSession, Name: TiDBEnableChunkRPC, Value: BoolOn, Type: TypeBool},
{Scope: ScopeSession, Name: TxnIsolationOneShot, Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableTablePartition, Value: BoolOn, Type: TypeEnum, PossibleValues: []string{BoolOff, BoolOn, "AUTO"}},
{Scope: ScopeSession, Name: TiDBEnableListTablePartition, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBHashJoinConcurrency, Value: strconv.Itoa(DefTiDBHashJoinConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBProjectionConcurrency, Value: strconv.Itoa(DefTiDBProjectionConcurrency), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBHashAggPartialConcurrency, Value: strconv.Itoa(DefTiDBHashAggPartialConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBHashAggFinalConcurrency, Value: strconv.Itoa(DefTiDBHashAggFinalConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBWindowConcurrency, Value: strconv.Itoa(DefTiDBWindowConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBMergeJoinConcurrency, Value: strconv.Itoa(DefTiDBMergeJoinConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStreamAggConcurrency, Value: strconv.Itoa(DefTiDBStreamAggConcurrency), Type: TypeInt, MinValue: 1, MaxValue: math.MaxInt64, AllowAutoValue: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableParallelApply, Value: BoolToOnOff(DefTiDBEnableParallelApply), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBMemQuotaApplyCache, Value: strconv.Itoa(DefTiDBMemQuotaApplyCache)},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBBackoffLockFast, Value: strconv.Itoa(kv.DefBackoffLockFast), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBBackOffWeight, Value: strconv.Itoa(kv.DefBackOffWeight), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBRetryLimit, Value: strconv.Itoa(DefTiDBRetryLimit), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBDisableTxnAutoRetry, Value: BoolToOnOff(DefTiDBDisableTxnAutoRetry), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBConstraintCheckInPlace, Value: BoolToOnOff(DefTiDBConstraintCheckInPlace), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBTxnMode, Value: DefTiDBTxnMode, AllowEmptyAll: true, Type: TypeEnum, PossibleValues: []string{"pessimistic", "optimistic"}},
{Scope: ScopeGlobal, Name: TiDBRowFormatVersion, Value: strconv.Itoa(DefTiDBRowFormatV1), Type: TypeUnsigned, MinValue: 1, MaxValue: 2},
{Scope: ScopeSession, Name: TiDBOptimizerSelectivityLevel, Value: strconv.Itoa(DefTiDBOptimizerSelectivityLevel), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableWindowFunction, Value: BoolToOnOff(DefEnableWindowFunction), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableStrictDoubleTypeCheck, Value: BoolToOnOff(DefEnableStrictDoubleTypeCheck), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableVectorizedExpression, Value: BoolToOnOff(DefEnableVectorizedExpression), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableFastAnalyze, Value: BoolToOnOff(DefTiDBUseFastAnalyze), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBSkipIsolationLevelCheck, Value: BoolToOnOff(DefTiDBSkipIsolationLevelCheck), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableRateLimitAction, Value: BoolToOnOff(DefTiDBEnableRateLimitAction), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowFallbackToTiKV, Value: "", Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if normalizedValue == "" {
return "", nil
}
engines := strings.Split(normalizedValue, ",")
var formatVal string
storeTypes := make(map[kv.StoreType]struct{})
for i, engine := range engines {
engine = strings.TrimSpace(engine)
switch {
case strings.EqualFold(engine, kv.TiFlash.Name()):
if _, ok := storeTypes[kv.TiFlash]; !ok {
if i != 0 {
formatVal += ","
}
formatVal += kv.TiFlash.Name()
storeTypes[kv.TiFlash] = struct{}{}
}
default:
return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs(TiDBAllowFallbackToTiKV, normalizedValue)
}
}
return formatVal, nil
}},
/* The following variable is defined as session scope but is actually server scope. */
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableDynamicPrivileges, Value: BoolOff, Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
s.EnableDynamicPrivileges = TiDBOptOn(val)
return nil
}},
{Scope: ScopeSession, Name: TiDBGeneralLog, Value: BoolToOnOff(DefTiDBGeneralLog), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBPProfSQLCPU, Value: strconv.Itoa(DefTiDBPProfSQLCPU), Type: TypeInt, MinValue: 0, MaxValue: 1},
{Scope: ScopeSession, Name: TiDBDDLSlowOprThreshold, Value: strconv.Itoa(DefTiDBDDLSlowOprThreshold)},
{Scope: ScopeSession, Name: TiDBConfig, Value: "", ReadOnly: true},
{Scope: ScopeGlobal, Name: TiDBDDLReorgWorkerCount, Value: strconv.Itoa(DefTiDBDDLReorgWorkerCount), Type: TypeUnsigned, MinValue: 1, MaxValue: uint64(maxDDLReorgWorkerCount)},
{Scope: ScopeGlobal, Name: TiDBDDLReorgBatchSize, Value: strconv.Itoa(DefTiDBDDLReorgBatchSize), Type: TypeUnsigned, MinValue: int64(MinDDLReorgBatchSize), MaxValue: uint64(MaxDDLReorgBatchSize), AutoConvertOutOfRange: true},
{Scope: ScopeGlobal, Name: TiDBDDLErrorCountLimit, Value: strconv.Itoa(DefTiDBDDLErrorCountLimit), Type: TypeUnsigned, MinValue: 0, MaxValue: uint64(math.MaxInt64), AutoConvertOutOfRange: true},
{Scope: ScopeSession, Name: TiDBDDLReorgPriority, Value: "PRIORITY_LOW"},
{Scope: ScopeGlobal, Name: TiDBMaxDeltaSchemaCount, Value: strconv.Itoa(DefTiDBMaxDeltaSchemaCount), Type: TypeUnsigned, MinValue: 100, MaxValue: 16384, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableChangeColumnType, Value: BoolToOnOff(DefTiDBChangeColumnType), Type: TypeBool},
{Scope: ScopeGlobal, Name: TiDBEnableChangeMultiSchema, Value: BoolToOnOff(DefTiDBChangeMultiSchema), Type: TypeBool},
{Scope: ScopeGlobal, Name: TiDBEnablePointGetCache, Value: BoolToOnOff(DefTiDBPointGetCache), Type: TypeBool},
{Scope: ScopeGlobal, Name: TiDBEnableAlterPlacement, Value: BoolToOnOff(DefTiDBEnableAlterPlacement), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBForcePriority, Value: mysql.Priority2Str[DefTiDBForcePriority]},
{Scope: ScopeSession, Name: TiDBEnableRadixJoin, Value: BoolToOnOff(DefTiDBUseRadixJoin), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptJoinReorderThreshold, Value: strconv.Itoa(DefTiDBOptJoinReorderThreshold), Type: TypeUnsigned, MinValue: 0, MaxValue: 63},
{Scope: ScopeSession, Name: TiDBSlowQueryFile, Value: ""},
{Scope: ScopeGlobal, Name: TiDBScatterRegion, Value: BoolToOnOff(DefTiDBScatterRegion), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBWaitSplitRegionFinish, Value: BoolToOnOff(DefTiDBWaitSplitRegionFinish), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBWaitSplitRegionTimeout, Value: strconv.Itoa(DefWaitSplitRegionTimeout), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TiDBLowResolutionTSO, Value: BoolOff, Type: TypeBool},
{Scope: ScopeSession, Name: TiDBExpensiveQueryTimeThreshold, Value: strconv.Itoa(DefTiDBExpensiveQueryTimeThreshold), Type: TypeUnsigned, MinValue: int64(MinExpensiveQueryTimeThreshold), MaxValue: uint64(math.MaxInt64), AutoConvertOutOfRange: true},
{Scope: ScopeSession, Name: TiDBMemoryUsageAlarmRatio, Value: strconv.FormatFloat(config.GetGlobalConfig().Performance.MemoryUsageAlarmRatio, 'f', -1, 64), Type: TypeFloat, MinValue: 0.0, MaxValue: 1.0},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableNoopFuncs, Value: BoolToOnOff(DefTiDBEnableNoopFuncs), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBReplicaRead, Value: "leader", Type: TypeEnum, PossibleValues: []string{"leader", "follower", "leader-and-follower"}},
{Scope: ScopeSession, Name: TiDBAllowRemoveAutoInc, Value: BoolToOnOff(DefTiDBAllowRemoveAutoInc), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableStmtSummary, Value: BoolToOnOff(config.GetGlobalConfig().StmtSummary.Enable), Type: TypeBool, AllowEmpty: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStmtSummaryInternalQuery, Value: BoolToOnOff(config.GetGlobalConfig().StmtSummary.EnableInternalQuery), Type: TypeBool, AllowEmpty: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStmtSummaryRefreshInterval, Value: strconv.Itoa(config.GetGlobalConfig().StmtSummary.RefreshInterval), Type: TypeInt, MinValue: 1, MaxValue: uint64(math.MaxInt32), AllowEmpty: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStmtSummaryHistorySize, Value: strconv.Itoa(config.GetGlobalConfig().StmtSummary.HistorySize), Type: TypeInt, MinValue: 0, MaxValue: uint64(math.MaxUint8), AllowEmpty: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStmtSummaryMaxStmtCount, Value: strconv.FormatUint(uint64(config.GetGlobalConfig().StmtSummary.MaxStmtCount), 10), Type: TypeInt, MinValue: 1, MaxValue: uint64(math.MaxInt16), AllowEmpty: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStmtSummaryMaxSQLLength, Value: strconv.FormatUint(uint64(config.GetGlobalConfig().StmtSummary.MaxSQLLength), 10), Type: TypeInt, MinValue: 0, MaxValue: uint64(math.MaxInt32), AllowEmpty: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBCapturePlanBaseline, Value: BoolOff, Type: TypeBool, AllowEmptyAll: true},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBUsePlanBaselines, Value: BoolToOnOff(DefTiDBUsePlanBaselines), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEvolvePlanBaselines, Value: BoolToOnOff(DefTiDBEvolvePlanBaselines), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableExtendedStats, Value: BoolToOnOff(false), Type: TypeBool},
{Scope: ScopeGlobal, Name: TiDBEvolvePlanTaskMaxTime, Value: strconv.Itoa(DefTiDBEvolvePlanTaskMaxTime), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeGlobal, Name: TiDBEvolvePlanTaskStartTime, Value: DefTiDBEvolvePlanTaskStartTime, Type: TypeTime},
{Scope: ScopeGlobal, Name: TiDBEvolvePlanTaskEndTime, Value: DefTiDBEvolvePlanTaskEndTime, Type: TypeTime},
{Scope: ScopeSession, Name: TiDBIsolationReadEngines, Value: strings.Join(config.GetGlobalConfig().IsolationRead.Engines, ", "), Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
engines := strings.Split(normalizedValue, ",")
var formatVal string
for i, engine := range engines {
engine = strings.TrimSpace(engine)
if i != 0 {
formatVal += ","
}
switch {
case strings.EqualFold(engine, kv.TiKV.Name()):
formatVal += kv.TiKV.Name()
case strings.EqualFold(engine, kv.TiFlash.Name()):
formatVal += kv.TiFlash.Name()
case strings.EqualFold(engine, kv.TiDB.Name()):
formatVal += kv.TiDB.Name()
default:
return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs(TiDBIsolationReadEngines, normalizedValue)
}
}
return formatVal, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBStoreLimit, Value: strconv.FormatInt(atomic.LoadInt64(&config.GetGlobalConfig().TiKVClient.StoreLimit), 10), Type: TypeInt, MinValue: 0, MaxValue: uint64(math.MaxInt64), AutoConvertOutOfRange: true},
{Scope: ScopeSession, Name: TiDBMetricSchemaStep, Value: strconv.Itoa(DefTiDBMetricSchemaStep), Type: TypeUnsigned, MinValue: 10, MaxValue: 60 * 60 * 60},
{Scope: ScopeSession, Name: TiDBMetricSchemaRangeDuration, Value: strconv.Itoa(DefTiDBMetricSchemaRangeDuration), Type: TypeUnsigned, MinValue: 10, MaxValue: 60 * 60 * 60},
{Scope: ScopeSession, Name: TiDBSlowLogThreshold, Value: strconv.Itoa(logutil.DefaultSlowThreshold), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TiDBRecordPlanInSlowLog, Value: int32ToBoolStr(logutil.DefaultRecordPlanInSlowLog), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBEnableSlowLog, Value: BoolToOnOff(logutil.DefaultTiDBEnableSlowLog), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBQueryLogMaxLen, Value: strconv.Itoa(logutil.DefaultQueryLogMaxLen), Type: TypeInt, MinValue: -1, MaxValue: math.MaxInt64},
{Scope: ScopeSession, Name: TiDBCheckMb4ValueInUTF8, Value: BoolToOnOff(config.GetGlobalConfig().CheckMb4ValueInUTF8), Type: TypeBool},
{Scope: ScopeSession, Name: TiDBFoundInPlanCache, Value: BoolToOnOff(DefTiDBFoundInPlanCache), Type: TypeBool, ReadOnly: true},
{Scope: ScopeSession, Name: TiDBFoundInBinding, Value: BoolToOnOff(DefTiDBFoundInBinding), Type: TypeBool, ReadOnly: true},
{Scope: ScopeSession, Name: TiDBEnableCollectExecutionInfo, Value: BoolToOnOff(DefTiDBEnableCollectExecutionInfo), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowAutoRandExplicitInsert, Value: BoolToOnOff(DefTiDBAllowAutoRandExplicitInsert), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableClusteredIndex, Value: IntOnly, Type: TypeEnum, PossibleValues: []string{Off, On, IntOnly, "1", "0"}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBPartitionPruneMode, Value: string(Static), Type: TypeStr, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
mode := PartitionPruneMode(normalizedValue).Update()
if !mode.Valid() {
return normalizedValue, ErrWrongTypeForVar.GenWithStackByArgs(TiDBPartitionPruneMode)
}
return string(mode), nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBSlowLogMasking, Value: BoolToOnOff(DefTiDBRedactLog), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBRedactLog, Value: BoolToOnOff(DefTiDBRedactLog), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBShardAllocateStep, Value: strconv.Itoa(DefTiDBShardAllocateStep), Type: TypeInt, MinValue: 1, MaxValue: uint64(math.MaxInt64), AutoConvertOutOfRange: true},
{Scope: ScopeGlobal, Name: TiDBEnableTelemetry, Value: BoolToOnOff(DefTiDBEnableTelemetry), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableAmendPessimisticTxn, Value: BoolToOnOff(DefTiDBEnableAmendPessimisticTxn), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableAsyncCommit, Value: BoolToOnOff(DefTiDBEnableAsyncCommit), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnable1PC, Value: BoolToOnOff(DefTiDBEnable1PC), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBGuaranteeLinearizability, Value: BoolToOnOff(DefTiDBGuaranteeLinearizability), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBAnalyzeVersion, Value: strconv.Itoa(DefTiDBAnalyzeVersion), Type: TypeInt, MinValue: 1, MaxValue: 2, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if normalizedValue == "2" && FeedbackProbability.Load() > 0 {
var original string
var err error
if scope == ScopeGlobal {
original, err = vars.GlobalVarsAccessor.GetGlobalSysVar(TiDBAnalyzeVersion)
if err != nil {
return normalizedValue, nil
}
} else {
original = strconv.Itoa(vars.AnalyzeVersion)
}
vars.StmtCtx.AppendError(errors.New("variable tidb_analyze_version not updated because analyze version 2 is incompatible with query feedback. Please consider setting feedback-probability to 0.0 in config file to disable query feedback"))
return original, nil
}
return normalizedValue, nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableIndexMergeJoin, Value: BoolToOnOff(DefTiDBEnableIndexMergeJoin), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBTrackAggregateMemoryUsage, Value: BoolToOnOff(DefTiDBTrackAggregateMemoryUsage), Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBMultiStatementMode, Value: Off, Type: TypeEnum, PossibleValues: []string{Off, On, Warn}, SetSession: func(s *SessionVars, val string) error {
s.MultiStatementMode = TiDBOptMultiStmt(val)
return nil
}},
{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableExchangePartition, Value: BoolToOnOff(DefTiDBEnableExchangePartition), Type: TypeBool},
	/* tikv gc variables */
{Scope: ScopeGlobal, Name: TiDBGCEnable, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal, Name: TiDBGCRunInterval, Value: "10m0s", Type: TypeDuration, MinValue: int64(time.Minute * 10), MaxValue: math.MaxInt64},
{Scope: ScopeGlobal, Name: TiDBGCLifetime, Value: "10m0s", Type: TypeDuration, MinValue: int64(time.Minute * 10), MaxValue: math.MaxInt64},
{Scope: ScopeGlobal, Name: TiDBGCConcurrency, Value: "-1", Type: TypeInt, MinValue: 1, MaxValue: 128, AllowAutoValue: true},
{Scope: ScopeGlobal, Name: TiDBGCScanLockMode, Value: "PHYSICAL", Type: TypeEnum, PossibleValues: []string{"PHYSICAL", "LEGACY"}},
}
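// Editorial note: every entry above is registered into the sysVars map by
// init() below, so an illustrative lookup such as
//
//	GetSysVar(TiDBGCEnable).Value // "ON"
//
// reflects only the compiled-in default, not any value persisted cluster-wide.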
// FeedbackProbability points to the FeedbackProbability in statistics package.
// It's initialized in init() in feedback.go to avoid an import cycle.
var FeedbackProbability *atomic2.Float64
// SynonymsSysVariables is synonyms of system variables.
var SynonymsSysVariables = map[string][]string{}
func addSynonymsSysVariables(synonyms ...string) {
for _, s := range synonyms {
SynonymsSysVariables[s] = synonyms
}
}
func initSynonymsSysVariables() {
addSynonymsSysVariables(TxnIsolation, TransactionIsolation)
addSynonymsSysVariables(TxReadOnly, TransactionReadOnly)
}
// SetNamesVariables is the system variable names related to set names statements.
var SetNamesVariables = []string{
CharacterSetClient,
CharacterSetConnection,
CharacterSetResults,
}
// SetCharsetVariables is the system variable names related to set charset statements.
var SetCharsetVariables = []string{
CharacterSetClient,
CharacterSetResults,
}
const (
// CharacterSetConnection is the name for character_set_connection system variable.
CharacterSetConnection = "character_set_connection"
// CollationConnection is the name for collation_connection system variable.
CollationConnection = "collation_connection"
// CharsetDatabase is the name for character_set_database system variable.
CharsetDatabase = "character_set_database"
// CollationDatabase is the name for collation_database system variable.
CollationDatabase = "collation_database"
// CharacterSetFilesystem is the name for character_set_filesystem system variable.
CharacterSetFilesystem = "character_set_filesystem"
// CharacterSetClient is the name for character_set_client system variable.
CharacterSetClient = "character_set_client"
// CharacterSetSystem is the name for character_set_system system variable.
CharacterSetSystem = "character_set_system"
// GeneralLog is the name for 'general_log' system variable.
GeneralLog = "general_log"
// AvoidTemporalUpgrade is the name for 'avoid_temporal_upgrade' system variable.
AvoidTemporalUpgrade = "avoid_temporal_upgrade"
// MaxPreparedStmtCount is the name for 'max_prepared_stmt_count' system variable.
MaxPreparedStmtCount = "max_prepared_stmt_count"
// BigTables is the name for 'big_tables' system variable.
BigTables = "big_tables"
// CheckProxyUsers is the name for 'check_proxy_users' system variable.
CheckProxyUsers = "check_proxy_users"
// CoreFile is the name for 'core_file' system variable.
CoreFile = "core_file"
// DefaultWeekFormat is the name for 'default_week_format' system variable.
DefaultWeekFormat = "default_week_format"
// GroupConcatMaxLen is the name for 'group_concat_max_len' system variable.
GroupConcatMaxLen = "group_concat_max_len"
// DelayKeyWrite is the name for 'delay_key_write' system variable.
DelayKeyWrite = "delay_key_write"
// EndMarkersInJSON is the name for 'end_markers_in_json' system variable.
EndMarkersInJSON = "end_markers_in_json"
// Hostname is the name for 'hostname' system variable.
Hostname = "hostname"
// InnodbCommitConcurrency is the name for 'innodb_commit_concurrency' system variable.
InnodbCommitConcurrency = "innodb_commit_concurrency"
// InnodbFastShutdown is the name for 'innodb_fast_shutdown' system variable.
InnodbFastShutdown = "innodb_fast_shutdown"
// InnodbLockWaitTimeout is the name for 'innodb_lock_wait_timeout' system variable.
InnodbLockWaitTimeout = "innodb_lock_wait_timeout"
// SQLLogBin is the name for 'sql_log_bin' system variable.
SQLLogBin = "sql_log_bin"
// LogBin is the name for 'log_bin' system variable.
LogBin = "log_bin"
// MaxSortLength is the name for 'max_sort_length' system variable.
MaxSortLength = "max_sort_length"
// MaxSpRecursionDepth is the name for 'max_sp_recursion_depth' system variable.
MaxSpRecursionDepth = "max_sp_recursion_depth"
// MaxUserConnections is the name for 'max_user_connections' system variable.
MaxUserConnections = "max_user_connections"
// OfflineMode is the name for 'offline_mode' system variable.
OfflineMode = "offline_mode"
// InteractiveTimeout is the name for 'interactive_timeout' system variable.
InteractiveTimeout = "interactive_timeout"
// FlushTime is the name for 'flush_time' system variable.
FlushTime = "flush_time"
// PseudoSlaveMode is the name for 'pseudo_slave_mode' system variable.
PseudoSlaveMode = "pseudo_slave_mode"
// LowPriorityUpdates is the name for 'low_priority_updates' system variable.
LowPriorityUpdates = "low_priority_updates"
// LowerCaseTableNames is the name for 'lower_case_table_names' system variable.
LowerCaseTableNames = "lower_case_table_names"
// SessionTrackGtids is the name for 'session_track_gtids' system variable.
SessionTrackGtids = "session_track_gtids"
// OldPasswords is the name for 'old_passwords' system variable.
OldPasswords = "old_passwords"
// MaxConnections is the name for 'max_connections' system variable.
MaxConnections = "max_connections"
// SkipNameResolve is the name for 'skip_name_resolve' system variable.
SkipNameResolve = "skip_name_resolve"
// ForeignKeyChecks is the name for 'foreign_key_checks' system variable.
ForeignKeyChecks = "foreign_key_checks"
// SQLSafeUpdates is the name for 'sql_safe_updates' system variable.
SQLSafeUpdates = "sql_safe_updates"
// WarningCount is the name for 'warning_count' system variable.
WarningCount = "warning_count"
// ErrorCount is the name for 'error_count' system variable.
ErrorCount = "error_count"
// SQLSelectLimit is the name for 'sql_select_limit' system variable.
SQLSelectLimit = "sql_select_limit"
// MaxConnectErrors is the name for 'max_connect_errors' system variable.
MaxConnectErrors = "max_connect_errors"
// TableDefinitionCache is the name for 'table_definition_cache' system variable.
TableDefinitionCache = "table_definition_cache"
// TmpTableSize is the name for 'tmp_table_size' system variable.
TmpTableSize = "tmp_table_size"
// Timestamp is the name for 'timestamp' system variable.
Timestamp = "timestamp"
// ConnectTimeout is the name for 'connect_timeout' system variable.
ConnectTimeout = "connect_timeout"
// SyncBinlog is the name for 'sync_binlog' system variable.
SyncBinlog = "sync_binlog"
// BlockEncryptionMode is the name for 'block_encryption_mode' system variable.
BlockEncryptionMode = "block_encryption_mode"
// WaitTimeout is the name for 'wait_timeout' system variable.
WaitTimeout = "wait_timeout"
// ValidatePasswordNumberCount is the name of 'validate_password_number_count' system variable.
ValidatePasswordNumberCount = "validate_password_number_count"
// ValidatePasswordLength is the name of 'validate_password_length' system variable.
ValidatePasswordLength = "validate_password_length"
// Version is the name of 'version' system variable.
Version = "version"
// VersionComment is the name of 'version_comment' system variable.
VersionComment = "version_comment"
// PluginDir is the name of 'plugin_dir' system variable.
PluginDir = "plugin_dir"
// PluginLoad is the name of 'plugin_load' system variable.
PluginLoad = "plugin_load"
// Port is the name for 'port' system variable.
Port = "port"
// DataDir is the name for 'datadir' system variable.
DataDir = "datadir"
// Profiling is the name for 'Profiling' system variable.
Profiling = "profiling"
// Socket is the name for 'socket' system variable.
Socket = "socket"
// BinlogOrderCommits is the name for 'binlog_order_commits' system variable.
BinlogOrderCommits = "binlog_order_commits"
// MasterVerifyChecksum is the name for 'master_verify_checksum' system variable.
MasterVerifyChecksum = "master_verify_checksum"
// ValidatePasswordCheckUserName is the name for 'validate_password_check_user_name' system variable.
ValidatePasswordCheckUserName = "validate_password_check_user_name"
// SuperReadOnly is the name for 'super_read_only' system variable.
SuperReadOnly = "super_read_only"
// SQLNotes is the name for 'sql_notes' system variable.
SQLNotes = "sql_notes"
// QueryCacheType is the name for 'query_cache_type' system variable.
QueryCacheType = "query_cache_type"
// SlaveCompressedProtocol is the name for 'slave_compressed_protocol' system variable.
SlaveCompressedProtocol = "slave_compressed_protocol"
// BinlogRowQueryLogEvents is the name for 'binlog_rows_query_log_events' system variable.
BinlogRowQueryLogEvents = "binlog_rows_query_log_events"
// LogSlowSlaveStatements is the name for 'log_slow_slave_statements' system variable.
LogSlowSlaveStatements = "log_slow_slave_statements"
// LogSlowAdminStatements is the name for 'log_slow_admin_statements' system variable.
LogSlowAdminStatements = "log_slow_admin_statements"
// LogQueriesNotUsingIndexes is the name for 'log_queries_not_using_indexes' system variable.
LogQueriesNotUsingIndexes = "log_queries_not_using_indexes"
// QueryCacheWlockInvalidate is the name for 'query_cache_wlock_invalidate' system variable.
QueryCacheWlockInvalidate = "query_cache_wlock_invalidate"
// SQLAutoIsNull is the name for 'sql_auto_is_null' system variable.
SQLAutoIsNull = "sql_auto_is_null"
// RelayLogPurge is the name for 'relay_log_purge' system variable.
RelayLogPurge = "relay_log_purge"
// AutomaticSpPrivileges is the name for 'automatic_sp_privileges' system variable.
AutomaticSpPrivileges = "automatic_sp_privileges"
// SQLQuoteShowCreate is the name for 'sql_quote_show_create' system variable.
SQLQuoteShowCreate = "sql_quote_show_create"
// SlowQueryLog is the name for 'slow_query_log' system variable.
SlowQueryLog = "slow_query_log"
// BinlogDirectNonTransactionalUpdates is the name for 'binlog_direct_non_transactional_updates' system variable.
BinlogDirectNonTransactionalUpdates = "binlog_direct_non_transactional_updates"
// SQLBigSelects is the name for 'sql_big_selects' system variable.
SQLBigSelects = "sql_big_selects"
// LogBinTrustFunctionCreators is the name for 'log_bin_trust_function_creators' system variable.
LogBinTrustFunctionCreators = "log_bin_trust_function_creators"
// OldAlterTable is the name for 'old_alter_table' system variable.
OldAlterTable = "old_alter_table"
// EnforceGtidConsistency is the name for 'enforce_gtid_consistency' system variable.
EnforceGtidConsistency = "enforce_gtid_consistency"
// SecureAuth is the name for 'secure_auth' system variable.
SecureAuth = "secure_auth"
// UniqueChecks is the name for 'unique_checks' system variable.
UniqueChecks = "unique_checks"
// SQLWarnings is the name for 'sql_warnings' system variable.
SQLWarnings = "sql_warnings"
// AutoCommit is the name for 'autocommit' system variable.
AutoCommit = "autocommit"
// KeepFilesOnCreate is the name for 'keep_files_on_create' system variable.
KeepFilesOnCreate = "keep_files_on_create"
// ShowOldTemporals is the name for 'show_old_temporals' system variable.
ShowOldTemporals = "show_old_temporals"
// LocalInFile is the name for 'local_infile' system variable.
LocalInFile = "local_infile"
// PerformanceSchema is the name for 'performance_schema' system variable.
PerformanceSchema = "performance_schema"
// Flush is the name for 'flush' system variable.
Flush = "flush"
// SlaveAllowBatching is the name for 'slave_allow_batching' system variable.
SlaveAllowBatching = "slave_allow_batching"
// MyISAMUseMmap is the name for 'myisam_use_mmap' system variable.
MyISAMUseMmap = "myisam_use_mmap"
// InnodbFilePerTable is the name for 'innodb_file_per_table' system variable.
InnodbFilePerTable = "innodb_file_per_table"
// InnodbLogCompressedPages is the name for 'innodb_log_compressed_pages' system variable.
InnodbLogCompressedPages = "innodb_log_compressed_pages"
// InnodbPrintAllDeadlocks is the name for 'innodb_print_all_deadlocks' system variable.
InnodbPrintAllDeadlocks = "innodb_print_all_deadlocks"
// InnodbStrictMode is the name for 'innodb_strict_mode' system variable.
InnodbStrictMode = "innodb_strict_mode"
// InnodbCmpPerIndexEnabled is the name for 'innodb_cmp_per_index_enabled' system variable.
InnodbCmpPerIndexEnabled = "innodb_cmp_per_index_enabled"
// InnodbBufferPoolDumpAtShutdown is the name for 'innodb_buffer_pool_dump_at_shutdown' system variable.
InnodbBufferPoolDumpAtShutdown = "innodb_buffer_pool_dump_at_shutdown"
// InnodbAdaptiveHashIndex is the name for 'innodb_adaptive_hash_index' system variable.
InnodbAdaptiveHashIndex = "innodb_adaptive_hash_index"
// InnodbFtEnableStopword is the name for 'innodb_ft_enable_stopword' system variable.
InnodbFtEnableStopword = "innodb_ft_enable_stopword"
// InnodbSupportXA is the name for 'innodb_support_xa' system variable.
InnodbSupportXA = "innodb_support_xa"
// InnodbOptimizeFullTextOnly is the name for 'innodb_optimize_fulltext_only' system variable.
InnodbOptimizeFullTextOnly = "innodb_optimize_fulltext_only"
// InnodbStatusOutputLocks is the name for 'innodb_status_output_locks' system variable.
InnodbStatusOutputLocks = "innodb_status_output_locks"
// InnodbBufferPoolDumpNow is the name for 'innodb_buffer_pool_dump_now' system variable.
InnodbBufferPoolDumpNow = "innodb_buffer_pool_dump_now"
// InnodbBufferPoolLoadNow is the name for 'innodb_buffer_pool_load_now' system variable.
InnodbBufferPoolLoadNow = "innodb_buffer_pool_load_now"
// InnodbStatsOnMetadata is the name for 'innodb_stats_on_metadata' system variable.
InnodbStatsOnMetadata = "innodb_stats_on_metadata"
// InnodbDisableSortFileCache is the name for 'innodb_disable_sort_file_cache' system variable.
InnodbDisableSortFileCache = "innodb_disable_sort_file_cache"
// InnodbStatsAutoRecalc is the name for 'innodb_stats_auto_recalc' system variable.
InnodbStatsAutoRecalc = "innodb_stats_auto_recalc"
// InnodbBufferPoolLoadAbort is the name for 'innodb_buffer_pool_load_abort' system variable.
InnodbBufferPoolLoadAbort = "innodb_buffer_pool_load_abort"
// InnodbStatsPersistent is the name for 'innodb_stats_persistent' system variable.
InnodbStatsPersistent = "innodb_stats_persistent"
// InnodbRandomReadAhead is the name for 'innodb_random_read_ahead' system variable.
InnodbRandomReadAhead = "innodb_random_read_ahead"
// InnodbAdaptiveFlushing is the name for 'innodb_adaptive_flushing' system variable.
InnodbAdaptiveFlushing = "innodb_adaptive_flushing"
// InnodbTableLocks is the name for 'innodb_table_locks' system variable.
InnodbTableLocks = "innodb_table_locks"
// InnodbStatusOutput is the name for 'innodb_status_output' system variable.
InnodbStatusOutput = "innodb_status_output"
// NetBufferLength is the name for 'net_buffer_length' system variable.
NetBufferLength = "net_buffer_length"
// QueryCacheSize is the name of 'query_cache_size' system variable.
QueryCacheSize = "query_cache_size"
// TxReadOnly is the name of 'tx_read_only' system variable.
TxReadOnly = "tx_read_only"
// TransactionReadOnly is the name of 'transaction_read_only' system variable.
TransactionReadOnly = "transaction_read_only"
// CharacterSetServer is the name of 'character_set_server' system variable.
CharacterSetServer = "character_set_server"
// AutoIncrementIncrement is the name of 'auto_increment_increment' system variable.
AutoIncrementIncrement = "auto_increment_increment"
// AutoIncrementOffset is the name of 'auto_increment_offset' system variable.
AutoIncrementOffset = "auto_increment_offset"
// InitConnect is the name of 'init_connect' system variable.
InitConnect = "init_connect"
// CollationServer is the name of 'collation_server' variable.
CollationServer = "collation_server"
// NetWriteTimeout is the name of 'net_write_timeout' variable.
NetWriteTimeout = "net_write_timeout"
// ThreadPoolSize is the name of 'thread_pool_size' variable.
ThreadPoolSize = "thread_pool_size"
// WindowingUseHighPrecision is the name of 'windowing_use_high_precision' system variable.
WindowingUseHighPrecision = "windowing_use_high_precision"
// OptimizerSwitch is the name of 'optimizer_switch' system variable.
OptimizerSwitch = "optimizer_switch"
// SystemTimeZone is the name of 'system_time_zone' system variable.
SystemTimeZone = "system_time_zone"
)
// GlobalVarAccessor is the interface for accessing global scope system and status variables.
type GlobalVarAccessor interface {
// GetGlobalSysVar gets the global system variable value for name.
GetGlobalSysVar(name string) (string, error)
// SetGlobalSysVar sets the global system variable name to value.
SetGlobalSysVar(name string, value string) error
}
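// What follows is a minimal, illustrative sketch of a GlobalVarAccessor
// implementation backed by a plain map. It is provided for exposition only:
// memVarAccessor is a hypothetical name, not part of this package, and the
// sketch ignores concurrency and the unknown-variable errors that a real
// accessor must handle.
type memVarAccessor struct {
vars map[string]string
}
// Compile-time check that memVarAccessor satisfies GlobalVarAccessor.
var _ GlobalVarAccessor = (*memVarAccessor)(nil)
// GetGlobalSysVar returns the stored value; a real implementation would
// return an error for unknown names instead of an empty string.
func (a *memVarAccessor) GetGlobalSysVar(name string) (string, error) {
return a.vars[name], nil
}
// SetGlobalSysVar stores the value verbatim; a real implementation would
// validate it against the variable's type, scope, and allowed range.
func (a *memVarAccessor) SetGlobalSysVar(name string, value string) error {
a.vars[name] = value
return nil
}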
| sessionctx/variable/sysvar.go | 1 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.9982333183288574,
0.010500850155949593,
0.00016195823263842613,
0.00027677693287841976,
0.09067519009113312
] |
{
"id": 4,
"code_window": [
"\t{Scope: ScopeGlobal | ScopeSession, Name: WindowingUseHighPrecision, Value: BoolOn, Type: TypeBool, IsHintUpdatable: true, SetSession: func(s *SessionVars, val string) error {\n",
"\t\ts.WindowingUseHighPrecision = TiDBOptOn(val)\n",
"\t\treturn nil\n",
"\t}},\n",
"\t{Scope: ScopeSession, Name: TiDBTxnScope, Value: func() string {\n",
"\t\tif isGlobal, _ := config.GetTxnScopeFromConfig(); isGlobal {\n",
"\t\t\treturn oracle.GlobalTxnScope\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t{Scope: ScopeNone, Name: \"license\", Value: \"Apache License 2.0\"},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: BlockEncryptionMode, Value: \"aes-128-ecb\"},\n",
"\t{Scope: ScopeSession, Name: \"last_insert_id\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"have_ssl\", Value: \"DISABLED\"},\n",
"\t{Scope: ScopeNone, Name: \"have_openssl\", Value: \"DISABLED\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_ca\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_cert\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_key\", Value: \"\"},\n",
"\n",
"\t/* TiDB specific variables */\n"
],
"file_path": "sessionctx/variable/sysvar.go",
"type": "add",
"edit_start_line_idx": 605
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"context"
"sync/atomic"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/store/tikv/unionstore"
pd "github.com/tikv/pd/client"
)
// StoreProbe wraps KVStore and exposes internal states for testing purposes.
type StoreProbe struct {
*KVStore
}
// NewLockResolver creates a new LockResolver instance.
func (s StoreProbe) NewLockResolver() LockResolverProbe {
return LockResolverProbe{LockResolver: newLockResolver(s.KVStore)}
}
// GetTimestampWithRetry returns the latest timestamp.
func (s StoreProbe) GetTimestampWithRetry(bo *Backoffer, scope string) (uint64, error) {
return s.getTimestampWithRetry(bo, scope)
}
// Begin starts a transaction.
func (s StoreProbe) Begin() (TxnProbe, error) {
txn, err := s.KVStore.Begin()
return TxnProbe{KVTxn: txn}, err
}
// SetRegionCachePDClient replaces pd client inside region cache.
func (s StoreProbe) SetRegionCachePDClient(client pd.Client) {
s.regionCache.pdClient = client
}
// ClearTxnLatches clears store's txn latch scheduler.
func (s StoreProbe) ClearTxnLatches() {
s.txnLatches = nil
}
// SendTxnHeartbeat renews a txn's ttl.
func (s StoreProbe) SendTxnHeartbeat(ctx context.Context, key []byte, startTS uint64, ttl uint64) (uint64, error) {
bo := NewBackofferWithVars(ctx, PrewriteMaxBackoff, nil)
return sendTxnHeartBeat(bo, s.KVStore, key, startTS, ttl)
}
// LoadSafePoint loads the safepoint from the safepoint kv.
func (s StoreProbe) LoadSafePoint() (uint64, error) {
return loadSafePoint(s.GetSafePointKV())
}
// SaveSafePoint saves safepoint to kv.
func (s StoreProbe) SaveSafePoint(v uint64) error {
return saveSafePoint(s.GetSafePointKV(), v)
}
// TxnProbe wraps a txn and exports internal states for testing purposes.
type TxnProbe struct {
*KVTxn
}
// SetStartTS resets the txn's start ts.
func (txn TxnProbe) SetStartTS(ts uint64) {
txn.startTS = ts
}
// GetCommitTS returns the commit ts.
func (txn TxnProbe) GetCommitTS() uint64 {
return txn.commitTS
}
// GetUnionStore returns transaction's embedded unionstore.
func (txn TxnProbe) GetUnionStore() *unionstore.KVUnionStore {
return txn.us
}
// IsAsyncCommit returns if the txn is committed using async commit.
func (txn TxnProbe) IsAsyncCommit() bool {
return txn.committer.isAsyncCommit()
}
// NewCommitter creates a committer.
func (txn TxnProbe) NewCommitter(sessionID uint64) (CommitterProbe, error) {
committer, err := newTwoPhaseCommitterWithInit(txn.KVTxn, sessionID)
return CommitterProbe{twoPhaseCommitter: committer}, err
}
// GetCommitter returns the transaction committer.
func (txn TxnProbe) GetCommitter() CommitterProbe {
return CommitterProbe{txn.committer}
}
// SetCommitter sets the bind committer of a transaction.
func (txn TxnProbe) SetCommitter(committer CommitterProbe) {
txn.committer = committer.twoPhaseCommitter
}
// CollectLockedKeys returns all locked keys of a transaction.
func (txn TxnProbe) CollectLockedKeys() [][]byte {
return txn.collectLockedKeys()
}
// BatchGetSingleRegion gets a batch of keys from a region.
func (txn TxnProbe) BatchGetSingleRegion(bo *Backoffer, region RegionVerID, keys [][]byte, collect func([]byte, []byte)) error {
snapshot := txn.GetSnapshot()
return snapshot.batchGetSingleRegion(bo, batchKeys{region: region, keys: keys}, collect)
}
// NewScanner returns a scanner to iterate given key range.
func (txn TxnProbe) NewScanner(start, end []byte, batchSize int, reverse bool) (*Scanner, error) {
return newScanner(txn.GetSnapshot(), start, end, batchSize, reverse)
}
// GetStartTime returns the time when txn starts.
func (txn TxnProbe) GetStartTime() time.Time {
return txn.startTime
}
func newTwoPhaseCommitterWithInit(txn *KVTxn, sessionID uint64) (*twoPhaseCommitter, error) {
c, err := newTwoPhaseCommitter(txn, sessionID)
if err != nil {
return nil, errors.Trace(err)
}
if err = c.initKeysAndMutations(); err != nil {
return nil, errors.Trace(err)
}
return c, nil
}
// CommitterProbe wraps a 2PC committer and exports internal states for testing purposes.
type CommitterProbe struct {
*twoPhaseCommitter
}
// InitKeysAndMutations prepares the committer for commit.
func (c CommitterProbe) InitKeysAndMutations() error {
return c.initKeysAndMutations()
}
// SetPrimaryKey sets the primary key of the committer.
func (c CommitterProbe) SetPrimaryKey(key []byte) {
c.primaryKey = key
}
// GetPrimaryKey returns primary key of the committer.
func (c CommitterProbe) GetPrimaryKey() []byte {
return c.primaryKey
}
// GetMutations returns the mutation buffer to commit.
func (c CommitterProbe) GetMutations() CommitterMutations {
return c.mutations
}
// SetMutations replaces the mutation buffer.
func (c CommitterProbe) SetMutations(muts CommitterMutations) {
c.mutations = muts.(*memBufferMutations)
}
// SetCommitTS resets the committer's commit ts.
func (c CommitterProbe) SetCommitTS(ts uint64) {
c.commitTS = ts
}
// GetCommitTS returns the commit ts of the committer.
func (c CommitterProbe) GetCommitTS() uint64 {
return c.commitTS
}
// GetMinCommitTS returns the minimal commit ts that can be used.
func (c CommitterProbe) GetMinCommitTS() uint64 {
return c.minCommitTS
}
// SetMinCommitTS sets the minimal commit ts that can be used.
func (c CommitterProbe) SetMinCommitTS(ts uint64) {
c.minCommitTS = ts
}
// SetMaxCommitTS sets the max commit ts that can be used.
func (c CommitterProbe) SetMaxCommitTS(ts uint64) {
c.maxCommitTS = ts
}
// SetSessionID sets the session id of the committer.
func (c CommitterProbe) SetSessionID(id uint64) {
c.sessionID = id
}
// GetForUpdateTS returns the pessimistic ForUpdate ts.
func (c CommitterProbe) GetForUpdateTS() uint64 {
return c.forUpdateTS
}
// SetForUpdateTS sets pessimistic ForUpdate ts.
func (c CommitterProbe) SetForUpdateTS(ts uint64) {
c.forUpdateTS = ts
}
// GetStartTS returns the start ts of the transaction.
func (c CommitterProbe) GetStartTS() uint64 {
return c.startTS
}
// GetLockTTL returns the lock ttl duration of the transaction.
func (c CommitterProbe) GetLockTTL() uint64 {
return c.lockTTL
}
// SetLockTTL sets the lock ttl duration.
func (c CommitterProbe) SetLockTTL(ttl uint64) {
c.lockTTL = ttl
}
// SetLockTTLByTimeAndSize sets the lock ttl duration by time and size.
func (c CommitterProbe) SetLockTTLByTimeAndSize(start time.Time, size int) {
c.lockTTL = txnLockTTL(start, size)
}
// SetTxnSize resets the txn size of the committer and updates lock TTL.
func (c CommitterProbe) SetTxnSize(sz int) {
c.txnSize = sz
c.lockTTL = txnLockTTL(c.txn.startTime, sz)
}
// SetUseAsyncCommit enables the async commit feature.
func (c CommitterProbe) SetUseAsyncCommit() {
c.useAsyncCommit = 1
}
// Execute runs the commit process.
func (c CommitterProbe) Execute(ctx context.Context) error {
return c.execute(ctx)
}
// PrewriteAllMutations performs the first phase of commit.
func (c CommitterProbe) PrewriteAllMutations(ctx context.Context) error {
return c.PrewriteMutations(ctx, c.mutations)
}
// PrewriteMutations performs the first phase of commit for given keys.
func (c CommitterProbe) PrewriteMutations(ctx context.Context, mutations CommitterMutations) error {
return c.prewriteMutations(NewBackofferWithVars(ctx, PrewriteMaxBackoff, nil), mutations)
}
// CommitMutations performs the second phase of commit.
func (c CommitterProbe) CommitMutations(ctx context.Context) error {
return c.commitMutations(NewBackofferWithVars(ctx, int(atomic.LoadUint64(&CommitMaxBackoff)), nil), c.mutationsOfKeys([][]byte{c.primaryKey}))
}
// MutationsOfKeys returns the mutations that match the keys.
func (c CommitterProbe) MutationsOfKeys(keys [][]byte) CommitterMutations {
return c.mutationsOfKeys(keys)
}
// PessimisticRollbackMutations rolls mutations back.
func (c CommitterProbe) PessimisticRollbackMutations(ctx context.Context, muts CommitterMutations) error {
return c.pessimisticRollbackMutations(NewBackofferWithVars(ctx, pessimisticRollbackMaxBackoff, nil), muts)
}
// Cleanup cleans dirty data of a committer.
func (c CommitterProbe) Cleanup(ctx context.Context) {
c.cleanup(ctx)
c.cleanWg.Wait()
}
// WaitCleanup waits for the committer to complete.
func (c CommitterProbe) WaitCleanup() {
c.cleanWg.Wait()
}
// IsOnePC returns if the committer is using one PC.
func (c CommitterProbe) IsOnePC() bool {
return c.isOnePC()
}
// BuildPrewriteRequest builds rpc request for mutation.
func (c CommitterProbe) BuildPrewriteRequest(regionID, regionConf, regionVersion uint64, mutations CommitterMutations, txnSize uint64) *tikvrpc.Request {
var batch batchMutations
batch.mutations = mutations
batch.region = RegionVerID{regionID, regionConf, regionVersion}
return c.buildPrewriteRequest(batch, txnSize)
}
// IsAsyncCommit returns if the committer uses async commit.
func (c CommitterProbe) IsAsyncCommit() bool {
return c.isAsyncCommit()
}
// CheckAsyncCommit returns if async commit is available.
func (c CommitterProbe) CheckAsyncCommit() bool {
return c.checkAsyncCommit()
}
// GetOnePCCommitTS returns the commit ts of one pc.
func (c CommitterProbe) GetOnePCCommitTS() uint64 {
return c.onePCCommitTS
}
// IsTTLUninitialized returns if the TTL manager is uninitialized.
func (c CommitterProbe) IsTTLUninitialized() bool {
state := atomic.LoadUint32((*uint32)(&c.ttlManager.state))
return state == uint32(stateUninitialized)
}
// IsTTLRunning returns if the TTL manager is in the running state.
func (c CommitterProbe) IsTTLRunning() bool {
state := atomic.LoadUint32((*uint32)(&c.ttlManager.state))
return state == uint32(stateRunning)
}
// CloseTTLManager closes the TTL manager.
func (c CommitterProbe) CloseTTLManager() {
c.ttlManager.close()
}
// GetUndeterminedErr returns the encountered undetermined error (if any).
func (c CommitterProbe) GetUndeterminedErr() error {
c.mu.RLock()
defer c.mu.RUnlock()
return c.mu.undeterminedErr
}
// SetNoFallBack prevents async commit from falling back to normal mode.
func (c CommitterProbe) SetNoFallBack() {
c.testingKnobs.noFallBack = true
}
// SetPrimaryKeyBlocker is used to block the committer after the primary key is sent.
func (c CommitterProbe) SetPrimaryKeyBlocker(ac, bk chan struct{}) {
c.testingKnobs.acAfterCommitPrimary = ac
c.testingKnobs.bkAfterCommitPrimary = bk
}
// CleanupMutations performs the clean up phase.
func (c CommitterProbe) CleanupMutations(ctx context.Context) error {
bo := NewBackofferWithVars(ctx, cleanupMaxBackoff, nil)
return c.cleanupMutations(bo, c.mutations)
}
// LockProbe exposes some lock utilities for testing purposes.
type LockProbe struct {
}
// ExtractLockFromKeyErr makes a Lock based on a key error.
func (l LockProbe) ExtractLockFromKeyErr(err *pb.KeyError) (*Lock, error) {
return extractLockFromKeyErr(err)
}
// NewLockStatus returns a txn state that has been locked.
func (l LockProbe) NewLockStatus(keys [][]byte, useAsyncCommit bool, minCommitTS uint64) TxnStatus {
return TxnStatus{
primaryLock: &kvrpcpb.LockInfo{
Secondaries: keys,
UseAsyncCommit: useAsyncCommit,
MinCommitTs: minCommitTS,
},
}
}
// GetPrimaryKeyFromTxnStatus returns the primary key of the transaction.
func (l LockProbe) GetPrimaryKeyFromTxnStatus(s TxnStatus) []byte {
return s.primaryLock.Key
}
// LockResolverProbe wraps a LockResolver and exposes internal states for testing purposes.
type LockResolverProbe struct {
*LockResolver
}
// ResolveLockAsync tries to resolve a lock using the txn states.
func (l LockResolverProbe) ResolveLockAsync(bo *Backoffer, lock *Lock, status TxnStatus) error {
return l.resolveLockAsync(bo, lock, status)
}
// ResolveLock resolves single lock.
func (l LockResolverProbe) ResolveLock(ctx context.Context, lock *Lock) error {
bo := NewBackofferWithVars(ctx, pessimisticLockMaxBackoff, nil)
return l.resolveLock(bo, lock, TxnStatus{}, false, make(map[RegionVerID]struct{}))
}
// ResolvePessimisticLock resolves single pessimistic lock.
func (l LockResolverProbe) ResolvePessimisticLock(ctx context.Context, lock *Lock) error {
bo := NewBackofferWithVars(ctx, pessimisticLockMaxBackoff, nil)
return l.resolvePessimisticLock(bo, lock, make(map[RegionVerID]struct{}))
}
// GetTxnStatus sends the CheckTxnStatus request to the TiKV server.
func (l LockResolverProbe) GetTxnStatus(bo *Backoffer, txnID uint64, primary []byte,
callerStartTS, currentTS uint64, rollbackIfNotExist bool, forceSyncCommit bool, lockInfo *Lock) (TxnStatus, error) {
return l.getTxnStatus(bo, txnID, primary, callerStartTS, currentTS, rollbackIfNotExist, forceSyncCommit, lockInfo)
}
// GetTxnStatusFromLock queries tikv for a txn's status.
func (l LockResolverProbe) GetTxnStatusFromLock(bo *Backoffer, lock *Lock, callerStartTS uint64, forceSyncCommit bool) (TxnStatus, error) {
return l.getTxnStatusFromLock(bo, lock, callerStartTS, forceSyncCommit)
}
// GetSecondariesFromTxnStatus returns the secondary locks from txn status.
func (l LockResolverProbe) GetSecondariesFromTxnStatus(status TxnStatus) [][]byte {
return status.primaryLock.GetSecondaries()
}
// SetMeetLockCallback sets a callback that is invoked whenever locks are encountered.
func (l LockResolverProbe) SetMeetLockCallback(f func([]*Lock)) {
l.testingKnobs.meetLock = f
}
// CheckAllSecondaries checks the secondary locks of an async commit transaction to find out the final
// status of the transaction.
func (l LockResolverProbe) CheckAllSecondaries(bo *Backoffer, lock *Lock, status *TxnStatus) error {
_, err := l.checkAllSecondaries(bo, lock, status)
return err
}
// IsErrorNotFound checks if an error is caused by txnNotFoundErr.
func (l LockResolverProbe) IsErrorNotFound(err error) bool {
_, ok := errors.Cause(err).(txnNotFoundErr)
return ok
}
// IsNonAsyncCommitLock checks if an error is a nonAsyncCommitLock error.
func (l LockResolverProbe) IsNonAsyncCommitLock(err error) bool {
_, ok := errors.Cause(err).(*nonAsyncCommitLock)
return ok
}
// ConfigProbe exposes configurations and global variables for testing purposes.
type ConfigProbe struct{}
// GetTxnCommitBatchSize returns the batch size to commit txn.
func (c ConfigProbe) GetTxnCommitBatchSize() uint64 {
return txnCommitBatchSize
}
// GetBigTxnThreshold returns the txn size to be considered as big txn.
func (c ConfigProbe) GetBigTxnThreshold() int {
return bigTxnThreshold
}
// GetScanBatchSize returns the batch size to scan ranges.
func (c ConfigProbe) GetScanBatchSize() int {
return scanBatchSize
}
// GetDefaultLockTTL returns the default lock TTL.
func (c ConfigProbe) GetDefaultLockTTL() uint64 {
return defaultLockTTL
}
// GetTTLFactor returns the factor to calculate txn TTL.
func (c ConfigProbe) GetTTLFactor() int {
return ttlFactor
}
// GetGetMaxBackoff returns the max sleep for get command.
func (c ConfigProbe) GetGetMaxBackoff() int {
return getMaxBackoff
}
// LoadPreSplitDetectThreshold returns presplit detect threshold config.
func (c ConfigProbe) LoadPreSplitDetectThreshold() uint32 {
return atomic.LoadUint32(&preSplitDetectThreshold)
}
// StorePreSplitDetectThreshold updates presplit detect threshold config.
func (c ConfigProbe) StorePreSplitDetectThreshold(v uint32) {
atomic.StoreUint32(&preSplitDetectThreshold, v)
}
// LoadPreSplitSizeThreshold returns presplit size threshold config.
func (c ConfigProbe) LoadPreSplitSizeThreshold() uint32 {
return atomic.LoadUint32(&preSplitSizeThreshold)
}
// StorePreSplitSizeThreshold updates presplit size threshold config.
func (c ConfigProbe) StorePreSplitSizeThreshold(v uint32) {
atomic.StoreUint32(&preSplitSizeThreshold, v)
}
// SetOracleUpdateInterval sets the interval of updating cached ts.
func (c ConfigProbe) SetOracleUpdateInterval(v int) {
oracleUpdateInterval = v
}
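// Illustrative usage sketch (hypothetical, not part of this file): driving a
// two-phase commit by hand through the probe API defined above. The oracle
// scope constant is an assumption borrowed from the surrounding packages, and
// error handling is elided for brevity.
//
//	txn, _ := store.Begin()                 // store is a StoreProbe
//	_ = txn.Set([]byte("k"), []byte("v"))
//	committer, _ := txn.NewCommitter(1)     // sessionID 1
//	ctx := context.Background()
//	_ = committer.PrewriteAllMutations(ctx) // phase 1
//	bo := NewBackofferWithVars(ctx, PrewriteMaxBackoff, nil)
//	commitTS, _ := store.GetTimestampWithRetry(bo, oracle.GlobalTxnScope)
//	committer.SetCommitTS(commitTS)
//	_ = committer.CommitMutations(ctx)      // phase 2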
| store/tikv/test_probe.go | 0 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.017354438081383705,
0.0006757827941328287,
0.0001628526661079377,
0.0001771206734701991,
0.002414903836324811
] |
{
"id": 4,
"code_window": [
"\t{Scope: ScopeGlobal | ScopeSession, Name: WindowingUseHighPrecision, Value: BoolOn, Type: TypeBool, IsHintUpdatable: true, SetSession: func(s *SessionVars, val string) error {\n",
"\t\ts.WindowingUseHighPrecision = TiDBOptOn(val)\n",
"\t\treturn nil\n",
"\t}},\n",
"\t{Scope: ScopeSession, Name: TiDBTxnScope, Value: func() string {\n",
"\t\tif isGlobal, _ := config.GetTxnScopeFromConfig(); isGlobal {\n",
"\t\t\treturn oracle.GlobalTxnScope\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t{Scope: ScopeNone, Name: \"license\", Value: \"Apache License 2.0\"},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: BlockEncryptionMode, Value: \"aes-128-ecb\"},\n",
"\t{Scope: ScopeSession, Name: \"last_insert_id\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"have_ssl\", Value: \"DISABLED\"},\n",
"\t{Scope: ScopeNone, Name: \"have_openssl\", Value: \"DISABLED\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_ca\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_cert\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_key\", Value: \"\"},\n",
"\n",
"\t/* TiDB specific variables */\n"
],
"file_path": "sessionctx/variable/sysvar.go",
"type": "add",
"edit_start_line_idx": 605
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv_test
import (
"context"
"sync"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/kvproto/pkg/pdpb"
"github.com/pingcap/tidb/store/mockstore/mocktikv"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
pd "github.com/tikv/pd/client"
)
type testSplitSuite struct {
OneByOneSuite
cluster cluster.Cluster
store tikv.StoreProbe
bo *tikv.Backoffer
}
var _ = Suite(&testSplitSuite{})
func (s *testSplitSuite) SetUpTest(c *C) {
client, cluster, pdClient, err := mocktikv.NewTiKVAndPDClient("")
c.Assert(err, IsNil)
mocktikv.BootstrapWithSingleStore(cluster)
s.cluster = cluster
store, err := tikv.NewTestTiKVStore(client, pdClient, nil, nil, 0)
c.Assert(err, IsNil)
// TODO: make this possible
// store, err := mockstore.NewMockStore(
// mockstore.WithClusterInspector(func(c cluster.Cluster) {
// mockstore.BootstrapWithSingleStore(c)
// s.cluster = c
// }),
// )
// c.Assert(err, IsNil)
s.store = tikv.StoreProbe{KVStore: store}
s.bo = tikv.NewBackofferWithVars(context.Background(), 5000, nil)
}
func (s *testSplitSuite) begin(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
return txn
}
func (s *testSplitSuite) split(c *C, regionID uint64, key []byte) {
newRegionID, peerID := s.cluster.AllocID(), s.cluster.AllocID()
s.cluster.Split(regionID, newRegionID, key, []uint64{peerID}, peerID)
}
func (s *testSplitSuite) TestSplitBatchGet(c *C) {
loc, err := s.store.GetRegionCache().LocateKey(s.bo, []byte("a"))
c.Assert(err, IsNil)
txn := s.begin(c)
keys := [][]byte{{'a'}, {'b'}, {'c'}}
_, region, err := s.store.GetRegionCache().GroupKeysByRegion(s.bo, keys, nil)
c.Assert(err, IsNil)
s.split(c, loc.Region.GetID(), []byte("b"))
s.store.GetRegionCache().InvalidateCachedRegion(loc.Region)
// mocktikv will panic if it encounters a key that is not in the region.
err = txn.BatchGetSingleRegion(s.bo, region, keys, func([]byte, []byte) {})
c.Assert(err, IsNil)
}
func (s *testSplitSuite) TestStaleEpoch(c *C) {
mockPDClient := &mockPDClient{client: s.store.GetRegionCache().PDClient()}
s.store.SetRegionCachePDClient(mockPDClient)
loc, err := s.store.GetRegionCache().LocateKey(s.bo, []byte("a"))
c.Assert(err, IsNil)
txn := s.begin(c)
err = txn.Set([]byte("a"), []byte("a"))
c.Assert(err, IsNil)
err = txn.Set([]byte("c"), []byte("c"))
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
// Initiate a split and disable the PD client. If the reads below still
// succeed, the new region info was fetched via kvrpc rather than from PD.
s.split(c, loc.Region.GetID(), []byte("b"))
mockPDClient.disable()
txn = s.begin(c)
_, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
_, err = txn.Get(context.TODO(), []byte("c"))
c.Assert(err, IsNil)
}
var errStopped = errors.New("stopped")
type mockPDClient struct {
sync.RWMutex
client pd.Client
stop bool
}
func (c *mockPDClient) enable() {
c.Lock()
defer c.Unlock()
c.stop = false
}
func (c *mockPDClient) disable() {
c.Lock()
defer c.Unlock()
c.stop = true
}
func (c *mockPDClient) GetAllMembers(ctx context.Context) ([]*pdpb.Member, error) {
return nil, nil
}
func (c *mockPDClient) GetClusterID(context.Context) uint64 {
return 1
}
func (c *mockPDClient) GetTS(ctx context.Context) (int64, int64, error) {
c.RLock()
defer c.RUnlock()
if c.stop {
return 0, 0, errors.Trace(errStopped)
}
return c.client.GetTS(ctx)
}
func (c *mockPDClient) GetLocalTS(ctx context.Context, dcLocation string) (int64, int64, error) {
return c.GetTS(ctx)
}
func (c *mockPDClient) GetTSAsync(ctx context.Context) pd.TSFuture {
return nil
}
func (c *mockPDClient) GetLocalTSAsync(ctx context.Context, dcLocation string) pd.TSFuture {
return nil
}
func (c *mockPDClient) GetRegion(ctx context.Context, key []byte) (*pd.Region, error) {
c.RLock()
defer c.RUnlock()
if c.stop {
return nil, errors.Trace(errStopped)
}
return c.client.GetRegion(ctx, key)
}
func (c *mockPDClient) GetRegionFromMember(ctx context.Context, key []byte, memberURLs []string) (*pd.Region, error) {
return nil, nil
}
func (c *mockPDClient) GetPrevRegion(ctx context.Context, key []byte) (*pd.Region, error) {
c.RLock()
defer c.RUnlock()
if c.stop {
return nil, errors.Trace(errStopped)
}
return c.client.GetPrevRegion(ctx, key)
}
func (c *mockPDClient) GetRegionByID(ctx context.Context, regionID uint64) (*pd.Region, error) {
c.RLock()
defer c.RUnlock()
if c.stop {
return nil, errors.Trace(errStopped)
}
return c.client.GetRegionByID(ctx, regionID)
}
func (c *mockPDClient) ScanRegions(ctx context.Context, startKey []byte, endKey []byte, limit int) ([]*pd.Region, error) {
c.RLock()
defer c.RUnlock()
if c.stop {
return nil, errors.Trace(errStopped)
}
return c.client.ScanRegions(ctx, startKey, endKey, limit)
}
func (c *mockPDClient) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) {
c.RLock()
defer c.RUnlock()
if c.stop {
return nil, errors.Trace(errStopped)
}
return c.client.GetStore(ctx, storeID)
}
func (c *mockPDClient) GetAllStores(ctx context.Context, opts ...pd.GetStoreOption) ([]*metapb.Store, error) {
c.RLock()
defer c.RUnlock()
if c.stop {
return nil, errors.Trace(errStopped)
}
return c.client.GetAllStores(ctx)
}
func (c *mockPDClient) UpdateGCSafePoint(ctx context.Context, safePoint uint64) (uint64, error) {
panic("unimplemented")
}
func (c *mockPDClient) UpdateServiceGCSafePoint(ctx context.Context, serviceID string, ttl int64, safePoint uint64) (uint64, error) {
panic("unimplemented")
}
func (c *mockPDClient) Close() {}
func (c *mockPDClient) ScatterRegion(ctx context.Context, regionID uint64) error {
return nil
}
func (c *mockPDClient) ScatterRegions(ctx context.Context, regionsID []uint64, opts ...pd.RegionsOption) (*pdpb.ScatterRegionResponse, error) {
return nil, nil
}
func (c *mockPDClient) SplitRegions(ctx context.Context, splitKeys [][]byte, opts ...pd.RegionsOption) (*pdpb.SplitRegionsResponse, error) {
return nil, nil
}
func (c *mockPDClient) GetOperator(ctx context.Context, regionID uint64) (*pdpb.GetOperatorResponse, error) {
return &pdpb.GetOperatorResponse{Status: pdpb.OperatorStatus_SUCCESS}, nil
}
func (c *mockPDClient) GetLeaderAddr() string { return "mockpd" }
| store/tikv/tests/split_test.go | 0 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.00027561921160668135,
0.000172567117260769,
0.0001606390724191442,
0.00016891598352231085,
0.000020994497390347533
] |
{
"id": 4,
"code_window": [
"\t{Scope: ScopeGlobal | ScopeSession, Name: WindowingUseHighPrecision, Value: BoolOn, Type: TypeBool, IsHintUpdatable: true, SetSession: func(s *SessionVars, val string) error {\n",
"\t\ts.WindowingUseHighPrecision = TiDBOptOn(val)\n",
"\t\treturn nil\n",
"\t}},\n",
"\t{Scope: ScopeSession, Name: TiDBTxnScope, Value: func() string {\n",
"\t\tif isGlobal, _ := config.GetTxnScopeFromConfig(); isGlobal {\n",
"\t\t\treturn oracle.GlobalTxnScope\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t{Scope: ScopeNone, Name: \"license\", Value: \"Apache License 2.0\"},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: BlockEncryptionMode, Value: \"aes-128-ecb\"},\n",
"\t{Scope: ScopeSession, Name: \"last_insert_id\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"have_ssl\", Value: \"DISABLED\"},\n",
"\t{Scope: ScopeNone, Name: \"have_openssl\", Value: \"DISABLED\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_ca\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_cert\", Value: \"\"},\n",
"\t{Scope: ScopeNone, Name: \"ssl_key\", Value: \"\"},\n",
"\n",
"\t/* TiDB specific variables */\n"
],
"file_path": "sessionctx/variable/sysvar.go",
"type": "add",
"edit_start_line_idx": 605
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package expression
import (
"math"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/testutil"
"github.com/pingcap/tipb/go-tipb"
)
func (s *testEvaluatorSuite) TestSetFlenDecimal4RealOrDecimal(c *C) {
ret := &types.FieldType{}
a := &types.FieldType{
Decimal: 1,
Flen: 3,
}
b := &types.FieldType{
Decimal: 0,
Flen: 2,
}
setFlenDecimal4RealOrDecimal(ret, a, b, true, false)
c.Assert(ret.Decimal, Equals, 1)
c.Assert(ret.Flen, Equals, 6)
b.Flen = 65
setFlenDecimal4RealOrDecimal(ret, a, b, true, false)
c.Assert(ret.Decimal, Equals, 1)
c.Assert(ret.Flen, Equals, mysql.MaxRealWidth)
setFlenDecimal4RealOrDecimal(ret, a, b, false, false)
c.Assert(ret.Decimal, Equals, 1)
c.Assert(ret.Flen, Equals, mysql.MaxDecimalWidth)
b.Flen = types.UnspecifiedLength
setFlenDecimal4RealOrDecimal(ret, a, b, true, false)
c.Assert(ret.Decimal, Equals, 1)
c.Assert(ret.Flen, Equals, types.UnspecifiedLength)
b.Decimal = types.UnspecifiedLength
setFlenDecimal4RealOrDecimal(ret, a, b, true, false)
c.Assert(ret.Decimal, Equals, types.UnspecifiedLength)
c.Assert(ret.Flen, Equals, types.UnspecifiedLength)
ret = &types.FieldType{}
a = &types.FieldType{
Decimal: 1,
Flen: 3,
}
b = &types.FieldType{
Decimal: 0,
Flen: 2,
}
setFlenDecimal4RealOrDecimal(ret, a, b, true, true)
c.Assert(ret.Decimal, Equals, 1)
c.Assert(ret.Flen, Equals, 8)
b.Flen = 65
setFlenDecimal4RealOrDecimal(ret, a, b, true, true)
c.Assert(ret.Decimal, Equals, 1)
c.Assert(ret.Flen, Equals, mysql.MaxRealWidth)
setFlenDecimal4RealOrDecimal(ret, a, b, false, true)
c.Assert(ret.Decimal, Equals, 1)
c.Assert(ret.Flen, Equals, mysql.MaxDecimalWidth)
b.Flen = types.UnspecifiedLength
setFlenDecimal4RealOrDecimal(ret, a, b, true, true)
c.Assert(ret.Decimal, Equals, 1)
c.Assert(ret.Flen, Equals, types.UnspecifiedLength)
b.Decimal = types.UnspecifiedLength
setFlenDecimal4RealOrDecimal(ret, a, b, true, true)
c.Assert(ret.Decimal, Equals, types.UnspecifiedLength)
c.Assert(ret.Flen, Equals, types.UnspecifiedLength)
}
func (s *testEvaluatorSuite) TestSetFlenDecimal4Int(c *C) {
ret := &types.FieldType{}
a := &types.FieldType{
Decimal: 1,
Flen: 3,
}
b := &types.FieldType{
Decimal: 0,
Flen: 2,
}
setFlenDecimal4Int(ret, a, b)
c.Assert(ret.Decimal, Equals, 0)
c.Assert(ret.Flen, Equals, mysql.MaxIntWidth)
b.Flen = mysql.MaxIntWidth + 1
setFlenDecimal4Int(ret, a, b)
c.Assert(ret.Decimal, Equals, 0)
c.Assert(ret.Flen, Equals, mysql.MaxIntWidth)
b.Flen = types.UnspecifiedLength
setFlenDecimal4Int(ret, a, b)
c.Assert(ret.Decimal, Equals, 0)
c.Assert(ret.Flen, Equals, mysql.MaxIntWidth)
}
func (s *testEvaluatorSuite) TestArithmeticPlus(c *C) {
// case 1
args := []interface{}{int64(12), int64(1)}
bf, err := funcs[ast.Plus].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(args...)))
c.Assert(err, IsNil)
c.Assert(bf, NotNil)
intSig, ok := bf.(*builtinArithmeticPlusIntSig)
c.Assert(ok, IsTrue)
c.Assert(intSig, NotNil)
intResult, isNull, err := intSig.evalInt(chunk.Row{})
c.Assert(err, IsNil)
c.Assert(isNull, IsFalse)
c.Assert(intResult, Equals, int64(13))
// case 2
args = []interface{}{float64(1.01001), float64(-0.01)}
bf, err = funcs[ast.Plus].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(args...)))
c.Assert(err, IsNil)
c.Assert(bf, NotNil)
realSig, ok := bf.(*builtinArithmeticPlusRealSig)
c.Assert(ok, IsTrue)
c.Assert(realSig, NotNil)
realResult, isNull, err := realSig.evalReal(chunk.Row{})
c.Assert(err, IsNil)
c.Assert(isNull, IsFalse)
c.Assert(realResult, Equals, float64(1.00001))
// case 3
args = []interface{}{nil, float64(-0.11101)}
bf, err = funcs[ast.Plus].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(args...)))
c.Assert(err, IsNil)
c.Assert(bf, NotNil)
realSig, ok = bf.(*builtinArithmeticPlusRealSig)
c.Assert(ok, IsTrue)
c.Assert(realSig, NotNil)
realResult, isNull, err = realSig.evalReal(chunk.Row{})
c.Assert(err, IsNil)
c.Assert(isNull, IsTrue)
c.Assert(realResult, Equals, float64(0))
// case 4
args = []interface{}{nil, nil}
bf, err = funcs[ast.Plus].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(args...)))
c.Assert(err, IsNil)
c.Assert(bf, NotNil)
realSig, ok = bf.(*builtinArithmeticPlusRealSig)
c.Assert(ok, IsTrue)
c.Assert(realSig, NotNil)
realResult, isNull, err = realSig.evalReal(chunk.Row{})
c.Assert(err, IsNil)
c.Assert(isNull, IsTrue)
c.Assert(realResult, Equals, float64(0))
// case 5
hexStr, err := types.ParseHexStr("0x20000000000000")
c.Assert(err, IsNil)
args = []interface{}{hexStr, int64(1)}
bf, err = funcs[ast.Plus].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(args...)))
c.Assert(err, IsNil)
c.Assert(bf, NotNil)
intSig, ok = bf.(*builtinArithmeticPlusIntSig)
c.Assert(ok, IsTrue)
c.Assert(intSig, NotNil)
intResult, _, err = intSig.evalInt(chunk.Row{})
c.Assert(err, IsNil)
c.Assert(intResult, Equals, int64(9007199254740993))
}
func (s *testEvaluatorSuite) TestArithmeticMinus(c *C) {
// case 1
args := []interface{}{int64(12), int64(1)}
bf, err := funcs[ast.Minus].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(args...)))
c.Assert(err, IsNil)
c.Assert(bf, NotNil)
intSig, ok := bf.(*builtinArithmeticMinusIntSig)
c.Assert(ok, IsTrue)
c.Assert(intSig, NotNil)
intResult, isNull, err := intSig.evalInt(chunk.Row{})
c.Assert(err, IsNil)
c.Assert(isNull, IsFalse)
c.Assert(intResult, Equals, int64(11))
// case 2
args = []interface{}{float64(1.01001), float64(-0.01)}
bf, err = funcs[ast.Minus].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(args...)))
c.Assert(err, IsNil)
c.Assert(bf, NotNil)
realSig, ok := bf.(*builtinArithmeticMinusRealSig)
c.Assert(ok, IsTrue)
c.Assert(realSig, NotNil)
realResult, isNull, err := realSig.evalReal(chunk.Row{})
c.Assert(err, IsNil)
c.Assert(isNull, IsFalse)
c.Assert(realResult, Equals, float64(1.02001))
// case 3
args = []interface{}{nil, float64(-0.11101)}
bf, err = funcs[ast.Minus].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(args...)))
c.Assert(err, IsNil)
c.Assert(bf, NotNil)
realSig, ok = bf.(*builtinArithmeticMinusRealSig)
c.Assert(ok, IsTrue)
c.Assert(realSig, NotNil)
realResult, isNull, err = realSig.evalReal(chunk.Row{})
c.Assert(err, IsNil)
c.Assert(isNull, IsTrue)
c.Assert(realResult, Equals, float64(0))
// case 4
args = []interface{}{float64(1.01), nil}
bf, err = funcs[ast.Minus].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(args...)))
c.Assert(err, IsNil)
c.Assert(bf, NotNil)
realSig, ok = bf.(*builtinArithmeticMinusRealSig)
c.Assert(ok, IsTrue)
c.Assert(realSig, NotNil)
realResult, isNull, err = realSig.evalReal(chunk.Row{})
c.Assert(err, IsNil)
c.Assert(isNull, IsTrue)
c.Assert(realResult, Equals, float64(0))
// case 5
args = []interface{}{nil, nil}
bf, err = funcs[ast.Minus].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(args...)))
c.Assert(err, IsNil)
c.Assert(bf, NotNil)
realSig, ok = bf.(*builtinArithmeticMinusRealSig)
c.Assert(ok, IsTrue)
c.Assert(realSig, NotNil)
realResult, isNull, err = realSig.evalReal(chunk.Row{})
c.Assert(err, IsNil)
c.Assert(isNull, IsTrue)
c.Assert(realResult, Equals, float64(0))
}
func (s *testEvaluatorSuite) TestArithmeticMultiply(c *C) {
testCases := []struct {
args []interface{}
expect interface{}
err error
}{
{
args: []interface{}{int64(11), int64(11)},
expect: int64(121),
},
{
args: []interface{}{uint64(11), uint64(11)},
expect: int64(121),
},
{
args: []interface{}{float64(11), float64(11)},
expect: float64(121),
},
{
args: []interface{}{nil, float64(-0.11101)},
expect: nil,
},
{
args: []interface{}{float64(1.01), nil},
expect: nil,
},
{
args: []interface{}{nil, nil},
expect: nil,
},
}
for _, tc := range testCases {
sig, err := funcs[ast.Mul].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(tc.args...)))
c.Assert(err, IsNil)
c.Assert(sig, NotNil)
val, err := evalBuiltinFunc(sig, chunk.Row{})
c.Assert(err, IsNil)
c.Assert(val, testutil.DatumEquals, types.NewDatum(tc.expect))
}
}
func (s *testEvaluatorSuite) TestArithmeticDivide(c *C) {
testCases := []struct {
args []interface{}
expect interface{}
}{
{
args: []interface{}{float64(11.1111111), float64(11.1)},
expect: float64(1.001001),
},
{
args: []interface{}{float64(11.1111111), float64(0)},
expect: nil,
},
{
args: []interface{}{int64(11), int64(11)},
expect: float64(1),
},
{
args: []interface{}{int64(11), int64(2)},
expect: float64(5.5),
},
{
args: []interface{}{int64(11), int64(0)},
expect: nil,
},
{
args: []interface{}{uint64(11), uint64(11)},
expect: float64(1),
},
{
args: []interface{}{uint64(11), uint64(2)},
expect: float64(5.5),
},
{
args: []interface{}{uint64(11), uint64(0)},
expect: nil,
},
{
args: []interface{}{nil, float64(-0.11101)},
expect: nil,
},
{
args: []interface{}{float64(1.01), nil},
expect: nil,
},
{
args: []interface{}{nil, nil},
expect: nil,
},
}
for _, tc := range testCases {
sig, err := funcs[ast.Div].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(tc.args...)))
c.Assert(err, IsNil)
c.Assert(sig, NotNil)
switch sig.(type) {
case *builtinArithmeticIntDivideIntSig:
c.Assert(sig.PbCode(), Equals, tipb.ScalarFuncSig_IntDivideInt)
case *builtinArithmeticIntDivideDecimalSig:
c.Assert(sig.PbCode(), Equals, tipb.ScalarFuncSig_IntDivideDecimal)
}
val, err := evalBuiltinFunc(sig, chunk.Row{})
c.Assert(err, IsNil)
c.Assert(val, testutil.DatumEquals, types.NewDatum(tc.expect))
}
}
func (s *testEvaluatorSuite) TestArithmeticIntDivide(c *C) {
testCases := []struct {
args []interface{}
expect []interface{}
}{
{
args: []interface{}{int64(13), int64(11)},
expect: []interface{}{int64(1), nil},
},
{
args: []interface{}{int64(-13), int64(11)},
expect: []interface{}{int64(-1), nil},
},
{
args: []interface{}{int64(13), int64(-11)},
expect: []interface{}{int64(-1), nil},
},
{
args: []interface{}{int64(-13), int64(-11)},
expect: []interface{}{int64(1), nil},
},
{
args: []interface{}{int64(33), int64(11)},
expect: []interface{}{int64(3), nil},
},
{
args: []interface{}{int64(-33), int64(11)},
expect: []interface{}{int64(-3), nil},
},
{
args: []interface{}{int64(33), int64(-11)},
expect: []interface{}{int64(-3), nil},
},
{
args: []interface{}{int64(-33), int64(-11)},
expect: []interface{}{int64(3), nil},
},
{
args: []interface{}{int64(11), int64(0)},
expect: []interface{}{nil, nil},
},
{
args: []interface{}{int64(-11), int64(0)},
expect: []interface{}{nil, nil},
},
{
args: []interface{}{float64(11.01), float64(1.1)},
expect: []interface{}{int64(10), nil},
},
{
args: []interface{}{float64(-11.01), float64(1.1)},
expect: []interface{}{int64(-10), nil},
},
{
args: []interface{}{float64(11.01), float64(-1.1)},
expect: []interface{}{int64(-10), nil},
},
{
args: []interface{}{float64(-11.01), float64(-1.1)},
expect: []interface{}{int64(10), nil},
},
{
args: []interface{}{nil, float64(-0.11101)},
expect: []interface{}{nil, nil},
},
{
args: []interface{}{float64(1.01), nil},
expect: []interface{}{nil, nil},
},
{
args: []interface{}{nil, int64(-1001)},
expect: []interface{}{nil, nil},
},
{
args: []interface{}{int64(101), nil},
expect: []interface{}{nil, nil},
},
{
args: []interface{}{nil, nil},
expect: []interface{}{nil, nil},
},
{
args: []interface{}{float64(123456789100000.0), float64(-0.00001)},
expect: []interface{}{nil, "*BIGINT value is out of range in '\\(123456789100000 DIV -0.00001\\)'"},
},
{
args: []interface{}{int64(-9223372036854775808), float64(-1)},
expect: []interface{}{nil, "*BIGINT value is out of range in '\\(-9223372036854775808 DIV -1\\)'"},
},
{
args: []interface{}{uint64(1), float64(-2)},
expect: []interface{}{0, nil},
},
{
args: []interface{}{uint64(1), float64(-1)},
expect: []interface{}{nil, "*BIGINT UNSIGNED value is out of range in '\\(1 DIV -1\\)'"},
},
}
for _, tc := range testCases {
sig, err := funcs[ast.IntDiv].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(tc.args...)))
c.Assert(err, IsNil)
c.Assert(sig, NotNil)
val, err := evalBuiltinFunc(sig, chunk.Row{})
if tc.expect[1] == nil {
c.Assert(err, IsNil)
c.Assert(val, testutil.DatumEquals, types.NewDatum(tc.expect[0]))
} else {
c.Assert(err, ErrorMatches, tc.expect[1])
}
}
}
func (s *testEvaluatorSuite) TestArithmeticMod(c *C) {
testCases := []struct {
args []interface{}
expect interface{}
}{
{
args: []interface{}{int64(13), int64(11)},
expect: int64(2),
},
{
args: []interface{}{int64(13), int64(11)},
expect: int64(2),
},
{
args: []interface{}{int64(13), int64(0)},
expect: nil,
},
{
args: []interface{}{uint64(13), int64(0)},
expect: nil,
},
{
args: []interface{}{int64(13), uint64(0)},
expect: nil,
},
{
args: []interface{}{uint64(math.MaxInt64 + 1), int64(math.MinInt64)},
expect: int64(0),
},
{
args: []interface{}{int64(-22), uint64(10)},
expect: int64(-2),
},
{
args: []interface{}{int64(math.MinInt64), uint64(3)},
expect: int64(-2),
},
{
args: []interface{}{int64(-13), int64(11)},
expect: int64(-2),
},
{
args: []interface{}{int64(13), int64(-11)},
expect: int64(2),
},
{
args: []interface{}{int64(-13), int64(-11)},
expect: int64(-2),
},
{
args: []interface{}{int64(33), int64(11)},
expect: int64(0),
},
{
args: []interface{}{int64(-33), int64(11)},
expect: int64(0),
},
{
args: []interface{}{int64(33), int64(-11)},
expect: int64(0),
},
{
args: []interface{}{int64(-33), int64(-11)},
expect: int64(0),
},
{
args: []interface{}{int64(11), int64(0)},
expect: nil,
},
{
args: []interface{}{int64(-11), int64(0)},
expect: nil,
},
{
args: []interface{}{int64(1), float64(1.1)},
expect: float64(1),
},
{
args: []interface{}{int64(-1), float64(1.1)},
expect: float64(-1),
},
{
args: []interface{}{int64(1), float64(-1.1)},
expect: float64(1),
},
{
args: []interface{}{int64(-1), float64(-1.1)},
expect: float64(-1),
},
{
args: []interface{}{nil, float64(-0.11101)},
expect: nil,
},
{
args: []interface{}{float64(1.01), nil},
expect: nil,
},
{
args: []interface{}{nil, int64(-1001)},
expect: nil,
},
{
args: []interface{}{int64(101), nil},
expect: nil,
},
{
args: []interface{}{nil, nil},
expect: nil,
},
{
args: []interface{}{"1231", 12},
expect: 7,
},
{
args: []interface{}{"1231", "12"},
expect: float64(7),
},
{
args: []interface{}{types.Duration{Duration: 45296 * time.Second}, 122},
expect: 114,
},
{
args: []interface{}{types.Set{Value: 7, Name: "abc"}, "12"},
expect: float64(7),
},
}
for _, tc := range testCases {
sig, err := funcs[ast.Mod].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(tc.args...)))
c.Assert(err, IsNil)
c.Assert(sig, NotNil)
val, err := evalBuiltinFunc(sig, chunk.Row{})
switch sig.(type) {
case *builtinArithmeticModRealSig:
c.Assert(sig.PbCode(), Equals, tipb.ScalarFuncSig_ModReal)
case *builtinArithmeticModIntUnsignedUnsignedSig:
c.Assert(sig.PbCode(), Equals, tipb.ScalarFuncSig_ModIntUnsignedUnsigned)
case *builtinArithmeticModIntUnsignedSignedSig:
c.Assert(sig.PbCode(), Equals, tipb.ScalarFuncSig_ModIntUnsignedSigned)
case *builtinArithmeticModIntSignedUnsignedSig:
c.Assert(sig.PbCode(), Equals, tipb.ScalarFuncSig_ModIntSignedUnsigned)
case *builtinArithmeticModIntSignedSignedSig:
c.Assert(sig.PbCode(), Equals, tipb.ScalarFuncSig_ModIntSignedSigned)
case *builtinArithmeticModDecimalSig:
c.Assert(sig.PbCode(), Equals, tipb.ScalarFuncSig_ModDecimal)
}
c.Assert(err, IsNil)
c.Assert(val, testutil.DatumEquals, types.NewDatum(tc.expect))
}
}
| expression/builtin_arithmetic_test.go | 0 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.0010348615469411016,
0.00020700329332612455,
0.0001618223759578541,
0.00017036615463439375,
0.0001560903328936547
] |
{
"id": 5,
"code_window": [
"\t\tif isGlobal, _ := config.GetTxnScopeFromConfig(); isGlobal {\n",
"\t\t\treturn oracle.GlobalTxnScope\n",
"\t\t}\n",
"\t\treturn oracle.LocalTxnScope\n",
"\t}()},\n",
"\t/* TiDB specific variables */\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowMPPExecution, Type: TypeBool, Value: BoolToOnOff(DefTiDBAllowMPPExecution)},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: TiDBBCJThresholdCount, Value: strconv.Itoa(DefBroadcastJoinThresholdCount), Type: TypeInt, MinValue: 0, MaxValue: math.MaxInt64, SetSession: func(s *SessionVars, val string) error {\n",
"\t\ts.BroadcastJoinThresholdCount = tidbOptInt64(val, DefBroadcastJoinThresholdCount)\n",
"\t\treturn nil\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/sysvar.go",
"type": "replace",
"edit_start_line_idx": 611
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package variable
import (
"math"
)
// The following sysVars are noops.
// Some applications will depend on certain variables to be present or settable,
// for example query_cache_time. These are included for MySQL compatibility,
// but changing them has no effect on behavior.
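// For example (an illustrative session against a running TiDB instance):
//
//	SET GLOBAL query_cache_type = 'ON'; -- accepted for MySQL compatibility
//	SELECT @@query_cache_type;          -- reads back the stored value,
//	                                    -- though no query cache actually exists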
var noopSysVars = []*SysVar{
{Scope: ScopeGlobal, Name: ConnectTimeout, Value: "10", Type: TypeUnsigned, MinValue: 2, MaxValue: secondsPerYear, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: QueryCacheWlockInvalidate, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "sql_buffer_result", Value: BoolOff, IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: MyISAMUseMmap, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: "gtid_mode", Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: FlushTime, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: secondsPerYear, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "performance_schema_max_mutex_classes", Value: "200"},
{Scope: ScopeGlobal | ScopeSession, Name: LowPriorityUpdates, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: SessionTrackGtids, Value: BoolOff, Type: TypeEnum, PossibleValues: []string{BoolOff, "OWN_GTID", "ALL_GTIDS"}},
{Scope: ScopeGlobal | ScopeSession, Name: "ndbinfo_max_rows", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_index_stat_option", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: OldPasswords, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 2, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "innodb_version", Value: "5.6.25"},
{Scope: ScopeGlobal | ScopeSession, Name: BigTables, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "skip_external_locking", Value: "1"},
{Scope: ScopeNone, Name: "innodb_sync_array_size", Value: "1"},
{Scope: ScopeSession, Name: "rand_seed2", Value: ""},
{Scope: ScopeGlobal, Name: ValidatePasswordCheckUserName, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: ValidatePasswordNumberCount, Value: "1", Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true},
{Scope: ScopeSession, Name: "gtid_next", Value: ""},
{Scope: ScopeGlobal, Name: "ndb_show_foreign_key_mock_tables", Value: ""},
{Scope: ScopeNone, Name: "multi_range_count", Value: "256"},
{Scope: ScopeGlobal | ScopeSession, Name: "binlog_error_action", Value: "IGNORE_ERROR"},
{Scope: ScopeGlobal | ScopeSession, Name: "default_storage_engine", Value: "InnoDB"},
{Scope: ScopeNone, Name: "ft_query_expansion_limit", Value: "20"},
{Scope: ScopeGlobal, Name: MaxConnectErrors, Value: "100", Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal, Name: SyncBinlog, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 4294967295, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "max_digest_length", Value: "1024"},
{Scope: ScopeNone, Name: "innodb_force_load_corrupted", Value: "0"},
{Scope: ScopeNone, Name: "performance_schema_max_table_handles", Value: "4000"},
{Scope: ScopeGlobal, Name: InnodbFastShutdown, Value: "1", Type: TypeUnsigned, MinValue: 0, MaxValue: 2, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "ft_max_word_len", Value: "84"},
{Scope: ScopeGlobal, Name: "log_backward_compatible_user_definitions", Value: ""},
{Scope: ScopeNone, Name: "lc_messages_dir", Value: "/usr/local/mysql-5.6.25-osx10.8-x86_64/share/"},
{Scope: ScopeGlobal, Name: "ft_boolean_syntax", Value: "+ -><()~*:\"\"&|"},
{Scope: ScopeGlobal, Name: TableDefinitionCache, Value: "-1", Type: TypeUnsigned, MinValue: 400, MaxValue: 524288, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: SkipNameResolve, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "performance_schema_max_file_handles", Value: "32768"},
{Scope: ScopeSession, Name: "transaction_allow_batching", Value: ""},
{Scope: ScopeNone, Name: "performance_schema_max_statement_classes", Value: "168"},
{Scope: ScopeGlobal, Name: "server_id", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_flushing_avg_loops", Value: "30"},
{Scope: ScopeGlobal | ScopeSession, Name: TmpTableSize, Value: "16777216", Type: TypeUnsigned, MinValue: 1024, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true, IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "innodb_max_purge_lag", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: "preload_buffer_size", Value: "32768"},
{Scope: ScopeGlobal, Name: CheckProxyUsers, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "have_query_cache", Value: "YES"},
{Scope: ScopeGlobal, Name: "innodb_flush_log_at_timeout", Value: "1"},
{Scope: ScopeGlobal, Name: "innodb_max_undo_log_size", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "range_alloc_block_size", Value: "4096", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "have_rtree_keys", Value: "YES"},
{Scope: ScopeGlobal, Name: "innodb_old_blocks_pct", Value: "37"},
{Scope: ScopeGlobal, Name: "innodb_file_format", Value: "Barracuda", Type: TypeEnum, PossibleValues: []string{"Antelope", "Barracuda"}},
{Scope: ScopeGlobal, Name: "innodb_default_row_format", Value: "dynamic", Type: TypeEnum, PossibleValues: []string{"redundant", "compact", "dynamic"}},
{Scope: ScopeGlobal, Name: "innodb_compression_failure_threshold_pct", Value: "5"},
{Scope: ScopeNone, Name: "performance_schema_events_waits_history_long_size", Value: "10000"},
{Scope: ScopeGlobal, Name: "innodb_checksum_algorithm", Value: "innodb"},
{Scope: ScopeNone, Name: "innodb_ft_sort_pll_degree", Value: "2"},
{Scope: ScopeNone, Name: "thread_stack", Value: "262144"},
{Scope: ScopeGlobal, Name: "relay_log_info_repository", Value: "FILE"},
{Scope: ScopeGlobal, Name: SuperReadOnly, Value: "0", Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "max_delayed_threads", Value: "20"},
{Scope: ScopeNone, Name: "protocol_version", Value: "10"},
{Scope: ScopeGlobal | ScopeSession, Name: "new", Value: BoolOff},
{Scope: ScopeGlobal | ScopeSession, Name: "myisam_sort_buffer_size", Value: "8388608"},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace_offset", Value: "-1"},
{Scope: ScopeGlobal, Name: InnodbBufferPoolDumpAtShutdown, Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: SQLNotes, Value: "1"},
{Scope: ScopeGlobal, Name: InnodbCmpPerIndexEnabled, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: "innodb_ft_server_stopword_table", Value: ""},
{Scope: ScopeNone, Name: "performance_schema_max_file_instances", Value: "7693"},
{Scope: ScopeNone, Name: "log_output", Value: "FILE"},
{Scope: ScopeGlobal, Name: "binlog_group_commit_sync_delay", Value: ""},
{Scope: ScopeGlobal, Name: "binlog_group_commit_sync_no_delay_count", Value: ""},
{Scope: ScopeNone, Name: "have_crypt", Value: "YES"},
{Scope: ScopeGlobal, Name: "innodb_log_write_ahead_size", Value: ""},
{Scope: ScopeNone, Name: "innodb_log_group_home_dir", Value: "./"},
{Scope: ScopeNone, Name: "performance_schema_events_statements_history_size", Value: "10"},
{Scope: ScopeGlobal, Name: GeneralLog, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "validate_password_dictionary_file", Value: ""},
{Scope: ScopeGlobal, Name: BinlogOrderCommits, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal, Name: "key_cache_division_limit", Value: "100"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_insert_delayed_threads", Value: "20"},
{Scope: ScopeNone, Name: "performance_schema_session_connect_attrs_size", Value: "512"},
{Scope: ScopeGlobal, Name: "innodb_max_dirty_pages_pct", Value: "75"},
{Scope: ScopeGlobal, Name: InnodbFilePerTable, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: InnodbLogCompressedPages, Value: "1"},
{Scope: ScopeNone, Name: "skip_networking", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_monitor_reset", Value: ""},
{Scope: ScopeNone, Name: "have_ssl", Value: "DISABLED"},
{Scope: ScopeNone, Name: "have_openssl", Value: "DISABLED"},
{Scope: ScopeNone, Name: "ssl_ca", Value: ""},
{Scope: ScopeNone, Name: "ssl_cert", Value: ""},
{Scope: ScopeNone, Name: "ssl_key", Value: ""},
{Scope: ScopeNone, Name: "ssl_cipher", Value: ""},
{Scope: ScopeNone, Name: "tls_version", Value: "TLSv1,TLSv1.1,TLSv1.2"},
{Scope: ScopeGlobal, Name: InnodbPrintAllDeadlocks, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeNone, Name: "innodb_autoinc_lock_mode", Value: "1"},
{Scope: ScopeGlobal, Name: "key_buffer_size", Value: "8388608"},
{Scope: ScopeGlobal, Name: "host_cache_size", Value: "279"},
{Scope: ScopeGlobal, Name: DelayKeyWrite, Value: BoolOn, Type: TypeEnum, PossibleValues: []string{BoolOff, BoolOn, "ALL"}},
{Scope: ScopeNone, Name: "metadata_locks_cache_size", Value: "1024"},
{Scope: ScopeNone, Name: "innodb_force_recovery", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_file_format_max", Value: "Antelope"},
{Scope: ScopeGlobal | ScopeSession, Name: "debug", Value: ""},
{Scope: ScopeGlobal, Name: "log_warnings", Value: "1"},
{Scope: ScopeGlobal, Name: OfflineMode, Value: "0", Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: InnodbStrictMode, Value: "1", Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: "innodb_rollback_segments", Value: "128"},
{Scope: ScopeGlobal | ScopeSession, Name: "join_buffer_size", Value: "262144", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "innodb_mirrored_log_groups", Value: "1"},
{Scope: ScopeGlobal, Name: "max_binlog_size", Value: "1073741824"},
{Scope: ScopeGlobal, Name: "concurrent_insert", Value: "AUTO"},
{Scope: ScopeGlobal, Name: InnodbAdaptiveHashIndex, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: InnodbFtEnableStopword, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: "general_log_file", Value: "/usr/local/mysql/data/localhost.log"},
{Scope: ScopeGlobal | ScopeSession, Name: InnodbSupportXA, Value: "1"},
{Scope: ScopeGlobal, Name: "innodb_compression_level", Value: "6"},
{Scope: ScopeNone, Name: "innodb_file_format_check", Value: "1"},
{Scope: ScopeNone, Name: "myisam_mmap_size", Value: "18446744073709551615"},
{Scope: ScopeNone, Name: "innodb_buffer_pool_instances", Value: "8"},
{Scope: ScopeGlobal | ScopeSession, Name: BlockEncryptionMode, Value: "aes-128-ecb"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_length_for_sort_data", Value: "1024", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "character_set_system", Value: "utf8"},
{Scope: ScopeGlobal, Name: InnodbOptimizeFullTextOnly, Value: "0"},
{Scope: ScopeNone, Name: "character_sets_dir", Value: "/usr/local/mysql-5.6.25-osx10.8-x86_64/share/charsets/"},
{Scope: ScopeGlobal | ScopeSession, Name: QueryCacheType, Value: BoolOff, Type: TypeEnum, PossibleValues: []string{BoolOff, BoolOn, "DEMAND"}},
{Scope: ScopeNone, Name: "innodb_rollback_on_timeout", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: "query_alloc_block_size", Value: "8192"},
{Scope: ScopeGlobal | ScopeSession, Name: InitConnect, Value: ""},
{Scope: ScopeNone, Name: "have_compress", Value: "YES"},
{Scope: ScopeNone, Name: "thread_concurrency", Value: "10"},
{Scope: ScopeGlobal | ScopeSession, Name: "query_prealloc_size", Value: "8192"},
{Scope: ScopeNone, Name: "relay_log_space_limit", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: MaxUserConnections, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 4294967295, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "performance_schema_max_thread_classes", Value: "50"},
{Scope: ScopeGlobal, Name: "innodb_api_trx_level", Value: "0"},
{Scope: ScopeNone, Name: "disconnect_on_expired_password", Value: "1"},
{Scope: ScopeNone, Name: "performance_schema_max_file_classes", Value: "50"},
{Scope: ScopeGlobal, Name: "expire_logs_days", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: BinlogRowQueryLogEvents, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "default_password_lifetime", Value: ""},
{Scope: ScopeNone, Name: "pid_file", Value: "/usr/local/mysql/data/localhost.pid"},
{Scope: ScopeNone, Name: "innodb_undo_tablespaces", Value: "0"},
{Scope: ScopeGlobal, Name: InnodbStatusOutputLocks, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeNone, Name: "performance_schema_accounts_size", Value: "100"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_error_count", Value: "64", IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "max_write_lock_count", Value: "18446744073709551615"},
{Scope: ScopeNone, Name: "performance_schema_max_socket_instances", Value: "322"},
{Scope: ScopeNone, Name: "performance_schema_max_table_instances", Value: "12500"},
{Scope: ScopeGlobal, Name: "innodb_stats_persistent_sample_pages", Value: "20"},
{Scope: ScopeGlobal, Name: "show_compatibility_56", Value: ""},
{Scope: ScopeNone, Name: "innodb_open_files", Value: "2000"},
{Scope: ScopeGlobal, Name: "innodb_spin_wait_delay", Value: "6"},
{Scope: ScopeGlobal, Name: "thread_cache_size", Value: "9"},
{Scope: ScopeGlobal, Name: LogSlowAdminStatements, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "innodb_checksums", Type: TypeBool, Value: BoolOn},
{Scope: ScopeNone, Name: "ft_stopword_file", Value: "(built-in)"},
{Scope: ScopeGlobal, Name: "innodb_max_dirty_pages_pct_lwm", Value: "0"},
{Scope: ScopeGlobal, Name: LogQueriesNotUsingIndexes, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "max_heap_table_size", Value: "16777216", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: "div_precision_increment", Value: "4", IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "innodb_lru_scan_depth", Value: "1024"},
{Scope: ScopeGlobal, Name: "innodb_purge_rseg_truncate_frequency", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: SQLAutoIsNull, Value: BoolOff, Type: TypeBool, IsHintUpdatable: true},
{Scope: ScopeNone, Name: "innodb_api_enable_binlog", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: "innodb_ft_user_stopword_table", Value: ""},
{Scope: ScopeNone, Name: "server_id_bits", Value: "32"},
{Scope: ScopeGlobal, Name: "innodb_log_checksum_algorithm", Value: ""},
{Scope: ScopeNone, Name: "innodb_buffer_pool_load_at_startup", Value: "1"},
{Scope: ScopeGlobal | ScopeSession, Name: "sort_buffer_size", Value: "262144", IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "innodb_flush_neighbors", Value: "1"},
{Scope: ScopeNone, Name: "innodb_use_sys_malloc", Value: "1"},
{Scope: ScopeSession, Name: PluginLoad, Value: ""},
{Scope: ScopeSession, Name: PluginDir, Value: "/data/deploy/plugin"},
{Scope: ScopeNone, Name: "performance_schema_max_socket_classes", Value: "10"},
{Scope: ScopeNone, Name: "performance_schema_max_stage_classes", Value: "150"},
{Scope: ScopeGlobal, Name: "innodb_purge_batch_size", Value: "300"},
{Scope: ScopeNone, Name: "have_profiling", Value: "NO"},
{Scope: ScopeGlobal, Name: InnodbBufferPoolDumpNow, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: RelayLogPurge, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal, Name: "ndb_distribution", Value: ""},
{Scope: ScopeGlobal, Name: "myisam_data_pointer_size", Value: "6"},
{Scope: ScopeGlobal, Name: "ndb_optimization_delay", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_ft_num_word_optimize", Value: "2000"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_join_size", Value: "18446744073709551615", IsHintUpdatable: true},
{Scope: ScopeNone, Name: CoreFile, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "max_seeks_for_key", Value: "18446744073709551615", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "innodb_log_buffer_size", Value: "8388608"},
{Scope: ScopeGlobal, Name: "delayed_insert_timeout", Value: "300"},
{Scope: ScopeGlobal, Name: "max_relay_log_size", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: MaxSortLength, Value: "1024", Type: TypeUnsigned, MinValue: 4, MaxValue: 8388608, AutoConvertOutOfRange: true, IsHintUpdatable: true},
{Scope: ScopeNone, Name: "metadata_locks_hash_instances", Value: "8"},
{Scope: ScopeGlobal, Name: "ndb_eventbuffer_free_percent", Value: ""},
{Scope: ScopeNone, Name: "large_files_support", Value: "1"},
{Scope: ScopeGlobal, Name: "binlog_max_flush_queue_time", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_fill_factor", Value: ""},
{Scope: ScopeGlobal, Name: "log_syslog_facility", Value: ""},
{Scope: ScopeNone, Name: "innodb_ft_min_token_size", Value: "3"},
{Scope: ScopeGlobal | ScopeSession, Name: "transaction_write_set_extraction", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_blob_write_batch_bytes", Value: ""},
{Scope: ScopeGlobal, Name: "automatic_sp_privileges", Value: "1"},
{Scope: ScopeGlobal, Name: "innodb_flush_sync", Value: ""},
{Scope: ScopeNone, Name: "performance_schema_events_statements_history_long_size", Value: "10000"},
{Scope: ScopeGlobal, Name: "innodb_monitor_disable", Value: ""},
{Scope: ScopeNone, Name: "innodb_doublewrite", Value: "1"},
{Scope: ScopeNone, Name: "log_bin_use_v1_row_events", Value: "0"},
{Scope: ScopeSession, Name: "innodb_optimize_point_storage", Value: ""},
{Scope: ScopeNone, Name: "innodb_api_disable_rowlock", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_adaptive_flushing_lwm", Value: "10"},
{Scope: ScopeNone, Name: "innodb_log_files_in_group", Value: "2"},
{Scope: ScopeGlobal, Name: InnodbBufferPoolLoadNow, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeNone, Name: "performance_schema_max_rwlock_classes", Value: "40"},
{Scope: ScopeNone, Name: "binlog_gtid_simple_recovery", Value: "1"},
{Scope: ScopeNone, Name: "performance_schema_digests_size", Value: "10000"},
{Scope: ScopeGlobal | ScopeSession, Name: Profiling, Value: BoolOff, Type: TypeBool},
{Scope: ScopeSession, Name: "rand_seed1", Value: ""},
{Scope: ScopeGlobal, Name: "sha256_password_proxy_users", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: SQLQuoteShowCreate, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "binlogging_impossible_mode", Value: "IGNORE_ERROR"},
{Scope: ScopeGlobal | ScopeSession, Name: QueryCacheSize, Value: "1048576"},
{Scope: ScopeGlobal, Name: "innodb_stats_transient_sample_pages", Value: "8"},
{Scope: ScopeGlobal, Name: InnodbStatsOnMetadata, Value: "0"},
{Scope: ScopeNone, Name: "server_uuid", Value: "00000000-0000-0000-0000-000000000000"},
{Scope: ScopeNone, Name: "open_files_limit", Value: "5000"},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_force_send", Value: ""},
{Scope: ScopeNone, Name: "skip_show_database", Value: "0"},
{Scope: ScopeGlobal, Name: "log_timestamps", Value: ""},
{Scope: ScopeNone, Name: "version_compile_machine", Value: "x86_64"},
{Scope: ScopeGlobal, Name: "event_scheduler", Value: BoolOff},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_deferred_constraints", Value: ""},
{Scope: ScopeGlobal, Name: "log_syslog_include_pid", Value: ""},
{Scope: ScopeSession, Name: "last_insert_id", Value: ""},
{Scope: ScopeNone, Name: "innodb_ft_cache_size", Value: "8000000"},
{Scope: ScopeGlobal, Name: InnodbDisableSortFileCache, Value: "0"},
{Scope: ScopeGlobal, Name: "log_error_verbosity", Value: ""},
{Scope: ScopeNone, Name: "performance_schema_hosts_size", Value: "100"},
{Scope: ScopeGlobal, Name: "innodb_replication_delay", Value: "0"},
{Scope: ScopeGlobal, Name: SlowQueryLog, Value: "0"},
{Scope: ScopeSession, Name: "debug_sync", Value: ""},
{Scope: ScopeGlobal, Name: InnodbStatsAutoRecalc, Value: "1"},
{Scope: ScopeGlobal | ScopeSession, Name: "lc_messages", Value: "en_US"},
{Scope: ScopeGlobal | ScopeSession, Name: "bulk_insert_buffer_size", Value: "8388608", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: BinlogDirectNonTransactionalUpdates, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "innodb_change_buffering", Value: "all"},
{Scope: ScopeGlobal | ScopeSession, Name: SQLBigSelects, Value: BoolOn, Type: TypeBool, IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "innodb_max_purge_lag_delay", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: "session_track_schema", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_io_capacity_max", Value: "2000"},
{Scope: ScopeGlobal, Name: "innodb_autoextend_increment", Value: "64"},
{Scope: ScopeGlobal | ScopeSession, Name: "binlog_format", Value: "STATEMENT"},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace", Value: "enabled=off,one_line=off"},
{Scope: ScopeGlobal | ScopeSession, Name: "read_rnd_buffer_size", Value: "262144", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: NetWriteTimeout, Value: "60"},
{Scope: ScopeGlobal, Name: InnodbBufferPoolLoadAbort, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal | ScopeSession, Name: "transaction_prealloc_size", Value: "4096"},
{Scope: ScopeNone, Name: "performance_schema_setup_objects_size", Value: "100"},
{Scope: ScopeGlobal, Name: "sync_relay_log", Value: "10000"},
{Scope: ScopeGlobal, Name: "innodb_ft_result_cache_limit", Value: "2000000000"},
{Scope: ScopeNone, Name: "innodb_sort_buffer_size", Value: "1048576"},
{Scope: ScopeGlobal, Name: "innodb_ft_enable_diag_print", Type: TypeBool, Value: BoolOff},
{Scope: ScopeNone, Name: "thread_handling", Value: "one-thread-per-connection"},
{Scope: ScopeGlobal, Name: "stored_program_cache", Value: "256"},
{Scope: ScopeNone, Name: "performance_schema_max_mutex_instances", Value: "15906"},
{Scope: ScopeGlobal, Name: "innodb_adaptive_max_sleep_delay", Value: "150000"},
{Scope: ScopeNone, Name: "large_pages", Value: BoolOff},
{Scope: ScopeGlobal | ScopeSession, Name: "session_track_system_variables", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_change_buffer_max_size", Value: "25"},
{Scope: ScopeGlobal, Name: LogBinTrustFunctionCreators, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "innodb_write_io_threads", Value: "4"},
{Scope: ScopeGlobal, Name: "mysql_native_password_proxy_users", Value: ""},
{Scope: ScopeGlobal, Name: serverReadOnly, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "large_page_size", Value: "0"},
{Scope: ScopeNone, Name: "table_open_cache_instances", Value: "1"},
{Scope: ScopeGlobal, Name: InnodbStatsPersistent, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal | ScopeSession, Name: "session_track_state_change", Value: ""},
{Scope: ScopeNone, Name: OptimizerSwitch, Value: "index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,engine_condition_pushdown=on,index_condition_pushdown=on,mrr=on,mrr_cost_based=on,block_nested_loop=on,batched_key_access=off,materialization=on,semijoin=on,loosescan=on,firstmatch=on,subquery_materialization_cost_based=on,use_index_extensions=on", IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "delayed_queue_size", Value: "1000"},
{Scope: ScopeNone, Name: "innodb_read_only", Value: "0"},
{Scope: ScopeNone, Name: "datetime_format", Value: "%Y-%m-%d %H:%i:%s"},
{Scope: ScopeGlobal, Name: "log_syslog", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "transaction_alloc_block_size", Value: "8192"},
{Scope: ScopeGlobal, Name: "innodb_large_prefix", Type: TypeBool, Value: BoolOff},
{Scope: ScopeNone, Name: "performance_schema_max_cond_classes", Value: "80"},
{Scope: ScopeGlobal, Name: "innodb_io_capacity", Value: "200"},
{Scope: ScopeGlobal, Name: "max_binlog_cache_size", Value: "18446744073709547520"},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_index_stat_enable", Value: ""},
{Scope: ScopeGlobal, Name: "executed_gtids_compression_period", Value: ""},
{Scope: ScopeNone, Name: "time_format", Value: "%H:%i:%s"},
{Scope: ScopeGlobal | ScopeSession, Name: OldAlterTable, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "long_query_time", Value: "10.000000"},
{Scope: ScopeNone, Name: "innodb_use_native_aio", Value: "0"},
{Scope: ScopeGlobal, Name: "log_throttle_queries_not_using_indexes", Value: "0"},
{Scope: ScopeNone, Name: "locked_in_memory", Value: "0"},
{Scope: ScopeNone, Name: "innodb_api_enable_mdl", Value: "0"},
{Scope: ScopeGlobal, Name: "binlog_cache_size", Value: "32768"},
{Scope: ScopeGlobal, Name: "innodb_compression_pad_pct_max", Value: "50"},
{Scope: ScopeGlobal, Name: InnodbCommitConcurrency, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 1000, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "ft_min_word_len", Value: "4"},
{Scope: ScopeGlobal, Name: EnforceGtidConsistency, Value: BoolOff, Type: TypeEnum, PossibleValues: []string{BoolOff, BoolOn, "WARN"}},
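	// secure_auth can only be enabled: the validation below normalizes any truthy
	// value to ON and rejects attempts to turn it off with ErrWrongValueForVar.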
{Scope: ScopeGlobal, Name: SecureAuth, Value: BoolOn, Type: TypeBool, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if TiDBOptOn(normalizedValue) {
return BoolOn, nil
}
return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs(SecureAuth, originalValue)
}},
{Scope: ScopeNone, Name: "max_tmp_tables", Value: "32"},
{Scope: ScopeGlobal, Name: InnodbRandomReadAhead, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal | ScopeSession, Name: UniqueChecks, Value: BoolOn, Type: TypeBool, IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "internal_tmp_disk_storage_engine", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "myisam_repair_threads", Value: "1"},
{Scope: ScopeGlobal, Name: "ndb_eventbuffer_max_alloc", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_read_ahead_threshold", Value: "56"},
{Scope: ScopeGlobal, Name: "key_cache_block_size", Value: "1024"},
{Scope: ScopeNone, Name: "ndb_recv_thread_cpu_mask", Value: ""},
{Scope: ScopeGlobal, Name: "gtid_purged", Value: ""},
{Scope: ScopeGlobal, Name: "max_binlog_stmt_cache_size", Value: "18446744073709547520"},
{Scope: ScopeGlobal | ScopeSession, Name: "lock_wait_timeout", Value: "31536000"},
{Scope: ScopeGlobal | ScopeSession, Name: "read_buffer_size", Value: "131072", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "innodb_read_io_threads", Value: "4"},
{Scope: ScopeGlobal | ScopeSession, Name: MaxSpRecursionDepth, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 255, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "ignore_builtin_innodb", Value: "0"},
{Scope: ScopeGlobal, Name: "slow_query_log_file", Value: "/usr/local/mysql/data/localhost-slow.log"},
{Scope: ScopeGlobal, Name: "innodb_thread_sleep_delay", Value: "10000"},
{Scope: ScopeNone, Name: "license", Value: "Apache License 2.0"},
{Scope: ScopeGlobal, Name: "innodb_ft_aux_table", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: SQLWarnings, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: KeepFilesOnCreate, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "innodb_data_file_path", Value: "ibdata1:12M:autoextend"},
{Scope: ScopeNone, Name: "performance_schema_setup_actors_size", Value: "100"},
{Scope: ScopeNone, Name: "innodb_additional_mem_pool_size", Value: "8388608"},
{Scope: ScopeNone, Name: "log_error", Value: "/usr/local/mysql/data/localhost.err"},
{Scope: ScopeGlobal, Name: "binlog_stmt_cache_size", Value: "32768"},
{Scope: ScopeNone, Name: "relay_log_info_file", Value: "relay-log.info"},
{Scope: ScopeNone, Name: "innodb_ft_total_cache_size", Value: "640000000"},
{Scope: ScopeNone, Name: "performance_schema_max_rwlock_instances", Value: "9102"},
{Scope: ScopeGlobal, Name: "table_open_cache", Value: "2000"},
{Scope: ScopeNone, Name: "performance_schema_events_stages_history_long_size", Value: "10000"},
{Scope: ScopeSession, Name: "insert_id", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "default_tmp_storage_engine", Value: "InnoDB", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_search_depth", Value: "62", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: "max_points_in_geometry", Value: "65536", IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "innodb_stats_sample_pages", Value: "8"},
{Scope: ScopeGlobal | ScopeSession, Name: "profiling_history_size", Value: "15"},
{Scope: ScopeNone, Name: "have_symlink", Value: "YES"},
{Scope: ScopeGlobal | ScopeSession, Name: "storage_engine", Value: "InnoDB"},
{Scope: ScopeGlobal | ScopeSession, Name: "sql_log_off", Value: "0"},
	// In MySQL, the default value of `explicit_defaults_for_timestamp` is `0`.
	// In TiDB it is set to `1` to stay consistent with TiDB's timestamp behavior.
	// See https://github.com/pingcap/tidb/pull/6068 for details.
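	// Illustrative example (not from this commit): with the MySQL default of 0,
	// `CREATE TABLE t (ts TIMESTAMP)` implicitly makes ts NOT NULL with
	// DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP; with the value 1,
	// ts is simply a nullable column with no automatic attributes.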
{Scope: ScopeNone, Name: "explicit_defaults_for_timestamp", Value: BoolOn, Type: TypeBool},
{Scope: ScopeNone, Name: "performance_schema_events_waits_history_size", Value: "10"},
{Scope: ScopeGlobal, Name: "log_syslog_tag", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_undo_log_truncate", Value: ""},
{Scope: ScopeSession, Name: "innodb_create_intrinsic", Value: ""},
{Scope: ScopeGlobal, Name: "gtid_executed_compression_period", Value: ""},
{Scope: ScopeGlobal, Name: "ndb_log_empty_epochs", Value: ""},
{Scope: ScopeNone, Name: "have_geometry", Value: "YES"},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace_max_mem_size", Value: "16384"},
{Scope: ScopeGlobal | ScopeSession, Name: "net_retry_count", Value: "10"},
{Scope: ScopeSession, Name: "ndb_table_no_logging", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace_features", Value: "greedy_search=on,range_optimizer=on,dynamic_range=on,repeated_subselect=on"},
{Scope: ScopeGlobal, Name: "innodb_flush_log_at_trx_commit", Value: "1"},
{Scope: ScopeGlobal, Name: "rewriter_enabled", Value: ""},
{Scope: ScopeGlobal, Name: "query_cache_min_res_unit", Value: "4096"},
{Scope: ScopeGlobal | ScopeSession, Name: "updatable_views_with_limit", Value: "YES", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_prune_level", Value: "1", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: "completion_type", Value: "NO_CHAIN"},
{Scope: ScopeGlobal, Name: "binlog_checksum", Value: "CRC32"},
{Scope: ScopeNone, Name: "report_port", Value: "3306"},
{Scope: ScopeGlobal | ScopeSession, Name: ShowOldTemporals, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "query_cache_limit", Value: "1048576"},
{Scope: ScopeGlobal, Name: "innodb_buffer_pool_size", Value: "134217728"},
{Scope: ScopeGlobal, Name: InnodbAdaptiveFlushing, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: "innodb_monitor_enable", Value: ""},
{Scope: ScopeNone, Name: "date_format", Value: "%Y-%m-%d"},
{Scope: ScopeGlobal, Name: "innodb_buffer_pool_filename", Value: "ib_buffer_pool"},
{Scope: ScopeGlobal, Name: "slow_launch_time", Value: "2"},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_use_transactions", Value: ""},
{Scope: ScopeNone, Name: "innodb_purge_threads", Value: "1"},
{Scope: ScopeGlobal, Name: "innodb_concurrency_tickets", Value: "5000"},
{Scope: ScopeGlobal, Name: "innodb_monitor_reset_all", Value: ""},
{Scope: ScopeNone, Name: "performance_schema_users_size", Value: "100"},
{Scope: ScopeGlobal, Name: "ndb_log_updated_only", Value: ""},
{Scope: ScopeNone, Name: "basedir", Value: "/usr/local/mysql"},
{Scope: ScopeGlobal, Name: "innodb_old_blocks_time", Value: "1000"},
{Scope: ScopeGlobal, Name: "innodb_stats_method", Value: "nulls_equal"},
{Scope: ScopeGlobal, Name: LocalInFile, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "myisam_stats_method", Value: "nulls_unequal"},
{Scope: ScopeNone, Name: "version_compile_os", Value: "osx10.8"},
{Scope: ScopeNone, Name: "relay_log_recovery", Value: "0"},
{Scope: ScopeNone, Name: "old", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: InnodbTableLocks, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeNone, Name: PerformanceSchema, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "myisam_recover_options", Value: BoolOff},
{Scope: ScopeGlobal | ScopeSession, Name: NetBufferLength, Value: "16384"},
{Scope: ScopeGlobal | ScopeSession, Name: "binlog_row_image", Value: "FULL"},
{Scope: ScopeNone, Name: "innodb_locks_unsafe_for_binlog", Value: "0"},
{Scope: ScopeSession, Name: "rbr_exec_mode", Value: ""},
{Scope: ScopeGlobal, Name: "myisam_max_sort_file_size", Value: "9223372036853727232"},
{Scope: ScopeNone, Name: "back_log", Value: "80"},
{Scope: ScopeSession, Name: "pseudo_thread_id", Value: ""},
{Scope: ScopeNone, Name: "have_dynamic_loading", Value: "YES"},
{Scope: ScopeGlobal, Name: "rewriter_verbose", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_undo_logs", Value: "128"},
{Scope: ScopeNone, Name: "performance_schema_max_cond_instances", Value: "3504"},
{Scope: ScopeGlobal, Name: "delayed_insert_limit", Value: "100"},
{Scope: ScopeGlobal, Name: Flush, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "eq_range_index_dive_limit", Value: "200", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "performance_schema_events_stages_history_size", Value: "10"},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_join_pushdown", Value: ""},
{Scope: ScopeGlobal, Name: "validate_password_special_char_count", Value: "1"},
{Scope: ScopeNone, Name: "performance_schema_max_thread_instances", Value: "402"},
{Scope: ScopeGlobal | ScopeSession, Name: "ndbinfo_show_hidden", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "net_read_timeout", Value: "30"},
{Scope: ScopeNone, Name: "innodb_page_size", Value: "16384"},
{Scope: ScopeNone, Name: "innodb_log_file_size", Value: "50331648"},
{Scope: ScopeGlobal, Name: "sync_relay_log_info", Value: "10000"},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace_limit", Value: "1"},
{Scope: ScopeNone, Name: "innodb_ft_max_token_size", Value: "84"},
{Scope: ScopeGlobal, Name: ValidatePasswordLength, Value: "8", Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal, Name: "ndb_log_binlog_index", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_api_bk_commit_interval", Value: "5"},
{Scope: ScopeNone, Name: "innodb_undo_directory", Value: "."},
{Scope: ScopeNone, Name: "bind_address", Value: "*"},
{Scope: ScopeGlobal, Name: "innodb_sync_spin_loops", Value: "30"},
{Scope: ScopeGlobal | ScopeSession, Name: SQLSafeUpdates, Value: BoolOff, Type: TypeBool, IsHintUpdatable: true},
{Scope: ScopeNone, Name: "tmpdir", Value: "/var/tmp/"},
{Scope: ScopeGlobal, Name: "innodb_thread_concurrency", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_buffer_pool_dump_pct", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "lc_time_names", Value: "en_US"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_statement_time", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: EndMarkersInJSON, Value: BoolOff, Type: TypeBool, IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: AvoidTemporalUpgrade, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "key_cache_age_threshold", Value: "300"},
{Scope: ScopeGlobal, Name: InnodbStatusOutput, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeSession, Name: "identity", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "min_examined_row_limit", Value: "0"},
{Scope: ScopeGlobal, Name: "sync_frm", Type: TypeBool, Value: BoolOn},
{Scope: ScopeGlobal, Name: "innodb_online_alter_log_max_size", Value: "134217728"},
{Scope: ScopeGlobal | ScopeSession, Name: "information_schema_stats_expiry", Value: "86400"},
{Scope: ScopeGlobal, Name: ThreadPoolSize, Value: "16", Type: TypeUnsigned, MinValue: 1, MaxValue: 64, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "lower_case_file_system", Value: "1"},
	// For compatibility purposes, we should leave them alone.
	// TODO: Follow MySQL's terminology updates once their changes arrive.
	// https://mysqlhighavailability.com/mysql-terminology-updates/
{Scope: ScopeSession, Name: PseudoSlaveMode, Value: "", Type: TypeInt},
{Scope: ScopeGlobal, Name: "slave_pending_jobs_size_max", Value: "16777216"},
{Scope: ScopeGlobal, Name: "slave_transaction_retries", Value: "10"},
{Scope: ScopeGlobal, Name: "slave_checkpoint_period", Value: "300"},
{Scope: ScopeGlobal, Name: MasterVerifyChecksum, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_trace_level", Value: ""},
{Scope: ScopeGlobal, Name: "master_info_repository", Value: "FILE"},
{Scope: ScopeGlobal, Name: "rpl_stop_slave_timeout", Value: "31536000"},
{Scope: ScopeGlobal, Name: "slave_net_timeout", Value: "3600"},
{Scope: ScopeGlobal, Name: "sync_master_info", Value: "10000"},
{Scope: ScopeGlobal, Name: "init_slave", Value: ""},
{Scope: ScopeGlobal, Name: SlaveCompressedProtocol, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_slave_trace_level", Value: ""},
{Scope: ScopeGlobal, Name: LogSlowSlaveStatements, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "slave_checkpoint_group", Value: "512"},
{Scope: ScopeNone, Name: "slave_load_tmpdir", Value: "/var/tmp/"},
{Scope: ScopeGlobal, Name: "slave_parallel_type", Value: ""},
{Scope: ScopeGlobal, Name: "slave_parallel_workers", Value: "0"},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_timeout", Value: "10000", Type: TypeInt},
{Scope: ScopeNone, Name: "slave_skip_errors", Value: BoolOff},
{Scope: ScopeGlobal, Name: "sql_slave_skip_counter", Value: "0"},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_slave_enabled", Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_enabled", Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "slave_preserve_commit_order", Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "slave_exec_mode", Value: "STRICT"},
{Scope: ScopeNone, Name: "log_slave_updates", Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_wait_point", Value: "AFTER_SYNC", Type: TypeEnum, PossibleValues: []string{"AFTER_SYNC", "AFTER_COMMIT"}},
{Scope: ScopeGlobal, Name: "slave_sql_verify_checksum", Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal, Name: "slave_max_allowed_packet", Value: "1073741824"},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_wait_for_slave_count", Value: "1", Type: TypeInt, MinValue: 1, MaxValue: 65535},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_wait_no_slave", Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal, Name: "slave_rows_search_algorithms", Value: "TABLE_SCAN,INDEX_SCAN"},
{Scope: ScopeGlobal, Name: SlaveAllowBatching, Value: BoolOff, Type: TypeBool},
}
| sessionctx/variable/noop.go | 1 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.0011191116645932198,
0.0002104661543853581,
0.0001642518473090604,
0.00017790061247069389,
0.00013321016740519553
] |
{
"id": 5,
"code_window": [
"\t\tif isGlobal, _ := config.GetTxnScopeFromConfig(); isGlobal {\n",
"\t\t\treturn oracle.GlobalTxnScope\n",
"\t\t}\n",
"\t\treturn oracle.LocalTxnScope\n",
"\t}()},\n",
"\t/* TiDB specific variables */\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowMPPExecution, Type: TypeBool, Value: BoolToOnOff(DefTiDBAllowMPPExecution)},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: TiDBBCJThresholdCount, Value: strconv.Itoa(DefBroadcastJoinThresholdCount), Type: TypeInt, MinValue: 0, MaxValue: math.MaxInt64, SetSession: func(s *SessionVars, val string) error {\n",
"\t\ts.BroadcastJoinThresholdCount = tidbOptInt64(val, DefBroadcastJoinThresholdCount)\n",
"\t\treturn nil\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/sysvar.go",
"type": "replace",
"edit_start_line_idx": 611
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package expression
import (
"math/rand"
"testing"
. "github.com/pingcap/check"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/charset"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/types"
)
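// randSpaceStrGener generates random strings whose length falls in
// [lenBegin, lenEnd), mixing digits, letters, and a high proportion of spaces;
// it is used below to exercise the TRIM family of functions.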
type randSpaceStrGener struct {
lenBegin int
lenEnd int
}
func (g *randSpaceStrGener) gen() interface{} {
n := rand.Intn(g.lenEnd-g.lenBegin) + g.lenBegin
buf := make([]byte, n)
for i := range buf {
x := rand.Intn(150)
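		// Map x to '0'-'9' (10/150), 'a'-'z' (26/150), 'A'-'Z' (26/150),
		// or a space (88/150 of the time).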
if x < 10 {
buf[i] = byte('0' + x)
} else if x-10 < 26 {
buf[i] = byte('a' + x - 10)
} else if x < 62 {
buf[i] = byte('A' + x - 10 - 26)
} else {
buf[i] = byte(' ')
}
}
return string(buf)
}
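// vecBuiltinStringCases enumerates the cases used by the tests and benchmarks
// below; each entry pairs a builtin name with its return type, child types,
// optional field types (e.g. binary collation), and data generators.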
var vecBuiltinStringCases = map[string][]vecExprBenchCase{
ast.Length: {
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{newDefaultGener(0.2, types.ETString)}},
},
ast.ASCII: {
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{newDefaultGener(0.2, types.ETString)}},
},
ast.Concat: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}},
},
ast.ConcatWS: {
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString, types.ETString},
geners: []dataGenerator{&constStrGener{","}},
},
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString, types.ETString},
geners: []dataGenerator{newDefaultGener(1, types.ETString)},
},
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString},
geners: []dataGenerator{
&constStrGener{"<------------------>"},
&constStrGener{"1413006"},
&constStrGener{"idlfmv"},
},
},
},
ast.Convert: {
{
retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETString},
constants: []*Constant{nil, {Value: types.NewDatum("utf8"), RetType: types.NewFieldType(mysql.TypeString)}},
},
{
retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETString},
constants: []*Constant{nil, {Value: types.NewDatum("binary"), RetType: types.NewFieldType(mysql.TypeString)}},
},
{
retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETString},
constants: []*Constant{nil, {Value: types.NewDatum("utf8mb4"), RetType: types.NewFieldType(mysql.TypeString)}},
},
{
retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETString},
constants: []*Constant{nil, {Value: types.NewDatum("ascii"), RetType: types.NewFieldType(mysql.TypeString)}},
},
{
retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETString},
constants: []*Constant{nil, {Value: types.NewDatum("latin1"), RetType: types.NewFieldType(mysql.TypeString)}},
},
},
ast.Substring: {
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETString, types.ETInt},
geners: []dataGenerator{newRandLenStrGener(0, 20), newRangeInt64Gener(-25, 25)},
},
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETInt},
geners: []dataGenerator{newRandLenStrGener(0, 20), newRangeInt64Gener(-25, 25), newRangeInt64Gener(-25, 25)},
},
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETInt},
childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin},
{Tp: mysql.TypeLonglong}, {Tp: mysql.TypeLonglong}},
geners: []dataGenerator{newRandLenStrGener(0, 20), newRangeInt64Gener(-25, 25), newRangeInt64Gener(-25, 25)},
},
},
ast.SubstringIndex: {
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETInt},
geners: []dataGenerator{newRandLenStrGener(0, 20), newRandLenStrGener(0, 2), newRangeInt64Gener(-4, 4)},
},
},
ast.Locate: {
{
retEvalType: types.ETInt,
childrenTypes: []types.EvalType{types.ETString, types.ETString},
geners: []dataGenerator{newRandLenStrGener(0, 10), newRandLenStrGener(0, 20)},
},
{
retEvalType: types.ETInt,
childrenTypes: []types.EvalType{types.ETString, types.ETString},
geners: []dataGenerator{newRandLenStrGener(1, 2), newRandLenStrGener(0, 20)},
},
{
retEvalType: types.ETInt,
childrenTypes: []types.EvalType{types.ETString, types.ETString},
geners: []dataGenerator{newSelectStringGener([]string{"01", "10", "001", "110", "0001", "1110"}), newSelectStringGener([]string{"010010001000010", "101101110111101"})},
},
{
retEvalType: types.ETInt,
childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETInt},
geners: []dataGenerator{newRandLenStrGener(0, 10), newRandLenStrGener(0, 20), newRangeInt64Gener(-10, 20)},
},
{
retEvalType: types.ETInt,
childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETInt},
geners: []dataGenerator{newRandLenStrGener(1, 2), newRandLenStrGener(0, 10), newRangeInt64Gener(0, 8)},
},
{
retEvalType: types.ETInt,
childrenTypes: []types.EvalType{types.ETString, types.ETString},
geners: []dataGenerator{newSelectStringGener([]string{"01", "10", "001", "110", "0001", "1110"}), newSelectStringGener([]string{"010010001000010", "101101110111101"})},
},
{
retEvalType: types.ETInt,
childrenTypes: []types.EvalType{types.ETString, types.ETString},
childrenFieldTypes: []*types.FieldType{nil, {Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}},
geners: []dataGenerator{newRandLenStrGener(0, 10), newRandLenStrGener(0, 20)},
},
{
retEvalType: types.ETInt,
childrenTypes: []types.EvalType{types.ETString, types.ETString},
childrenFieldTypes: []*types.FieldType{nil, {Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}},
geners: []dataGenerator{newRandLenStrGener(1, 2), newRandLenStrGener(0, 20)},
},
{
retEvalType: types.ETInt,
childrenTypes: []types.EvalType{types.ETString, types.ETString},
childrenFieldTypes: []*types.FieldType{nil, {Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}},
geners: []dataGenerator{newSelectStringGener([]string{"01", "10", "001", "110", "0001", "1110"}), newSelectStringGener([]string{"010010001000010", "101101110111101"})},
},
{
retEvalType: types.ETInt,
childrenTypes: []types.EvalType{types.ETString, types.ETString},
childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}, nil},
geners: []dataGenerator{newRandLenStrGener(0, 10), newRandLenStrGener(0, 20)},
},
{
retEvalType: types.ETInt,
childrenTypes: []types.EvalType{types.ETString, types.ETString},
childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}, nil},
geners: []dataGenerator{newRandLenStrGener(1, 2), newRandLenStrGener(0, 20)},
},
{
retEvalType: types.ETInt,
childrenTypes: []types.EvalType{types.ETString, types.ETString},
childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}, nil},
geners: []dataGenerator{newSelectStringGener([]string{"01", "10", "001", "110", "0001", "1110"}), newSelectStringGener([]string{"010010001000010", "101101110111101"})},
},
{
retEvalType: types.ETInt,
childrenTypes: []types.EvalType{types.ETString, types.ETString},
childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}, {Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}},
geners: []dataGenerator{newRandLenStrGener(0, 10), newRandLenStrGener(0, 20)},
},
{
retEvalType: types.ETInt,
childrenTypes: []types.EvalType{types.ETString, types.ETString},
childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}, {Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}},
geners: []dataGenerator{newRandLenStrGener(1, 2), newRandLenStrGener(0, 20)},
},
{
retEvalType: types.ETInt,
childrenTypes: []types.EvalType{types.ETString, types.ETString},
childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}, {Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}},
geners: []dataGenerator{newSelectStringGener([]string{"01", "10", "001", "110", "0001", "1110"}), newSelectStringGener([]string{"010010001000010", "101101110111101"})},
},
{
retEvalType: types.ETInt,
childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETInt},
childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}, {Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}, {Tp: mysql.TypeInt24}},
geners: []dataGenerator{newRandLenStrGener(0, 10), newRandLenStrGener(0, 20), newRangeInt64Gener(-10, 20)},
},
{
retEvalType: types.ETInt,
childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETInt},
childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}, {Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}, {Tp: mysql.TypeInt24}},
geners: []dataGenerator{newSelectStringGener([]string{"01", "10", "001", "110", "0001", "1110"}), newSelectStringGener([]string{"010010001000010", "101101110111101"}), newRangeInt64Gener(-10, 20)},
},
},
ast.Hex: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{newRandHexStrGener(10, 100)}},
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETInt}},
},
ast.Unhex: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{newRandHexStrGener(10, 100)}},
},
ast.Trim: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&randSpaceStrGener{10, 100}}},
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETString}, geners: []dataGenerator{newRandLenStrGener(10, 20), newRandLenStrGener(5, 25)}},
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETInt},
geners: []dataGenerator{newRandLenStrGener(10, 20), newRandLenStrGener(5, 25), nil},
constants: []*Constant{nil, nil, {Value: types.NewDatum(ast.TrimBoth), RetType: types.NewFieldType(mysql.TypeLonglong)}},
},
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETInt},
geners: []dataGenerator{newRandLenStrGener(10, 20), newRandLenStrGener(5, 25), nil},
constants: []*Constant{nil, nil, {Value: types.NewDatum(ast.TrimLeading), RetType: types.NewFieldType(mysql.TypeLonglong)}},
},
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETInt},
geners: []dataGenerator{newRandLenStrGener(10, 20), newRandLenStrGener(5, 25), nil},
constants: []*Constant{nil, nil, {Value: types.NewDatum(ast.TrimTrailing), RetType: types.NewFieldType(mysql.TypeLonglong)}},
},
},
ast.LTrim: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&randSpaceStrGener{10, 100}}},
},
ast.RTrim: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&randSpaceStrGener{10, 100}}},
},
ast.Lpad: {
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
geners: []dataGenerator{newRandLenStrGener(0, 20), newRangeInt64Gener(168435456, 368435456), newRandLenStrGener(0, 10)},
},
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
geners: []dataGenerator{newDefaultGener(0.2, types.ETString), newDefaultGener(0.2, types.ETInt), newDefaultGener(0.2, types.ETString)},
},
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}},
geners: []dataGenerator{newRandLenStrGener(0, 20), newRangeInt64Gener(168435456, 368435456), newRandLenStrGener(0, 10)},
},
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}},
geners: []dataGenerator{newDefaultGener(0.2, types.ETString), newDefaultGener(0.2, types.ETInt), newDefaultGener(0.2, types.ETString)},
},
},
ast.Rpad: {
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
geners: []dataGenerator{newRandLenStrGener(0, 20), newRangeInt64Gener(168435456, 368435456), newRandLenStrGener(0, 10)},
},
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
geners: []dataGenerator{newDefaultGener(0.2, types.ETString), newDefaultGener(0.2, types.ETInt), newDefaultGener(0.2, types.ETString)},
},
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}},
geners: []dataGenerator{newRandLenStrGener(0, 20), newRangeInt64Gener(168435456, 368435456), newRandLenStrGener(0, 10)},
},
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}},
geners: []dataGenerator{newDefaultGener(0.2, types.ETString), newDefaultGener(0.2, types.ETInt), newDefaultGener(0.2, types.ETString)},
},
},
ast.CharLength: {
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}},
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString},
childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}},
},
},
ast.BitLength: {
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}},
},
ast.CharFunc: {
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETInt, types.ETString},
geners: []dataGenerator{&charInt64Gener{}, &charInt64Gener{}, &charInt64Gener{}, nil},
constants: []*Constant{nil, nil, nil, {Value: types.NewDatum("ascii"), RetType: types.NewFieldType(mysql.TypeString)}},
},
},
ast.FindInSet: {
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETString}},
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETString}, geners: []dataGenerator{&constStrGener{"case"}, &constStrGener{"test,case"}}},
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETString}, geners: []dataGenerator{&constStrGener{""}, &constStrGener{"test,case"}}},
},
ast.MakeSet: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString, types.ETString, types.ETString, types.ETString, types.ETString, types.ETString, types.ETString}},
},
ast.Oct: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETInt}},
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&numStrGener{*newRangeInt64Gener(-10, 10)}}},
},
ast.Quote: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}},
},
ast.Ord: {
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}},
},
ast.Bin: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETInt}},
},
ast.ToBase64: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{newRandLenStrGener(0, 10)}},
},
ast.FromBase64: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{newRandLenStrGener(10, 100)}},
},
ast.ExportSet: {
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
geners: []dataGenerator{newRangeInt64Gener(10, 100), &constStrGener{"Y"}, &constStrGener{"N"}},
},
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString, types.ETString},
geners: []dataGenerator{newRangeInt64Gener(10, 100), &constStrGener{"Y"}, &constStrGener{"N"}, &constStrGener{","}},
},
{
retEvalType: types.ETString,
childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString, types.ETString, types.ETInt},
geners: []dataGenerator{newRangeInt64Gener(10, 100), &constStrGener{"Y"}, &constStrGener{"N"}, &constStrGener{","}, newRangeInt64Gener(-10, 70)},
},
},
ast.Repeat: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETInt}, geners: []dataGenerator{newRandLenStrGener(10, 20), newRangeInt64Gener(-10, 10)}},
},
ast.Lower: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}},
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{newSelectStringGener([]string{"one week’s time TEST", "one week's time TEST", "ABC测试DEF", "ABCテストABC"})}},
},
ast.IsNull: {
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{newRandLenStrGener(10, 20)}},
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{newDefaultGener(0.2, types.ETString)}},
},
ast.Upper: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}},
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{newSelectStringGener([]string{"one week’s time TEST", "one week's time TEST", "abc测试DeF", "AbCテストAbC"})}},
},
ast.Right: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETInt}},
		// BinaryFlag must be set on the field type to exercise the binary-string variant.
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETInt},
childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin},
{Tp: mysql.TypeLonglong}},
geners: []dataGenerator{
newRandLenStrGener(10, 20),
newRangeInt64Gener(-10, 20),
},
},
},
ast.Left: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETInt}},
		// BinaryFlag must be set on the field type to exercise the binary-string variant.
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETInt},
childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin},
{Tp: mysql.TypeLonglong}},
geners: []dataGenerator{
newRandLenStrGener(10, 20),
newRangeInt64Gener(-10, 20),
},
},
},
ast.Space: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETInt}, geners: []dataGenerator{newRangeInt64Gener(-10, 2000)}},
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETInt}, geners: []dataGenerator{newRangeInt64Gener(5, 10)}},
},
ast.Reverse: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{newRandLenStrGener(10, 20)}},
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{newDefaultGener(0.2, types.ETString)}},
		// BinaryFlag must be set on the field type to exercise the binary-string variant.
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString},
childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}},
},
},
ast.Instr: {
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETString}},
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETString}, geners: []dataGenerator{&constStrGener{"test,case"}, &constStrGener{"case"}}},
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETString}, geners: []dataGenerator{&constStrGener{"test,case"}, &constStrGener{"testcase"}}},
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETString},
childrenFieldTypes: []*types.FieldType{
{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin},
{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin},
},
},
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETString},
childrenFieldTypes: []*types.FieldType{
{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin},
{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin},
},
geners: []dataGenerator{&constStrGener{"test,case"}, &constStrGener{"case"}},
},
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETString},
childrenFieldTypes: []*types.FieldType{
{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin},
{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin},
},
geners: []dataGenerator{&constStrGener{"test,case"}, &constStrGener{""}},
},
},
ast.Replace: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, geners: []dataGenerator{newRandLenStrGener(10, 20), newRandLenStrGener(0, 10), newRandLenStrGener(0, 10)}},
},
ast.InsertFunc: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETInt, types.ETString}, geners: []dataGenerator{newRandLenStrGener(10, 20), newRangeInt64Gener(-10, 20), newRangeInt64Gener(0, 100), newRandLenStrGener(0, 10)}},
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETInt, types.ETString},
childrenFieldTypes: []*types.FieldType{
{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin},
{Tp: mysql.TypeLonglong},
{Tp: mysql.TypeLonglong},
{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin},
},
},
},
ast.Elt: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString, types.ETString}, geners: []dataGenerator{newRangeInt64Gener(-1, 5)}},
},
ast.FromUnixTime: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETDecimal, types.ETString},
geners: []dataGenerator{
gener{*newDefaultGener(0.9, types.ETDecimal)},
&constStrGener{"%y-%m-%d"},
},
},
},
ast.Strcmp: {
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETString}},
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETString}, geners: []dataGenerator{
newSelectStringGener(
[]string{
"test",
},
),
newSelectStringGener(
[]string{
"test",
},
),
}},
},
ast.Format: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETDecimal, types.ETInt}, geners: []dataGenerator{
newRangeDecimalGener(-10000, 10000, 0),
newRangeInt64Gener(-10, 40),
}},
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETReal, types.ETInt}, geners: []dataGenerator{
newRangeRealGener(-10000, 10000, 0),
newRangeInt64Gener(-10, 40),
}},
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETDecimal, types.ETInt}, geners: []dataGenerator{
newRangeDecimalGener(-10000, 10000, 1),
newRangeInt64Gener(-10, 40),
}},
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETReal, types.ETInt}, geners: []dataGenerator{
newRangeRealGener(-10000, 10000, 1),
newRangeInt64Gener(-10, 40),
}},
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETString}, geners: []dataGenerator{
newRealStringGener(),
&numStrGener{*newRangeInt64Gener(-10, 40)},
}},
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETDecimal, types.ETInt, types.ETString}, geners: []dataGenerator{
newRangeDecimalGener(-10000, 10000, 0.5),
newRangeInt64Gener(-10, 40),
newNullWrappedGener(0.1, &constStrGener{"en_US"}),
}},
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETReal, types.ETInt, types.ETString}, geners: []dataGenerator{
newRangeRealGener(-10000, 10000, 0.5),
newRangeInt64Gener(-10, 40),
newNullWrappedGener(0.1, &constStrGener{"en_US"}),
}},
},
}
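// The tests below run every case in vecBuiltinStringCases through the
// vectorized evaluation helpers, and the benchmarks measure the same cases.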
func (s *testVectorizeSuite1) TestVectorizedBuiltinStringEvalOneVec(c *C) {
testVectorizedEvalOneVec(c, vecBuiltinStringCases)
}
func (s *testVectorizeSuite1) TestVectorizedBuiltinStringFunc(c *C) {
testVectorizedBuiltinFunc(c, vecBuiltinStringCases)
}
func BenchmarkVectorizedBuiltinStringEvalOneVec(b *testing.B) {
benchmarkVectorizedEvalOneVec(b, vecBuiltinStringCases)
}
func BenchmarkVectorizedBuiltinStringFunc(b *testing.B) {
benchmarkVectorizedBuiltinFunc(b, vecBuiltinStringCases)
}
| expression/builtin_string_vec_test.go | 0 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.0003573647409211844,
0.00017791900609154254,
0.00016706627502571791,
0.00017515887157060206,
0.00002475224027875811
] |
{
"id": 5,
"code_window": [
"\t\tif isGlobal, _ := config.GetTxnScopeFromConfig(); isGlobal {\n",
"\t\t\treturn oracle.GlobalTxnScope\n",
"\t\t}\n",
"\t\treturn oracle.LocalTxnScope\n",
"\t}()},\n",
"\t/* TiDB specific variables */\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowMPPExecution, Type: TypeBool, Value: BoolToOnOff(DefTiDBAllowMPPExecution)},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: TiDBBCJThresholdCount, Value: strconv.Itoa(DefBroadcastJoinThresholdCount), Type: TypeInt, MinValue: 0, MaxValue: math.MaxInt64, SetSession: func(s *SessionVars, val string) error {\n",
"\t\ts.BroadcastJoinThresholdCount = tidbOptInt64(val, DefBroadcastJoinThresholdCount)\n",
"\t\treturn nil\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/sysvar.go",
"type": "replace",
"edit_start_line_idx": 611
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package telemetry
import (
"bytes"
"context"
"encoding/json"
"net/http"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
"go.etcd.io/etcd/clientv3"
)
const (
// OwnerKey is the telemetry owner path that is saved to etcd.
OwnerKey = "/tidb/telemetry/owner"
// Prompt is the prompt for telemetry owner manager.
Prompt = "telemetry"
// ReportInterval is the interval of the report.
ReportInterval = 6 * time.Hour
)
const (
etcdOpTimeout = 3 * time.Second
uploadTimeout = 60 * time.Second
apiEndpoint = "https://telemetry.pingcap.com/api/v1/tidb/report"
)
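// getTelemetryGlobalVariable reads the TiDBEnableTelemetry global system
// variable and reports whether it is switched on.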
func getTelemetryGlobalVariable(ctx sessionctx.Context) (bool, error) {
val, err := ctx.GetSessionVars().GlobalVarsAccessor.GetGlobalSysVar(variable.TiDBEnableTelemetry)
return variable.TiDBOptOn(val), err
}
// IsTelemetryEnabled checks whether telemetry is enabled.
func IsTelemetryEnabled(ctx sessionctx.Context) (bool, error) {
if !config.GetGlobalConfig().EnableTelemetry {
return false, nil
}
enabled, err := getTelemetryGlobalVariable(ctx)
if err != nil {
return false, errors.Trace(err)
}
return enabled, nil
}
// PreviewUsageData returns a preview of the usage data that is going to be reported.
func PreviewUsageData(ctx sessionctx.Context, etcdClient *clientv3.Client) (string, error) {
if etcdClient == nil {
return "", nil
}
if enabled, err := IsTelemetryEnabled(ctx); err != nil || !enabled {
return "", err
}
trackingID, err := GetTrackingID(etcdClient)
if err != nil {
return "", errors.Trace(err)
}
	// NOTE: trackingID may be empty. However, as preview data, this is fine.
data := generateTelemetryData(ctx, trackingID)
prettyJSON, err := json.MarshalIndent(data, "", " ")
if err != nil {
return "", errors.Trace(err)
}
return string(prettyJSON), nil
}
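// reportUsageData builds the telemetry payload and uploads it to the report
// API endpoint. The returned bool indicates whether a request was actually
// sent; it is false with a nil error when no etcd client is available.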
func reportUsageData(ctx sessionctx.Context, etcdClient *clientv3.Client) (bool, error) {
if etcdClient == nil {
// silently ignore
return false, nil
}
enabled, err := IsTelemetryEnabled(ctx)
if err != nil {
return false, err
}
if !enabled {
return false, errors.Errorf("telemetry is disabled")
}
trackingID, err := GetTrackingID(etcdClient)
if err != nil {
return false, errors.Trace(err)
}
if len(trackingID) == 0 {
trackingID, err = ResetTrackingID(etcdClient)
if err != nil {
return false, errors.Trace(err)
}
}
data := generateTelemetryData(ctx, trackingID)
postReportTelemetryData()
rawJSON, err := json.Marshal(data)
if err != nil {
return false, errors.Trace(err)
}
	// TODO: We should use the context from the domain, so that when a request
	// is blocked for a long time it will not affect TiDB shutdown.
reqCtx, cancel := context.WithTimeout(context.Background(), uploadTimeout)
defer cancel()
req, err := http.NewRequestWithContext(reqCtx, "POST", apiEndpoint, bytes.NewReader(rawJSON))
if err != nil {
return false, errors.Trace(err)
}
req.Header.Add("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return false, errors.Trace(err)
}
err = resp.Body.Close() // We don't even want to know any response body. Just close it.
_ = err
if resp.StatusCode != http.StatusOK {
return false, errors.Errorf("Received non-Ok response when reporting usage data, http code: %d", resp.StatusCode)
}
return true, nil
}
// ReportUsageData generates the latest usage data and sends it to PingCAP. Status will be saved to etcd. Status update failures will be returned.
func ReportUsageData(ctx sessionctx.Context, etcdClient *clientv3.Client) error {
if etcdClient == nil {
// silently ignore
return nil
}
s := status{
CheckAt: time.Now().Format(time.RFC3339),
}
reported, err := reportUsageData(ctx, etcdClient)
if err != nil {
s.IsError = true
s.ErrorMessage = err.Error()
} else {
s.IsRequestSent = reported
}
return updateTelemetryStatus(s, etcdClient)
}
| telemetry/telemetry.go | 0 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.00048218719894066453,
0.00020356276945676655,
0.00016279894043691456,
0.00017173701780848205,
0.00008732963033253327
] |
{
"id": 5,
"code_window": [
"\t\tif isGlobal, _ := config.GetTxnScopeFromConfig(); isGlobal {\n",
"\t\t\treturn oracle.GlobalTxnScope\n",
"\t\t}\n",
"\t\treturn oracle.LocalTxnScope\n",
"\t}()},\n",
"\t/* TiDB specific variables */\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowMPPExecution, Type: TypeBool, Value: BoolToOnOff(DefTiDBAllowMPPExecution)},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: TiDBBCJThresholdCount, Value: strconv.Itoa(DefBroadcastJoinThresholdCount), Type: TypeInt, MinValue: 0, MaxValue: math.MaxInt64, SetSession: func(s *SessionVars, val string) error {\n",
"\t\ts.BroadcastJoinThresholdCount = tidbOptInt64(val, DefBroadcastJoinThresholdCount)\n",
"\t\treturn nil\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/sysvar.go",
"type": "replace",
"edit_start_line_idx": 611
} | # Code Review Guide
## Things to do before you start reviewing the PR
* Make sure you are familiar with the packages the PR modifies.
* Make sure you have enough continuous time to review the PR; use 300 LOC per hour to estimate.
* Make sure you can follow the updates of the PR in the next few work days.
* Read the description of the PR; if it's not easy to understand, ask the coder to improve it.
* For a bug fix PR, if there is no test case, ask the coder to add tests.
* For a performance PR, if no benchmark result is provided, ask the coder to add a benchmark result.
## Things to check during the review process
* Am I able to understand the purpose of each unit test?
* Do unit tests actually test that the code is performing the intended functionality?
* Do unit tests cover all the important code blocks and specially handled errors?
* Could procedure tests be rewritten to table driven tests?
* Is the code written following the style guide?
* Is the same code duplicated more than twice?
* Do comments exist and describe the intent of the code?
* Are hacks, workarounds and temporary fixes commented?
* Does this function do more than the name suggests?
* Can this function's behavior be inferred by its name?
* Do tests exist and are they comprehensive?
* Do unit tests cover all the important code branches?
* Could the test code be extracted into a table-driven test? (See the sketch below.)
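A minimal sketch of the table-driven style the two questions above refer to, using the standard library's testing package (the `add` function and its cases are hypothetical stand-ins):

```go
package demo

import "testing"

// add is a stand-in for whatever function the reviewed test exercises.
func add(a, b int) int { return a + b }

func TestAdd(t *testing.T) {
	// One row per case: extending coverage means appending a row rather
	// than duplicating assertion boilerplate.
	cases := []struct {
		name string
		a, b int
		want int
	}{
		{"zeros", 0, 0, 0},
		{"positive", 1, 2, 3},
		{"negative", -1, -2, -3},
	}
	for _, c := range cases {
		if got := add(c.a, c.b); got != c.want {
			t.Errorf("%s: add(%d, %d) = %d, want %d", c.name, c.a, c.b, got, c.want)
		}
	}
}
```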
## Things to keep in mind when you are writing a review comment
* Be kind to the coder, not to the code.
* Ask questions rather than make statements.
* Treat people who know less than you with respect, deference, and patience.
* Remember to praise when the code quality exceeds your expectation.
* It isn't necessarily wrong if the coder's solution is different than yours.
* Refer to the code style document when necessary.
## Things to remember after you submitted the review comment
* Check out GitHub notifications regularly to keep track of the updates of the PR.
* When the PR has been updated, start another round of review or give it an LGTM.
| code_review_guide.md | 0 | https://github.com/pingcap/tidb/commit/80edc8cd20b452089a26b817b4b450b7f31db77f | [
0.00017471634782850742,
0.00017270589887630194,
0.00017020963423419744,
0.00017329842376057059,
0.0000014575730347132776
] |
{
"id": 0,
"code_window": [
"\n",
"\tconfigReqs := configDeps.AllPluginRequirements()\n",
"\t// FIXME: This is weird because ConfigTreeDependencies was written before\n",
"\t// we switched over to using earlyConfig as the main source of dependencies.\n",
"\t// In future we should clean this up to be a more reasoable API.\n",
"\tstateReqs := terraform.ConfigTreeDependencies(nil, state).AllPluginRequirements()\n",
"\n",
"\trequirements := configReqs.Merge(stateReqs)\n",
"\tif len(requirements) == 0 {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// In future we should clean this up to be a more reasonable API.\n"
],
"file_path": "command/init.go",
"type": "replace",
"edit_start_line_idx": 498
} | package command
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
plugin "github.com/hashicorp/go-plugin"
"github.com/kardianos/osext"
"github.com/hashicorp/terraform/addrs"
terraformProvider "github.com/hashicorp/terraform/builtin/providers/terraform"
tfplugin "github.com/hashicorp/terraform/plugin"
"github.com/hashicorp/terraform/plugin/discovery"
"github.com/hashicorp/terraform/providers"
"github.com/hashicorp/terraform/provisioners"
"github.com/hashicorp/terraform/terraform"
)
// multiVersionProviderResolver is an implementation of
// terraform.ResourceProviderResolver that matches the given version constraints
// against a set of versioned provider plugins to find the newest version of
// each that satisfies the given constraints.
type multiVersionProviderResolver struct {
Available discovery.PluginMetaSet
// Internal is a map that overrides the usual plugin selection process
// for internal plugins. These plugins do not support version constraints
// (will produce an error if one is set). This should be used only in
// exceptional circumstances since it forces the provider's release
// schedule to be tied to that of Terraform Core.
Internal map[addrs.Provider]providers.Factory
}
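// choosePlugins resolves the given plugin requirements against the available
// metas, selecting the newest version of each named provider that satisfies
// its version constraint. Providers present in the internal map are skipped
// here, since they are wired up separately and never come from discovery.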
func choosePlugins(avail discovery.PluginMetaSet, internal map[addrs.Provider]providers.Factory, reqd discovery.PluginRequirements) map[string]discovery.PluginMeta {
candidates := avail.ConstrainVersions(reqd)
ret := map[string]discovery.PluginMeta{}
for name, metas := range candidates {
// If the provider is in our internal map then we ignore any
// discovered plugins for it since these are dealt with separately.
if _, isInternal := internal[addrs.NewLegacyProvider(name)]; isInternal {
continue
}
if len(metas) == 0 {
continue
}
ret[name] = metas.Newest()
}
return ret
}
func (r *multiVersionProviderResolver) ResolveProviders(
reqd discovery.PluginRequirements,
) (map[addrs.Provider]providers.Factory, []error) {
factories := make(map[addrs.Provider]providers.Factory, len(reqd))
var errs []error
chosen := choosePlugins(r.Available, r.Internal, reqd)
for name, req := range reqd {
if factory, isInternal := r.Internal[addrs.NewLegacyProvider(name)]; isInternal {
if !req.Versions.Unconstrained() {
errs = append(errs, fmt.Errorf("provider.%s: this provider is built in to Terraform and so it does not support version constraints", name))
continue
}
factories[addrs.NewLegacyProvider(name)] = factory
continue
}
if newest, available := chosen[name]; available {
digest, err := newest.SHA256()
if err != nil {
errs = append(errs, fmt.Errorf("provider.%s: failed to load plugin to verify its signature: %s", name, err))
continue
}
if !reqd[name].AcceptsSHA256(digest) {
errs = append(errs, fmt.Errorf("provider.%s: new or changed plugin executable", name))
continue
}
factories[addrs.NewLegacyProvider(name)] = providerFactory(newest)
} else {
msg := fmt.Sprintf("provider.%s: no suitable version installed", name)
required := req.Versions.String()
// no version is unconstrained
if required == "" {
required = "(any version)"
}
foundVersions := []string{}
for meta := range r.Available.WithName(name) {
foundVersions = append(foundVersions, fmt.Sprintf("%q", meta.Version))
}
found := "none"
if len(foundVersions) > 0 {
found = strings.Join(foundVersions, ", ")
}
msg += fmt.Sprintf("\n version requirements: %q\n versions installed: %s", required, found)
errs = append(errs, errors.New(msg))
}
}
return factories, errs
}
// store the user-supplied path for plugin discovery
func (m *Meta) storePluginPath(pluginPath []string) error {
if len(pluginPath) == 0 {
return nil
}
path := filepath.Join(m.DataDir(), PluginPathFile)
// remove the plugin dir record if the path was set to an empty string
if len(pluginPath) == 1 && (pluginPath[0] == "") {
err := os.Remove(path)
if !os.IsNotExist(err) {
return err
}
return nil
}
js, err := json.MarshalIndent(pluginPath, "", " ")
if err != nil {
return err
}
// if this fails, so will WriteFile
os.MkdirAll(m.DataDir(), 0755)
return ioutil.WriteFile(path, js, 0644)
}
// Load the user-defined plugin search path into Meta.pluginPath if the file
// exists.
func (m *Meta) loadPluginPath() ([]string, error) {
js, err := ioutil.ReadFile(filepath.Join(m.DataDir(), PluginPathFile))
if os.IsNotExist(err) {
return nil, nil
}
if err != nil {
return nil, err
}
var pluginPath []string
if err := json.Unmarshal(js, &pluginPath); err != nil {
return nil, err
}
return pluginPath, nil
}
// the default location for automatically installed plugins
func (m *Meta) pluginDir() string {
return filepath.Join(m.DataDir(), "plugins", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH))
}
// pluginDirs return a list of directories to search for plugins.
//
// Earlier entries in this slice get priority over later when multiple copies
// of the same plugin version are found, but newer versions always override
// older versions where both satisfy the provider version constraints.
func (m *Meta) pluginDirs(includeAutoInstalled bool) []string {
// user defined paths take precedence
if len(m.pluginPath) > 0 {
return m.pluginPath
}
// When searching the following directories, earlier entries get precedence
// if the same plugin version is found twice, but newer versions will
// always get preference below regardless of where they are coming from.
// TODO: Add auto-install dir, default vendor dir and optional override
// vendor dir(s).
dirs := []string{"."}
// Look in the same directory as the Terraform executable.
// If found, this replaces what we found in the config path.
exePath, err := osext.Executable()
if err != nil {
log.Printf("[ERROR] Error discovering exe directory: %s", err)
} else {
dirs = append(dirs, filepath.Dir(exePath))
}
// add the user vendor directory
dirs = append(dirs, DefaultPluginVendorDir)
if includeAutoInstalled {
dirs = append(dirs, m.pluginDir())
}
dirs = append(dirs, m.GlobalPluginDirs...)
return dirs
}
func (m *Meta) pluginCache() discovery.PluginCache {
dir := m.PluginCacheDir
if dir == "" {
return nil // cache disabled
}
dir = filepath.Join(dir, pluginMachineName)
return discovery.NewLocalPluginCache(dir)
}
// providerPluginSet returns the set of valid providers that were discovered in
// the defined search paths.
func (m *Meta) providerPluginSet() discovery.PluginMetaSet {
plugins := discovery.FindPlugins("provider", m.pluginDirs(true))
// Add providers defined in the legacy .terraformrc,
if m.PluginOverrides != nil {
for k, v := range m.PluginOverrides.Providers {
log.Printf("[DEBUG] found plugin override in .terraformrc: %q, %q", k, v)
}
plugins = plugins.OverridePaths(m.PluginOverrides.Providers)
}
plugins, _ = plugins.ValidateVersions()
for p := range plugins {
log.Printf("[DEBUG] found valid plugin: %q, %q, %q", p.Name, p.Version, p.Path)
}
return plugins
}
// providerPluginAutoInstalledSet returns the set of providers that exist
// within the auto-install directory.
func (m *Meta) providerPluginAutoInstalledSet() discovery.PluginMetaSet {
plugins := discovery.FindPlugins("provider", []string{m.pluginDir()})
plugins, _ = plugins.ValidateVersions()
for p := range plugins {
log.Printf("[DEBUG] found valid plugin: %q", p.Name)
}
return plugins
}
// providerPluginManuallyInstalledSet returns the set of providers that exist
// in all locations *except* the auto-install directory.
func (m *Meta) providerPluginManuallyInstalledSet() discovery.PluginMetaSet {
plugins := discovery.FindPlugins("provider", m.pluginDirs(false))
// Add providers defined in the legacy .terraformrc,
if m.PluginOverrides != nil {
for k, v := range m.PluginOverrides.Providers {
log.Printf("[DEBUG] found plugin override in .terraformrc: %q, %q", k, v)
}
plugins = plugins.OverridePaths(m.PluginOverrides.Providers)
}
plugins, _ = plugins.ValidateVersions()
for p := range plugins {
log.Printf("[DEBUG] found valid plugin: %q, %q, %q", p.Name, p.Version, p.Path)
}
return plugins
}
func (m *Meta) providerResolver() providers.Resolver {
return &multiVersionProviderResolver{
Available: m.providerPluginSet(),
Internal: m.internalProviders(),
}
}
func (m *Meta) internalProviders() map[addrs.Provider]providers.Factory {
return map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("terraform"): func() (providers.Interface, error) {
return terraformProvider.NewProvider(), nil
},
}
}
// filter the requirements returning only the providers that we can't resolve
func (m *Meta) missingPlugins(avail discovery.PluginMetaSet, reqd discovery.PluginRequirements) discovery.PluginRequirements {
missing := make(discovery.PluginRequirements)
candidates := avail.ConstrainVersions(reqd)
internal := m.internalProviders()
for name, versionSet := range reqd {
// internal providers can't be missing
if _, ok := internal[addrs.NewLegacyProvider(name)]; ok {
continue
}
log.Printf("[DEBUG] plugin requirements: %q=%q", name, versionSet.Versions)
if metas := candidates[name]; metas.Count() == 0 {
missing[name] = versionSet
}
}
return missing
}
func (m *Meta) provisionerFactories() map[string]terraform.ProvisionerFactory {
dirs := m.pluginDirs(true)
plugins := discovery.FindPlugins("provisioner", dirs)
plugins, _ = plugins.ValidateVersions()
// For now our goal is to just find the latest version of each plugin
// we have on the system. All provisioners should be at version 0.0.0
// currently, so there should actually only be one instance of each plugin
// name here, even though the discovery interface forces us to pretend
// that might not be true.
factories := make(map[string]terraform.ProvisionerFactory)
// Wire up the internal provisioners first. These might be overridden
// by discovered provisioners below.
for name := range InternalProvisioners {
factories[name] = internalProvisionerFactory(discovery.PluginMeta{Name: name})
}
byName := plugins.ByName()
for name, metas := range byName {
// Since we validated versions above and we partitioned the sets
// by name, we're guaranteed that the metas in our set all have
// valid versions and that there's at least one meta.
newest := metas.Newest()
factories[name] = provisionerFactory(newest)
}
return factories
}
func internalPluginClient(kind, name string) (*plugin.Client, error) {
cmdLine, err := BuildPluginCommandString(kind, name)
if err != nil {
return nil, err
}
// See the docstring for BuildPluginCommandString for why we need to do
// this split here.
cmdArgv := strings.Split(cmdLine, TFSPACE)
cfg := &plugin.ClientConfig{
Cmd: exec.Command(cmdArgv[0], cmdArgv[1:]...),
HandshakeConfig: tfplugin.Handshake,
Managed: true,
VersionedPlugins: tfplugin.VersionedPlugins,
AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},
}
return plugin.NewClient(cfg), nil
}
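// providerFactory returns a providers.Factory that launches the plugin binary
// described by meta and dispenses it as a gRPC-backed providers.Interface.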
func providerFactory(meta discovery.PluginMeta) providers.Factory {
return func() (providers.Interface, error) {
client := tfplugin.Client(meta)
// Request the RPC client so we can get the provider
// so we can build the actual RPC-implemented provider.
rpcClient, err := client.Client()
if err != nil {
return nil, err
}
raw, err := rpcClient.Dispense(tfplugin.ProviderPluginName)
if err != nil {
return nil, err
}
// store the client so that the plugin can kill the child process
p := raw.(*tfplugin.GRPCProvider)
p.PluginClient = client
return p, nil
}
}
func provisionerFactory(meta discovery.PluginMeta) terraform.ProvisionerFactory {
return func() (provisioners.Interface, error) {
client := tfplugin.Client(meta)
return newProvisionerClient(client)
}
}
func internalProvisionerFactory(meta discovery.PluginMeta) terraform.ProvisionerFactory {
return func() (provisioners.Interface, error) {
client, err := internalPluginClient("provisioner", meta.Name)
if err != nil {
return nil, fmt.Errorf("[WARN] failed to build command line for internal plugin %q: %s", meta.Name, err)
}
return newProvisionerClient(client)
}
}
func newProvisionerClient(client *plugin.Client) (provisioners.Interface, error) {
// Request the RPC client so we can get the provisioner
// so we can build the actual RPC-implemented provisioner.
rpcClient, err := client.Client()
if err != nil {
return nil, err
}
raw, err := rpcClient.Dispense(tfplugin.ProvisionerPluginName)
if err != nil {
return nil, err
}
// store the client so that the plugin can kill the child process
p := raw.(*tfplugin.GRPCProvisioner)
p.PluginClient = client
return p, nil
}
| command/plugins.go | 1 | https://github.com/hashicorp/terraform/commit/efafadbe5edbfd8faa250aa1eaed46fbb2d52ee2 | [
0.0032794338185340166,
0.0003028864157386124,
0.00015979348972905427,
0.00017221255984622985,
0.0004889043048024178
] |
{
"id": 0,
"code_window": [
"\n",
"\tconfigReqs := configDeps.AllPluginRequirements()\n",
"\t// FIXME: This is weird because ConfigTreeDependencies was written before\n",
"\t// we switched over to using earlyConfig as the main source of dependencies.\n",
"\t// In future we should clean this up to be a more reasoable API.\n",
"\tstateReqs := terraform.ConfigTreeDependencies(nil, state).AllPluginRequirements()\n",
"\n",
"\trequirements := configReqs.Merge(stateReqs)\n",
"\tif len(requirements) == 0 {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// In future we should clean this up to be a more reasonable API.\n"
],
"file_path": "command/init.go",
"type": "replace",
"edit_start_line_idx": 498
} | package addressscopes
import (
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/pagination"
)
// ListOptsBuilder allows extensions to add additional parameters to the
// List request.
type ListOptsBuilder interface {
ToAddressScopeListQuery() (string, error)
}
// ListOpts allows the filtering and sorting of paginated collections through
// the Neutron API. Filtering is achieved by passing in struct field values
// that map to the address-scope attributes you want to see returned.
// SortKey allows you to sort by a particular address-scope attribute.
// SortDir sets the direction, and is either `asc' or `desc'.
// Marker and Limit are used for the pagination.
type ListOpts struct {
ID string `q:"id"`
Name string `q:"name"`
TenantID string `q:"tenant_id"`
ProjectID string `q:"project_id"`
IPVersion int `q:"ip_version"`
Shared *bool `q:"shared"`
Description string `q:"description"`
Limit int `q:"limit"`
Marker string `q:"marker"`
SortKey string `q:"sort_key"`
SortDir string `q:"sort_dir"`
}
// ToAddressScopeListQuery formats a ListOpts into a query string.
func (opts ListOpts) ToAddressScopeListQuery() (string, error) {
q, err := gophercloud.BuildQueryString(opts)
return q.String(), err
}
// List returns a Pager which allows you to iterate over a collection of
// address-scopes. It accepts a ListOpts struct, which allows you to filter and
// sort the returned collection for greater efficiency.
//
// Default policy settings return only the address-scopes owned by the project
// of the user submitting the request, unless the user has the administrative
// role.
func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
url := listURL(c)
if opts != nil {
query, err := opts.ToAddressScopeListQuery()
if err != nil {
return pagination.Pager{Err: err}
}
url += query
}
return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
return AddressScopePage{pagination.LinkedPageBase{PageResult: r}}
})
}
// Get retrieves a specific address-scope based on its ID.
func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
_, r.Err = c.Get(getURL(c, id), &r.Body, nil)
return
}
// CreateOptsBuilder allows extensions to add additional parameters to the
// Create request.
type CreateOptsBuilder interface {
ToAddressScopeCreateMap() (map[string]interface{}, error)
}
// CreateOpts specifies parameters of a new address-scope.
type CreateOpts struct {
// Name is the human-readable name of the address-scope.
Name string `json:"name"`
// TenantID is the id of the Identity project.
TenantID string `json:"tenant_id,omitempty"`
// ProjectID is the id of the Identity project.
ProjectID string `json:"project_id,omitempty"`
// IPVersion is the IP protocol version.
IPVersion int `json:"ip_version"`
// Shared indicates whether this address-scope is shared across all projects.
Shared bool `json:"shared,omitempty"`
}
// ToAddressScopeCreateMap constructs a request body from CreateOpts.
func (opts CreateOpts) ToAddressScopeCreateMap() (map[string]interface{}, error) {
return gophercloud.BuildRequestBody(opts, "address_scope")
}
// Create requests the creation of a new address-scope on the server.
func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
b, err := opts.ToAddressScopeCreateMap()
if err != nil {
r.Err = err
return
}
_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
OkCodes: []int{201},
})
return
}
// UpdateOptsBuilder allows extensions to add additional parameters to the
// Update request.
type UpdateOptsBuilder interface {
ToAddressScopeUpdateMap() (map[string]interface{}, error)
}
// UpdateOpts represents options used to update an address-scope.
type UpdateOpts struct {
// Name is the human-readable name of the address-scope.
Name *string `json:"name,omitempty"`
// Shared indicates whether this address-scope is shared across all projects.
Shared *bool `json:"shared,omitempty"`
}
// ToAddressScopeUpdateMap builds a request body from UpdateOpts.
func (opts UpdateOpts) ToAddressScopeUpdateMap() (map[string]interface{}, error) {
return gophercloud.BuildRequestBody(opts, "address_scope")
}
// Update accepts an UpdateOpts struct and updates an existing address-scope
// using the values provided.
func Update(c *gophercloud.ServiceClient, addressScopeID string, opts UpdateOptsBuilder) (r UpdateResult) {
b, err := opts.ToAddressScopeUpdateMap()
if err != nil {
r.Err = err
return
}
_, r.Err = c.Put(updateURL(c, addressScopeID), b, &r.Body, &gophercloud.RequestOpts{
OkCodes: []int{200},
})
return
}
// Delete accepts a unique ID and deletes the address-scope associated with it.
func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
_, r.Err = c.Delete(deleteURL(c, id), nil)
return
}
| vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/addressscopes/requests.go | 0 | https://github.com/hashicorp/terraform/commit/efafadbe5edbfd8faa250aa1eaed46fbb2d52ee2 | [
0.0002456025977153331,
0.0001745590998325497,
0.00016284181037917733,
0.00017091823974624276,
0.00001927456287376117
] |
{
"id": 0,
"code_window": [
"\n",
"\tconfigReqs := configDeps.AllPluginRequirements()\n",
"\t// FIXME: This is weird because ConfigTreeDependencies was written before\n",
"\t// we switched over to using earlyConfig as the main source of dependencies.\n",
"\t// In future we should clean this up to be a more reasoable API.\n",
"\tstateReqs := terraform.ConfigTreeDependencies(nil, state).AllPluginRequirements()\n",
"\n",
"\trequirements := configReqs.Merge(stateReqs)\n",
"\tif len(requirements) == 0 {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// In future we should clean this up to be a more reasonable API.\n"
],
"file_path": "command/init.go",
"type": "replace",
"edit_start_line_idx": 498
} | // Package resources implements the Azure ARM Resources service API version 2016-02-01.
//
//
package resources
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/Azure/go-autorest/autorest"
)
const (
// DefaultBaseURI is the default URI used for the service Resources
DefaultBaseURI = "https://management.azure.com"
)
// BaseClient is the base client for Resources.
type BaseClient struct {
autorest.Client
BaseURI string
SubscriptionID string
}
// New creates an instance of the BaseClient client.
func New(subscriptionID string) BaseClient {
return NewWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewWithBaseURI creates an instance of the BaseClient client.
func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
return BaseClient{
Client: autorest.NewClientWithUserAgent(UserAgent()),
BaseURI: baseURI,
SubscriptionID: subscriptionID,
}
}
| vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/client.go | 0 | https://github.com/hashicorp/terraform/commit/efafadbe5edbfd8faa250aa1eaed46fbb2d52ee2 | [
0.00017864888650365174,
0.0001709410425974056,
0.00016170437447726727,
0.0001719887659419328,
0.000006760654287063517
] |
{
"id": 0,
"code_window": [
"\n",
"\tconfigReqs := configDeps.AllPluginRequirements()\n",
"\t// FIXME: This is weird because ConfigTreeDependencies was written before\n",
"\t// we switched over to using earlyConfig as the main source of dependencies.\n",
"\t// In future we should clean this up to be a more reasoable API.\n",
"\tstateReqs := terraform.ConfigTreeDependencies(nil, state).AllPluginRequirements()\n",
"\n",
"\trequirements := configReqs.Merge(stateReqs)\n",
"\tif len(requirements) == 0 {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// In future we should clean this up to be a more reasonable API.\n"
],
"file_path": "command/init.go",
"type": "replace",
"edit_start_line_idx": 498
} | | json type \ dest type | bool | int | uint | float |string|
| --- | --- | --- | --- |--|--|
| number | positive => true <br/> negative => true <br/> zero => false| 23.2 => 23 <br/> -32.1 => -32| 12.1 => 12 <br/> -12.1 => 0|as normal|same as origin|
| string | empty string => false <br/> string "0" => false <br/> other strings => true | "123.32" => 123 <br/> "-123.4" => -123 <br/> "123.23xxxw" => 123 <br/> "abcde12" => 0 <br/> "-32.1" => -32| 13.2 => 13 <br/> -1.1 => 0 |12.1 => 12.1 <br/> -12.3 => -12.3<br/> 12.4xxa => 12.4 <br/> +1.1e2 =>110 |same as origin|
| bool | true => true <br/> false => false| true => 1 <br/> false => 0 | true => 1 <br/> false => 0 |true => 1 <br/>false => 0|true => "true" <br/> false => "false"|
| object | true | 0 | 0 |0|originnal json|
| array | empty array => false <br/> nonempty array => true| [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 |[] => 0<br/>[1,2] => 1|original json| | vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md | 0 | https://github.com/hashicorp/terraform/commit/efafadbe5edbfd8faa250aa1eaed46fbb2d52ee2 | [
0.0001686109317233786,
0.0001686109317233786,
0.0001686109317233786,
0.0001686109317233786,
0
] |
{
"id": 1,
"code_window": [
"\tavailable = c.providerPluginSet() // re-discover to see newly-installed plugins\n",
"\n",
"\t// internal providers were already filtered out, since we don't need to get them.\n",
"\tchosen := choosePlugins(available, nil, requirements)\n",
"\n",
"\tdigests := map[string][]byte{}\n",
"\tfor name, meta := range chosen {\n",
"\t\tdigest, err := meta.SHA256()\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tchosen := chooseProviders(available, nil, requirements)\n"
],
"file_path": "command/init.go",
"type": "replace",
"edit_start_line_idx": 599
} | package command
import (
"fmt"
"log"
"os"
"sort"
"strings"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/terraform-config-inspect/tfconfig"
"github.com/posener/complete"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/backend"
backendInit "github.com/hashicorp/terraform/backend/init"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/configs/configupgrade"
"github.com/hashicorp/terraform/internal/earlyconfig"
"github.com/hashicorp/terraform/internal/initwd"
"github.com/hashicorp/terraform/plugin/discovery"
"github.com/hashicorp/terraform/states"
"github.com/hashicorp/terraform/terraform"
"github.com/hashicorp/terraform/tfdiags"
)
// InitCommand is a Command implementation that takes a Terraform
// module and clones it to the working directory.
type InitCommand struct {
Meta
// getPlugins is for the -get-plugins flag
getPlugins bool
// providerInstaller is used to download and install providers that
// aren't found locally. This uses a discovery.ProviderInstaller instance
// by default, but it can be overridden here as a way to mock fetching
// providers for tests.
providerInstaller discovery.Installer
}
func (c *InitCommand) Run(args []string) int {
var flagFromModule string
var flagBackend, flagGet, flagUpgrade bool
var flagPluginPath FlagStringSlice
var flagVerifyPlugins bool
flagConfigExtra := newRawFlags("-backend-config")
args, err := c.Meta.process(args, false)
if err != nil {
return 1
}
cmdFlags := c.Meta.extendedFlagSet("init")
cmdFlags.BoolVar(&flagBackend, "backend", true, "")
cmdFlags.Var(flagConfigExtra, "backend-config", "")
cmdFlags.StringVar(&flagFromModule, "from-module", "", "copy the source of the given module into the directory before init")
cmdFlags.BoolVar(&flagGet, "get", true, "")
cmdFlags.BoolVar(&c.getPlugins, "get-plugins", true, "")
cmdFlags.BoolVar(&c.forceInitCopy, "force-copy", false, "suppress prompts about copying state data")
cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state")
cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout")
cmdFlags.BoolVar(&c.reconfigure, "reconfigure", false, "reconfigure")
cmdFlags.BoolVar(&flagUpgrade, "upgrade", false, "")
cmdFlags.Var(&flagPluginPath, "plugin-dir", "plugin directory")
cmdFlags.BoolVar(&flagVerifyPlugins, "verify-plugins", true, "verify plugins")
cmdFlags.Usage = func() { c.Ui.Error(c.Help()) }
if err := cmdFlags.Parse(args); err != nil {
return 1
}
var diags tfdiags.Diagnostics
if len(flagPluginPath) > 0 {
c.pluginPath = flagPluginPath
c.getPlugins = false
}
// set providerInstaller if we don't have a test version already
if c.providerInstaller == nil {
c.providerInstaller = &discovery.ProviderInstaller{
Dir: c.pluginDir(),
Cache: c.pluginCache(),
PluginProtocolVersion: discovery.PluginInstallProtocolVersion,
SkipVerify: !flagVerifyPlugins,
Ui: c.Ui,
Services: c.Services,
}
}
// Validate the arg count
args = cmdFlags.Args()
if len(args) > 1 {
c.Ui.Error("The init command expects at most one argument.\n")
cmdFlags.Usage()
return 1
}
if err := c.storePluginPath(c.pluginPath); err != nil {
c.Ui.Error(fmt.Sprintf("Error saving -plugin-path values: %s", err))
return 1
}
// Get our pwd. We don't always need it but always getting it is easier
// than the logic to determine if it is or isn't needed.
pwd, err := os.Getwd()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error getting pwd: %s", err))
return 1
}
// If an argument is provided then it overrides our working directory.
path := pwd
if len(args) == 1 {
path = args[0]
}
// This will track whether we outputted anything so that we know whether
// to output a newline before the success message
var header bool
if flagFromModule != "" {
src := flagFromModule
empty, err := configs.IsEmptyDir(path)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error validating destination directory: %s", err))
return 1
}
if !empty {
c.Ui.Error(strings.TrimSpace(errInitCopyNotEmpty))
return 1
}
c.Ui.Output(c.Colorize().Color(fmt.Sprintf(
"[reset][bold]Copying configuration[reset] from %q...", src,
)))
header = true
hooks := uiModuleInstallHooks{
Ui: c.Ui,
ShowLocalPaths: false, // since they are in a weird location for init
}
initDiags := c.initDirFromModule(path, src, hooks)
diags = diags.Append(initDiags)
if initDiags.HasErrors() {
c.showDiagnostics(diags)
return 1
}
c.Ui.Output("")
}
// If our directory is empty, then we're done. We can't get or setup
// the backend with an empty directory.
empty, err := configs.IsEmptyDir(path)
if err != nil {
diags = diags.Append(fmt.Errorf("Error checking configuration: %s", err))
return 1
}
if empty {
c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputInitEmpty)))
return 0
}
// Before we do anything else, we'll try loading configuration with both
// our "normal" and "early" configuration codepaths. If early succeeds
// while normal fails, that strongly suggests that the configuration is
// using syntax that worked in 0.11 but no longer in 0.12, which requires
// some special behavior here to get the directory initialized just enough
// to run "terraform 0.12upgrade".
//
// FIXME: Once we reach 0.13 and remove 0.12upgrade, we should rework this
// so that we first use the early config to do a general compatibility
// check with dependencies, producing version-oriented error messages if
// dependencies aren't right, and only then use the real loader to deal
// with the backend configuration.
rootMod, confDiags := c.loadSingleModule(path)
rootModEarly, earlyConfDiags := c.loadSingleModuleEarly(path)
configUpgradeProbablyNeeded := false
if confDiags.HasErrors() {
if earlyConfDiags.HasErrors() {
// If both parsers produced errors then we'll assume the config
// is _truly_ invalid and produce error messages as normal.
// Since this may be the user's first ever interaction with Terraform,
// we'll provide some additional context in this case.
c.Ui.Error(strings.TrimSpace(errInitConfigError))
diags = diags.Append(confDiags)
c.showDiagnostics(diags)
return 1
}
// If _only_ the main loader produced errors then that suggests an
// upgrade may help. To give us more certainty here, we'll use the
// same heuristic that "terraform 0.12upgrade" uses to guess if a
// configuration has already been upgraded, to reduce the risk that
// we'll produce a misleading message if the problem is just a regular
// syntax error that the early loader just didn't catch.
sources, err := configupgrade.LoadModule(path)
if err == nil {
if already, _ := sources.MaybeAlreadyUpgraded(); already {
// Just report the errors as normal, then.
c.Ui.Error(strings.TrimSpace(errInitConfigError))
diags = diags.Append(confDiags)
c.showDiagnostics(diags)
return 1
}
}
configUpgradeProbablyNeeded = true
}
if earlyConfDiags.HasErrors() {
// If _only_ the early loader encountered errors then that's unusual
// (it should generally be a superset of the normal loader) but we'll
// return those errors anyway since otherwise we'll probably get
// some weird behavior downstream. Errors from the early loader are
// generally not as high-quality since it has less context to work with.
c.Ui.Error(strings.TrimSpace(errInitConfigError))
diags = diags.Append(earlyConfDiags)
c.showDiagnostics(diags)
return 1
}
if flagGet {
modsOutput, modsDiags := c.getModules(path, rootModEarly, flagUpgrade)
diags = diags.Append(modsDiags)
if modsDiags.HasErrors() {
c.showDiagnostics(diags)
return 1
}
if modsOutput {
header = true
}
}
// With all of the modules (hopefully) installed, we can now try to load
// the whole configuration tree.
//
// Just as above, we'll try loading both with the early and normal config
// loaders here. Subsequent work will only use the early config, but
// loading both gives us an opportunity to prefer the better error messages
// from the normal loader if both fail.
_, confDiags = c.loadConfig(path)
earlyConfig, earlyConfDiags := c.loadConfigEarly(path)
if confDiags.HasErrors() && !configUpgradeProbablyNeeded {
c.Ui.Error(strings.TrimSpace(errInitConfigError))
diags = diags.Append(confDiags)
c.showDiagnostics(diags)
return 1
}
if earlyConfDiags.HasErrors() {
c.Ui.Error(strings.TrimSpace(errInitConfigError))
diags = diags.Append(earlyConfDiags)
c.showDiagnostics(diags)
return 1
}
{
// Before we go further, we'll check to make sure none of the modules
// in the configuration declare that they don't support this Terraform
// version, so we can produce a version-related error message rather
// than potentially-confusing downstream errors.
versionDiags := initwd.CheckCoreVersionRequirements(earlyConfig)
diags = diags.Append(versionDiags)
if versionDiags.HasErrors() {
c.showDiagnostics(diags)
return 1
}
}
var back backend.Backend
if flagBackend {
switch {
case configUpgradeProbablyNeeded:
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Warning,
"Skipping backend initialization pending configuration upgrade",
// The "below" in this message is referring to the special
// note about running "terraform 0.12upgrade" that we'll
// print out at the end when configUpgradeProbablyNeeded is set.
"The root module configuration contains errors that may be fixed by running the configuration upgrade tool, so Terraform is skipping backend initialization. See below for more information.",
))
default:
be, backendOutput, backendDiags := c.initBackend(rootMod, flagConfigExtra)
diags = diags.Append(backendDiags)
if backendDiags.HasErrors() {
c.showDiagnostics(diags)
return 1
}
if backendOutput {
header = true
}
back = be
}
}
if back == nil {
// If we didn't initialize a backend then we'll try to at least
// instantiate one. This might fail if it wasn't already initialized
// by a previous run, so we must still expect that "back" may be nil
// in code that follows.
var backDiags tfdiags.Diagnostics
back, backDiags = c.Backend(nil)
if backDiags.HasErrors() {
// This is fine. We'll proceed with no backend, then.
back = nil
}
}
var state *states.State
// If we have a functional backend (either just initialized or initialized
// on a previous run) we'll use the current state as a potential source
// of provider dependencies.
if back != nil {
sMgr, err := back.StateMgr(c.Workspace())
if err != nil {
c.Ui.Error(fmt.Sprintf("Error loading state: %s", err))
return 1
}
if err := sMgr.RefreshState(); err != nil {
c.Ui.Error(fmt.Sprintf("Error refreshing state: %s", err))
return 1
}
state = sMgr.State()
}
if v := os.Getenv(ProviderSkipVerifyEnvVar); v != "" {
c.ignorePluginChecksum = true
}
// Now that we have loaded all modules, check the module tree for missing providers.
providersOutput, providerDiags := c.getProviders(earlyConfig, state, flagUpgrade)
diags = diags.Append(providerDiags)
if providerDiags.HasErrors() {
c.showDiagnostics(diags)
return 1
}
if providersOutput {
header = true
}
// If we outputted information, then we need to output a newline
// so that our success message is nicely spaced out from prior text.
if header {
c.Ui.Output("")
}
// If we accumulated any warnings along the way that weren't accompanied
// by errors then we'll output them here so that the success message is
// still the final thing shown.
c.showDiagnostics(diags)
if configUpgradeProbablyNeeded {
switch {
case c.RunningInAutomation:
c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputInitSuccessConfigUpgrade)))
default:
c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputInitSuccessConfigUpgradeCLI)))
}
return 0
}
c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputInitSuccess)))
if !c.RunningInAutomation {
// If we're not running in an automation wrapper, give the user
// some more detailed next steps that are appropriate for interactive
// shell usage.
c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputInitSuccessCLI)))
}
return 0
}
func (c *InitCommand) getModules(path string, earlyRoot *tfconfig.Module, upgrade bool) (output bool, diags tfdiags.Diagnostics) {
if len(earlyRoot.ModuleCalls) == 0 {
// Nothing to do
return false, nil
}
if upgrade {
c.Ui.Output(c.Colorize().Color(fmt.Sprintf("[reset][bold]Upgrading modules...")))
} else {
c.Ui.Output(c.Colorize().Color(fmt.Sprintf("[reset][bold]Initializing modules...")))
}
hooks := uiModuleInstallHooks{
Ui: c.Ui,
ShowLocalPaths: true,
}
instDiags := c.installModules(path, upgrade, hooks)
diags = diags.Append(instDiags)
// Since module installer has modified the module manifest on disk, we need
// to refresh the cache of it in the loader.
if c.configLoader != nil {
if err := c.configLoader.RefreshModules(); err != nil {
// Should never happen
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to read module manifest",
fmt.Sprintf("After installing modules, Terraform could not re-read the manifest of installed modules. This is a bug in Terraform. %s.", err),
))
}
}
return true, diags
}
func (c *InitCommand) initBackend(root *configs.Module, extraConfig rawFlags) (be backend.Backend, output bool, diags tfdiags.Diagnostics) {
c.Ui.Output(c.Colorize().Color(fmt.Sprintf("\n[reset][bold]Initializing the backend...")))
var backendConfig *configs.Backend
var backendConfigOverride hcl.Body
if root.Backend != nil {
backendType := root.Backend.Type
bf := backendInit.Backend(backendType)
if bf == nil {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unsupported backend type",
Detail: fmt.Sprintf("There is no backend type named %q.", backendType),
Subject: &root.Backend.TypeRange,
})
return nil, true, diags
}
b := bf()
backendSchema := b.ConfigSchema()
backendConfig = root.Backend
var overrideDiags tfdiags.Diagnostics
backendConfigOverride, overrideDiags = c.backendConfigOverrideBody(extraConfig, backendSchema)
diags = diags.Append(overrideDiags)
if overrideDiags.HasErrors() {
return nil, true, diags
}
} else {
// If the user supplied a -backend-config on the CLI but no backend
// block was found in the configuration, it's likely - but not
// necessarily - a mistake. Return a warning.
if !extraConfig.Empty() {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Warning,
"Missing backend configuration",
`-backend-config was used without a "backend" block in the configuration.
If you intended to override the default local backend configuration,
no action is required, but you may add an explicit backend block to your
configuration to clear this warning:
terraform {
backend "local" {}
}
However, if you intended to override a defined backend, please verify that
the backend configuration is present and valid.
`,
))
}
}
opts := &BackendOpts{
Config: backendConfig,
ConfigOverride: backendConfigOverride,
Init: true,
}
back, backDiags := c.Backend(opts)
diags = diags.Append(backDiags)
return back, true, diags
}
// Load the complete module tree, and fetch any missing providers.
// This method outputs its own Ui.
func (c *InitCommand) getProviders(earlyConfig *earlyconfig.Config, state *states.State, upgrade bool) (output bool, diags tfdiags.Diagnostics) {
var available discovery.PluginMetaSet
if upgrade {
// If we're in upgrade mode, we ignore any auto-installed plugins
// in "available", causing us to reinstall and possibly upgrade them.
available = c.providerPluginManuallyInstalledSet()
} else {
available = c.providerPluginSet()
}
configDeps, depsDiags := earlyConfig.ProviderDependencies()
diags = diags.Append(depsDiags)
if depsDiags.HasErrors() {
return false, diags
}
configReqs := configDeps.AllPluginRequirements()
// FIXME: This is weird because ConfigTreeDependencies was written before
// we switched over to using earlyConfig as the main source of dependencies.
	// In future we should clean this up to be a more reasonable API.
stateReqs := terraform.ConfigTreeDependencies(nil, state).AllPluginRequirements()
requirements := configReqs.Merge(stateReqs)
if len(requirements) == 0 {
// nothing to initialize
return false, nil
}
c.Ui.Output(c.Colorize().Color(
"\n[reset][bold]Initializing provider plugins...",
))
missing := c.missingPlugins(available, requirements)
if c.getPlugins {
if len(missing) > 0 {
c.Ui.Output("- Checking for available provider plugins...")
}
for provider, reqd := range missing {
pty := addrs.Provider{Type: provider}
_, providerDiags, err := c.providerInstaller.Get(pty, reqd.Versions)
diags = diags.Append(providerDiags)
if err != nil {
constraint := reqd.Versions.String()
if constraint == "" {
constraint = "(any version)"
}
switch {
case err == discovery.ErrorServiceUnreachable, err == discovery.ErrorPublicRegistryUnreachable:
c.Ui.Error(errDiscoveryServiceUnreachable)
case err == discovery.ErrorNoSuchProvider:
c.Ui.Error(fmt.Sprintf(errProviderNotFound, provider, DefaultPluginVendorDir))
case err == discovery.ErrorNoSuitableVersion:
if reqd.Versions.Unconstrained() {
// This should never happen, but might crop up if we catch
// the releases server in a weird state where the provider's
// directory is present but does not yet contain any
// versions. We'll treat it like ErrorNoSuchProvider, then.
c.Ui.Error(fmt.Sprintf(errProviderNotFound, provider, DefaultPluginVendorDir))
} else {
c.Ui.Error(fmt.Sprintf(errProviderVersionsUnsuitable, provider, reqd.Versions))
}
case errwrap.Contains(err, discovery.ErrorVersionIncompatible.Error()):
// Attempt to fetch nested error to display to the user which versions
// we considered and which versions might be compatible. Otherwise,
// we'll just display a generic version incompatible msg
incompatErr := errwrap.GetType(err, fmt.Errorf(""))
if incompatErr != nil {
c.Ui.Error(incompatErr.Error())
} else {
// Generic version incompatible msg
c.Ui.Error(fmt.Sprintf(errProviderIncompatible, provider, constraint))
}
// Reset nested errors
err = discovery.ErrorVersionIncompatible
case err == discovery.ErrorNoVersionCompatible:
// Generic version incompatible msg
c.Ui.Error(fmt.Sprintf(errProviderIncompatible, provider, constraint))
case err == discovery.ErrorSignatureVerification:
c.Ui.Error(fmt.Sprintf(errSignatureVerification, provider))
case err == discovery.ErrorChecksumVerification,
err == discovery.ErrorMissingChecksumVerification:
c.Ui.Error(fmt.Sprintf(errChecksumVerification, provider))
default:
c.Ui.Error(fmt.Sprintf(errProviderInstallError, provider, err.Error(), DefaultPluginVendorDir))
}
diags = diags.Append(err)
}
}
if diags.HasErrors() {
return true, diags
}
} else if len(missing) > 0 {
// we have missing providers, but aren't going to try and download them
var lines []string
for provider, reqd := range missing {
if reqd.Versions.Unconstrained() {
lines = append(lines, fmt.Sprintf("* %s (any version)\n", provider))
} else {
lines = append(lines, fmt.Sprintf("* %s (%s)\n", provider, reqd.Versions))
}
diags = diags.Append(fmt.Errorf("missing provider %q", provider))
}
sort.Strings(lines)
c.Ui.Error(fmt.Sprintf(errMissingProvidersNoInstall, strings.Join(lines, ""), DefaultPluginVendorDir))
return true, diags
}
// With all the providers downloaded, we'll generate our lock file
// that ensures the provider binaries remain unchanged until we init
// again. If anything changes, other commands that use providers will
// fail with an error instructing the user to re-run this command.
available = c.providerPluginSet() // re-discover to see newly-installed plugins
// internal providers were already filtered out, since we don't need to get them.
chosen := choosePlugins(available, nil, requirements)
digests := map[string][]byte{}
for name, meta := range chosen {
digest, err := meta.SHA256()
if err != nil {
diags = diags.Append(fmt.Errorf("Failed to read provider plugin %s: %s", meta.Path, err))
return true, diags
}
digests[name] = digest
if c.ignorePluginChecksum {
digests[name] = nil
}
}
err := c.providerPluginsLock().Write(digests)
if err != nil {
diags = diags.Append(fmt.Errorf("failed to save provider manifest: %s", err))
return true, diags
}
{
// Purge any auto-installed plugins that aren't being used.
purged, err := c.providerInstaller.PurgeUnused(chosen)
if err != nil {
// Failure to purge old plugins is not a fatal error
c.Ui.Warn(fmt.Sprintf("failed to purge unused plugins: %s", err))
}
if purged != nil {
for meta := range purged {
log.Printf("[DEBUG] Purged unused %s plugin %s", meta.Name, meta.Path)
}
}
}
// If any providers have "floating" versions (completely unconstrained)
// we'll suggest the user constrain with a pessimistic constraint to
// avoid implicitly adopting a later major release.
constraintSuggestions := make(map[string]discovery.ConstraintStr)
for name, meta := range chosen {
req := requirements[name]
if req == nil {
// should never happen, but we don't want to crash here, so we'll
// be cautious.
continue
}
if req.Versions.Unconstrained() && meta.Version != discovery.VersionZero {
// meta.Version.MustParse is safe here because our "chosen" metas
// were already filtered for validity of versions.
constraintSuggestions[name] = meta.Version.MustParse().MinorUpgradeConstraintStr()
}
}
if len(constraintSuggestions) != 0 {
names := make([]string, 0, len(constraintSuggestions))
for name := range constraintSuggestions {
names = append(names, name)
}
sort.Strings(names)
c.Ui.Output(outputInitProvidersUnconstrained)
for _, name := range names {
c.Ui.Output(fmt.Sprintf("* provider.%s: version = %q", name, constraintSuggestions[name]))
}
}
return true, diags
}
// backendConfigOverrideBody interprets the raw values of -backend-config
// arguments into an hcl.Body that should override the backend settings given
// in the configuration.
//
// If the result is nil then no override needs to be provided.
//
// If the returned diagnostics contains errors then the returned body may be
// incomplete or invalid.
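//
// As an illustration, a hypothetical invocation such as
//
//	terraform init -backend-config=backend.hcl -backend-config="bucket=my-state"
//
// produces a single body merged from the parsed file contents and the one
// synthesized "bucket" attribute.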
func (c *InitCommand) backendConfigOverrideBody(flags rawFlags, schema *configschema.Block) (hcl.Body, tfdiags.Diagnostics) {
items := flags.AllItems()
if len(items) == 0 {
return nil, nil
}
var ret hcl.Body
var diags tfdiags.Diagnostics
synthVals := make(map[string]cty.Value)
mergeBody := func(newBody hcl.Body) {
if ret == nil {
ret = newBody
} else {
ret = configs.MergeBodies(ret, newBody)
}
}
flushVals := func() {
if len(synthVals) == 0 {
return
}
newBody := configs.SynthBody("-backend-config=...", synthVals)
mergeBody(newBody)
synthVals = make(map[string]cty.Value)
}
if len(items) == 1 && items[0].Value == "" {
// Explicitly remove all -backend-config options.
// We do this by setting an empty but non-nil ConfigOverrides.
return configs.SynthBody("-backend-config=''", synthVals), diags
}
for _, item := range items {
eq := strings.Index(item.Value, "=")
if eq == -1 {
// The value is interpreted as a filename.
newBody, fileDiags := c.loadHCLFile(item.Value)
diags = diags.Append(fileDiags)
flushVals() // deal with any accumulated individual values first
mergeBody(newBody)
} else {
name := item.Value[:eq]
rawValue := item.Value[eq+1:]
attrS := schema.Attributes[name]
if attrS == nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Invalid backend configuration argument",
fmt.Sprintf("The backend configuration argument %q given on the command line is not expected for the selected backend type.", name),
))
continue
}
value, valueDiags := configValueFromCLI(item.String(), rawValue, attrS.Type)
diags = diags.Append(valueDiags)
if valueDiags.HasErrors() {
continue
}
synthVals[name] = value
}
}
flushVals()
return ret, diags
}
func (c *InitCommand) AutocompleteArgs() complete.Predictor {
return complete.PredictDirs("")
}
func (c *InitCommand) AutocompleteFlags() complete.Flags {
return complete.Flags{
"-backend": completePredictBoolean,
"-backend-config": complete.PredictFiles("*.tfvars"), // can also be key=value, but we can't "predict" that
"-force-copy": complete.PredictNothing,
"-from-module": completePredictModuleSource,
"-get": completePredictBoolean,
"-get-plugins": completePredictBoolean,
"-input": completePredictBoolean,
"-lock": completePredictBoolean,
"-lock-timeout": complete.PredictAnything,
"-no-color": complete.PredictNothing,
"-plugin-dir": complete.PredictDirs(""),
"-reconfigure": complete.PredictNothing,
"-upgrade": completePredictBoolean,
"-verify-plugins": completePredictBoolean,
}
}
func (c *InitCommand) Help() string {
helpText := `
Usage: terraform init [options] [DIR]
Initialize a new or existing Terraform working directory by creating
initial files, loading any remote state, downloading modules, etc.
This is the first command that should be run for any new or existing
Terraform configuration per machine. This sets up all the local data
necessary to run Terraform that is typically not committed to version
control.
This command is always safe to run multiple times. Though subsequent runs
may give errors, this command will never delete your configuration or
state. Even so, if you have important information, please back it up prior
to running this command, just in case.
If no arguments are given, the configuration in this working directory
is initialized.
Options:
-backend=true Configure the backend for this configuration.
-backend-config=path This can be either a path to an HCL file with key/value
assignments (same format as terraform.tfvars) or a
'key=value' format. This is merged with what is in the
configuration file. This can be specified multiple
times. The backend type must be in the configuration
itself.
-force-copy Suppress prompts about copying state data. This is
equivalent to providing a "yes" to all confirmation
prompts.
-from-module=SOURCE Copy the contents of the given module into the target
directory before initialization.
-get=true Download any modules for this configuration.
-get-plugins=true Download any missing plugins for this configuration.
-input=true Ask for input if necessary. If false, will error if
input was required.
-lock=true Lock the state file when locking is supported.
-lock-timeout=0s Duration to retry a state lock.
-no-color If specified, output won't contain any color.
-plugin-dir Directory containing plugin binaries. This overrides all
default search paths for plugins, and prevents the
automatic installation of plugins. This flag can be used
multiple times.
-reconfigure Reconfigure the backend, ignoring any saved
configuration.
-upgrade=false If installing modules (-get) or plugins (-get-plugins),
ignore previously-downloaded objects and install the
latest version allowed within configured constraints.
-verify-plugins=true Verify the authenticity and integrity of automatically
downloaded plugins.
`
return strings.TrimSpace(helpText)
}
func (c *InitCommand) Synopsis() string {
return "Initialize a Terraform working directory"
}
const errInitConfigError = `
There are some problems with the configuration, described below.
The Terraform configuration must be valid before initialization so that
Terraform can determine which modules and providers need to be installed.
`
const errInitCopyNotEmpty = `
The working directory already contains files. The -from-module option requires
an empty directory into which a copy of the referenced module will be placed.
To initialize the configuration already in this working directory, omit the
-from-module option.
`
const outputInitEmpty = `
[reset][bold]Terraform initialized in an empty directory![reset]
The directory has no Terraform configuration files. You may begin working
with Terraform immediately by creating Terraform configuration files.
`
const outputInitSuccess = `
[reset][bold][green]Terraform has been successfully initialized![reset][green]
`
const outputInitSuccessCLI = `[reset][green]
You may now begin working with Terraform. Try running "terraform plan" to see
any changes that are required for your infrastructure. All Terraform commands
should now work.
If you ever set or change modules or backend configuration for Terraform,
rerun this command to reinitialize your working directory. If you forget, other
commands will detect it and remind you to do so if necessary.
`
const outputInitSuccessConfigUpgrade = `
[reset][bold]Terraform has initialized, but configuration upgrades may be needed.[reset]
Terraform found syntax errors in the configuration that prevented full
initialization. If you've recently upgraded to Terraform v0.12, this may be
because your configuration uses syntax constructs that are no longer valid,
and so must be updated before full initialization is possible.
Run terraform init for this configuration at a shell prompt for more information
on how to update it for Terraform v0.12 compatibility.
`
const outputInitSuccessConfigUpgradeCLI = `[reset][green]
[reset][bold]Terraform has initialized, but configuration upgrades may be needed.[reset]
Terraform found syntax errors in the configuration that prevented full
initialization. If you've recently upgraded to Terraform v0.12, this may be
because your configuration uses syntax constructs that are no longer valid,
and so must be updated before full initialization is possible.
Terraform has installed the required providers to support the configuration
upgrade process. To begin upgrading your configuration, run the following:
terraform 0.12upgrade
To see the full set of errors that led to this message, run:
terraform validate
`
const outputInitProvidersUnconstrained = `
The following providers do not have any version constraints in configuration,
so the latest version was installed.
To prevent automatic upgrades to new major versions that may contain breaking
changes, it is recommended to add version = "..." constraints to the
corresponding provider blocks in configuration, with the constraint strings
suggested below.
`
const errDiscoveryServiceUnreachable = `
[reset][bold][red]Registry service unreachable.[reset][red]
This may indicate a network issue, or an issue with the requested Terraform Registry.
`
const errProviderNotFound = `
[reset][bold][red]Provider %[1]q not available for installation.[reset][red]
A provider named %[1]q could not be found in the Terraform Registry.
This may result from mistyping the provider name, or the given provider may
be a third-party provider that cannot be installed automatically.
In the latter case, the plugin must be installed manually by locating and
downloading a suitable distribution package and placing the plugin's executable
file in the following directory:
%[2]s
Terraform detects necessary plugins by inspecting the configuration and state.
To view the provider versions requested by each module, run
"terraform providers".
`
const errProviderVersionsUnsuitable = `
[reset][bold][red]No provider %[1]q plugins meet the constraint %[2]q.[reset][red]
The version constraint is derived from the "version" argument within the
provider %[1]q block in configuration. Child modules may also apply
provider version constraints. To view the provider versions requested by each
module in the current configuration, run "terraform providers".
To proceed, the version constraints for this provider must be relaxed by
either adjusting or removing the "version" argument in the provider blocks
throughout the configuration.
`
const errProviderIncompatible = `
[reset][bold][red]No available provider %[1]q plugins are compatible with this Terraform version.[reset][red]
From time to time, new Terraform major releases can change the requirements for
plugins such that older plugins become incompatible.
Terraform checked all of the plugin versions matching the given constraint:
%[2]s
Unfortunately, none of the suitable versions are compatible with this version
of Terraform. If you have recently upgraded Terraform, it may be necessary to
move to a newer major release of this provider. Alternatively, if you are
attempting to upgrade the provider to a new major version you may need to
also upgrade Terraform to support the new version.
Consult the documentation for this provider for more information on
compatibility between provider versions and Terraform versions.
`
const errProviderInstallError = `
[reset][bold][red]Error installing provider %[1]q: %[2]s.[reset][red]
Terraform analyses the configuration and state and automatically downloads
plugins for the providers used. However, when attempting to download this
plugin an unexpected error occurred.
This may be caused if for some reason Terraform is unable to reach the
plugin repository. The repository may be unreachable if access is blocked
by a firewall.
If automatic installation is not possible or desirable in your environment,
you may alternatively manually install plugins by downloading a suitable
distribution package and placing the plugin's executable file in the
following directory:
%[3]s
`
const errMissingProvidersNoInstall = `
[reset][bold][red]Missing required providers.[reset][red]
The following provider constraints are not met by the currently-installed
provider plugins:
%[1]s
Terraform can automatically download and install plugins to meet the given
constraints, but this step was skipped due to the use of -get-plugins=false
and/or -plugin-dir on the command line.
If automatic installation is not possible or desirable in your environment,
you may manually install plugins by downloading a suitable distribution package
and placing the plugin's executable file in one of the directories given
by -plugin-dir on the command line, or in the following directory if custom
plugin directories are not set:
%[2]s
`
const errChecksumVerification = `
[reset][bold][red]Error verifying checksum for provider %[1]q[reset][red]
The checksum for provider distribution from the Terraform Registry
did not match the source. This may mean that the distributed files
were changed after this version was released to the Registry.
`
const errSignatureVerification = `
[reset][bold][red]Error verifying GPG signature for provider %[1]q[reset][red]
Terraform was unable to verify the GPG signature of the downloaded provider
files using the keys downloaded from the Terraform Registry. This may mean that
the publisher of the provider removed the key it was signed with, or that the
distributed files were changed after this version was released.
`
| command/init.go | 1 | https://github.com/hashicorp/terraform/commit/efafadbe5edbfd8faa250aa1eaed46fbb2d52ee2 | [
0.998819887638092,
0.059405744075775146,
0.00016202586994040757,
0.00017031142488121986,
0.23423391580581665
] |
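Aside: the backendConfigOverrideBody function in the record above decides, per -backend-config item, between "file path" and "key=value" handling by searching for the first "=". The following self-contained Go sketch (an illustration written for this note, not code from the Terraform repository) reproduces just that split:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mirror of the split in backendConfigOverrideBody: no "=" means a file path.
	for _, item := range []string{"backend.hcl", "bucket=my-state"} {
		eq := strings.Index(item, "=")
		if eq == -1 {
			fmt.Printf("load HCL file: %s\n", item)
			continue
		}
		fmt.Printf("override %q = %q\n", item[:eq], item[eq+1:])
	}
}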
{
"id": 1,
"code_window": [
"\tavailable = c.providerPluginSet() // re-discover to see newly-installed plugins\n",
"\n",
"\t// internal providers were already filtered out, since we don't need to get them.\n",
"\tchosen := choosePlugins(available, nil, requirements)\n",
"\n",
"\tdigests := map[string][]byte{}\n",
"\tfor name, meta := range chosen {\n",
"\t\tdigest, err := meta.SHA256()\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tchosen := chooseProviders(available, nil, requirements)\n"
],
"file_path": "command/init.go",
"type": "replace",
"edit_start_line_idx": 599
} | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build gccgo
// +build !aix
#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#define _STRINGIFY2_(x) #x
#define _STRINGIFY_(x) _STRINGIFY2_(x)
#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__)
// Call syscall from C code because the gccgo support for calling from
// Go to C does not support varargs functions.
struct ret {
uintptr_t r;
uintptr_t err;
};
struct ret
gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
{
struct ret r;
errno = 0;
r.r = syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
r.err = errno;
return r;
}
uintptr_t
gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
{
return syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
}
| vendor/golang.org/x/sys/unix/gccgo_c.c | 0 | https://github.com/hashicorp/terraform/commit/efafadbe5edbfd8faa250aa1eaed46fbb2d52ee2 | [
0.00017916355864144862,
0.00017084961291402578,
0.0001662142894929275,
0.00016901030903682113,
0.000005143636826687725
] |
{
"id": 1,
"code_window": [
"\tavailable = c.providerPluginSet() // re-discover to see newly-installed plugins\n",
"\n",
"\t// internal providers were already filtered out, since we don't need to get them.\n",
"\tchosen := choosePlugins(available, nil, requirements)\n",
"\n",
"\tdigests := map[string][]byte{}\n",
"\tfor name, meta := range chosen {\n",
"\t\tdigest, err := meta.SHA256()\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tchosen := chooseProviders(available, nil, requirements)\n"
],
"file_path": "command/init.go",
"type": "replace",
"edit_start_line_idx": 599
} | resource "test_object" "A" {
lifecycle {
create_before_destroy = true
}
}
resource "test_object" "B" {
test_string = "${test_object.A.id}"
}
| terraform/testdata/transform-destroy-cbd-edge-basic/main.tf | 0 | https://github.com/hashicorp/terraform/commit/efafadbe5edbfd8faa250aa1eaed46fbb2d52ee2 | [
0.00017501179536338896,
0.00017501179536338896,
0.00017501179536338896,
0.00017501179536338896,
0
] |
{
"id": 1,
"code_window": [
"\tavailable = c.providerPluginSet() // re-discover to see newly-installed plugins\n",
"\n",
"\t// internal providers were already filtered out, since we don't need to get them.\n",
"\tchosen := choosePlugins(available, nil, requirements)\n",
"\n",
"\tdigests := map[string][]byte{}\n",
"\tfor name, meta := range chosen {\n",
"\t\tdigest, err := meta.SHA256()\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tchosen := chooseProviders(available, nil, requirements)\n"
],
"file_path": "command/init.go",
"type": "replace",
"edit_start_line_idx": 599
} | // Copyright 2010 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gomock
import (
"fmt"
"reflect"
)
// A Matcher is a representation of a class of values.
// It is used to represent the valid or expected arguments to a mocked method.
type Matcher interface {
// Matches returns whether x is a match.
Matches(x interface{}) bool
// String describes what the matcher matches.
String() string
}
type anyMatcher struct{}
func (anyMatcher) Matches(x interface{}) bool {
return true
}
func (anyMatcher) String() string {
return "is anything"
}
type eqMatcher struct {
x interface{}
}
func (e eqMatcher) Matches(x interface{}) bool {
return reflect.DeepEqual(e.x, x)
}
func (e eqMatcher) String() string {
return fmt.Sprintf("is equal to %v", e.x)
}
type nilMatcher struct{}
func (nilMatcher) Matches(x interface{}) bool {
if x == nil {
return true
}
v := reflect.ValueOf(x)
switch v.Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map,
reflect.Ptr, reflect.Slice:
return v.IsNil()
}
return false
}
func (nilMatcher) String() string {
return "is nil"
}
type notMatcher struct {
m Matcher
}
func (n notMatcher) Matches(x interface{}) bool {
return !n.m.Matches(x)
}
func (n notMatcher) String() string {
// TODO: Improve this if we add a NotString method to the Matcher interface.
return "not(" + n.m.String() + ")"
}
type assignableToTypeOfMatcher struct {
targetType reflect.Type
}
func (m assignableToTypeOfMatcher) Matches(x interface{}) bool {
return reflect.TypeOf(x).AssignableTo(m.targetType)
}
func (m assignableToTypeOfMatcher) String() string {
return "is assignable to " + m.targetType.Name()
}
// Constructors
// Any returns a matcher that always matches.
func Any() Matcher { return anyMatcher{} }
// Eq returns a matcher that matches on equality.
//
// Example usage:
// Eq(5).Matches(5) // returns true
// Eq(5).Matches(4) // returns false
func Eq(x interface{}) Matcher { return eqMatcher{x} }
// Nil returns a matcher that matches if the received value is nil.
//
// Example usage:
// var x *bytes.Buffer
// Nil().Matches(x) // returns true
// x = &bytes.Buffer{}
// Nil().Matches(x) // returns false
func Nil() Matcher { return nilMatcher{} }
// Not reverses the results of its given child matcher.
//
// Example usage:
// Not(Eq(5)).Matches(4) // returns true
// Not(Eq(5)).Matches(5) // returns false
func Not(x interface{}) Matcher {
if m, ok := x.(Matcher); ok {
return notMatcher{m}
}
return notMatcher{Eq(x)}
}
// AssignableToTypeOf is a Matcher that matches if the parameter to the mock
// function is assignable to the type of the parameter to this function.
//
// Example usage:
// var s fmt.Stringer = &bytes.Buffer{}
// AssignableToTypeOf(s).Matches(time.Second) // returns true
// AssignableToTypeOf(s).Matches(99) // returns false
func AssignableToTypeOf(x interface{}) Matcher {
return assignableToTypeOfMatcher{reflect.TypeOf(x)}
}
| vendor/github.com/golang/mock/gomock/matchers.go | 0 | https://github.com/hashicorp/terraform/commit/efafadbe5edbfd8faa250aa1eaed46fbb2d52ee2 | [
0.0005413904436863959,
0.00019627435540314764,
0.00016078817134257406,
0.00017184315947815776,
0.00009240787767339498
] |
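The matchers above can be exercised directly, without a mock controller, since each constructor returns a gomock.Matcher whose Matches method does the work. A minimal runnable example (assuming only that github.com/golang/mock/gomock is on the module path):

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/mock/gomock"
)

func main() {
	fmt.Println(gomock.Eq(5).Matches(5))             // true: reflect.DeepEqual
	fmt.Println(gomock.Not(gomock.Eq(5)).Matches(4)) // true: negation of Eq
	var buf *bytes.Buffer                            // typed nil pointer
	fmt.Println(gomock.Nil().Matches(buf))           // true: nilMatcher checks v.IsNil()
	fmt.Println(gomock.Any().Matches(struct{}{}))    // true: matches anything
}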
{
"id": 2,
"code_window": [
"\t// exceptional circumstances since it forces the provider's release\n",
"\t// schedule to be tied to that of Terraform Core.\n",
"\tInternal map[addrs.Provider]providers.Factory\n",
"}\n",
"\n",
"func choosePlugins(avail discovery.PluginMetaSet, internal map[addrs.Provider]providers.Factory, reqd discovery.PluginRequirements) map[string]discovery.PluginMeta {\n",
"\tcandidates := avail.ConstrainVersions(reqd)\n",
"\tret := map[string]discovery.PluginMeta{}\n",
"\tfor name, metas := range candidates {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"func chooseProviders(avail discovery.PluginMetaSet, internal map[addrs.Provider]providers.Factory, reqd discovery.PluginRequirements) map[string]discovery.PluginMeta {\n"
],
"file_path": "command/plugins.go",
"type": "replace",
"edit_start_line_idx": 41
} | package command
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
plugin "github.com/hashicorp/go-plugin"
"github.com/kardianos/osext"
"github.com/hashicorp/terraform/addrs"
terraformProvider "github.com/hashicorp/terraform/builtin/providers/terraform"
tfplugin "github.com/hashicorp/terraform/plugin"
"github.com/hashicorp/terraform/plugin/discovery"
"github.com/hashicorp/terraform/providers"
"github.com/hashicorp/terraform/provisioners"
"github.com/hashicorp/terraform/terraform"
)
// multiVersionProviderResolver is an implementation of
// terraform.ResourceProviderResolver that matches the given version constraints
// against a set of versioned provider plugins to find the newest version of
// each that satisfies the given constraints.
type multiVersionProviderResolver struct {
Available discovery.PluginMetaSet
// Internal is a map that overrides the usual plugin selection process
// for internal plugins. These plugins do not support version constraints
// (will produce an error if one is set). This should be used only in
// exceptional circumstances since it forces the provider's release
// schedule to be tied to that of Terraform Core.
Internal map[addrs.Provider]providers.Factory
}
func choosePlugins(avail discovery.PluginMetaSet, internal map[addrs.Provider]providers.Factory, reqd discovery.PluginRequirements) map[string]discovery.PluginMeta {
candidates := avail.ConstrainVersions(reqd)
ret := map[string]discovery.PluginMeta{}
for name, metas := range candidates {
// If the provider is in our internal map then we ignore any
// discovered plugins for it since these are dealt with separately.
if _, isInternal := internal[addrs.NewLegacyProvider(name)]; isInternal {
continue
}
if len(metas) == 0 {
continue
}
ret[name] = metas.Newest()
}
return ret
}
func (r *multiVersionProviderResolver) ResolveProviders(
reqd discovery.PluginRequirements,
) (map[addrs.Provider]providers.Factory, []error) {
factories := make(map[addrs.Provider]providers.Factory, len(reqd))
var errs []error
chosen := choosePlugins(r.Available, r.Internal, reqd)
for name, req := range reqd {
if factory, isInternal := r.Internal[addrs.NewLegacyProvider(name)]; isInternal {
if !req.Versions.Unconstrained() {
errs = append(errs, fmt.Errorf("provider.%s: this provider is built in to Terraform and so it does not support version constraints", name))
continue
}
factories[addrs.NewLegacyProvider(name)] = factory
continue
}
if newest, available := chosen[name]; available {
digest, err := newest.SHA256()
if err != nil {
errs = append(errs, fmt.Errorf("provider.%s: failed to load plugin to verify its signature: %s", name, err))
continue
}
if !reqd[name].AcceptsSHA256(digest) {
errs = append(errs, fmt.Errorf("provider.%s: new or changed plugin executable", name))
continue
}
factories[addrs.NewLegacyProvider(name)] = providerFactory(newest)
} else {
msg := fmt.Sprintf("provider.%s: no suitable version installed", name)
required := req.Versions.String()
// no version is unconstrained
if required == "" {
required = "(any version)"
}
foundVersions := []string{}
for meta := range r.Available.WithName(name) {
foundVersions = append(foundVersions, fmt.Sprintf("%q", meta.Version))
}
found := "none"
if len(foundVersions) > 0 {
found = strings.Join(foundVersions, ", ")
}
msg += fmt.Sprintf("\n version requirements: %q\n versions installed: %s", required, found)
errs = append(errs, errors.New(msg))
}
}
return factories, errs
}
// store the user-supplied path for plugin discovery
func (m *Meta) storePluginPath(pluginPath []string) error {
if len(pluginPath) == 0 {
return nil
}
path := filepath.Join(m.DataDir(), PluginPathFile)
// remove the plugin dir record if the path was set to an empty string
if len(pluginPath) == 1 && (pluginPath[0] == "") {
err := os.Remove(path)
if !os.IsNotExist(err) {
return err
}
return nil
}
js, err := json.MarshalIndent(pluginPath, "", " ")
if err != nil {
return err
}
// if this fails, so will WriteFile
os.MkdirAll(m.DataDir(), 0755)
return ioutil.WriteFile(path, js, 0644)
}
// Load the user-defined plugin search path into Meta.pluginPath if the file
// exists.
func (m *Meta) loadPluginPath() ([]string, error) {
js, err := ioutil.ReadFile(filepath.Join(m.DataDir(), PluginPathFile))
if os.IsNotExist(err) {
return nil, nil
}
if err != nil {
return nil, err
}
var pluginPath []string
if err := json.Unmarshal(js, &pluginPath); err != nil {
return nil, err
}
return pluginPath, nil
}
// the default location for automatically installed plugins
func (m *Meta) pluginDir() string {
return filepath.Join(m.DataDir(), "plugins", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH))
}
// pluginDirs return a list of directories to search for plugins.
//
// Earlier entries in this slice get priority over later when multiple copies
// of the same plugin version are found, but newer versions always override
// older versions where both satisfy the provider version constraints.
func (m *Meta) pluginDirs(includeAutoInstalled bool) []string {
// user defined paths take precedence
if len(m.pluginPath) > 0 {
return m.pluginPath
}
// When searching the following directories, earlier entries get precedence
// if the same plugin version is found twice, but newer versions will
// always get preference below regardless of where they are coming from.
// TODO: Add auto-install dir, default vendor dir and optional override
// vendor dir(s).
dirs := []string{"."}
// Look in the same directory as the Terraform executable.
// If found, this replaces what we found in the config path.
exePath, err := osext.Executable()
if err != nil {
log.Printf("[ERROR] Error discovering exe directory: %s", err)
} else {
dirs = append(dirs, filepath.Dir(exePath))
}
// add the user vendor directory
dirs = append(dirs, DefaultPluginVendorDir)
if includeAutoInstalled {
dirs = append(dirs, m.pluginDir())
}
dirs = append(dirs, m.GlobalPluginDirs...)
return dirs
}
func (m *Meta) pluginCache() discovery.PluginCache {
dir := m.PluginCacheDir
if dir == "" {
return nil // cache disabled
}
dir = filepath.Join(dir, pluginMachineName)
return discovery.NewLocalPluginCache(dir)
}
// providerPluginSet returns the set of valid providers that were discovered in
// the defined search paths.
func (m *Meta) providerPluginSet() discovery.PluginMetaSet {
plugins := discovery.FindPlugins("provider", m.pluginDirs(true))
// Add providers defined in the legacy .terraformrc.
if m.PluginOverrides != nil {
for k, v := range m.PluginOverrides.Providers {
log.Printf("[DEBUG] found plugin override in .terraformrc: %q, %q", k, v)
}
plugins = plugins.OverridePaths(m.PluginOverrides.Providers)
}
plugins, _ = plugins.ValidateVersions()
for p := range plugins {
log.Printf("[DEBUG] found valid plugin: %q, %q, %q", p.Name, p.Version, p.Path)
}
return plugins
}
// providerPluginAutoInstalledSet returns the set of providers that exist
// within the auto-install directory.
func (m *Meta) providerPluginAutoInstalledSet() discovery.PluginMetaSet {
plugins := discovery.FindPlugins("provider", []string{m.pluginDir()})
plugins, _ = plugins.ValidateVersions()
for p := range plugins {
log.Printf("[DEBUG] found valid plugin: %q", p.Name)
}
return plugins
}
// providerPluginManuallyInstalledSet returns the set of providers that exist
// in all locations *except* the auto-install directory.
func (m *Meta) providerPluginManuallyInstalledSet() discovery.PluginMetaSet {
plugins := discovery.FindPlugins("provider", m.pluginDirs(false))
// Add providers defined in the legacy .terraformrc.
if m.PluginOverrides != nil {
for k, v := range m.PluginOverrides.Providers {
log.Printf("[DEBUG] found plugin override in .terraformrc: %q, %q", k, v)
}
plugins = plugins.OverridePaths(m.PluginOverrides.Providers)
}
plugins, _ = plugins.ValidateVersions()
for p := range plugins {
log.Printf("[DEBUG] found valid plugin: %q, %q, %q", p.Name, p.Version, p.Path)
}
return plugins
}
func (m *Meta) providerResolver() providers.Resolver {
return &multiVersionProviderResolver{
Available: m.providerPluginSet(),
Internal: m.internalProviders(),
}
}
func (m *Meta) internalProviders() map[addrs.Provider]providers.Factory {
return map[addrs.Provider]providers.Factory{
addrs.NewLegacyProvider("terraform"): func() (providers.Interface, error) {
return terraformProvider.NewProvider(), nil
},
}
}
// filter the requirements returning only the providers that we can't resolve
func (m *Meta) missingPlugins(avail discovery.PluginMetaSet, reqd discovery.PluginRequirements) discovery.PluginRequirements {
missing := make(discovery.PluginRequirements)
candidates := avail.ConstrainVersions(reqd)
internal := m.internalProviders()
for name, versionSet := range reqd {
// internal providers can't be missing
if _, ok := internal[addrs.NewLegacyProvider(name)]; ok {
continue
}
log.Printf("[DEBUG] plugin requirements: %q=%q", name, versionSet.Versions)
if metas := candidates[name]; metas.Count() == 0 {
missing[name] = versionSet
}
}
return missing
}
func (m *Meta) provisionerFactories() map[string]terraform.ProvisionerFactory {
dirs := m.pluginDirs(true)
plugins := discovery.FindPlugins("provisioner", dirs)
plugins, _ = plugins.ValidateVersions()
// For now our goal is to just find the latest version of each plugin
// we have on the system. All provisioners should be at version 0.0.0
// currently, so there should actually only be one instance of each plugin
// name here, even though the discovery interface forces us to pretend
// that might not be true.
factories := make(map[string]terraform.ProvisionerFactory)
// Wire up the internal provisioners first. These might be overridden
// by discovered provisioners below.
for name := range InternalProvisioners {
factories[name] = internalProvisionerFactory(discovery.PluginMeta{Name: name})
}
byName := plugins.ByName()
for name, metas := range byName {
// Since we validated versions above and we partitioned the sets
// by name, we're guaranteed that the metas in our set all have
// valid versions and that there's at least one meta.
newest := metas.Newest()
factories[name] = provisionerFactory(newest)
}
return factories
}
func internalPluginClient(kind, name string) (*plugin.Client, error) {
cmdLine, err := BuildPluginCommandString(kind, name)
if err != nil {
return nil, err
}
// See the docstring for BuildPluginCommandString for why we need to do
// this split here.
cmdArgv := strings.Split(cmdLine, TFSPACE)
cfg := &plugin.ClientConfig{
Cmd: exec.Command(cmdArgv[0], cmdArgv[1:]...),
HandshakeConfig: tfplugin.Handshake,
Managed: true,
VersionedPlugins: tfplugin.VersionedPlugins,
AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},
}
return plugin.NewClient(cfg), nil
}
func providerFactory(meta discovery.PluginMeta) providers.Factory {
return func() (providers.Interface, error) {
client := tfplugin.Client(meta)
// Request the RPC client so we can get the provider
// so we can build the actual RPC-implemented provider.
rpcClient, err := client.Client()
if err != nil {
return nil, err
}
raw, err := rpcClient.Dispense(tfplugin.ProviderPluginName)
if err != nil {
return nil, err
}
// store the client so that the plugin can kill the child process
p := raw.(*tfplugin.GRPCProvider)
p.PluginClient = client
return p, nil
}
}
func provisionerFactory(meta discovery.PluginMeta) terraform.ProvisionerFactory {
return func() (provisioners.Interface, error) {
client := tfplugin.Client(meta)
return newProvisionerClient(client)
}
}
func internalProvisionerFactory(meta discovery.PluginMeta) terraform.ProvisionerFactory {
return func() (provisioners.Interface, error) {
client, err := internalPluginClient("provisioner", meta.Name)
if err != nil {
return nil, fmt.Errorf("[WARN] failed to build command line for internal plugin %q: %s", meta.Name, err)
}
return newProvisionerClient(client)
}
}
func newProvisionerClient(client *plugin.Client) (provisioners.Interface, error) {
// Request the RPC client so we can get the provisioner
// so we can build the actual RPC-implemented provisioner.
rpcClient, err := client.Client()
if err != nil {
return nil, err
}
raw, err := rpcClient.Dispense(tfplugin.ProvisionerPluginName)
if err != nil {
return nil, err
}
// store the client so that the plugin can kill the child process
p := raw.(*tfplugin.GRPCProvisioner)
p.PluginClient = client
return p, nil
}
| command/plugins.go | 1 | https://github.com/hashicorp/terraform/commit/efafadbe5edbfd8faa250aa1eaed46fbb2d52ee2 | [
0.9989596605300903,
0.19013336300849915,
0.00016583768592681736,
0.0038350147660821676,
0.34786778688430786
] |
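choosePlugins (renamed chooseProviders in the after_edit above) keeps only the newest plugin meta that satisfies each requirement. As a rough standalone sketch of that "newest wins" rule — using github.com/hashicorp/go-version for comparison and toy data rather than a real PluginMetaSet:

package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	// Hypothetical discovered versions per provider name.
	candidates := map[string][]string{
		"aws": {"1.9.0", "1.60.0", "1.10.0"},
	}
	for name, vs := range candidates {
		newest, err := version.NewVersion(vs[0])
		if err != nil {
			panic(err)
		}
		for _, s := range vs[1:] {
			if v, err := version.NewVersion(s); err == nil && v.GreaterThan(newest) {
				newest = v
			}
		}
		fmt.Printf("provider %q -> %s\n", name, newest) // aws -> 1.60.0
	}
}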
{
"id": 2,
"code_window": [
"\t// exceptional circumstances since it forces the provider's release\n",
"\t// schedule to be tied to that of Terraform Core.\n",
"\tInternal map[addrs.Provider]providers.Factory\n",
"}\n",
"\n",
"func choosePlugins(avail discovery.PluginMetaSet, internal map[addrs.Provider]providers.Factory, reqd discovery.PluginRequirements) map[string]discovery.PluginMeta {\n",
"\tcandidates := avail.ConstrainVersions(reqd)\n",
"\tret := map[string]discovery.PluginMeta{}\n",
"\tfor name, metas := range candidates {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"func chooseProviders(avail discovery.PluginMetaSet, internal map[addrs.Provider]providers.Factory, reqd discovery.PluginRequirements) map[string]discovery.PluginMeta {\n"
],
"file_path": "command/plugins.go",
"type": "replace",
"edit_start_line_idx": 41
} | package planfile
import (
"archive/zip"
"encoding/json"
"fmt"
"io/ioutil"
"path"
"sort"
"strings"
"time"
version "github.com/hashicorp/go-version"
"github.com/hashicorp/terraform/configs/configload"
)
const configSnapshotPrefix = "tfconfig/"
const configSnapshotManifestFile = configSnapshotPrefix + "modules.json"
const configSnapshotModulePrefix = configSnapshotPrefix + "m-"
type configSnapshotModuleRecord struct {
Key string `json:"Key"`
SourceAddr string `json:"Source,omitempty"`
VersionStr string `json:"Version,omitempty"`
Dir string `json:"Dir"`
}
type configSnapshotModuleManifest []configSnapshotModuleRecord
func readConfigSnapshot(z *zip.Reader) (*configload.Snapshot, error) {
// Errors from this function are expected to be reported with some
// additional prefix context about them being in a config snapshot,
// so they should not themselves refer to the config snapshot.
// They are also generally indicative of an invalid file, and so since
// plan files should not be hand-constructed we don't need to worry
// about making the messages user-actionable.
snap := &configload.Snapshot{
Modules: map[string]*configload.SnapshotModule{},
}
var manifestSrc []byte
// For processing our source files, we'll just sweep over all the files
// and react to the one-by-one to start, and then clean up afterwards
// when we'll presumably have found the manifest file.
for _, file := range z.File {
switch {
case file.Name == configSnapshotManifestFile:
// It's the manifest file, so we'll just read it raw into
// manifestSrc for now and process it below.
r, err := file.Open()
if err != nil {
return nil, fmt.Errorf("failed to open module manifest: %s", r)
}
manifestSrc, err = ioutil.ReadAll(r)
if err != nil {
return nil, fmt.Errorf("failed to read module manifest: %s", r)
}
case strings.HasPrefix(file.Name, configSnapshotModulePrefix):
relName := file.Name[len(configSnapshotModulePrefix):]
moduleKey, fileName := path.Split(relName)
// moduleKey should currently have a trailing slash on it, which we
// can use to recognize the difference between the root module
// (just a trailing slash) and no module path at all (empty string).
if moduleKey == "" {
// ignore invalid config entry
continue
}
moduleKey = moduleKey[:len(moduleKey)-1] // trim trailing slash
r, err := file.Open()
if err != nil {
return nil, fmt.Errorf("failed to open snapshot of %s from module %q: %s", fileName, moduleKey, err)
}
fileSrc, err := ioutil.ReadAll(r)
if err != nil {
return nil, fmt.Errorf("failed to read snapshot of %s from module %q: %s", fileName, moduleKey, err)
}
if _, exists := snap.Modules[moduleKey]; !exists {
snap.Modules[moduleKey] = &configload.SnapshotModule{
Files: map[string][]byte{},
// Will fill in everything else afterwards, when we
// process the manifest.
}
}
snap.Modules[moduleKey].Files[fileName] = fileSrc
}
}
if manifestSrc == nil {
return nil, fmt.Errorf("config snapshot does not have manifest file")
}
var manifest configSnapshotModuleManifest
err := json.Unmarshal(manifestSrc, &manifest)
if err != nil {
return nil, fmt.Errorf("invalid module manifest: %s", err)
}
for _, record := range manifest {
modSnap, exists := snap.Modules[record.Key]
if !exists {
// We'll allow this, assuming that it's a module with no files.
// This is still weird, since we generally reject modules with
// no files, but we'll allow it because downstream errors will
// catch it in that case.
modSnap = &configload.SnapshotModule{
Files: map[string][]byte{},
}
snap.Modules[record.Key] = modSnap
}
modSnap.SourceAddr = record.SourceAddr
modSnap.Dir = record.Dir
if record.VersionStr != "" {
v, err := version.NewVersion(record.VersionStr)
if err != nil {
return nil, fmt.Errorf("manifest has invalid version string %q for module %q", record.VersionStr, record.Key)
}
modSnap.Version = v
}
}
// Finally, we'll make sure we don't have any errant files for modules that
// aren't in the manifest.
for k := range snap.Modules {
found := false
for _, record := range manifest {
if record.Key == k {
found = true
break
}
}
if !found {
return nil, fmt.Errorf("found files for module %q that isn't recorded in the manifest", k)
}
}
return snap, nil
}
// writeConfigSnapshot adds to the given zip.Writer one or more files
// representing the given snapshot.
//
// This function creates new files in the writer, so any already-open writer
// for the file will be invalidated by this call. The writer remains open
// when this function returns.
func writeConfigSnapshot(snap *configload.Snapshot, z *zip.Writer) error {
// Errors from this function are expected to be reported with some
// additional prefix context about them being in a config snapshot,
// so they should not themselves refer to the config snapshot.
// They are also indicative of a bug in the caller, so they do not
// need to be user-actionable.
var manifest configSnapshotModuleManifest
keys := make([]string, 0, len(snap.Modules))
for k := range snap.Modules {
keys = append(keys, k)
}
sort.Strings(keys)
// We'll write a separate file header for each snapshot file below.
for _, k := range keys {
snapMod := snap.Modules[k]
record := configSnapshotModuleRecord{
Dir: snapMod.Dir,
Key: k,
SourceAddr: snapMod.SourceAddr,
}
if snapMod.Version != nil {
record.VersionStr = snapMod.Version.String()
}
manifest = append(manifest, record)
pathPrefix := fmt.Sprintf("%s%s/", configSnapshotModulePrefix, k)
for filename, src := range snapMod.Files {
zh := &zip.FileHeader{
Name: pathPrefix + filename,
Method: zip.Deflate,
Modified: time.Now(),
}
w, err := z.CreateHeader(zh)
if err != nil {
return fmt.Errorf("failed to create snapshot of %s from module %q: %s", zh.Name, k, err)
}
_, err = w.Write(src)
if err != nil {
return fmt.Errorf("failed to write snapshot of %s from module %q: %s", zh.Name, k, err)
}
}
}
// Now we'll write our manifest
{
zh := &zip.FileHeader{
Name: configSnapshotManifestFile,
Method: zip.Deflate,
Modified: time.Now(),
}
src, err := json.MarshalIndent(manifest, "", " ")
if err != nil {
return fmt.Errorf("failed to serialize module manifest: %s", err)
}
w, err := z.CreateHeader(zh)
if err != nil {
return fmt.Errorf("failed to create module manifest: %s", err)
}
_, err = w.Write(src)
if err != nil {
return fmt.Errorf("failed to write module manifest: %s", err)
}
}
return nil
}
| plans/planfile/config_snapshot.go | 0 | https://github.com/hashicorp/terraform/commit/efafadbe5edbfd8faa250aa1eaed46fbb2d52ee2 | [
0.00017419832875020802,
0.00016938158660195768,
0.00016740082355681807,
0.00016935051826294512,
0.0000014320195305117522
] |
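writeConfigSnapshot above relies on zip.Writer.CreateHeader with an explicit zip.FileHeader so it can set the compression method and timestamp per entry. A stripped-down, standard-library-only sketch of that pattern (the entry name mirrors configSnapshotManifestFile; everything else is invented for the example):

package main

import (
	"archive/zip"
	"bytes"
	"fmt"
	"time"
)

func main() {
	var buf bytes.Buffer
	z := zip.NewWriter(&buf)
	w, err := z.CreateHeader(&zip.FileHeader{
		Name:     "tfconfig/modules.json",
		Method:   zip.Deflate,
		Modified: time.Now(),
	})
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("[]")); err != nil { // an empty module manifest
		panic(err)
	}
	if err := z.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("wrote %d zip bytes\n", buf.Len())
}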
{
"id": 2,
"code_window": [
"\t// exceptional circumstances since it forces the provider's release\n",
"\t// schedule to be tied to that of Terraform Core.\n",
"\tInternal map[addrs.Provider]providers.Factory\n",
"}\n",
"\n",
"func choosePlugins(avail discovery.PluginMetaSet, internal map[addrs.Provider]providers.Factory, reqd discovery.PluginRequirements) map[string]discovery.PluginMeta {\n",
"\tcandidates := avail.ConstrainVersions(reqd)\n",
"\tret := map[string]discovery.PluginMeta{}\n",
"\tfor name, metas := range candidates {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"func chooseProviders(avail discovery.PluginMetaSet, internal map[addrs.Provider]providers.Factory, reqd discovery.PluginRequirements) map[string]discovery.PluginMeta {\n"
],
"file_path": "command/plugins.go",
"type": "replace",
"edit_start_line_idx": 41
} | package states
import (
"fmt"
"math/rand"
"time"
"github.com/hashicorp/terraform/addrs"
)
// Resource represents the state of a resource.
type Resource struct {
// Addr is the module-relative address for the resource this state object
// belongs to.
Addr addrs.Resource
// EachMode is the multi-instance mode currently in use for this resource,
// or NoEach if this is a single-instance resource. This dictates what
// type of value is returned when accessing this resource via expressions
// in the Terraform language.
EachMode EachMode
// Instances contains the potentially-multiple instances associated with
// this resource. This map can contain a mixture of different key types,
// but only the ones of InstanceKeyType are considered current.
Instances map[addrs.InstanceKey]*ResourceInstance
// ProviderConfig is the absolute address for the provider configuration that
// most recently managed this resource. This is used to connect a resource
// with a provider configuration when the resource configuration block is
// not available, such as if it has been removed from configuration
// altogether.
ProviderConfig addrs.AbsProviderConfig
}
// Instance returns the state for the instance with the given key, or nil
// if no such instance is tracked within the state.
func (rs *Resource) Instance(key addrs.InstanceKey) *ResourceInstance {
return rs.Instances[key]
}
// CreateInstance creates an instance and adds it to the resource
func (rs *Resource) CreateInstance(key addrs.InstanceKey) *ResourceInstance {
is := NewResourceInstance()
rs.Instances[key] = is
return is
}
// EnsureInstance returns the state for the instance with the given key,
// creating a new empty state for it if one doesn't already exist.
//
// Because this may create and save a new state, it is considered to be
// a write operation.
func (rs *Resource) EnsureInstance(key addrs.InstanceKey) *ResourceInstance {
ret := rs.Instance(key)
if ret == nil {
ret = NewResourceInstance()
rs.Instances[key] = ret
}
return ret
}
// ResourceInstance represents the state of a particular instance of a resource.
type ResourceInstance struct {
// Current, if non-nil, is the remote object that is currently represented
// by the corresponding resource instance.
Current *ResourceInstanceObjectSrc
// Deposed, if len > 0, contains any remote objects that were previously
// represented by the corresponding resource instance but have been
// replaced and are pending destruction due to the create_before_destroy
// lifecycle mode.
Deposed map[DeposedKey]*ResourceInstanceObjectSrc
}
// NewResourceInstance constructs and returns a new ResourceInstance, ready to
// use.
func NewResourceInstance() *ResourceInstance {
return &ResourceInstance{
Deposed: map[DeposedKey]*ResourceInstanceObjectSrc{},
}
}
// HasCurrent returns true if this resource instance has a "current"-generation
// object. Most instances do, but this can briefly be false during a
// create-before-destroy replace operation when the current has been deposed
// but its replacement has not yet been created.
func (i *ResourceInstance) HasCurrent() bool {
return i != nil && i.Current != nil
}
// HasDeposed returns true if this resource instance has a deposed object
// with the given key.
func (i *ResourceInstance) HasDeposed(key DeposedKey) bool {
return i != nil && i.Deposed[key] != nil
}
// HasAnyDeposed returns true if this resource instance has one or more
// deposed objects.
func (i *ResourceInstance) HasAnyDeposed() bool {
return i != nil && len(i.Deposed) > 0
}
// HasObjects returns true if this resource has any objects at all, whether
// current or deposed.
func (i *ResourceInstance) HasObjects() bool {
return i.Current != nil || len(i.Deposed) != 0
}
// deposeCurrentObject is part of the real implementation of
// SyncState.DeposeResourceInstanceObject. The exported method uses a lock
// to ensure that we can safely allocate an unused deposed key without
// collision.
func (i *ResourceInstance) deposeCurrentObject(forceKey DeposedKey) DeposedKey {
if !i.HasCurrent() {
return NotDeposed
}
key := forceKey
if key == NotDeposed {
key = i.findUnusedDeposedKey()
} else {
if _, exists := i.Deposed[key]; exists {
panic(fmt.Sprintf("forced key %s is already in use", forceKey))
}
}
i.Deposed[key] = i.Current
i.Current = nil
return key
}
// GetGeneration retrieves the object of the given generation from the
// ResourceInstance, or returns nil if there is no such object.
//
// If the given generation is nil or invalid, this method will panic.
func (i *ResourceInstance) GetGeneration(gen Generation) *ResourceInstanceObjectSrc {
if gen == CurrentGen {
return i.Current
}
if dk, ok := gen.(DeposedKey); ok {
return i.Deposed[dk]
}
if gen == nil {
panic(fmt.Sprintf("get with nil Generation"))
}
// Should never fall out here, since the above covers all possible
// Generation values.
panic(fmt.Sprintf("get invalid Generation %#v", gen))
}
// FindUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to
// already be in use for this instance at the time of the call.
//
// Note that the validity of this result may change if new deposed keys are
// allocated before it is used. To avoid this risk, instead use the
// DeposeResourceInstanceObject method on the SyncState wrapper type, which
// allocates a key and uses it atomically.
func (i *ResourceInstance) FindUnusedDeposedKey() DeposedKey {
return i.findUnusedDeposedKey()
}
// findUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to
// already be in use for this instance.
func (i *ResourceInstance) findUnusedDeposedKey() DeposedKey {
for {
key := NewDeposedKey()
if _, exists := i.Deposed[key]; !exists {
return key
}
// Spin until we find a unique one. This shouldn't take long, because
// we have a 32-bit keyspace and there's rarely more than one deposed
// instance.
}
}
// EachMode specifies the multi-instance mode for a resource.
type EachMode rune
const (
NoEach EachMode = 0
EachList EachMode = 'L'
EachMap EachMode = 'M'
)
//go:generate go run golang.org/x/tools/cmd/stringer -type EachMode
func eachModeForInstanceKey(key addrs.InstanceKey) EachMode {
switch key.(type) {
case addrs.IntKey:
return EachList
case addrs.StringKey:
return EachMap
default:
if key == addrs.NoKey {
return NoEach
}
panic(fmt.Sprintf("don't know an each mode for instance key %#v", key))
}
}
// DeposedKey is a 8-character hex string used to uniquely identify deposed
// instance objects in the state.
type DeposedKey string
// NotDeposed is a special invalid value of DeposedKey that is used to represent
// the absence of a deposed key. It must not be used as an actual deposed key.
const NotDeposed = DeposedKey("")
var deposedKeyRand = rand.New(rand.NewSource(time.Now().UnixNano()))
// NewDeposedKey generates a pseudo-random deposed key. Because of the short
// length of these keys, uniqueness is not a natural consequence and so the
// caller should test to see if the generated key is already in use and generate
// another if so, until a unique key is found.
func NewDeposedKey() DeposedKey {
v := deposedKeyRand.Uint32()
return DeposedKey(fmt.Sprintf("%08x", v))
}
func (k DeposedKey) String() string {
return string(k)
}
func (k DeposedKey) GoString() string {
ks := string(k)
switch {
case ks == "":
return "states.NotDeposed"
default:
return fmt.Sprintf("states.DeposedKey(%s)", ks)
}
}
// Generation is a helper method to convert a DeposedKey into a Generation.
// If the receiver is anything other than NotDeposed then the result is
// just the same value as a Generation. If the receiver is NotDeposed then
// the result is CurrentGen.
func (k DeposedKey) Generation() Generation {
if k == NotDeposed {
return CurrentGen
}
return k
}
// generation is an implementation of Generation.
func (k DeposedKey) generation() {}
| states/resource.go | 0 | https://github.com/hashicorp/terraform/commit/efafadbe5edbfd8faa250aa1eaed46fbb2d52ee2 | [
0.00039787727291695774,
0.0001939339708769694,
0.00016290516941808164,
0.00016844911442603916,
0.000056963679526234046
] |
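findUnusedDeposedKey above spins on collisions in a 32-bit keyspace, which terminates quickly in practice because deposed objects are rare. A toy reproduction of that generate-and-retry loop (standalone, not the states package itself):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	used := map[string]bool{}
	for len(used) < 3 {
		key := fmt.Sprintf("%08x", r.Uint32()) // same format as NewDeposedKey
		if used[key] {
			continue // collision: spin until unique, as findUnusedDeposedKey does
		}
		used[key] = true
		fmt.Println(key)
	}
}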
{
"id": 2,
"code_window": [
"\t// exceptional circumstances since it forces the provider's release\n",
"\t// schedule to be tied to that of Terraform Core.\n",
"\tInternal map[addrs.Provider]providers.Factory\n",
"}\n",
"\n",
"func choosePlugins(avail discovery.PluginMetaSet, internal map[addrs.Provider]providers.Factory, reqd discovery.PluginRequirements) map[string]discovery.PluginMeta {\n",
"\tcandidates := avail.ConstrainVersions(reqd)\n",
"\tret := map[string]discovery.PluginMeta{}\n",
"\tfor name, metas := range candidates {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"func chooseProviders(avail discovery.PluginMetaSet, internal map[addrs.Provider]providers.Factory, reqd discovery.PluginRequirements) map[string]discovery.PluginMeta {\n"
],
"file_path": "command/plugins.go",
"type": "replace",
"edit_start_line_idx": 41
} | // +build windows
package initwd
// no syscall.Stat_t on windows, return 0 for inodes
func inode(path string) (uint64, error) {
return 0, nil
}
| internal/initwd/inode_windows.go | 0 | https://github.com/hashicorp/terraform/commit/efafadbe5edbfd8faa250aa1eaed46fbb2d52ee2 | [
0.00016999895160552114,
0.00016999895160552114,
0.00016999895160552114,
0.00016999895160552114,
0
] |