| hunk (dict) | file (string, 0–11.8M chars) | file_path (string, 2–234 chars) | label (int64, 0–1) | commit_url (string, 74–103 chars) | dependency_score (sequence of 5 floats) |
|---|---|---|---|---|---|
{
"id": 2,
"code_window": [
"\t\tif _, err := tee.Write(zero16[:16-padding]); err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\t}\n",
"\n",
"\t// write packet-mac. egress MAC is up to date because\n",
"\t// frame content was written to it as well.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t// write frame MAC. egress MAC hash is up to date because\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 83
} | package p2p
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"errors"
"hash"
"io"
"github.com/ethereum/go-ethereum/rlp"
)
var (
// this is used in place of actual frame header data.
// TODO: replace this when Msg contains the protocol type code.
zeroHeader = []byte{0xC2, 0x80, 0x80}
// sixteen zero bytes
zero16 = make([]byte, 16)
)
type rlpxFrameRW struct {
conn io.ReadWriter
enc cipher.Stream
dec cipher.Stream
macCipher cipher.Block
egressMAC hash.Hash
ingressMAC hash.Hash
}
func newRlpxFrameRW(conn io.ReadWriter, s secrets) *rlpxFrameRW {
macc, err := aes.NewCipher(s.MAC)
if err != nil {
panic("invalid MAC secret: " + err.Error())
}
encc, err := aes.NewCipher(s.AES)
if err != nil {
panic("invalid AES secret: " + err.Error())
}
// we use an all-zeroes IV for AES because the key used
// for encryption is ephemeral.
iv := make([]byte, encc.BlockSize())
return &rlpxFrameRW{
conn: conn,
enc: cipher.NewCTR(encc, iv),
dec: cipher.NewCTR(encc, iv),
macCipher: macc,
egressMAC: s.EgressMAC,
ingressMAC: s.IngressMAC,
}
}
func (rw *rlpxFrameRW) WriteMsg(msg Msg) error {
ptype, _ := rlp.EncodeToBytes(msg.Code)
// write header
headbuf := make([]byte, 32)
fsize := uint32(len(ptype)) + msg.Size
putInt24(fsize, headbuf) // TODO: check overflow
copy(headbuf[3:], zeroHeader)
rw.enc.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now encrypted
copy(headbuf[16:], updateHeaderMAC(rw.egressMAC, rw.macCipher, headbuf[:16]))
if _, err := rw.conn.Write(headbuf); err != nil {
return err
}
// write encrypted frame, updating the egress MAC while writing to conn.
tee := cipher.StreamWriter{S: rw.enc, W: io.MultiWriter(rw.conn, rw.egressMAC)}
if _, err := tee.Write(ptype); err != nil {
return err
}
if _, err := io.Copy(tee, msg.Payload); err != nil {
return err
}
if padding := fsize % 16; padding > 0 {
if _, err := tee.Write(zero16[:16-padding]); err != nil {
return err
}
}
// write packet-mac. egress MAC is up to date because
// frame content was written to it as well.
mac := updateHeaderMAC(rw.egressMAC, rw.macCipher, rw.egressMAC.Sum(nil))
_, err := rw.conn.Write(mac)
return err
}
func (rw *rlpxFrameRW) ReadMsg() (msg Msg, err error) {
// read the header
headbuf := make([]byte, 32)
if _, err := io.ReadFull(rw.conn, headbuf); err != nil {
return msg, err
}
// verify header mac
shouldMAC := updateHeaderMAC(rw.ingressMAC, rw.macCipher, headbuf[:16])
if !hmac.Equal(shouldMAC[:16], headbuf[16:]) {
return msg, errors.New("bad header MAC")
}
rw.dec.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now decrypted
fsize := readInt24(headbuf)
// ignore protocol type for now
// read the frame content
var rsize = fsize // frame size rounded up to 16 byte boundary
if padding := fsize % 16; padding > 0 {
rsize += 16 - padding
}
framebuf := make([]byte, rsize)
if _, err := io.ReadFull(rw.conn, framebuf); err != nil {
return msg, err
}
// read and validate frame MAC. we can re-use headbuf for that.
rw.ingressMAC.Write(framebuf)
if _, err := io.ReadFull(rw.conn, headbuf); err != nil {
return msg, err
}
shouldMAC = updateHeaderMAC(rw.ingressMAC, rw.macCipher, rw.ingressMAC.Sum(nil))
if !hmac.Equal(shouldMAC, headbuf) {
return msg, errors.New("bad frame MAC")
}
// decrypt frame content
rw.dec.XORKeyStream(framebuf, framebuf)
// decode message code
content := bytes.NewReader(framebuf[:fsize])
if err := rlp.Decode(content, &msg.Code); err != nil {
return msg, err
}
msg.Size = uint32(content.Len())
msg.Payload = content
return msg, nil
}
func updateHeaderMAC(mac hash.Hash, block cipher.Block, header []byte) []byte {
aesbuf := make([]byte, aes.BlockSize)
block.Encrypt(aesbuf, mac.Sum(nil))
for i := range aesbuf {
aesbuf[i] ^= header[i]
}
mac.Write(aesbuf)
return mac.Sum(nil)
}
func readInt24(b []byte) uint32 {
return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
}
func putInt24(v uint32, b []byte) {
b[0] = byte(v >> 16)
b[1] = byte(v >> 8)
b[2] = byte(v)
}
| p2p/rlpx.go | 1 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.9964550733566284,
0.11655798554420471,
0.00016469582624267787,
0.002658912679180503,
0.3017441928386688
] |
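Each row above pairs a `hunk` record with the full contents of one file from the commit. The record's `code_window` lines carry per-line `keep`/`replace` labels, `after_edit` holds the replacement lines, and `edit_start_line_idx` locates the window inside `file_path`. Below is a minimal sketch of replaying such a record; the `hunk` type and `apply` helper are hypothetical names used for illustration, not part of the dataset's tooling.

```go
package main

import (
	"fmt"
	"strings"
)

// hunk mirrors the record fields needed to replay a "replace" edit
// against its code_window (hypothetical helper, for illustration only).
type hunk struct {
	CodeWindow []string // context lines around the edit
	Labels     []string // "keep" or "replace", one per window line
	AfterEdit  []string // lines that substitute the "replace" run
}

// apply copies "keep" lines and swaps the contiguous "replace" run
// for AfterEdit, returning the post-edit window.
func (h hunk) apply() []string {
	var out []string
	replaced := false
	for i, line := range h.CodeWindow {
		if h.Labels[i] == "keep" {
			out = append(out, line)
			continue
		}
		if !replaced {
			out = append(out, h.AfterEdit...)
			replaced = true
		}
	}
	return out
}

func main() {
	// The last two lines of the record's code_window above, with their labels.
	h := hunk{
		CodeWindow: []string{
			"\t// write packet-mac. egress MAC is up to date because\n",
			"\t// frame content was written to it as well.\n",
		},
		Labels:    []string{"replace", "keep"},
		AfterEdit: []string{"\t// write frame MAC. egress MAC hash is up to date because\n"},
	}
	fmt.Print(strings.Join(h.apply(), ""))
}
```

Running it prints the edited tail of the window: the new frame-MAC comment followed by the kept line.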
{
"id": 2,
"code_window": [
"\t\tif _, err := tee.Write(zero16[:16-padding]); err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\t}\n",
"\n",
"\t// write packet-mac. egress MAC is up to date because\n",
"\t// frame content was written to it as well.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t// write frame MAC. egress MAC hash is up to date because\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 83
} | ---
- include: host-config.yml
- include: testrunner-config.yml
| tests/files/ansible/site.yml | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.00017292305710725486,
0.00017292305710725486,
0.00017292305710725486,
0.00017292305710725486,
0
] |
{
"id": 2,
"code_window": [
"\t\tif _, err := tee.Write(zero16[:16-padding]); err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\t}\n",
"\n",
"\t// write packet-mac. egress MAC is up to date because\n",
"\t// frame content was written to it as well.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t// write frame MAC. egress MAC hash is up to date because\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 83
} | package otto
import (
"fmt"
"runtime"
"github.com/robertkrimen/otto/token"
)
func (self *_runtime) cmpl_evaluate_nodeStatement(node _nodeStatement) Value {
// Allow interpreter interruption
// If the Interrupt channel is nil, then
// we avoid runtime.Gosched() overhead (if any)
// FIXME: Test this
if self.Otto.Interrupt != nil {
runtime.Gosched()
select {
case value := <-self.Otto.Interrupt:
value()
default:
}
}
switch node := node.(type) {
case *_nodeBlockStatement:
// FIXME If result is break, then return the empty value?
return self.cmpl_evaluate_nodeStatementList(node.list)
case *_nodeBranchStatement:
target := node.label
switch node.branch { // FIXME Maybe node.kind? node.operator?
case token.BREAK:
return toValue(newBreakResult(target))
case token.CONTINUE:
return toValue(newContinueResult(target))
}
case *_nodeDebuggerStatement:
return Value{} // Nothing happens.
case *_nodeDoWhileStatement:
return self.cmpl_evaluate_nodeDoWhileStatement(node)
case *_nodeEmptyStatement:
return Value{}
case *_nodeExpressionStatement:
return self.cmpl_evaluate_nodeExpression(node.expression)
case *_nodeForInStatement:
return self.cmpl_evaluate_nodeForInStatement(node)
case *_nodeForStatement:
return self.cmpl_evaluate_nodeForStatement(node)
case *_nodeIfStatement:
return self.cmpl_evaluate_nodeIfStatement(node)
case *_nodeLabelledStatement:
self.labels = append(self.labels, node.label)
defer func() {
if len(self.labels) > 0 {
self.labels = self.labels[:len(self.labels)-1] // Pop the label
} else {
self.labels = nil
}
}()
return self.cmpl_evaluate_nodeStatement(node.statement)
case *_nodeReturnStatement:
if node.argument != nil {
return toValue(newReturnResult(self.GetValue(self.cmpl_evaluate_nodeExpression(node.argument))))
}
return toValue(newReturnResult(UndefinedValue()))
case *_nodeSwitchStatement:
return self.cmpl_evaluate_nodeSwitchStatement(node)
case *_nodeThrowStatement:
value := self.GetValue(self.cmpl_evaluate_nodeExpression(node.argument))
panic(newException(value))
case *_nodeTryStatement:
return self.cmpl_evaluate_nodeTryStatement(node)
case *_nodeVariableStatement:
// Variables are already defined, this is initialization only
for _, variable := range node.list {
self.cmpl_evaluate_nodeVariableExpression(variable.(*_nodeVariableExpression))
}
return Value{}
case *_nodeWhileStatement:
return self.cmpl_evaluate_nodeWhileStatement(node)
case *_nodeWithStatement:
return self.cmpl_evaluate_nodeWithStatement(node)
}
panic(fmt.Errorf("Here be dragons: evaluate_nodeStatement(%T)", node))
}
func (self *_runtime) cmpl_evaluate_nodeStatementList(list []_nodeStatement) Value {
var result Value
for _, node := range list {
value := self.cmpl_evaluate_nodeStatement(node)
switch value._valueType {
case valueResult:
return value
case valueEmpty:
default:
// We have GetValue here to (for example) trigger a
// ReferenceError (of the not defined variety)
// Not sure if this is the best way to error out early
// for such errors or if there is a better way
// TODO Do we still need this?
result = self.GetValue(value)
}
}
return result
}
func (self *_runtime) cmpl_evaluate_nodeDoWhileStatement(node *_nodeDoWhileStatement) Value {
labels := append(self.labels, "")
self.labels = nil
test := node.test
result := Value{}
resultBreak:
for {
for _, node := range node.body {
value := self.cmpl_evaluate_nodeStatement(node)
switch value._valueType {
case valueResult:
switch value.evaluateBreakContinue(labels) {
case resultReturn:
return value
case resultBreak:
break resultBreak
case resultContinue:
goto resultContinue
}
case valueEmpty:
default:
result = value
}
}
resultContinue:
if !self.GetValue(self.cmpl_evaluate_nodeExpression(test)).isTrue() {
// Stahp: do ... while (false)
break
}
}
return result
}
func (self *_runtime) cmpl_evaluate_nodeForInStatement(node *_nodeForInStatement) Value {
labels := append(self.labels, "")
self.labels = nil
source := self.cmpl_evaluate_nodeExpression(node.source)
sourceValue := self.GetValue(source)
switch sourceValue._valueType {
case valueUndefined, valueNull:
return emptyValue()
}
sourceObject := self.toObject(sourceValue)
into := node.into
body := node.body
result := Value{}
object := sourceObject
for object != nil {
enumerateValue := Value{}
object.enumerate(false, func(name string) bool {
into := self.cmpl_evaluate_nodeExpression(into)
// In the case of: for (var abc in def) ...
if into.reference() == nil {
identifier := toString(into)
// TODO Should be true or false (strictness) depending on context
into = toValue(getIdentifierReference(self.LexicalEnvironment(), identifier, false))
}
self.PutValue(into.reference(), toValue_string(name))
for _, node := range body {
value := self.cmpl_evaluate_nodeStatement(node)
switch value._valueType {
case valueResult:
switch value.evaluateBreakContinue(labels) {
case resultReturn:
enumerateValue = value
return false
case resultBreak:
object = nil
return false
case resultContinue:
return true
}
case valueEmpty:
default:
enumerateValue = value
}
}
return true
})
if object == nil {
break
}
object = object.prototype
if !enumerateValue.isEmpty() {
result = enumerateValue
}
}
return result
}
func (self *_runtime) cmpl_evaluate_nodeForStatement(node *_nodeForStatement) Value {
labels := append(self.labels, "")
self.labels = nil
initializer := node.initializer
test := node.test
update := node.update
body := node.body
if initializer != nil {
initialResult := self.cmpl_evaluate_nodeExpression(initializer)
self.GetValue(initialResult) // Side-effect trigger
}
result := Value{}
resultBreak:
for {
if test != nil {
testResult := self.cmpl_evaluate_nodeExpression(test)
testResultValue := self.GetValue(testResult)
if toBoolean(testResultValue) == false {
break
}
}
for _, node := range body {
value := self.cmpl_evaluate_nodeStatement(node)
switch value._valueType {
case valueResult:
switch value.evaluateBreakContinue(labels) {
case resultReturn:
return value
case resultBreak:
break resultBreak
case resultContinue:
goto resultContinue
}
case valueEmpty:
default:
result = value
}
}
resultContinue:
if update != nil {
updateResult := self.cmpl_evaluate_nodeExpression(update)
self.GetValue(updateResult) // Side-effect trigger
}
}
return result
}
func (self *_runtime) cmpl_evaluate_nodeIfStatement(node *_nodeIfStatement) Value {
test := self.cmpl_evaluate_nodeExpression(node.test)
testValue := self.GetValue(test)
if toBoolean(testValue) {
return self.cmpl_evaluate_nodeStatement(node.consequent)
} else if node.alternate != nil {
return self.cmpl_evaluate_nodeStatement(node.alternate)
}
return Value{}
}
func (self *_runtime) cmpl_evaluate_nodeSwitchStatement(node *_nodeSwitchStatement) Value {
labels := append(self.labels, "")
self.labels = nil
discriminantResult := self.cmpl_evaluate_nodeExpression(node.discriminant)
target := node.default_
for index, clause := range node.body {
test := clause.test
if test != nil {
if self.calculateComparison(token.STRICT_EQUAL, discriminantResult, self.cmpl_evaluate_nodeExpression(test)) {
target = index
break
}
}
}
result := Value{}
if target != -1 {
for _, clause := range node.body[target:] {
for _, statement := range clause.consequent {
value := self.cmpl_evaluate_nodeStatement(statement)
switch value._valueType {
case valueResult:
switch value.evaluateBreak(labels) {
case resultReturn:
return value
case resultBreak:
return Value{}
}
case valueEmpty:
default:
result = value
}
}
}
}
return result
}
func (self *_runtime) cmpl_evaluate_nodeTryStatement(node *_nodeTryStatement) Value {
tryCatchValue, exception := self.tryCatchEvaluate(func() Value {
return self.cmpl_evaluate_nodeStatement(node.body)
})
if exception && node.catch != nil {
lexicalEnvironment := self._executionContext(0).newDeclarativeEnvironment(self)
defer func() {
self._executionContext(0).LexicalEnvironment = lexicalEnvironment
}()
// TODO If necessary, convert TypeError<runtime> => TypeError
// That, is, such errors can be thrown despite not being JavaScript "native"
self.localSet(node.catch.parameter, tryCatchValue)
// FIXME node.CatchParameter
// FIXME node.Catch
tryCatchValue, exception = self.tryCatchEvaluate(func() Value {
return self.cmpl_evaluate_nodeStatement(node.catch.body)
})
}
if node.finally != nil {
finallyValue := self.cmpl_evaluate_nodeStatement(node.finally)
if finallyValue.isResult() {
return finallyValue
}
}
if exception {
panic(newException(tryCatchValue))
}
return tryCatchValue
}
func (self *_runtime) cmpl_evaluate_nodeWhileStatement(node *_nodeWhileStatement) Value {
test := node.test
body := node.body
labels := append(self.labels, "")
self.labels = nil
result := Value{}
resultBreakContinue:
for {
if !self.GetValue(self.cmpl_evaluate_nodeExpression(test)).isTrue() {
// Stahp: while (false) ...
break
}
for _, node := range body {
value := self.cmpl_evaluate_nodeStatement(node)
switch value._valueType {
case valueResult:
switch value.evaluateBreakContinue(labels) {
case resultReturn:
return value
case resultBreak:
break resultBreakContinue
case resultContinue:
continue resultBreakContinue
}
case valueEmpty:
default:
result = value
}
}
}
return result
}
func (self *_runtime) cmpl_evaluate_nodeWithStatement(node *_nodeWithStatement) Value {
object := self.cmpl_evaluate_nodeExpression(node.object)
objectValue := self.GetValue(object)
previousLexicalEnvironment, lexicalEnvironment := self._executionContext(0).newLexicalEnvironment(self.toObject(objectValue))
lexicalEnvironment.ProvideThis = true
defer func() {
self._executionContext(0).LexicalEnvironment = previousLexicalEnvironment
}()
return self.cmpl_evaluate_nodeStatement(node.body)
}
| Godeps/_workspace/src/github.com/obscuren/otto/cmpl_evaluate_statement.go | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.0004659189435187727,
0.00019569981668610126,
0.00016404336201958358,
0.0001725327456369996,
0.00007195089710876346
] |
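The otto evaluator above selects a switch clause by strict equality (falling back to `node.default_`) and then executes every clause from the matched index onward until a break result is produced, which is how JavaScript fall-through is modelled. A small Go sketch of that selection and fall-through shape, using assumed toy types (`clause` and `runSwitch` are illustrative, not otto APIs):

```go
package main

import "fmt"

// clause is a toy stand-in for a switch case: a nil test marks the default
// clause, and a consequent statement equal to "break" stops execution.
type clause struct {
	test       *int
	consequent []string
}

// runSwitch mirrors cmpl_evaluate_nodeSwitchStatement's shape: pick the
// first strictly-equal clause (or the default), then run every clause from
// that index onward until a break is hit.
func runSwitch(discriminant int, clauses []clause, defaultIdx int) []string {
	target := defaultIdx
	for i, c := range clauses {
		if c.test != nil && *c.test == discriminant {
			target = i
			break
		}
	}
	var executed []string
	if target == -1 {
		return executed
	}
	for _, c := range clauses[target:] {
		for _, stmt := range c.consequent {
			if stmt == "break" {
				return executed
			}
			executed = append(executed, stmt)
		}
	}
	return executed
}

func main() {
	one, two := 1, 2
	clauses := []clause{
		{test: &one, consequent: []string{"a"}},          // no break: falls through
		{test: &two, consequent: []string{"b", "break"}}, // stops here
		{test: nil, consequent: []string{"c"}},           // default clause
	}
	fmt.Println(runSwitch(1, clauses, 2)) // [a b]
}
```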
{
"id": 2,
"code_window": [
"\t\tif _, err := tee.Write(zero16[:16-padding]); err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\t}\n",
"\n",
"\t// write packet-mac. egress MAC is up to date because\n",
"\t// frame content was written to it as well.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t// write frame MAC. egress MAC hash is up to date because\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 83
} | package xeth
import (
"bytes"
"fmt"
"strings"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethutil"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/state"
)
func toHex(b []byte) string {
return "0x" + ethutil.Bytes2Hex(b)
}
func fromHex(s string) []byte {
if len(s) > 1 {
if s[0:2] == "0x" {
s = s[2:]
}
return ethutil.Hex2Bytes(s)
}
return nil
}
type Object struct {
*state.StateObject
}
func NewObject(state *state.StateObject) *Object {
return &Object{state}
}
func (self *Object) StorageString(str string) *ethutil.Value {
if ethutil.IsHex(str) {
return self.storage(ethutil.Hex2Bytes(str[2:]))
} else {
return self.storage(ethutil.RightPadBytes([]byte(str), 32))
}
}
func (self *Object) StorageValue(addr *ethutil.Value) *ethutil.Value {
return self.storage(addr.Bytes())
}
func (self *Object) storage(addr []byte) *ethutil.Value {
return self.StateObject.GetStorage(ethutil.BigD(addr))
}
func (self *Object) Storage() (storage map[string]string) {
storage = make(map[string]string)
it := self.StateObject.Trie().Iterator()
for it.Next() {
var data []byte
rlp.Decode(bytes.NewReader(it.Value), &data)
storage[toHex(it.Key)] = toHex(data)
}
return
}
// Block interface exposed to QML
type Block struct {
//Transactions string `json:"transactions"`
ref *types.Block
Size string `json:"size"`
Number int `json:"number"`
Hash string `json:"hash"`
Transactions *ethutil.List `json:"transactions"`
Uncles *ethutil.List `json:"uncles"`
Time int64 `json:"time"`
Coinbase string `json:"coinbase"`
Name string `json:"name"`
GasLimit string `json:"gasLimit"`
GasUsed string `json:"gasUsed"`
PrevHash string `json:"prevHash"`
Bloom string `json:"bloom"`
Raw string `json:"raw"`
}
// Creates a new QML Block from a chain block
func NewBlock(block *types.Block) *Block {
if block == nil {
return &Block{}
}
ptxs := make([]*Transaction, len(block.Transactions()))
for i, tx := range block.Transactions() {
ptxs[i] = NewTx(tx)
}
txlist := ethutil.NewList(ptxs)
puncles := make([]*Block, len(block.Uncles()))
for i, uncle := range block.Uncles() {
puncles[i] = NewBlock(types.NewBlockWithHeader(uncle))
}
ulist := ethutil.NewList(puncles)
return &Block{
ref: block, Size: block.Size().String(),
Number: int(block.NumberU64()), GasUsed: block.GasUsed().String(),
GasLimit: block.GasLimit().String(), Hash: toHex(block.Hash()),
Transactions: txlist, Uncles: ulist,
Time: block.Time(),
Coinbase: toHex(block.Coinbase()),
PrevHash: toHex(block.ParentHash()),
Bloom: toHex(block.Bloom()),
Raw: block.String(),
}
}
func (self *Block) ToString() string {
if self.ref != nil {
return self.ref.String()
}
return ""
}
func (self *Block) GetTransaction(hash string) *Transaction {
tx := self.ref.Transaction(fromHex(hash))
if tx == nil {
return nil
}
return NewTx(tx)
}
type Transaction struct {
ref *types.Transaction
Value string `json:"value"`
Gas string `json:"gas"`
GasPrice string `json:"gasPrice"`
Hash string `json:"hash"`
Address string `json:"address"`
Sender string `json:"sender"`
RawData string `json:"rawData"`
Data string `json:"data"`
Contract bool `json:"isContract"`
CreatesContract bool `json:"createsContract"`
Confirmations int `json:"confirmations"`
}
func NewTx(tx *types.Transaction) *Transaction {
hash := toHex(tx.Hash())
receiver := toHex(tx.To())
if len(receiver) == 0 {
receiver = toHex(core.AddressFromMessage(tx))
}
sender := toHex(tx.From())
createsContract := core.MessageCreatesContract(tx)
var data string
if createsContract {
data = strings.Join(core.Disassemble(tx.Data()), "\n")
} else {
data = toHex(tx.Data())
}
return &Transaction{ref: tx, Hash: hash, Value: ethutil.CurrencyToString(tx.Value()), Address: receiver, Contract: createsContract, Gas: tx.Gas().String(), GasPrice: tx.GasPrice().String(), Data: data, Sender: sender, CreatesContract: createsContract, RawData: toHex(tx.Data())}
}
func (self *Transaction) ToString() string {
return self.ref.String()
}
type Key struct {
Address string `json:"address"`
PrivateKey string `json:"privateKey"`
PublicKey string `json:"publicKey"`
}
func NewKey(key *crypto.KeyPair) *Key {
return &Key{toHex(key.Address()), toHex(key.PrivateKey), toHex(key.PublicKey)}
}
type PReceipt struct {
CreatedContract bool `json:"createdContract"`
Address string `json:"address"`
Hash string `json:"hash"`
Sender string `json:"sender"`
}
func NewPReciept(contractCreation bool, creationAddress, hash, address []byte) *PReceipt {
return &PReceipt{
contractCreation,
toHex(creationAddress),
toHex(hash),
toHex(address),
}
}
// Peer interface exposed to QML
type Peer struct {
ref *p2p.Peer
Ip string `json:"ip"`
Version string `json:"version"`
Caps string `json:"caps"`
}
func NewPeer(peer *p2p.Peer) *Peer {
var caps []string
for _, cap := range peer.Caps() {
caps = append(caps, fmt.Sprintf("%s/%d", cap.Name, cap.Version))
}
return &Peer{
ref: peer,
Ip: fmt.Sprintf("%v", peer.RemoteAddr()),
Version: fmt.Sprintf("%v", peer.ID()),
Caps: fmt.Sprintf("%v", caps),
}
}
type Receipt struct {
CreatedContract bool `json:"createdContract"`
Address string `json:"address"`
Hash string `json:"hash"`
Sender string `json:"sender"`
}
func NewReciept(contractCreation bool, creationAddress, hash, address []byte) *Receipt {
return &Receipt{
contractCreation,
toHex(creationAddress),
toHex(hash),
toHex(address),
}
}
| xeth/types.go | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.0002628874499350786,
0.00017598969861865044,
0.0001659358968026936,
0.0001706071780063212,
0.000018845606973627582
] |
{
"id": 3,
"code_window": [
"\t// frame content was written to it as well.\n",
"\tmac := updateHeaderMAC(rw.egressMAC, rw.macCipher, rw.egressMAC.Sum(nil))\n",
"\t_, err := rw.conn.Write(mac)\n",
"\treturn err\n",
"}\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfmacseed := rw.egressMAC.Sum(nil)\n",
"\tmac := updateMAC(rw.egressMAC, rw.macCipher, fmacseed)\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 85
} | package p2p
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"errors"
"hash"
"io"
"github.com/ethereum/go-ethereum/rlp"
)
var (
// this is used in place of actual frame header data.
// TODO: replace this when Msg contains the protocol type code.
zeroHeader = []byte{0xC2, 0x80, 0x80}
// sixteen zero bytes
zero16 = make([]byte, 16)
)
type rlpxFrameRW struct {
conn io.ReadWriter
enc cipher.Stream
dec cipher.Stream
macCipher cipher.Block
egressMAC hash.Hash
ingressMAC hash.Hash
}
func newRlpxFrameRW(conn io.ReadWriter, s secrets) *rlpxFrameRW {
macc, err := aes.NewCipher(s.MAC)
if err != nil {
panic("invalid MAC secret: " + err.Error())
}
encc, err := aes.NewCipher(s.AES)
if err != nil {
panic("invalid AES secret: " + err.Error())
}
// we use an all-zeroes IV for AES because the key used
// for encryption is ephemeral.
iv := make([]byte, encc.BlockSize())
return &rlpxFrameRW{
conn: conn,
enc: cipher.NewCTR(encc, iv),
dec: cipher.NewCTR(encc, iv),
macCipher: macc,
egressMAC: s.EgressMAC,
ingressMAC: s.IngressMAC,
}
}
func (rw *rlpxFrameRW) WriteMsg(msg Msg) error {
ptype, _ := rlp.EncodeToBytes(msg.Code)
// write header
headbuf := make([]byte, 32)
fsize := uint32(len(ptype)) + msg.Size
putInt24(fsize, headbuf) // TODO: check overflow
copy(headbuf[3:], zeroHeader)
rw.enc.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now encrypted
copy(headbuf[16:], updateHeaderMAC(rw.egressMAC, rw.macCipher, headbuf[:16]))
if _, err := rw.conn.Write(headbuf); err != nil {
return err
}
// write encrypted frame, updating the egress MAC while writing to conn.
tee := cipher.StreamWriter{S: rw.enc, W: io.MultiWriter(rw.conn, rw.egressMAC)}
if _, err := tee.Write(ptype); err != nil {
return err
}
if _, err := io.Copy(tee, msg.Payload); err != nil {
return err
}
if padding := fsize % 16; padding > 0 {
if _, err := tee.Write(zero16[:16-padding]); err != nil {
return err
}
}
// write packet-mac. egress MAC is up to date because
// frame content was written to it as well.
mac := updateHeaderMAC(rw.egressMAC, rw.macCipher, rw.egressMAC.Sum(nil))
_, err := rw.conn.Write(mac)
return err
}
func (rw *rlpxFrameRW) ReadMsg() (msg Msg, err error) {
// read the header
headbuf := make([]byte, 32)
if _, err := io.ReadFull(rw.conn, headbuf); err != nil {
return msg, err
}
// verify header mac
shouldMAC := updateHeaderMAC(rw.ingressMAC, rw.macCipher, headbuf[:16])
if !hmac.Equal(shouldMAC[:16], headbuf[16:]) {
return msg, errors.New("bad header MAC")
}
rw.dec.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now decrypted
fsize := readInt24(headbuf)
// ignore protocol type for now
// read the frame content
var rsize = fsize // frame size rounded up to 16 byte boundary
if padding := fsize % 16; padding > 0 {
rsize += 16 - padding
}
framebuf := make([]byte, rsize)
if _, err := io.ReadFull(rw.conn, framebuf); err != nil {
return msg, err
}
// read and validate frame MAC. we can re-use headbuf for that.
rw.ingressMAC.Write(framebuf)
if _, err := io.ReadFull(rw.conn, headbuf); err != nil {
return msg, err
}
shouldMAC = updateHeaderMAC(rw.ingressMAC, rw.macCipher, rw.ingressMAC.Sum(nil))
if !hmac.Equal(shouldMAC, headbuf) {
return msg, errors.New("bad frame MAC")
}
// decrypt frame content
rw.dec.XORKeyStream(framebuf, framebuf)
// decode message code
content := bytes.NewReader(framebuf[:fsize])
if err := rlp.Decode(content, &msg.Code); err != nil {
return msg, err
}
msg.Size = uint32(content.Len())
msg.Payload = content
return msg, nil
}
func updateHeaderMAC(mac hash.Hash, block cipher.Block, header []byte) []byte {
aesbuf := make([]byte, aes.BlockSize)
block.Encrypt(aesbuf, mac.Sum(nil))
for i := range aesbuf {
aesbuf[i] ^= header[i]
}
mac.Write(aesbuf)
return mac.Sum(nil)
}
func readInt24(b []byte) uint32 {
return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
}
func putInt24(v uint32, b []byte) {
b[0] = byte(v >> 16)
b[1] = byte(v >> 8)
b[2] = byte(v)
}
| p2p/rlpx.go | 1 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.9982789754867554,
0.5124886631965637,
0.0001660940470173955,
0.7456403374671936,
0.4567078948020935
] |
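Both rlpx.go rows hinge on the same small pieces of arithmetic: the frame size travels as a 3-byte big-endian integer in the clear part of the header, and the frame body is zero-padded up to the next 16-byte boundary before the frame MAC is written. The helpers below are copied from the file above; the `main` walkthrough is only an illustrative check.

```go
package main

import "fmt"

// putInt24 encodes a frame size as 3 big-endian bytes, as in p2p/rlpx.go.
func putInt24(v uint32, b []byte) {
	b[0] = byte(v >> 16)
	b[1] = byte(v >> 8)
	b[2] = byte(v)
}

// readInt24 is the matching decoder used by ReadMsg.
func readInt24(b []byte) uint32 {
	return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
}

func main() {
	// A 100-byte payload plus a 1-byte message code gives fsize = 101.
	fsize := uint32(101)
	head := make([]byte, 3)
	putInt24(fsize, head)
	fmt.Println(readInt24(head) == fsize) // true: the size round-trips

	// The ciphertext is padded to the next 16-byte boundary, exactly as
	// WriteMsg does with zero16 and ReadMsg does when computing rsize.
	rsize := fsize
	if padding := fsize % 16; padding > 0 {
		rsize += 16 - padding
	}
	fmt.Println(rsize) // 112
}
```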
{
"id": 3,
"code_window": [
"\t// frame content was written to it as well.\n",
"\tmac := updateHeaderMAC(rw.egressMAC, rw.macCipher, rw.egressMAC.Sum(nil))\n",
"\t_, err := rw.conn.Write(mac)\n",
"\treturn err\n",
"}\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfmacseed := rw.egressMAC.Sum(nil)\n",
"\tmac := updateMAC(rw.egressMAC, rw.macCipher, fmacseed)\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 85
} | #include <windows.h>
#define protREAD 1
#define protWRITE 2
#define protEXEC 4
extern "C" {
int mprotect(void *addr, size_t len, int prot)
{
DWORD wprot = 0;
if (prot & protWRITE) {
wprot = PAGE_READWRITE;
} else if (prot & protREAD) {
wprot = PAGE_READONLY;
}
if (prot & protEXEC) {
wprot <<= 4;
}
DWORD oldwprot;
if (!VirtualProtect(addr, len, wprot, &oldwprot)) {
return -1;
}
return 0;
}
} // extern "C"
| Godeps/_workspace/src/github.com/obscuren/qml/cpp/mmemwin.cpp | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.00017329922411590815,
0.00017164678138215095,
0.00016966275870800018,
0.00017197839042637497,
0.0000015029834230517736
] |
{
"id": 3,
"code_window": [
"\t// frame content was written to it as well.\n",
"\tmac := updateHeaderMAC(rw.egressMAC, rw.macCipher, rw.egressMAC.Sum(nil))\n",
"\t_, err := rw.conn.Write(mac)\n",
"\treturn err\n",
"}\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfmacseed := rw.egressMAC.Sum(nil)\n",
"\tmac := updateMAC(rw.egressMAC, rw.macCipher, fmacseed)\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 85
} | // Copyright (c) 2013 Pieter Wuille
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef _SECP256K1_FIELD_IMPL_H_
#define _SECP256K1_FIELD_IMPL_H_
#if defined(USE_FIELD_GMP)
#include "field_gmp.h"
#elif defined(USE_FIELD_10X26)
#include "field_10x26.h"
#elif defined(USE_FIELD_5X52)
#include "field_5x52.h"
#elif defined(USE_FIELD_5X64)
#include "field_5x64.h"
#else
#error "Please select field implementation"
#endif
void static secp256k1_fe_get_hex(char *r, int *rlen, const secp256k1_fe_t *a) {
if (*rlen < 65) {
*rlen = 65;
return;
}
*rlen = 65;
unsigned char tmp[32];
secp256k1_fe_t b = *a;
secp256k1_fe_normalize(&b);
secp256k1_fe_get_b32(tmp, &b);
for (int i=0; i<32; i++) {
static const char *c = "0123456789ABCDEF";
r[2*i] = c[(tmp[i] >> 4) & 0xF];
r[2*i+1] = c[(tmp[i]) & 0xF];
}
r[64] = 0x00;
}
void static secp256k1_fe_set_hex(secp256k1_fe_t *r, const char *a, int alen) {
unsigned char tmp[32] = {};
static const int cvt[256] = {0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0,
0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0,
0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0,
0, 1, 2, 3, 4, 5, 6,7,8,9,0,0,0,0,0,0,
0,10,11,12,13,14,15,0,0,0,0,0,0,0,0,0,
0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0,
0,10,11,12,13,14,15,0,0,0,0,0,0,0,0,0,
0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0,
0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0,
0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0,
0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0,
0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0,
0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0,
0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0,
0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0,
0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0};
for (int i=0; i<32; i++) {
if (alen > i*2)
tmp[32 - alen/2 + i] = (cvt[(unsigned char)a[2*i]] << 4) + cvt[(unsigned char)a[2*i+1]];
}
secp256k1_fe_set_b32(r, tmp);
}
void static secp256k1_fe_sqrt(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
// calculate a^p, with p={15,780,1022,1023}
secp256k1_fe_t a2; secp256k1_fe_sqr(&a2, a);
secp256k1_fe_t a3; secp256k1_fe_mul(&a3, &a2, a);
secp256k1_fe_t a6; secp256k1_fe_sqr(&a6, &a3);
secp256k1_fe_t a12; secp256k1_fe_sqr(&a12, &a6);
secp256k1_fe_t a15; secp256k1_fe_mul(&a15, &a12, &a3);
secp256k1_fe_t a30; secp256k1_fe_sqr(&a30, &a15);
secp256k1_fe_t a60; secp256k1_fe_sqr(&a60, &a30);
secp256k1_fe_t a120; secp256k1_fe_sqr(&a120, &a60);
secp256k1_fe_t a240; secp256k1_fe_sqr(&a240, &a120);
secp256k1_fe_t a255; secp256k1_fe_mul(&a255, &a240, &a15);
secp256k1_fe_t a510; secp256k1_fe_sqr(&a510, &a255);
secp256k1_fe_t a750; secp256k1_fe_mul(&a750, &a510, &a240);
secp256k1_fe_t a780; secp256k1_fe_mul(&a780, &a750, &a30);
secp256k1_fe_t a1020; secp256k1_fe_sqr(&a1020, &a510);
secp256k1_fe_t a1022; secp256k1_fe_mul(&a1022, &a1020, &a2);
secp256k1_fe_t a1023; secp256k1_fe_mul(&a1023, &a1022, a);
secp256k1_fe_t x = a15;
for (int i=0; i<21; i++) {
for (int j=0; j<10; j++) secp256k1_fe_sqr(&x, &x);
secp256k1_fe_mul(&x, &x, &a1023);
}
for (int j=0; j<10; j++) secp256k1_fe_sqr(&x, &x);
secp256k1_fe_mul(&x, &x, &a1022);
for (int i=0; i<2; i++) {
for (int j=0; j<10; j++) secp256k1_fe_sqr(&x, &x);
secp256k1_fe_mul(&x, &x, &a1023);
}
for (int j=0; j<10; j++) secp256k1_fe_sqr(&x, &x);
secp256k1_fe_mul(r, &x, &a780);
}
void static secp256k1_fe_inv(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
// calculate a^p, with p={45,63,1019,1023}
secp256k1_fe_t a2; secp256k1_fe_sqr(&a2, a);
secp256k1_fe_t a3; secp256k1_fe_mul(&a3, &a2, a);
secp256k1_fe_t a4; secp256k1_fe_sqr(&a4, &a2);
secp256k1_fe_t a5; secp256k1_fe_mul(&a5, &a4, a);
secp256k1_fe_t a10; secp256k1_fe_sqr(&a10, &a5);
secp256k1_fe_t a11; secp256k1_fe_mul(&a11, &a10, a);
secp256k1_fe_t a21; secp256k1_fe_mul(&a21, &a11, &a10);
secp256k1_fe_t a42; secp256k1_fe_sqr(&a42, &a21);
secp256k1_fe_t a45; secp256k1_fe_mul(&a45, &a42, &a3);
secp256k1_fe_t a63; secp256k1_fe_mul(&a63, &a42, &a21);
secp256k1_fe_t a126; secp256k1_fe_sqr(&a126, &a63);
secp256k1_fe_t a252; secp256k1_fe_sqr(&a252, &a126);
secp256k1_fe_t a504; secp256k1_fe_sqr(&a504, &a252);
secp256k1_fe_t a1008; secp256k1_fe_sqr(&a1008, &a504);
secp256k1_fe_t a1019; secp256k1_fe_mul(&a1019, &a1008, &a11);
secp256k1_fe_t a1023; secp256k1_fe_mul(&a1023, &a1019, &a4);
secp256k1_fe_t x = a63;
for (int i=0; i<21; i++) {
for (int j=0; j<10; j++) secp256k1_fe_sqr(&x, &x);
secp256k1_fe_mul(&x, &x, &a1023);
}
for (int j=0; j<10; j++) secp256k1_fe_sqr(&x, &x);
secp256k1_fe_mul(&x, &x, &a1019);
for (int i=0; i<2; i++) {
for (int j=0; j<10; j++) secp256k1_fe_sqr(&x, &x);
secp256k1_fe_mul(&x, &x, &a1023);
}
for (int j=0; j<10; j++) secp256k1_fe_sqr(&x, &x);
secp256k1_fe_mul(r, &x, &a45);
}
void static secp256k1_fe_inv_var(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
#if defined(USE_FIELD_INV_BUILTIN)
secp256k1_fe_inv(r, a);
#elif defined(USE_FIELD_INV_NUM)
unsigned char b[32];
secp256k1_fe_t c = *a;
secp256k1_fe_normalize(&c);
secp256k1_fe_get_b32(b, &c);
secp256k1_num_t n;
secp256k1_num_init(&n);
secp256k1_num_set_bin(&n, b, 32);
secp256k1_num_mod_inverse(&n, &n, &secp256k1_fe_consts->p);
secp256k1_num_get_bin(b, 32, &n);
secp256k1_num_free(&n);
secp256k1_fe_set_b32(r, b);
#else
#error "Please select field inverse implementation"
#endif
}
void static secp256k1_fe_start(void) {
static const unsigned char secp256k1_fe_consts_p[] = {
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
};
if (secp256k1_fe_consts == NULL) {
secp256k1_fe_inner_start();
secp256k1_fe_consts_t *ret = (secp256k1_fe_consts_t*)malloc(sizeof(secp256k1_fe_consts_t));
secp256k1_num_init(&ret->p);
secp256k1_num_set_bin(&ret->p, secp256k1_fe_consts_p, sizeof(secp256k1_fe_consts_p));
secp256k1_fe_consts = ret;
}
}
void static secp256k1_fe_stop(void) {
if (secp256k1_fe_consts != NULL) {
secp256k1_fe_consts_t *c = (secp256k1_fe_consts_t*)secp256k1_fe_consts;
secp256k1_num_free(&c->p);
free((void*)c);
secp256k1_fe_consts = NULL;
secp256k1_fe_inner_stop();
}
}
#endif
| crypto/secp256k1/secp256k1/src/impl/field.h | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.000216486252611503,
0.0001728447386994958,
0.0001656309759709984,
0.0001694404927548021,
0.000011316369636915624
] |
{
"id": 3,
"code_window": [
"\t// frame content was written to it as well.\n",
"\tmac := updateHeaderMAC(rw.egressMAC, rw.macCipher, rw.egressMAC.Sum(nil))\n",
"\t_, err := rw.conn.Write(mac)\n",
"\treturn err\n",
"}\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfmacseed := rw.egressMAC.Sum(nil)\n",
"\tmac := updateMAC(rw.egressMAC, rw.macCipher, fmacseed)\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 85
} |
// ** file automatically generated by glgen -- do not edit manually **
#ifndef __cplusplus
#include <inttypes.h>
#include <stddef.h>
typedef unsigned int GLenum;
typedef unsigned char GLboolean;
typedef unsigned int GLbitfield;
typedef void GLvoid;
typedef char GLchar;
typedef signed char GLbyte; /* 1-byte signed */
typedef short GLshort; /* 2-byte signed */
typedef int GLint; /* 4-byte signed */
typedef unsigned char GLubyte; /* 1-byte unsigned */
typedef unsigned short GLushort; /* 2-byte unsigned */
typedef unsigned int GLuint; /* 4-byte unsigned */
typedef int GLsizei; /* 4-byte signed */
typedef float GLfloat; /* single precision float */
typedef float GLclampf; /* single precision float in [0,1] */
typedef double GLdouble; /* double precision float */
typedef double GLclampd; /* double precision float in [0,1] */
typedef int64_t GLint64;
typedef uint64_t GLuint64;
typedef ptrdiff_t GLintptr;
typedef ptrdiff_t GLsizeiptr;
typedef ptrdiff_t GLintptrARB;
typedef ptrdiff_t GLsizeiptrARB;
typedef struct __GLsync *GLsync;
#endif
#ifdef __cplusplus
extern "C" {
#endif
void *gl2_1_funcs();
void gl2_1_glViewport(void *_glfuncs, GLint x, GLint y, GLsizei width, GLsizei height);
void gl2_1_glDepthRange(void *_glfuncs, GLdouble nearVal, GLdouble farVal);
GLboolean gl2_1_glIsEnabled(void *_glfuncs, GLenum cap);
void gl2_1_glGetTexLevelParameteriv(void *_glfuncs, GLenum target, GLint level, GLenum pname, GLint* params);
void gl2_1_glGetTexLevelParameterfv(void *_glfuncs, GLenum target, GLint level, GLenum pname, GLfloat* params);
void gl2_1_glGetTexParameteriv(void *_glfuncs, GLenum target, GLenum pname, GLint* params);
void gl2_1_glGetTexParameterfv(void *_glfuncs, GLenum target, GLenum pname, GLfloat* params);
void gl2_1_glGetTexImage(void *_glfuncs, GLenum target, GLint level, GLenum format, GLenum gltype, GLvoid* pixels);
void gl2_1_glGetIntegerv(void *_glfuncs, GLenum pname, GLint* params);
void gl2_1_glGetFloatv(void *_glfuncs, GLenum pname, GLfloat* params);
GLenum gl2_1_glGetError(void *_glfuncs);
void gl2_1_glGetDoublev(void *_glfuncs, GLenum pname, GLdouble* params);
void gl2_1_glGetBooleanv(void *_glfuncs, GLenum pname, GLboolean* params);
void gl2_1_glReadPixels(void *_glfuncs, GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum gltype, GLvoid* pixels);
void gl2_1_glReadBuffer(void *_glfuncs, GLenum mode);
void gl2_1_glPixelStorei(void *_glfuncs, GLenum pname, GLint param);
void gl2_1_glPixelStoref(void *_glfuncs, GLenum pname, GLfloat param);
void gl2_1_glDepthFunc(void *_glfuncs, GLenum glfunc);
void gl2_1_glStencilOp(void *_glfuncs, GLenum fail, GLenum zfail, GLenum zpass);
void gl2_1_glStencilFunc(void *_glfuncs, GLenum glfunc, GLint ref, GLuint mask);
void gl2_1_glLogicOp(void *_glfuncs, GLenum opcode);
void gl2_1_glBlendFunc(void *_glfuncs, GLenum sfactor, GLenum dfactor);
void gl2_1_glFlush(void *_glfuncs);
void gl2_1_glFinish(void *_glfuncs);
void gl2_1_glEnable(void *_glfuncs, GLenum cap);
void gl2_1_glDisable(void *_glfuncs, GLenum cap);
void gl2_1_glDepthMask(void *_glfuncs, GLboolean flag);
void gl2_1_glColorMask(void *_glfuncs, GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha);
void gl2_1_glStencilMask(void *_glfuncs, GLuint mask);
void gl2_1_glClearDepth(void *_glfuncs, GLdouble depth);
void gl2_1_glClearStencil(void *_glfuncs, GLint s);
void gl2_1_glClearColor(void *_glfuncs, GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
void gl2_1_glClear(void *_glfuncs, GLbitfield mask);
void gl2_1_glDrawBuffer(void *_glfuncs, GLenum mode);
void gl2_1_glTexImage2D(void *_glfuncs, GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum gltype, const GLvoid* pixels);
void gl2_1_glTexImage1D(void *_glfuncs, GLenum target, GLint level, GLint internalFormat, GLsizei width, GLint border, GLenum format, GLenum gltype, const GLvoid* pixels);
void gl2_1_glTexParameteriv(void *_glfuncs, GLenum target, GLenum pname, const GLint* params);
void gl2_1_glTexParameteri(void *_glfuncs, GLenum target, GLenum pname, GLint param);
void gl2_1_glTexParameterfv(void *_glfuncs, GLenum target, GLenum pname, const GLfloat* params);
void gl2_1_glTexParameterf(void *_glfuncs, GLenum target, GLenum pname, GLfloat param);
void gl2_1_glScissor(void *_glfuncs, GLint x, GLint y, GLsizei width, GLsizei height);
void gl2_1_glPolygonMode(void *_glfuncs, GLenum face, GLenum mode);
void gl2_1_glPointSize(void *_glfuncs, GLfloat size);
void gl2_1_glLineWidth(void *_glfuncs, GLfloat width);
void gl2_1_glHint(void *_glfuncs, GLenum target, GLenum mode);
void gl2_1_glFrontFace(void *_glfuncs, GLenum mode);
void gl2_1_glCullFace(void *_glfuncs, GLenum mode);
void gl2_1_glIndexubv(void *_glfuncs, const GLubyte* c);
void gl2_1_glIndexub(void *_glfuncs, GLubyte c);
GLboolean gl2_1_glIsTexture(void *_glfuncs, GLuint texture);
void gl2_1_glGenTextures(void *_glfuncs, GLsizei n, GLuint* textures);
void gl2_1_glDeleteTextures(void *_glfuncs, GLsizei n, const GLuint* textures);
void gl2_1_glBindTexture(void *_glfuncs, GLenum target, GLuint texture);
void gl2_1_glTexSubImage2D(void *_glfuncs, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum gltype, const GLvoid* pixels);
void gl2_1_glTexSubImage1D(void *_glfuncs, GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum gltype, const GLvoid* pixels);
void gl2_1_glCopyTexSubImage2D(void *_glfuncs, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
void gl2_1_glCopyTexSubImage1D(void *_glfuncs, GLenum target, GLint level, GLint xoffset, GLint x, GLint y, GLsizei width);
void gl2_1_glCopyTexImage2D(void *_glfuncs, GLenum target, GLint level, GLenum internalFormat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border);
void gl2_1_glCopyTexImage1D(void *_glfuncs, GLenum target, GLint level, GLenum internalFormat, GLint x, GLint y, GLsizei width, GLint border);
void gl2_1_glPolygonOffset(void *_glfuncs, GLfloat factor, GLfloat units);
void gl2_1_glDrawElements(void *_glfuncs, GLenum mode, GLsizei count, GLenum gltype, const GLvoid* indices);
void gl2_1_glDrawArrays(void *_glfuncs, GLenum mode, GLint first, GLsizei count);
void gl2_1_glCopyTexSubImage3D(void *_glfuncs, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
void gl2_1_glTexSubImage3D(void *_glfuncs, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum gltype, const GLvoid* pixels);
void gl2_1_glTexImage3D(void *_glfuncs, GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum gltype, const GLvoid* pixels);
void gl2_1_glDrawRangeElements(void *_glfuncs, GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum gltype, const GLvoid* indices);
void gl2_1_glBlendEquation(void *_glfuncs, GLenum mode);
void gl2_1_glBlendColor(void *_glfuncs, GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
void gl2_1_glGetCompressedTexImage(void *_glfuncs, GLenum target, GLint level, GLvoid* img);
void gl2_1_glCompressedTexSubImage1D(void *_glfuncs, GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const GLvoid* data);
void gl2_1_glCompressedTexSubImage2D(void *_glfuncs, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const GLvoid* data);
void gl2_1_glCompressedTexSubImage3D(void *_glfuncs, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const GLvoid* data);
void gl2_1_glCompressedTexImage1D(void *_glfuncs, GLenum target, GLint level, GLenum internalFormat, GLsizei width, GLint border, GLsizei imageSize, const GLvoid* data);
void gl2_1_glCompressedTexImage2D(void *_glfuncs, GLenum target, GLint level, GLenum internalFormat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const GLvoid* data);
void gl2_1_glCompressedTexImage3D(void *_glfuncs, GLenum target, GLint level, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const GLvoid* data);
void gl2_1_glSampleCoverage(void *_glfuncs, GLfloat value, GLboolean invert);
void gl2_1_glActiveTexture(void *_glfuncs, GLenum texture);
void gl2_1_glPointParameteriv(void *_glfuncs, GLenum pname, const GLint* params);
void gl2_1_glPointParameteri(void *_glfuncs, GLenum pname, GLint param);
void gl2_1_glPointParameterfv(void *_glfuncs, GLenum pname, const GLfloat* params);
void gl2_1_glPointParameterf(void *_glfuncs, GLenum pname, GLfloat param);
void gl2_1_glMultiDrawArrays(void *_glfuncs, GLenum mode, const GLint* first, const GLsizei* count, GLsizei drawcount);
void gl2_1_glBlendFuncSeparate(void *_glfuncs, GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha);
void gl2_1_glGetBufferParameteriv(void *_glfuncs, GLenum target, GLenum pname, GLint* params);
GLboolean gl2_1_glUnmapBuffer(void *_glfuncs, GLenum target);
void gl2_1_glGetBufferSubData(void *_glfuncs, GLenum target, GLintptr offset, GLsizeiptr size, GLvoid* data);
void gl2_1_glBufferSubData(void *_glfuncs, GLenum target, GLintptr offset, GLsizeiptr size, const GLvoid* data);
void gl2_1_glBufferData(void *_glfuncs, GLenum target, GLsizeiptr size, const GLvoid* data, GLenum usage);
GLboolean gl2_1_glIsBuffer(void *_glfuncs, GLuint buffer);
void gl2_1_glGenBuffers(void *_glfuncs, GLsizei n, GLuint* buffers);
void gl2_1_glDeleteBuffers(void *_glfuncs, GLsizei n, const GLuint* buffers);
void gl2_1_glBindBuffer(void *_glfuncs, GLenum target, GLuint buffer);
void gl2_1_glGetQueryObjectuiv(void *_glfuncs, GLuint id, GLenum pname, GLuint* params);
void gl2_1_glGetQueryObjectiv(void *_glfuncs, GLuint id, GLenum pname, GLint* params);
void gl2_1_glGetQueryiv(void *_glfuncs, GLenum target, GLenum pname, GLint* params);
void gl2_1_glEndQuery(void *_glfuncs, GLenum target);
void gl2_1_glBeginQuery(void *_glfuncs, GLenum target, GLuint id);
GLboolean gl2_1_glIsQuery(void *_glfuncs, GLuint id);
void gl2_1_glDeleteQueries(void *_glfuncs, GLsizei n, const GLuint* ids);
void gl2_1_glGenQueries(void *_glfuncs, GLsizei n, GLuint* ids);
void gl2_1_glVertexAttribPointer(void *_glfuncs, GLuint index, GLint size, GLenum gltype, GLboolean normalized, GLsizei stride, const GLvoid* offset);
void gl2_1_glValidateProgram(void *_glfuncs, GLuint program);
void gl2_1_glUniformMatrix4fv(void *_glfuncs, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);
void gl2_1_glUniformMatrix3fv(void *_glfuncs, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);
void gl2_1_glUniformMatrix2fv(void *_glfuncs, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);
void gl2_1_glUniform4iv(void *_glfuncs, GLint location, GLsizei count, const GLint* value);
void gl2_1_glUniform3iv(void *_glfuncs, GLint location, GLsizei count, const GLint* value);
void gl2_1_glUniform2iv(void *_glfuncs, GLint location, GLsizei count, const GLint* value);
void gl2_1_glUniform1iv(void *_glfuncs, GLint location, GLsizei count, const GLint* value);
void gl2_1_glUniform4fv(void *_glfuncs, GLint location, GLsizei count, const GLfloat* value);
void gl2_1_glUniform3fv(void *_glfuncs, GLint location, GLsizei count, const GLfloat* value);
void gl2_1_glUniform2fv(void *_glfuncs, GLint location, GLsizei count, const GLfloat* value);
void gl2_1_glUniform1fv(void *_glfuncs, GLint location, GLsizei count, const GLfloat* value);
void gl2_1_glUniform4i(void *_glfuncs, GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
void gl2_1_glUniform3i(void *_glfuncs, GLint location, GLint v0, GLint v1, GLint v2);
void gl2_1_glUniform2i(void *_glfuncs, GLint location, GLint v0, GLint v1);
void gl2_1_glUniform1i(void *_glfuncs, GLint location, GLint v0);
void gl2_1_glUniform4f(void *_glfuncs, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
void gl2_1_glUniform3f(void *_glfuncs, GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
void gl2_1_glUniform2f(void *_glfuncs, GLint location, GLfloat v0, GLfloat v1);
void gl2_1_glUniform1f(void *_glfuncs, GLint location, GLfloat v0);
void gl2_1_glUseProgram(void *_glfuncs, GLuint program);
void gl2_1_glShaderSource(void *_glfuncs, GLuint shader, GLsizei count, const GLchar** source, const GLint* length);
void gl2_1_glLinkProgram(void *_glfuncs, GLuint program);
GLboolean gl2_1_glIsShader(void *_glfuncs, GLuint shader);
GLboolean gl2_1_glIsProgram(void *_glfuncs, GLuint program);
void gl2_1_glGetVertexAttribiv(void *_glfuncs, GLuint index, GLenum pname, GLint* params);
void gl2_1_glGetVertexAttribfv(void *_glfuncs, GLuint index, GLenum pname, GLfloat* params);
void gl2_1_glGetVertexAttribdv(void *_glfuncs, GLuint index, GLenum pname, GLdouble* params);
void gl2_1_glGetUniformiv(void *_glfuncs, GLuint program, GLint location, GLint* params);
void gl2_1_glGetUniformfv(void *_glfuncs, GLuint program, GLint location, GLfloat* params);
GLint gl2_1_glGetUniformLocation(void *_glfuncs, GLuint program, const GLchar* name);
void gl2_1_glGetShaderSource(void *_glfuncs, GLuint shader, GLsizei bufSize, GLsizei* length, GLchar* source);
void gl2_1_glGetShaderInfoLog(void *_glfuncs, GLuint shader, GLsizei bufSize, GLsizei* length, GLchar* infoLog);
void gl2_1_glGetShaderiv(void *_glfuncs, GLuint shader, GLenum pname, GLint* params);
void gl2_1_glGetProgramInfoLog(void *_glfuncs, GLuint program, GLsizei bufSize, GLsizei* length, GLchar* infoLog);
void gl2_1_glGetProgramiv(void *_glfuncs, GLuint program, GLenum pname, GLint* params);
GLint gl2_1_glGetAttribLocation(void *_glfuncs, GLuint program, const GLchar* name);
void gl2_1_glGetAttachedShaders(void *_glfuncs, GLuint program, GLsizei maxCount, GLsizei* count, GLuint* obj);
void gl2_1_glGetActiveUniform(void *_glfuncs, GLuint program, GLuint index, GLsizei bufSize, GLsizei* length, GLint* size, GLenum* gltype, GLchar* name);
void gl2_1_glGetActiveAttrib(void *_glfuncs, GLuint program, GLuint index, GLsizei bufSize, GLsizei* length, GLint* size, GLenum* gltype, GLchar* name);
void gl2_1_glEnableVertexAttribArray(void *_glfuncs, GLuint index);
void gl2_1_glDisableVertexAttribArray(void *_glfuncs, GLuint index);
void gl2_1_glDetachShader(void *_glfuncs, GLuint program, GLuint shader);
void gl2_1_glDeleteShader(void *_glfuncs, GLuint shader);
void gl2_1_glDeleteProgram(void *_glfuncs, GLuint program);
GLuint gl2_1_glCreateShader(void *_glfuncs, GLenum gltype);
GLuint gl2_1_glCreateProgram(void *_glfuncs);
void gl2_1_glCompileShader(void *_glfuncs, GLuint shader);
void gl2_1_glBindAttribLocation(void *_glfuncs, GLuint program, GLuint index, const GLchar* name);
void gl2_1_glAttachShader(void *_glfuncs, GLuint program, GLuint shader);
void gl2_1_glStencilMaskSeparate(void *_glfuncs, GLenum face, GLuint mask);
void gl2_1_glStencilFuncSeparate(void *_glfuncs, GLenum face, GLenum glfunc, GLint ref, GLuint mask);
void gl2_1_glStencilOpSeparate(void *_glfuncs, GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass);
void gl2_1_glDrawBuffers(void *_glfuncs, GLsizei n, const GLenum* bufs);
void gl2_1_glBlendEquationSeparate(void *_glfuncs, GLenum modeRGB, GLenum modeAlpha);
void gl2_1_glUniformMatrix4x3fv(void *_glfuncs, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);
void gl2_1_glUniformMatrix3x4fv(void *_glfuncs, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);
void gl2_1_glUniformMatrix4x2fv(void *_glfuncs, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);
void gl2_1_glUniformMatrix2x4fv(void *_glfuncs, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);
void gl2_1_glUniformMatrix3x2fv(void *_glfuncs, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);
void gl2_1_glUniformMatrix2x3fv(void *_glfuncs, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);
void gl2_1_glTranslatef(void *_glfuncs, GLfloat x, GLfloat y, GLfloat z);
void gl2_1_glTranslated(void *_glfuncs, GLdouble x, GLdouble y, GLdouble z);
void gl2_1_glScalef(void *_glfuncs, GLfloat x, GLfloat y, GLfloat z);
void gl2_1_glScaled(void *_glfuncs, GLdouble x, GLdouble y, GLdouble z);
void gl2_1_glRotatef(void *_glfuncs, GLfloat angle, GLfloat x, GLfloat y, GLfloat z);
void gl2_1_glRotated(void *_glfuncs, GLdouble angle, GLdouble x, GLdouble y, GLdouble z);
void gl2_1_glPushMatrix(void *_glfuncs);
void gl2_1_glPopMatrix(void *_glfuncs);
void gl2_1_glOrtho(void *_glfuncs, GLdouble left, GLdouble right, GLdouble bottom, GLdouble top, GLdouble zNear, GLdouble zFar);
void gl2_1_glMultMatrixd(void *_glfuncs, const GLdouble* m);
void gl2_1_glMultMatrixf(void *_glfuncs, const GLfloat* m);
void gl2_1_glMatrixMode(void *_glfuncs, GLenum mode);
void gl2_1_glLoadMatrixd(void *_glfuncs, const GLdouble* m);
void gl2_1_glLoadMatrixf(void *_glfuncs, const GLfloat* m);
void gl2_1_glLoadIdentity(void *_glfuncs);
void gl2_1_glFrustum(void *_glfuncs, GLdouble left, GLdouble right, GLdouble bottom, GLdouble top, GLdouble zNear, GLdouble zFar);
GLboolean gl2_1_glIsList(void *_glfuncs, GLuint list);
void gl2_1_glGetTexGeniv(void *_glfuncs, GLenum coord, GLenum pname, GLint* params);
void gl2_1_glGetTexGenfv(void *_glfuncs, GLenum coord, GLenum pname, GLfloat* params);
void gl2_1_glGetTexGendv(void *_glfuncs, GLenum coord, GLenum pname, GLdouble* params);
void gl2_1_glGetTexEnviv(void *_glfuncs, GLenum target, GLenum pname, GLint* params);
void gl2_1_glGetTexEnvfv(void *_glfuncs, GLenum target, GLenum pname, GLfloat* params);
void gl2_1_glGetPolygonStipple(void *_glfuncs, GLubyte* mask);
void gl2_1_glGetPixelMapusv(void *_glfuncs, GLenum glmap, GLushort* values);
void gl2_1_glGetPixelMapuiv(void *_glfuncs, GLenum glmap, GLuint* values);
void gl2_1_glGetPixelMapfv(void *_glfuncs, GLenum glmap, GLfloat* values);
void gl2_1_glGetMaterialiv(void *_glfuncs, GLenum face, GLenum pname, GLint* params);
void gl2_1_glGetMaterialfv(void *_glfuncs, GLenum face, GLenum pname, GLfloat* params);
void gl2_1_glGetMapiv(void *_glfuncs, GLenum target, GLenum query, GLint* v);
void gl2_1_glGetMapfv(void *_glfuncs, GLenum target, GLenum query, GLfloat* v);
void gl2_1_glGetMapdv(void *_glfuncs, GLenum target, GLenum query, GLdouble* v);
void gl2_1_glGetLightiv(void *_glfuncs, GLenum light, GLenum pname, GLint* params);
void gl2_1_glGetLightfv(void *_glfuncs, GLenum light, GLenum pname, GLfloat* params);
void gl2_1_glGetClipPlane(void *_glfuncs, GLenum plane, GLdouble* equation);
void gl2_1_glDrawPixels(void *_glfuncs, GLsizei width, GLsizei height, GLenum format, GLenum gltype, const GLvoid* pixels);
void gl2_1_glCopyPixels(void *_glfuncs, GLint x, GLint y, GLsizei width, GLsizei height, GLenum gltype);
void gl2_1_glPixelMapusv(void *_glfuncs, GLenum glmap, GLint mapsize, const GLushort* values);
void gl2_1_glPixelMapuiv(void *_glfuncs, GLenum glmap, GLint mapsize, const GLuint* values);
void gl2_1_glPixelMapfv(void *_glfuncs, GLenum glmap, GLint mapsize, const GLfloat* values);
void gl2_1_glPixelTransferi(void *_glfuncs, GLenum pname, GLint param);
void gl2_1_glPixelTransferf(void *_glfuncs, GLenum pname, GLfloat param);
void gl2_1_glPixelZoom(void *_glfuncs, GLfloat xfactor, GLfloat yfactor);
void gl2_1_glAlphaFunc(void *_glfuncs, GLenum glfunc, GLfloat ref);
void gl2_1_glEvalPoint2(void *_glfuncs, GLint i, GLint j);
void gl2_1_glEvalMesh2(void *_glfuncs, GLenum mode, GLint i1, GLint i2, GLint j1, GLint j2);
void gl2_1_glEvalPoint1(void *_glfuncs, GLint i);
void gl2_1_glEvalMesh1(void *_glfuncs, GLenum mode, GLint i1, GLint i2);
void gl2_1_glEvalCoord2fv(void *_glfuncs, const GLfloat* u);
void gl2_1_glEvalCoord2f(void *_glfuncs, GLfloat u, GLfloat v);
void gl2_1_glEvalCoord2dv(void *_glfuncs, const GLdouble* u);
void gl2_1_glEvalCoord2d(void *_glfuncs, GLdouble u, GLdouble v);
void gl2_1_glEvalCoord1fv(void *_glfuncs, const GLfloat* u);
void gl2_1_glEvalCoord1f(void *_glfuncs, GLfloat u);
void gl2_1_glEvalCoord1dv(void *_glfuncs, const GLdouble* u);
void gl2_1_glEvalCoord1d(void *_glfuncs, GLdouble u);
void gl2_1_glMapGrid2f(void *_glfuncs, GLint un, GLfloat u1, GLfloat u2, GLint vn, GLfloat v1, GLfloat v2);
void gl2_1_glMapGrid2d(void *_glfuncs, GLint un, GLdouble u1, GLdouble u2, GLint vn, GLdouble v1, GLdouble v2);
void gl2_1_glMapGrid1f(void *_glfuncs, GLint un, GLfloat u1, GLfloat u2);
void gl2_1_glMapGrid1d(void *_glfuncs, GLint un, GLdouble u1, GLdouble u2);
void gl2_1_glMap2f(void *_glfuncs, GLenum target, GLfloat u1, GLfloat u2, GLint ustride, GLint uorder, GLfloat v1, GLfloat v2, GLint vstride, GLint vorder, const GLfloat* points);
void gl2_1_glMap2d(void *_glfuncs, GLenum target, GLdouble u1, GLdouble u2, GLint ustride, GLint uorder, GLdouble v1, GLdouble v2, GLint vstride, GLint vorder, const GLdouble* points);
void gl2_1_glMap1f(void *_glfuncs, GLenum target, GLfloat u1, GLfloat u2, GLint stride, GLint order, const GLfloat* points);
void gl2_1_glMap1d(void *_glfuncs, GLenum target, GLdouble u1, GLdouble u2, GLint stride, GLint order, const GLdouble* points);
void gl2_1_glPushAttrib(void *_glfuncs, GLbitfield mask);
void gl2_1_glPopAttrib(void *_glfuncs);
void gl2_1_glAccum(void *_glfuncs, GLenum op, GLfloat value);
void gl2_1_glIndexMask(void *_glfuncs, GLuint mask);
void gl2_1_glClearIndex(void *_glfuncs, GLfloat c);
void gl2_1_glClearAccum(void *_glfuncs, GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
void gl2_1_glPushName(void *_glfuncs, GLuint name);
void gl2_1_glPopName(void *_glfuncs);
void gl2_1_glPassThrough(void *_glfuncs, GLfloat token);
void gl2_1_glLoadName(void *_glfuncs, GLuint name);
void gl2_1_glInitNames(void *_glfuncs);
GLint gl2_1_glRenderMode(void *_glfuncs, GLenum mode);
void gl2_1_glSelectBuffer(void *_glfuncs, GLsizei size, GLuint* buffer);
void gl2_1_glFeedbackBuffer(void *_glfuncs, GLsizei size, GLenum gltype, GLfloat* buffer);
void gl2_1_glTexGeniv(void *_glfuncs, GLenum coord, GLenum pname, const GLint* params);
void gl2_1_glTexGeni(void *_glfuncs, GLenum coord, GLenum pname, GLint param);
void gl2_1_glTexGenfv(void *_glfuncs, GLenum coord, GLenum pname, const GLfloat* params);
void gl2_1_glTexGenf(void *_glfuncs, GLenum coord, GLenum pname, GLfloat param);
void gl2_1_glTexGendv(void *_glfuncs, GLenum coord, GLenum pname, const GLdouble* params);
void gl2_1_glTexGend(void *_glfuncs, GLenum coord, GLenum pname, GLdouble param);
void gl2_1_glTexEnviv(void *_glfuncs, GLenum target, GLenum pname, const GLint* params);
void gl2_1_glTexEnvi(void *_glfuncs, GLenum target, GLenum pname, GLint param);
void gl2_1_glTexEnvfv(void *_glfuncs, GLenum target, GLenum pname, const GLfloat* params);
void gl2_1_glTexEnvf(void *_glfuncs, GLenum target, GLenum pname, GLfloat param);
void gl2_1_glShadeModel(void *_glfuncs, GLenum mode);
void gl2_1_glPolygonStipple(void *_glfuncs, const GLubyte* mask);
void gl2_1_glMaterialiv(void *_glfuncs, GLenum face, GLenum pname, const GLint* params);
void gl2_1_glMateriali(void *_glfuncs, GLenum face, GLenum pname, GLint param);
void gl2_1_glMaterialfv(void *_glfuncs, GLenum face, GLenum pname, const GLfloat* params);
void gl2_1_glMaterialf(void *_glfuncs, GLenum face, GLenum pname, GLfloat param);
void gl2_1_glLineStipple(void *_glfuncs, GLint factor, GLushort pattern);
void gl2_1_glLightModeliv(void *_glfuncs, GLenum pname, const GLint* params);
void gl2_1_glLightModeli(void *_glfuncs, GLenum pname, GLint param);
void gl2_1_glLightModelfv(void *_glfuncs, GLenum pname, const GLfloat* params);
void gl2_1_glLightModelf(void *_glfuncs, GLenum pname, GLfloat param);
void gl2_1_glLightiv(void *_glfuncs, GLenum light, GLenum pname, const GLint* params);
void gl2_1_glLighti(void *_glfuncs, GLenum light, GLenum pname, GLint param);
void gl2_1_glLightfv(void *_glfuncs, GLenum light, GLenum pname, const GLfloat* params);
void gl2_1_glLightf(void *_glfuncs, GLenum light, GLenum pname, GLfloat param);
void gl2_1_glFogiv(void *_glfuncs, GLenum pname, const GLint* params);
void gl2_1_glFogi(void *_glfuncs, GLenum pname, GLint param);
void gl2_1_glFogfv(void *_glfuncs, GLenum pname, const GLfloat* params);
void gl2_1_glFogf(void *_glfuncs, GLenum pname, GLfloat param);
void gl2_1_glColorMaterial(void *_glfuncs, GLenum face, GLenum mode);
void gl2_1_glClipPlane(void *_glfuncs, GLenum plane, const GLdouble* equation);
void gl2_1_glVertex4sv(void *_glfuncs, const GLshort* v);
void gl2_1_glVertex4s(void *_glfuncs, GLshort x, GLshort y, GLshort z, GLshort w);
void gl2_1_glVertex4iv(void *_glfuncs, const GLint* v);
void gl2_1_glVertex4i(void *_glfuncs, GLint x, GLint y, GLint z, GLint w);
void gl2_1_glVertex4fv(void *_glfuncs, const GLfloat* v);
void gl2_1_glVertex4f(void *_glfuncs, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
void gl2_1_glVertex4dv(void *_glfuncs, const GLdouble* v);
void gl2_1_glVertex4d(void *_glfuncs, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
void gl2_1_glVertex3sv(void *_glfuncs, const GLshort* v);
void gl2_1_glVertex3s(void *_glfuncs, GLshort x, GLshort y, GLshort z);
void gl2_1_glVertex3iv(void *_glfuncs, const GLint* v);
void gl2_1_glVertex3i(void *_glfuncs, GLint x, GLint y, GLint z);
void gl2_1_glVertex3fv(void *_glfuncs, const GLfloat* v);
void gl2_1_glVertex3f(void *_glfuncs, GLfloat x, GLfloat y, GLfloat z);
void gl2_1_glVertex3dv(void *_glfuncs, const GLdouble* v);
void gl2_1_glVertex3d(void *_glfuncs, GLdouble x, GLdouble y, GLdouble z);
void gl2_1_glVertex2sv(void *_glfuncs, const GLshort* v);
void gl2_1_glVertex2s(void *_glfuncs, GLshort x, GLshort y);
void gl2_1_glVertex2iv(void *_glfuncs, const GLint* v);
void gl2_1_glVertex2i(void *_glfuncs, GLint x, GLint y);
void gl2_1_glVertex2fv(void *_glfuncs, const GLfloat* v);
void gl2_1_glVertex2f(void *_glfuncs, GLfloat x, GLfloat y);
void gl2_1_glVertex2dv(void *_glfuncs, const GLdouble* v);
void gl2_1_glVertex2d(void *_glfuncs, GLdouble x, GLdouble y);
void gl2_1_glTexCoord4sv(void *_glfuncs, const GLshort* v);
void gl2_1_glTexCoord4s(void *_glfuncs, GLshort s, GLshort t, GLshort r, GLshort q);
void gl2_1_glTexCoord4iv(void *_glfuncs, const GLint* v);
void gl2_1_glTexCoord4i(void *_glfuncs, GLint s, GLint t, GLint r, GLint q);
void gl2_1_glTexCoord4fv(void *_glfuncs, const GLfloat* v);
void gl2_1_glTexCoord4f(void *_glfuncs, GLfloat s, GLfloat t, GLfloat r, GLfloat q);
void gl2_1_glTexCoord4dv(void *_glfuncs, const GLdouble* v);
void gl2_1_glTexCoord4d(void *_glfuncs, GLdouble s, GLdouble t, GLdouble r, GLdouble q);
void gl2_1_glTexCoord3sv(void *_glfuncs, const GLshort* v);
void gl2_1_glTexCoord3s(void *_glfuncs, GLshort s, GLshort t, GLshort r);
void gl2_1_glTexCoord3iv(void *_glfuncs, const GLint* v);
void gl2_1_glTexCoord3i(void *_glfuncs, GLint s, GLint t, GLint r);
void gl2_1_glTexCoord3fv(void *_glfuncs, const GLfloat* v);
void gl2_1_glTexCoord3f(void *_glfuncs, GLfloat s, GLfloat t, GLfloat r);
void gl2_1_glTexCoord3dv(void *_glfuncs, const GLdouble* v);
void gl2_1_glTexCoord3d(void *_glfuncs, GLdouble s, GLdouble t, GLdouble r);
void gl2_1_glTexCoord2sv(void *_glfuncs, const GLshort* v);
void gl2_1_glTexCoord2s(void *_glfuncs, GLshort s, GLshort t);
void gl2_1_glTexCoord2iv(void *_glfuncs, const GLint* v);
void gl2_1_glTexCoord2i(void *_glfuncs, GLint s, GLint t);
void gl2_1_glTexCoord2fv(void *_glfuncs, const GLfloat* v);
void gl2_1_glTexCoord2f(void *_glfuncs, GLfloat s, GLfloat t);
void gl2_1_glTexCoord2dv(void *_glfuncs, const GLdouble* v);
void gl2_1_glTexCoord2d(void *_glfuncs, GLdouble s, GLdouble t);
void gl2_1_glTexCoord1sv(void *_glfuncs, const GLshort* v);
void gl2_1_glTexCoord1s(void *_glfuncs, GLshort s);
void gl2_1_glTexCoord1iv(void *_glfuncs, const GLint* v);
void gl2_1_glTexCoord1i(void *_glfuncs, GLint s);
void gl2_1_glTexCoord1fv(void *_glfuncs, const GLfloat* v);
void gl2_1_glTexCoord1f(void *_glfuncs, GLfloat s);
void gl2_1_glTexCoord1dv(void *_glfuncs, const GLdouble* v);
void gl2_1_glTexCoord1d(void *_glfuncs, GLdouble s);
void gl2_1_glRectsv(void *_glfuncs, const GLshort* v1, const GLshort* v2);
void gl2_1_glRects(void *_glfuncs, GLshort x1, GLshort y1, GLshort x2, GLshort y2);
void gl2_1_glRectiv(void *_glfuncs, const GLint* v1, const GLint* v2);
void gl2_1_glRecti(void *_glfuncs, GLint x1, GLint y1, GLint x2, GLint y2);
void gl2_1_glRectfv(void *_glfuncs, const GLfloat* v1, const GLfloat* v2);
void gl2_1_glRectf(void *_glfuncs, GLfloat x1, GLfloat y1, GLfloat x2, GLfloat y2);
void gl2_1_glRectdv(void *_glfuncs, const GLdouble* v1, const GLdouble* v2);
void gl2_1_glRectd(void *_glfuncs, GLdouble x1, GLdouble y1, GLdouble x2, GLdouble y2);
void gl2_1_glRasterPos4sv(void *_glfuncs, const GLshort* v);
void gl2_1_glRasterPos4s(void *_glfuncs, GLshort x, GLshort y, GLshort z, GLshort w);
void gl2_1_glRasterPos4iv(void *_glfuncs, const GLint* v);
void gl2_1_glRasterPos4i(void *_glfuncs, GLint x, GLint y, GLint z, GLint w);
void gl2_1_glRasterPos4fv(void *_glfuncs, const GLfloat* v);
void gl2_1_glRasterPos4f(void *_glfuncs, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
void gl2_1_glRasterPos4dv(void *_glfuncs, const GLdouble* v);
void gl2_1_glRasterPos4d(void *_glfuncs, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
void gl2_1_glRasterPos3sv(void *_glfuncs, const GLshort* v);
void gl2_1_glRasterPos3s(void *_glfuncs, GLshort x, GLshort y, GLshort z);
void gl2_1_glRasterPos3iv(void *_glfuncs, const GLint* v);
void gl2_1_glRasterPos3i(void *_glfuncs, GLint x, GLint y, GLint z);
void gl2_1_glRasterPos3fv(void *_glfuncs, const GLfloat* v);
void gl2_1_glRasterPos3f(void *_glfuncs, GLfloat x, GLfloat y, GLfloat z);
void gl2_1_glRasterPos3dv(void *_glfuncs, const GLdouble* v);
void gl2_1_glRasterPos3d(void *_glfuncs, GLdouble x, GLdouble y, GLdouble z);
void gl2_1_glRasterPos2sv(void *_glfuncs, const GLshort* v);
void gl2_1_glRasterPos2s(void *_glfuncs, GLshort x, GLshort y);
void gl2_1_glRasterPos2iv(void *_glfuncs, const GLint* v);
void gl2_1_glRasterPos2i(void *_glfuncs, GLint x, GLint y);
void gl2_1_glRasterPos2fv(void *_glfuncs, const GLfloat* v);
void gl2_1_glRasterPos2f(void *_glfuncs, GLfloat x, GLfloat y);
void gl2_1_glRasterPos2dv(void *_glfuncs, const GLdouble* v);
void gl2_1_glRasterPos2d(void *_glfuncs, GLdouble x, GLdouble y);
void gl2_1_glNormal3sv(void *_glfuncs, const GLshort* v);
void gl2_1_glNormal3s(void *_glfuncs, GLshort nx, GLshort ny, GLshort nz);
void gl2_1_glNormal3iv(void *_glfuncs, const GLint* v);
void gl2_1_glNormal3i(void *_glfuncs, GLint nx, GLint ny, GLint nz);
void gl2_1_glNormal3fv(void *_glfuncs, const GLfloat* v);
void gl2_1_glNormal3f(void *_glfuncs, GLfloat nx, GLfloat ny, GLfloat nz);
void gl2_1_glNormal3dv(void *_glfuncs, const GLdouble* v);
void gl2_1_glNormal3d(void *_glfuncs, GLdouble nx, GLdouble ny, GLdouble nz);
void gl2_1_glNormal3bv(void *_glfuncs, const GLbyte* v);
void gl2_1_glNormal3b(void *_glfuncs, GLbyte nx, GLbyte ny, GLbyte nz);
void gl2_1_glIndexsv(void *_glfuncs, const GLshort* c);
void gl2_1_glIndexs(void *_glfuncs, GLshort c);
void gl2_1_glIndexiv(void *_glfuncs, const GLint* c);
void gl2_1_glIndexi(void *_glfuncs, GLint c);
void gl2_1_glIndexfv(void *_glfuncs, const GLfloat* c);
void gl2_1_glIndexf(void *_glfuncs, GLfloat c);
void gl2_1_glIndexdv(void *_glfuncs, const GLdouble* c);
void gl2_1_glIndexd(void *_glfuncs, GLdouble c);
void gl2_1_glEnd(void *_glfuncs);
void gl2_1_glEdgeFlagv(void *_glfuncs, const GLboolean* flag);
void gl2_1_glEdgeFlag(void *_glfuncs, GLboolean flag);
void gl2_1_glColor4usv(void *_glfuncs, const GLushort* v);
void gl2_1_glColor4us(void *_glfuncs, GLushort red, GLushort green, GLushort blue, GLushort alpha);
void gl2_1_glColor4uiv(void *_glfuncs, const GLuint* v);
void gl2_1_glColor4ui(void *_glfuncs, GLuint red, GLuint green, GLuint blue, GLuint alpha);
void gl2_1_glColor4ubv(void *_glfuncs, const GLubyte* v);
void gl2_1_glColor4ub(void *_glfuncs, GLubyte red, GLubyte green, GLubyte blue, GLubyte alpha);
void gl2_1_glColor4sv(void *_glfuncs, const GLshort* v);
void gl2_1_glColor4s(void *_glfuncs, GLshort red, GLshort green, GLshort blue, GLshort alpha);
void gl2_1_glColor4iv(void *_glfuncs, const GLint* v);
void gl2_1_glColor4i(void *_glfuncs, GLint red, GLint green, GLint blue, GLint alpha);
void gl2_1_glColor4fv(void *_glfuncs, const GLfloat* v);
void gl2_1_glColor4f(void *_glfuncs, GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
void gl2_1_glColor4dv(void *_glfuncs, const GLdouble* v);
void gl2_1_glColor4d(void *_glfuncs, GLdouble red, GLdouble green, GLdouble blue, GLdouble alpha);
void gl2_1_glColor4bv(void *_glfuncs, const GLbyte* v);
void gl2_1_glColor4b(void *_glfuncs, GLbyte red, GLbyte green, GLbyte blue, GLbyte alpha);
void gl2_1_glColor3usv(void *_glfuncs, const GLushort* v);
void gl2_1_glColor3us(void *_glfuncs, GLushort red, GLushort green, GLushort blue);
void gl2_1_glColor3uiv(void *_glfuncs, const GLuint* v);
void gl2_1_glColor3ui(void *_glfuncs, GLuint red, GLuint green, GLuint blue);
void gl2_1_glColor3ubv(void *_glfuncs, const GLubyte* v);
void gl2_1_glColor3ub(void *_glfuncs, GLubyte red, GLubyte green, GLubyte blue);
void gl2_1_glColor3sv(void *_glfuncs, const GLshort* v);
void gl2_1_glColor3s(void *_glfuncs, GLshort red, GLshort green, GLshort blue);
void gl2_1_glColor3iv(void *_glfuncs, const GLint* v);
void gl2_1_glColor3i(void *_glfuncs, GLint red, GLint green, GLint blue);
void gl2_1_glColor3fv(void *_glfuncs, const GLfloat* v);
void gl2_1_glColor3f(void *_glfuncs, GLfloat red, GLfloat green, GLfloat blue);
void gl2_1_glColor3dv(void *_glfuncs, const GLdouble* v);
void gl2_1_glColor3d(void *_glfuncs, GLdouble red, GLdouble green, GLdouble blue);
void gl2_1_glColor3bv(void *_glfuncs, const GLbyte* v);
void gl2_1_glColor3b(void *_glfuncs, GLbyte red, GLbyte green, GLbyte blue);
void gl2_1_glBitmap(void *_glfuncs, GLsizei width, GLsizei height, GLfloat xorig, GLfloat yorig, GLfloat xmove, GLfloat ymove, const GLubyte* bitmap);
void gl2_1_glBegin(void *_glfuncs, GLenum mode);
void gl2_1_glListBase(void *_glfuncs, GLuint base);
GLuint gl2_1_glGenLists(void *_glfuncs, GLsizei range_);
void gl2_1_glDeleteLists(void *_glfuncs, GLuint list, GLsizei range_);
void gl2_1_glCallLists(void *_glfuncs, GLsizei n, GLenum gltype, const GLvoid* lists);
void gl2_1_glCallList(void *_glfuncs, GLuint list);
void gl2_1_glEndList(void *_glfuncs);
void gl2_1_glNewList(void *_glfuncs, GLuint list, GLenum mode);
void gl2_1_glPushClientAttrib(void *_glfuncs, GLbitfield mask);
void gl2_1_glPopClientAttrib(void *_glfuncs);
void gl2_1_glPrioritizeTextures(void *_glfuncs, GLsizei n, const GLuint* textures, const GLfloat* priorities);
GLboolean gl2_1_glAreTexturesResident(void *_glfuncs, GLsizei n, const GLuint* textures, GLboolean* residences);
void gl2_1_glVertexPointer(void *_glfuncs, GLint size, GLenum gltype, GLsizei stride, const GLvoid* pointer);
void gl2_1_glTexCoordPointer(void *_glfuncs, GLint size, GLenum gltype, GLsizei stride, const GLvoid* pointer);
void gl2_1_glNormalPointer(void *_glfuncs, GLenum gltype, GLsizei stride, const GLvoid* pointer);
void gl2_1_glInterleavedArrays(void *_glfuncs, GLenum format, GLsizei stride, const GLvoid* pointer);
void gl2_1_glIndexPointer(void *_glfuncs, GLenum gltype, GLsizei stride, const GLvoid* pointer);
void gl2_1_glEnableClientState(void *_glfuncs, GLenum array);
void gl2_1_glEdgeFlagPointer(void *_glfuncs, GLsizei stride, const GLvoid* pointer);
void gl2_1_glDisableClientState(void *_glfuncs, GLenum array);
void gl2_1_glColorPointer(void *_glfuncs, GLint size, GLenum gltype, GLsizei stride, const GLvoid* pointer);
void gl2_1_glArrayElement(void *_glfuncs, GLint i);
void gl2_1_glResetMinmax(void *_glfuncs, GLenum target);
void gl2_1_glResetHistogram(void *_glfuncs, GLenum target);
void gl2_1_glMinmax(void *_glfuncs, GLenum target, GLenum internalFormat, GLboolean sink);
void gl2_1_glHistogram(void *_glfuncs, GLenum target, GLsizei width, GLenum internalFormat, GLboolean sink);
void gl2_1_glGetMinmaxParameteriv(void *_glfuncs, GLenum target, GLenum pname, GLint* params);
void gl2_1_glGetMinmaxParameterfv(void *_glfuncs, GLenum target, GLenum pname, GLfloat* params);
void gl2_1_glGetMinmax(void *_glfuncs, GLenum target, GLboolean reset, GLenum format, GLenum gltype, GLvoid* values);
void gl2_1_glGetHistogramParameteriv(void *_glfuncs, GLenum target, GLenum pname, GLint* params);
void gl2_1_glGetHistogramParameterfv(void *_glfuncs, GLenum target, GLenum pname, GLfloat* params);
void gl2_1_glGetHistogram(void *_glfuncs, GLenum target, GLboolean reset, GLenum format, GLenum gltype, GLvoid* values);
void gl2_1_glSeparableFilter2D(void *_glfuncs, GLenum target, GLenum internalFormat, GLsizei width, GLsizei height, GLenum format, GLenum gltype, const GLvoid* row, const GLvoid* column);
void gl2_1_glGetSeparableFilter(void *_glfuncs, GLenum target, GLenum format, GLenum gltype, GLvoid* row, GLvoid* column, GLvoid* span);
void gl2_1_glGetConvolutionParameteriv(void *_glfuncs, GLenum target, GLenum pname, GLint* params);
void gl2_1_glGetConvolutionParameterfv(void *_glfuncs, GLenum target, GLenum pname, GLfloat* params);
void gl2_1_glGetConvolutionFilter(void *_glfuncs, GLenum target, GLenum format, GLenum gltype, GLvoid* image);
void gl2_1_glCopyConvolutionFilter2D(void *_glfuncs, GLenum target, GLenum internalFormat, GLint x, GLint y, GLsizei width, GLsizei height);
void gl2_1_glCopyConvolutionFilter1D(void *_glfuncs, GLenum target, GLenum internalFormat, GLint x, GLint y, GLsizei width);
void gl2_1_glConvolutionParameteriv(void *_glfuncs, GLenum target, GLenum pname, const GLint* params);
void gl2_1_glConvolutionParameteri(void *_glfuncs, GLenum target, GLenum pname, GLint params);
void gl2_1_glConvolutionParameterfv(void *_glfuncs, GLenum target, GLenum pname, const GLfloat* params);
void gl2_1_glConvolutionParameterf(void *_glfuncs, GLenum target, GLenum pname, GLfloat params);
void gl2_1_glConvolutionFilter2D(void *_glfuncs, GLenum target, GLenum internalFormat, GLsizei width, GLsizei height, GLenum format, GLenum gltype, const GLvoid* image);
void gl2_1_glConvolutionFilter1D(void *_glfuncs, GLenum target, GLenum internalFormat, GLsizei width, GLenum format, GLenum gltype, const GLvoid* image);
void gl2_1_glCopyColorSubTable(void *_glfuncs, GLenum target, GLsizei start, GLint x, GLint y, GLsizei width);
void gl2_1_glColorSubTable(void *_glfuncs, GLenum target, GLsizei start, GLsizei count, GLenum format, GLenum gltype, const GLvoid* data);
void gl2_1_glGetColorTableParameteriv(void *_glfuncs, GLenum target, GLenum pname, GLint* params);
void gl2_1_glGetColorTableParameterfv(void *_glfuncs, GLenum target, GLenum pname, GLfloat* params);
void gl2_1_glGetColorTable(void *_glfuncs, GLenum target, GLenum format, GLenum gltype, GLvoid* table);
void gl2_1_glCopyColorTable(void *_glfuncs, GLenum target, GLenum internalFormat, GLint x, GLint y, GLsizei width);
void gl2_1_glColorTableParameteriv(void *_glfuncs, GLenum target, GLenum pname, const GLint* params);
void gl2_1_glColorTableParameterfv(void *_glfuncs, GLenum target, GLenum pname, const GLfloat* params);
void gl2_1_glColorTable(void *_glfuncs, GLenum target, GLenum internalFormat, GLsizei width, GLenum format, GLenum gltype, const GLvoid* table);
void gl2_1_glMultTransposeMatrixd(void *_glfuncs, const GLdouble* m);
void gl2_1_glMultTransposeMatrixf(void *_glfuncs, const GLfloat* m);
void gl2_1_glLoadTransposeMatrixd(void *_glfuncs, const GLdouble* m);
void gl2_1_glLoadTransposeMatrixf(void *_glfuncs, const GLfloat* m);
void gl2_1_glMultiTexCoord4sv(void *_glfuncs, GLenum target, const GLshort* v);
void gl2_1_glMultiTexCoord4s(void *_glfuncs, GLenum target, GLshort s, GLshort t, GLshort r, GLshort q);
void gl2_1_glMultiTexCoord4iv(void *_glfuncs, GLenum target, const GLint* v);
void gl2_1_glMultiTexCoord4i(void *_glfuncs, GLenum target, GLint s, GLint t, GLint r, GLint q);
void gl2_1_glMultiTexCoord4fv(void *_glfuncs, GLenum target, const GLfloat* v);
void gl2_1_glMultiTexCoord4f(void *_glfuncs, GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q);
void gl2_1_glMultiTexCoord4dv(void *_glfuncs, GLenum target, const GLdouble* v);
void gl2_1_glMultiTexCoord4d(void *_glfuncs, GLenum target, GLdouble s, GLdouble t, GLdouble r, GLdouble q);
void gl2_1_glMultiTexCoord3sv(void *_glfuncs, GLenum target, const GLshort* v);
void gl2_1_glMultiTexCoord3s(void *_glfuncs, GLenum target, GLshort s, GLshort t, GLshort r);
void gl2_1_glMultiTexCoord3iv(void *_glfuncs, GLenum target, const GLint* v);
void gl2_1_glMultiTexCoord3i(void *_glfuncs, GLenum target, GLint s, GLint t, GLint r);
void gl2_1_glMultiTexCoord3fv(void *_glfuncs, GLenum target, const GLfloat* v);
void gl2_1_glMultiTexCoord3f(void *_glfuncs, GLenum target, GLfloat s, GLfloat t, GLfloat r);
void gl2_1_glMultiTexCoord3dv(void *_glfuncs, GLenum target, const GLdouble* v);
void gl2_1_glMultiTexCoord3d(void *_glfuncs, GLenum target, GLdouble s, GLdouble t, GLdouble r);
void gl2_1_glMultiTexCoord2sv(void *_glfuncs, GLenum target, const GLshort* v);
void gl2_1_glMultiTexCoord2s(void *_glfuncs, GLenum target, GLshort s, GLshort t);
void gl2_1_glMultiTexCoord2iv(void *_glfuncs, GLenum target, const GLint* v);
void gl2_1_glMultiTexCoord2i(void *_glfuncs, GLenum target, GLint s, GLint t);
void gl2_1_glMultiTexCoord2fv(void *_glfuncs, GLenum target, const GLfloat* v);
void gl2_1_glMultiTexCoord2f(void *_glfuncs, GLenum target, GLfloat s, GLfloat t);
void gl2_1_glMultiTexCoord2dv(void *_glfuncs, GLenum target, const GLdouble* v);
void gl2_1_glMultiTexCoord2d(void *_glfuncs, GLenum target, GLdouble s, GLdouble t);
void gl2_1_glMultiTexCoord1sv(void *_glfuncs, GLenum target, const GLshort* v);
void gl2_1_glMultiTexCoord1s(void *_glfuncs, GLenum target, GLshort s);
void gl2_1_glMultiTexCoord1iv(void *_glfuncs, GLenum target, const GLint* v);
void gl2_1_glMultiTexCoord1i(void *_glfuncs, GLenum target, GLint s);
void gl2_1_glMultiTexCoord1fv(void *_glfuncs, GLenum target, const GLfloat* v);
void gl2_1_glMultiTexCoord1f(void *_glfuncs, GLenum target, GLfloat s);
void gl2_1_glMultiTexCoord1dv(void *_glfuncs, GLenum target, const GLdouble* v);
void gl2_1_glMultiTexCoord1d(void *_glfuncs, GLenum target, GLdouble s);
void gl2_1_glClientActiveTexture(void *_glfuncs, GLenum texture);
void gl2_1_glWindowPos3sv(void *_glfuncs, const GLshort* v);
void gl2_1_glWindowPos3s(void *_glfuncs, GLshort x, GLshort y, GLshort z);
void gl2_1_glWindowPos3iv(void *_glfuncs, const GLint* v);
void gl2_1_glWindowPos3i(void *_glfuncs, GLint x, GLint y, GLint z);
void gl2_1_glWindowPos3fv(void *_glfuncs, const GLfloat* v);
void gl2_1_glWindowPos3f(void *_glfuncs, GLfloat x, GLfloat y, GLfloat z);
void gl2_1_glWindowPos3dv(void *_glfuncs, const GLdouble* v);
void gl2_1_glWindowPos3d(void *_glfuncs, GLdouble x, GLdouble y, GLdouble z);
void gl2_1_glWindowPos2sv(void *_glfuncs, const GLshort* v);
void gl2_1_glWindowPos2s(void *_glfuncs, GLshort x, GLshort y);
void gl2_1_glWindowPos2iv(void *_glfuncs, const GLint* v);
void gl2_1_glWindowPos2i(void *_glfuncs, GLint x, GLint y);
void gl2_1_glWindowPos2fv(void *_glfuncs, const GLfloat* v);
void gl2_1_glWindowPos2f(void *_glfuncs, GLfloat x, GLfloat y);
void gl2_1_glWindowPos2dv(void *_glfuncs, const GLdouble* v);
void gl2_1_glWindowPos2d(void *_glfuncs, GLdouble x, GLdouble y);
void gl2_1_glSecondaryColorPointer(void *_glfuncs, GLint size, GLenum gltype, GLsizei stride, const GLvoid* pointer);
void gl2_1_glSecondaryColor3usv(void *_glfuncs, const GLushort* v);
void gl2_1_glSecondaryColor3us(void *_glfuncs, GLushort red, GLushort green, GLushort blue);
void gl2_1_glSecondaryColor3uiv(void *_glfuncs, const GLuint* v);
void gl2_1_glSecondaryColor3ui(void *_glfuncs, GLuint red, GLuint green, GLuint blue);
void gl2_1_glSecondaryColor3ubv(void *_glfuncs, const GLubyte* v);
void gl2_1_glSecondaryColor3ub(void *_glfuncs, GLubyte red, GLubyte green, GLubyte blue);
void gl2_1_glSecondaryColor3sv(void *_glfuncs, const GLshort* v);
void gl2_1_glSecondaryColor3s(void *_glfuncs, GLshort red, GLshort green, GLshort blue);
void gl2_1_glSecondaryColor3iv(void *_glfuncs, const GLint* v);
void gl2_1_glSecondaryColor3i(void *_glfuncs, GLint red, GLint green, GLint blue);
void gl2_1_glSecondaryColor3fv(void *_glfuncs, const GLfloat* v);
void gl2_1_glSecondaryColor3f(void *_glfuncs, GLfloat red, GLfloat green, GLfloat blue);
void gl2_1_glSecondaryColor3dv(void *_glfuncs, const GLdouble* v);
void gl2_1_glSecondaryColor3d(void *_glfuncs, GLdouble red, GLdouble green, GLdouble blue);
void gl2_1_glSecondaryColor3bv(void *_glfuncs, const GLbyte* v);
void gl2_1_glSecondaryColor3b(void *_glfuncs, GLbyte red, GLbyte green, GLbyte blue);
void gl2_1_glFogCoordPointer(void *_glfuncs, GLenum gltype, GLsizei stride, const GLvoid* pointer);
void gl2_1_glFogCoorddv(void *_glfuncs, const GLdouble* coord);
void gl2_1_glFogCoordd(void *_glfuncs, GLdouble coord);
void gl2_1_glFogCoordfv(void *_glfuncs, const GLfloat* coord);
void gl2_1_glFogCoordf(void *_glfuncs, GLfloat coord);
void gl2_1_glVertexAttrib4usv(void *_glfuncs, GLuint index, const GLushort* v);
void gl2_1_glVertexAttrib4uiv(void *_glfuncs, GLuint index, const GLuint* v);
void gl2_1_glVertexAttrib4ubv(void *_glfuncs, GLuint index, const GLubyte* v);
void gl2_1_glVertexAttrib4sv(void *_glfuncs, GLuint index, const GLshort* v);
void gl2_1_glVertexAttrib4s(void *_glfuncs, GLuint index, GLshort x, GLshort y, GLshort z, GLshort w);
void gl2_1_glVertexAttrib4iv(void *_glfuncs, GLuint index, const GLint* v);
void gl2_1_glVertexAttrib4fv(void *_glfuncs, GLuint index, const GLfloat* v);
void gl2_1_glVertexAttrib4f(void *_glfuncs, GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
void gl2_1_glVertexAttrib4dv(void *_glfuncs, GLuint index, const GLdouble* v);
void gl2_1_glVertexAttrib4d(void *_glfuncs, GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
void gl2_1_glVertexAttrib4bv(void *_glfuncs, GLuint index, const GLbyte* v);
void gl2_1_glVertexAttrib4Nusv(void *_glfuncs, GLuint index, const GLushort* v);
void gl2_1_glVertexAttrib4Nuiv(void *_glfuncs, GLuint index, const GLuint* v);
void gl2_1_glVertexAttrib4Nubv(void *_glfuncs, GLuint index, const GLubyte* v);
void gl2_1_glVertexAttrib4Nub(void *_glfuncs, GLuint index, GLubyte x, GLubyte y, GLubyte z, GLubyte w);
void gl2_1_glVertexAttrib4Nsv(void *_glfuncs, GLuint index, const GLshort* v);
void gl2_1_glVertexAttrib4Niv(void *_glfuncs, GLuint index, const GLint* v);
void gl2_1_glVertexAttrib4Nbv(void *_glfuncs, GLuint index, const GLbyte* v);
void gl2_1_glVertexAttrib3sv(void *_glfuncs, GLuint index, const GLshort* v);
void gl2_1_glVertexAttrib3s(void *_glfuncs, GLuint index, GLshort x, GLshort y, GLshort z);
void gl2_1_glVertexAttrib3fv(void *_glfuncs, GLuint index, const GLfloat* v);
void gl2_1_glVertexAttrib3f(void *_glfuncs, GLuint index, GLfloat x, GLfloat y, GLfloat z);
void gl2_1_glVertexAttrib3dv(void *_glfuncs, GLuint index, const GLdouble* v);
void gl2_1_glVertexAttrib3d(void *_glfuncs, GLuint index, GLdouble x, GLdouble y, GLdouble z);
void gl2_1_glVertexAttrib2sv(void *_glfuncs, GLuint index, const GLshort* v);
void gl2_1_glVertexAttrib2s(void *_glfuncs, GLuint index, GLshort x, GLshort y);
void gl2_1_glVertexAttrib2fv(void *_glfuncs, GLuint index, const GLfloat* v);
void gl2_1_glVertexAttrib2f(void *_glfuncs, GLuint index, GLfloat x, GLfloat y);
void gl2_1_glVertexAttrib2dv(void *_glfuncs, GLuint index, const GLdouble* v);
void gl2_1_glVertexAttrib2d(void *_glfuncs, GLuint index, GLdouble x, GLdouble y);
void gl2_1_glVertexAttrib1sv(void *_glfuncs, GLuint index, const GLshort* v);
void gl2_1_glVertexAttrib1s(void *_glfuncs, GLuint index, GLshort x);
void gl2_1_glVertexAttrib1fv(void *_glfuncs, GLuint index, const GLfloat* v);
void gl2_1_glVertexAttrib1f(void *_glfuncs, GLuint index, GLfloat x);
void gl2_1_glVertexAttrib1dv(void *_glfuncs, GLuint index, const GLdouble* v);
void gl2_1_glVertexAttrib1d(void *_glfuncs, GLuint index, GLdouble x);
#ifdef __cplusplus
} // extern "C"
#endif
| Godeps/_workspace/src/github.com/obscuren/qml/gl/2.1/funcs.h | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.0015457055997103453,
0.0002718245377764106,
0.0001665036252234131,
0.00020458921790122986,
0.0002325834211660549
] |
{
"id": 4,
"code_window": [
"\t\treturn msg, err\n",
"\t}\n",
"\t// verify header mac\n",
"\tshouldMAC := updateHeaderMAC(rw.ingressMAC, rw.macCipher, headbuf[:16])\n",
"\tif !hmac.Equal(shouldMAC[:16], headbuf[16:]) {\n",
"\t\treturn msg, errors.New(\"bad header MAC\")\n",
"\t}\n",
"\trw.dec.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now decrypted\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tshouldMAC := updateMAC(rw.ingressMAC, rw.macCipher, headbuf[:16])\n",
"\tif !hmac.Equal(shouldMAC, headbuf[16:]) {\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 97
} | package p2p
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"errors"
"hash"
"io"
"github.com/ethereum/go-ethereum/rlp"
)
var (
// this is used in place of actual frame header data.
// TODO: replace this when Msg contains the protocol type code.
zeroHeader = []byte{0xC2, 0x80, 0x80}
// sixteen zero bytes
zero16 = make([]byte, 16)
)
type rlpxFrameRW struct {
conn io.ReadWriter
enc cipher.Stream
dec cipher.Stream
macCipher cipher.Block
egressMAC hash.Hash
ingressMAC hash.Hash
}
func newRlpxFrameRW(conn io.ReadWriter, s secrets) *rlpxFrameRW {
macc, err := aes.NewCipher(s.MAC)
if err != nil {
panic("invalid MAC secret: " + err.Error())
}
encc, err := aes.NewCipher(s.AES)
if err != nil {
panic("invalid AES secret: " + err.Error())
}
// we use an all-zeroes IV for AES because the key used
// for encryption is ephemeral.
iv := make([]byte, encc.BlockSize())
return &rlpxFrameRW{
conn: conn,
enc: cipher.NewCTR(encc, iv),
dec: cipher.NewCTR(encc, iv),
macCipher: macc,
egressMAC: s.EgressMAC,
ingressMAC: s.IngressMAC,
}
}
func (rw *rlpxFrameRW) WriteMsg(msg Msg) error {
ptype, _ := rlp.EncodeToBytes(msg.Code)
// write header
headbuf := make([]byte, 32)
fsize := uint32(len(ptype)) + msg.Size
putInt24(fsize, headbuf) // TODO: check overflow
copy(headbuf[3:], zeroHeader)
rw.enc.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now encrypted
copy(headbuf[16:], updateHeaderMAC(rw.egressMAC, rw.macCipher, headbuf[:16]))
if _, err := rw.conn.Write(headbuf); err != nil {
return err
}
// write encrypted frame, updating the egress MAC while writing to conn.
tee := cipher.StreamWriter{S: rw.enc, W: io.MultiWriter(rw.conn, rw.egressMAC)}
if _, err := tee.Write(ptype); err != nil {
return err
}
if _, err := io.Copy(tee, msg.Payload); err != nil {
return err
}
if padding := fsize % 16; padding > 0 {
if _, err := tee.Write(zero16[:16-padding]); err != nil {
return err
}
}
// write packet-mac. egress MAC is up to date because
// frame content was written to it as well.
mac := updateHeaderMAC(rw.egressMAC, rw.macCipher, rw.egressMAC.Sum(nil))
_, err := rw.conn.Write(mac)
return err
}
func (rw *rlpxFrameRW) ReadMsg() (msg Msg, err error) {
// read the header
headbuf := make([]byte, 32)
if _, err := io.ReadFull(rw.conn, headbuf); err != nil {
return msg, err
}
// verify header mac
shouldMAC := updateHeaderMAC(rw.ingressMAC, rw.macCipher, headbuf[:16])
if !hmac.Equal(shouldMAC[:16], headbuf[16:]) {
return msg, errors.New("bad header MAC")
}
rw.dec.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now decrypted
fsize := readInt24(headbuf)
// ignore protocol type for now
// read the frame content
var rsize = fsize // frame size rounded up to 16 byte boundary
if padding := fsize % 16; padding > 0 {
rsize += 16 - padding
}
framebuf := make([]byte, rsize)
if _, err := io.ReadFull(rw.conn, framebuf); err != nil {
return msg, err
}
// read and validate frame MAC. we can re-use headbuf for that.
rw.ingressMAC.Write(framebuf)
if _, err := io.ReadFull(rw.conn, headbuf); err != nil {
return msg, err
}
shouldMAC = updateHeaderMAC(rw.ingressMAC, rw.macCipher, rw.ingressMAC.Sum(nil))
if !hmac.Equal(shouldMAC, headbuf) {
return msg, errors.New("bad frame MAC")
}
// decrypt frame content
rw.dec.XORKeyStream(framebuf, framebuf)
// decode message code
content := bytes.NewReader(framebuf[:fsize])
if err := rlp.Decode(content, &msg.Code); err != nil {
return msg, err
}
msg.Size = uint32(content.Len())
msg.Payload = content
return msg, nil
}
func updateHeaderMAC(mac hash.Hash, block cipher.Block, header []byte) []byte {
aesbuf := make([]byte, aes.BlockSize)
block.Encrypt(aesbuf, mac.Sum(nil))
for i := range aesbuf {
aesbuf[i] ^= header[i]
}
mac.Write(aesbuf)
return mac.Sum(nil)
}
func readInt24(b []byte) uint32 {
return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
}
func putInt24(v uint32, b []byte) {
b[0] = byte(v >> 16)
b[1] = byte(v >> 8)
b[2] = byte(v)
}
| p2p/rlpx.go | 1 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.9979446530342102,
0.1323530375957489,
0.00016388780204579234,
0.005516336299479008,
0.32696908712387085
] |
{
"id": 4,
"code_window": [
"\t\treturn msg, err\n",
"\t}\n",
"\t// verify header mac\n",
"\tshouldMAC := updateHeaderMAC(rw.ingressMAC, rw.macCipher, headbuf[:16])\n",
"\tif !hmac.Equal(shouldMAC[:16], headbuf[16:]) {\n",
"\t\treturn msg, errors.New(\"bad header MAC\")\n",
"\t}\n",
"\trw.dec.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now decrypted\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tshouldMAC := updateMAC(rw.ingressMAC, rw.macCipher, headbuf[:16])\n",
"\tif !hmac.Equal(shouldMAC, headbuf[16:]) {\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 97
} | package xeth
import (
"errors"
"time"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/whisper"
)
var qlogger = logger.NewLogger("XSHH")
type Whisper struct {
*whisper.Whisper
}
func NewWhisper(w *whisper.Whisper) *Whisper {
return &Whisper{w}
}
func (self *Whisper) Post(payload string, to, from string, topics []string, priority, ttl uint32) error {
if priority == 0 {
priority = 1000
}
if ttl == 0 {
ttl = 100
}
pk := crypto.ToECDSAPub(fromHex(from))
if key := self.Whisper.GetIdentity(pk); key != nil || len(from) == 0 {
msg := whisper.NewMessage(fromHex(payload))
envelope, err := msg.Seal(time.Duration(priority*100000), whisper.Opts{
Ttl: time.Duration(ttl) * time.Second,
To: crypto.ToECDSAPub(fromHex(to)),
From: key,
Topics: whisper.TopicsFromString(topics...),
})
if err != nil {
return err
}
if err := self.Whisper.Send(envelope); err != nil {
return err
}
} else {
return errors.New("unmatched pub / priv for seal")
}
return nil
}
func (self *Whisper) NewIdentity() string {
key := self.Whisper.NewIdentity()
return toHex(crypto.FromECDSAPub(&key.PublicKey))
}
func (self *Whisper) HasIdentity(key string) bool {
return self.Whisper.HasIdentity(crypto.ToECDSAPub(fromHex(key)))
}
func (self *Whisper) Watch(opts *Options) int {
filter := whisper.Filter{
To: crypto.ToECDSAPub(fromHex(opts.To)),
From: crypto.ToECDSAPub(fromHex(opts.From)),
Topics: whisper.TopicsFromString(opts.Topics...),
}
var i int
filter.Fn = func(msg *whisper.Message) {
opts.Fn(NewWhisperMessage(msg))
}
i = self.Whisper.Watch(filter)
return i
}
func (self *Whisper) Messages(id int) (messages []WhisperMessage) {
msgs := self.Whisper.Messages(id)
messages = make([]WhisperMessage, len(msgs))
for i, message := range msgs {
messages[i] = NewWhisperMessage(message)
}
return
}
type Options struct {
To string
From string
Topics []string
Fn func(msg WhisperMessage)
}
type WhisperMessage struct {
ref *whisper.Message
Payload string `json:"payload"`
To string `json:"to"`
From string `json:"from"`
Sent int64 `json:"sent"`
}
func NewWhisperMessage(msg *whisper.Message) WhisperMessage {
return WhisperMessage{
ref: msg,
Payload: toHex(msg.Payload),
From: toHex(crypto.FromECDSAPub(msg.Recover())),
To: toHex(crypto.FromECDSAPub(msg.To)),
Sent: msg.Sent,
}
}
| xeth/whisper.go | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.0013760573929175735,
0.0002716670569498092,
0.00016309120110236108,
0.0001691026845946908,
0.00033310215803794563
] |
{
"id": 4,
"code_window": [
"\t\treturn msg, err\n",
"\t}\n",
"\t// verify header mac\n",
"\tshouldMAC := updateHeaderMAC(rw.ingressMAC, rw.macCipher, headbuf[:16])\n",
"\tif !hmac.Equal(shouldMAC[:16], headbuf[16:]) {\n",
"\t\treturn msg, errors.New(\"bad header MAC\")\n",
"\t}\n",
"\trw.dec.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now decrypted\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tshouldMAC := updateMAC(rw.ingressMAC, rw.macCipher, headbuf[:16])\n",
"\tif !hmac.Equal(shouldMAC, headbuf[16:]) {\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 97
} | // Copyright (c) 2012, Suryandaru Triandana <[email protected]>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package filter
import (
"github.com/syndtr/goleveldb/leveldb/util"
)
func bloomHash(key []byte) uint32 {
return util.Hash(key, 0xbc9f1d34)
}
type bloomFilter int
// The bloom filter serializes its parameters and is backward compatible
// with respect to them. Therefore, its parameters are not added to its
// name.
func (bloomFilter) Name() string {
return "leveldb.BuiltinBloomFilter"
}
func (f bloomFilter) Contains(filter, key []byte) bool {
nBytes := len(filter) - 1
if nBytes < 1 {
return false
}
nBits := uint32(nBytes * 8)
// Use the encoded k so that we can read filters generated by
// bloom filters created using different parameters.
k := filter[nBytes]
if k > 30 {
// Reserved for potentially new encodings for short bloom filters.
// Consider it a match.
return true
}
kh := bloomHash(key)
delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits
for j := uint8(0); j < k; j++ {
bitpos := kh % nBits
if (uint32(filter[bitpos/8]) & (1 << (bitpos % 8))) == 0 {
return false
}
kh += delta
}
return true
}
func (f bloomFilter) NewGenerator() FilterGenerator {
// Round down to reduce probing cost a little bit.
k := uint8(f * 69 / 100) // 0.69 =~ ln(2)
if k < 1 {
k = 1
} else if k > 30 {
k = 30
}
return &bloomFilterGenerator{
n: int(f),
k: k,
}
}
type bloomFilterGenerator struct {
n int
k uint8
keyHashes []uint32
}
func (g *bloomFilterGenerator) Add(key []byte) {
// Use double-hashing to generate a sequence of hash values.
// See analysis in [Kirsch,Mitzenmacher 2006].
g.keyHashes = append(g.keyHashes, bloomHash(key))
}
func (g *bloomFilterGenerator) Generate(b Buffer) {
// Compute bloom filter size (in both bits and bytes)
nBits := uint32(len(g.keyHashes) * g.n)
// For small n, we can see a very high false positive rate. Fix it
// by enforcing a minimum bloom filter length.
if nBits < 64 {
nBits = 64
}
nBytes := (nBits + 7) / 8
nBits = nBytes * 8
dest := b.Alloc(int(nBytes) + 1)
dest[nBytes] = g.k
for _, kh := range g.keyHashes {
delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits
for j := uint8(0); j < g.k; j++ {
bitpos := kh % nBits
dest[bitpos/8] |= (1 << (bitpos % 8))
kh += delta
}
}
g.keyHashes = g.keyHashes[:0]
}
// NewBloomFilter creates a new initialized bloom filter for given
// bitsPerKey.
//
// Since bitsPerKey is persisted individually for each bloom filter
// serialization, bloom filters are backwards compatible with respect to
// changing bitsPerKey. This means that no big performance penalty will
// be experienced when changing the parameter. See documentation for
// opt.Options.Filter for more information.
func NewBloomFilter(bitsPerKey int) Filter {
return bloomFilter(bitsPerKey)
}
| Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.00017769294208846986,
0.00017105252481997013,
0.0001656922686379403,
0.00017030307208187878,
0.000003117599590041209
] |
{
"id": 4,
"code_window": [
"\t\treturn msg, err\n",
"\t}\n",
"\t// verify header mac\n",
"\tshouldMAC := updateHeaderMAC(rw.ingressMAC, rw.macCipher, headbuf[:16])\n",
"\tif !hmac.Equal(shouldMAC[:16], headbuf[16:]) {\n",
"\t\treturn msg, errors.New(\"bad header MAC\")\n",
"\t}\n",
"\trw.dec.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now decrypted\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tshouldMAC := updateMAC(rw.ingressMAC, rw.macCipher, headbuf[:16])\n",
"\tif !hmac.Equal(shouldMAC, headbuf[16:]) {\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 97
} | /*
This file is part of go-ethereum
go-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
go-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* @authors
* Jeffrey Wilcke <[email protected]>
*/
package main
import (
"runtime"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethutil"
"github.com/ethereum/go-ethereum/xeth"
"github.com/obscuren/qml"
)
type QmlApplication struct {
win *qml.Window
engine *qml.Engine
lib *UiLib
path string
}
func NewQmlApplication(path string, lib *UiLib) *QmlApplication {
engine := qml.NewEngine()
return &QmlApplication{engine: engine, path: path, lib: lib}
}
func (app *QmlApplication) Create() error {
path := string(app.path)
// For some reason, on Windows the path arrives as /c:/path/to/something. Windows rejects the leading slash but accepts the rest, so we strip it.
if app.path[0] == '/' && runtime.GOOS == "windows" {
path = app.path[1:]
}
component, err := app.engine.LoadFile(path)
if err != nil {
guilogger.Warnln(err)
}
app.win = component.CreateWindow(nil)
return nil
}
func (app *QmlApplication) Destroy() {
app.engine.Destroy()
}
func (app *QmlApplication) NewWatcher(quitChan chan bool) {
}
// Events
func (app *QmlApplication) NewBlock(block *types.Block) {
pblock := &xeth.Block{Number: int(block.NumberU64()), Hash: ethutil.Bytes2Hex(block.Hash())}
app.win.Call("onNewBlockCb", pblock)
}
// Getters
func (app *QmlApplication) Engine() *qml.Engine {
return app.engine
}
func (app *QmlApplication) Window() *qml.Window {
return app.win
}
func (app *QmlApplication) Post(data string, s int) {}
| cmd/mist/qml_container.go | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.00017582904547452927,
0.00016923961811698973,
0.00016508072440046817,
0.00016733970551285893,
0.0000032469804409629432
] |
{
"id": 5,
"code_window": [
"\t\treturn msg, err\n",
"\t}\n",
"\n",
"\t// read and validate frame MAC. we can re-use headbuf for that.\n",
"\trw.ingressMAC.Write(framebuf)\n",
"\tif _, err := io.ReadFull(rw.conn, headbuf); err != nil {\n",
"\t\treturn msg, err\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tfmacseed := rw.ingressMAC.Sum(nil)\n",
"\tif _, err := io.ReadFull(rw.conn, headbuf[:16]); err != nil {\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 117
} | package p2p
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"errors"
"hash"
"io"
"github.com/ethereum/go-ethereum/rlp"
)
var (
// this is used in place of actual frame header data.
// TODO: replace this when Msg contains the protocol type code.
zeroHeader = []byte{0xC2, 0x80, 0x80}
// sixteen zero bytes
zero16 = make([]byte, 16)
)
type rlpxFrameRW struct {
conn io.ReadWriter
enc cipher.Stream
dec cipher.Stream
macCipher cipher.Block
egressMAC hash.Hash
ingressMAC hash.Hash
}
func newRlpxFrameRW(conn io.ReadWriter, s secrets) *rlpxFrameRW {
macc, err := aes.NewCipher(s.MAC)
if err != nil {
panic("invalid MAC secret: " + err.Error())
}
encc, err := aes.NewCipher(s.AES)
if err != nil {
panic("invalid AES secret: " + err.Error())
}
// we use an all-zeroes IV for AES because the key used
// for encryption is ephemeral.
iv := make([]byte, encc.BlockSize())
return &rlpxFrameRW{
conn: conn,
enc: cipher.NewCTR(encc, iv),
dec: cipher.NewCTR(encc, iv),
macCipher: macc,
egressMAC: s.EgressMAC,
ingressMAC: s.IngressMAC,
}
}
func (rw *rlpxFrameRW) WriteMsg(msg Msg) error {
ptype, _ := rlp.EncodeToBytes(msg.Code)
// write header
headbuf := make([]byte, 32)
fsize := uint32(len(ptype)) + msg.Size
putInt24(fsize, headbuf) // TODO: check overflow
copy(headbuf[3:], zeroHeader)
rw.enc.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now encrypted
copy(headbuf[16:], updateHeaderMAC(rw.egressMAC, rw.macCipher, headbuf[:16]))
if _, err := rw.conn.Write(headbuf); err != nil {
return err
}
// write encrypted frame, updating the egress MAC while writing to conn.
tee := cipher.StreamWriter{S: rw.enc, W: io.MultiWriter(rw.conn, rw.egressMAC)}
if _, err := tee.Write(ptype); err != nil {
return err
}
if _, err := io.Copy(tee, msg.Payload); err != nil {
return err
}
if padding := fsize % 16; padding > 0 {
if _, err := tee.Write(zero16[:16-padding]); err != nil {
return err
}
}
// write packet-mac. egress MAC is up to date because
// frame content was written to it as well.
mac := updateHeaderMAC(rw.egressMAC, rw.macCipher, rw.egressMAC.Sum(nil))
_, err := rw.conn.Write(mac)
return err
}
func (rw *rlpxFrameRW) ReadMsg() (msg Msg, err error) {
// read the header
headbuf := make([]byte, 32)
if _, err := io.ReadFull(rw.conn, headbuf); err != nil {
return msg, err
}
// verify header mac
shouldMAC := updateHeaderMAC(rw.ingressMAC, rw.macCipher, headbuf[:16])
if !hmac.Equal(shouldMAC[:16], headbuf[16:]) {
return msg, errors.New("bad header MAC")
}
rw.dec.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now decrypted
fsize := readInt24(headbuf)
// ignore protocol type for now
// read the frame content
var rsize = fsize // frame size rounded up to 16 byte boundary
if padding := fsize % 16; padding > 0 {
rsize += 16 - padding
}
framebuf := make([]byte, rsize)
if _, err := io.ReadFull(rw.conn, framebuf); err != nil {
return msg, err
}
// read and validate frame MAC. we can re-use headbuf for that.
rw.ingressMAC.Write(framebuf)
if _, err := io.ReadFull(rw.conn, headbuf); err != nil {
return msg, err
}
shouldMAC = updateHeaderMAC(rw.ingressMAC, rw.macCipher, rw.ingressMAC.Sum(nil))
if !hmac.Equal(shouldMAC, headbuf) {
return msg, errors.New("bad frame MAC")
}
// decrypt frame content
rw.dec.XORKeyStream(framebuf, framebuf)
// decode message code
content := bytes.NewReader(framebuf[:fsize])
if err := rlp.Decode(content, &msg.Code); err != nil {
return msg, err
}
msg.Size = uint32(content.Len())
msg.Payload = content
return msg, nil
}
func updateHeaderMAC(mac hash.Hash, block cipher.Block, header []byte) []byte {
aesbuf := make([]byte, aes.BlockSize)
block.Encrypt(aesbuf, mac.Sum(nil))
for i := range aesbuf {
aesbuf[i] ^= header[i]
}
mac.Write(aesbuf)
return mac.Sum(nil)
}
func readInt24(b []byte) uint32 {
return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
}
func putInt24(v uint32, b []byte) {
b[0] = byte(v >> 16)
b[1] = byte(v >> 8)
b[2] = byte(v)
}
| p2p/rlpx.go | 1 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.9982292056083679,
0.33153027296066284,
0.00016471845447085798,
0.010860958136618137,
0.4456348717212677
] |
{
"id": 5,
"code_window": [
"\t\treturn msg, err\n",
"\t}\n",
"\n",
"\t// read and validate frame MAC. we can re-use headbuf for that.\n",
"\trw.ingressMAC.Write(framebuf)\n",
"\tif _, err := io.ReadFull(rw.conn, headbuf); err != nil {\n",
"\t\treturn msg, err\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tfmacseed := rw.ingressMAC.Sum(nil)\n",
"\tif _, err := io.ReadFull(rw.conn, headbuf[:16]); err != nil {\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 117
} | package webengine
// #cgo CPPFLAGS: -I./
// #cgo CXXFLAGS: -std=c++0x -pedantic-errors -Wall -fno-strict-aliasing
// #cgo LDFLAGS: -lstdc++
// #cgo pkg-config: Qt5WebEngine
//
// #include "cpp/webengine.h"
import "C"
import "github.com/obscuren/qml"
// Initializes the WebEngine extension.
func Initialize() {
qml.RunMain(func() {
C.webengineInitialize()
})
}
| ui/qt/webengine/webengine.go | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.0001641856215428561,
0.0001640916452743113,
0.00016399768355768174,
0.0001640916452743113,
9.396899258717895e-8
] |
{
"id": 5,
"code_window": [
"\t\treturn msg, err\n",
"\t}\n",
"\n",
"\t// read and validate frame MAC. we can re-use headbuf for that.\n",
"\trw.ingressMAC.Write(framebuf)\n",
"\tif _, err := io.ReadFull(rw.conn, headbuf); err != nil {\n",
"\t\treturn msg, err\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tfmacseed := rw.ingressMAC.Sum(nil)\n",
"\tif _, err := io.ReadFull(rw.conn, headbuf[:16]); err != nil {\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 117
} | The MIT License (MIT)
Copyright (c) 2013 Fatih Arslan
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
| Godeps/_workspace/src/gopkg.in/fatih/set.v0/LICENSE.md | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.0001764533226378262,
0.00017273041885346174,
0.00017056494834832847,
0.00017117300012614578,
0.000002644165078891092
] |
{
"id": 5,
"code_window": [
"\t\treturn msg, err\n",
"\t}\n",
"\n",
"\t// read and validate frame MAC. we can re-use headbuf for that.\n",
"\trw.ingressMAC.Write(framebuf)\n",
"\tif _, err := io.ReadFull(rw.conn, headbuf); err != nil {\n",
"\t\treturn msg, err\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tfmacseed := rw.ingressMAC.Sum(nil)\n",
"\tif _, err := io.ReadFull(rw.conn, headbuf[:16]); err != nil {\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 117
} | package state
import (
"testing"
checker "gopkg.in/check.v1"
)
func Test(t *testing.T) { checker.TestingT(t) }
| state/main_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.00017374994058627635,
0.00017374994058627635,
0.00017374994058627635,
0.00017374994058627635,
0
] |
{
"id": 6,
"code_window": [
"\t\treturn msg, err\n",
"\t}\n",
"\tshouldMAC = updateHeaderMAC(rw.ingressMAC, rw.macCipher, rw.ingressMAC.Sum(nil))\n",
"\tif !hmac.Equal(shouldMAC, headbuf) {\n",
"\t\treturn msg, errors.New(\"bad frame MAC\")\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tshouldMAC = updateMAC(rw.ingressMAC, rw.macCipher, fmacseed)\n",
"\tif !hmac.Equal(shouldMAC, headbuf[:16]) {\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 120
} | package p2p
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"errors"
"hash"
"io"
"github.com/ethereum/go-ethereum/rlp"
)
var (
// this is used in place of actual frame header data.
// TODO: replace this when Msg contains the protocol type code.
zeroHeader = []byte{0xC2, 0x80, 0x80}
// sixteen zero bytes
zero16 = make([]byte, 16)
)
type rlpxFrameRW struct {
conn io.ReadWriter
enc cipher.Stream
dec cipher.Stream
macCipher cipher.Block
egressMAC hash.Hash
ingressMAC hash.Hash
}
func newRlpxFrameRW(conn io.ReadWriter, s secrets) *rlpxFrameRW {
macc, err := aes.NewCipher(s.MAC)
if err != nil {
panic("invalid MAC secret: " + err.Error())
}
encc, err := aes.NewCipher(s.AES)
if err != nil {
panic("invalid AES secret: " + err.Error())
}
// we use an all-zeroes IV for AES because the key used
// for encryption is ephemeral.
iv := make([]byte, encc.BlockSize())
return &rlpxFrameRW{
conn: conn,
enc: cipher.NewCTR(encc, iv),
dec: cipher.NewCTR(encc, iv),
macCipher: macc,
egressMAC: s.EgressMAC,
ingressMAC: s.IngressMAC,
}
}
func (rw *rlpxFrameRW) WriteMsg(msg Msg) error {
ptype, _ := rlp.EncodeToBytes(msg.Code)
// write header
headbuf := make([]byte, 32)
fsize := uint32(len(ptype)) + msg.Size
putInt24(fsize, headbuf) // TODO: check overflow
copy(headbuf[3:], zeroHeader)
rw.enc.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now encrypted
copy(headbuf[16:], updateHeaderMAC(rw.egressMAC, rw.macCipher, headbuf[:16]))
if _, err := rw.conn.Write(headbuf); err != nil {
return err
}
// write encrypted frame, updating the egress MAC while writing to conn.
tee := cipher.StreamWriter{S: rw.enc, W: io.MultiWriter(rw.conn, rw.egressMAC)}
if _, err := tee.Write(ptype); err != nil {
return err
}
if _, err := io.Copy(tee, msg.Payload); err != nil {
return err
}
if padding := fsize % 16; padding > 0 {
if _, err := tee.Write(zero16[:16-padding]); err != nil {
return err
}
}
// write packet-mac. egress MAC is up to date because
// frame content was written to it as well.
mac := updateHeaderMAC(rw.egressMAC, rw.macCipher, rw.egressMAC.Sum(nil))
_, err := rw.conn.Write(mac)
return err
}
func (rw *rlpxFrameRW) ReadMsg() (msg Msg, err error) {
// read the header
headbuf := make([]byte, 32)
if _, err := io.ReadFull(rw.conn, headbuf); err != nil {
return msg, err
}
// verify header mac
shouldMAC := updateHeaderMAC(rw.ingressMAC, rw.macCipher, headbuf[:16])
if !hmac.Equal(shouldMAC[:16], headbuf[16:]) {
return msg, errors.New("bad header MAC")
}
rw.dec.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now decrypted
fsize := readInt24(headbuf)
// ignore protocol type for now
// read the frame content
var rsize = fsize // frame size rounded up to 16 byte boundary
if padding := fsize % 16; padding > 0 {
rsize += 16 - padding
}
framebuf := make([]byte, rsize)
if _, err := io.ReadFull(rw.conn, framebuf); err != nil {
return msg, err
}
// read and validate frame MAC. we can re-use headbuf for that.
rw.ingressMAC.Write(framebuf)
if _, err := io.ReadFull(rw.conn, headbuf); err != nil {
return msg, err
}
shouldMAC = updateHeaderMAC(rw.ingressMAC, rw.macCipher, rw.ingressMAC.Sum(nil))
if !hmac.Equal(shouldMAC, headbuf) {
return msg, errors.New("bad frame MAC")
}
// decrypt frame content
rw.dec.XORKeyStream(framebuf, framebuf)
// decode message code
content := bytes.NewReader(framebuf[:fsize])
if err := rlp.Decode(content, &msg.Code); err != nil {
return msg, err
}
msg.Size = uint32(content.Len())
msg.Payload = content
return msg, nil
}
func updateHeaderMAC(mac hash.Hash, block cipher.Block, header []byte) []byte {
aesbuf := make([]byte, aes.BlockSize)
block.Encrypt(aesbuf, mac.Sum(nil))
for i := range aesbuf {
aesbuf[i] ^= header[i]
}
mac.Write(aesbuf)
return mac.Sum(nil)
}
func readInt24(b []byte) uint32 {
return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
}
func putInt24(v uint32, b []byte) {
b[0] = byte(v >> 16)
b[1] = byte(v >> 8)
b[2] = byte(v)
}
| p2p/rlpx.go | 1 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.9978235960006714,
0.09524089097976685,
0.00016393765690736473,
0.00394071638584137,
0.25882574915885925
] |
{
"id": 6,
"code_window": [
"\t\treturn msg, err\n",
"\t}\n",
"\tshouldMAC = updateHeaderMAC(rw.ingressMAC, rw.macCipher, rw.ingressMAC.Sum(nil))\n",
"\tif !hmac.Equal(shouldMAC, headbuf) {\n",
"\t\treturn msg, errors.New(\"bad frame MAC\")\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tshouldMAC = updateMAC(rw.ingressMAC, rw.macCipher, fmacseed)\n",
"\tif !hmac.Equal(shouldMAC, headbuf[:16]) {\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 120
} | token_const.go: tokenfmt
./$^ | gofmt > $@
| Godeps/_workspace/src/github.com/robertkrimen/otto/token/Makefile | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.0001692563819233328,
0.0001692563819233328,
0.0001692563819233328,
0.0001692563819233328,
0
] |
{
"id": 6,
"code_window": [
"\t\treturn msg, err\n",
"\t}\n",
"\tshouldMAC = updateHeaderMAC(rw.ingressMAC, rw.macCipher, rw.ingressMAC.Sum(nil))\n",
"\tif !hmac.Equal(shouldMAC, headbuf) {\n",
"\t\treturn msg, errors.New(\"bad frame MAC\")\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tshouldMAC = updateMAC(rw.ingressMAC, rw.macCipher, fmacseed)\n",
"\tif !hmac.Equal(shouldMAC, headbuf[:16]) {\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 120
} | package otto
import (
"testing"
)
func TestError(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
[ Error.prototype.name, Error.prototype.message, Error.prototype.hasOwnProperty("message") ];
`, "Error,,true")
})
}
func TestError_instanceof(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`(new TypeError()) instanceof Error`, true)
})
}
func TestPanicValue(t *testing.T) {
tt(t, func() {
test, vm := test()
vm.Set("abc", func(call FunctionCall) Value {
value, err := call.Otto.Run(`({ def: 3.14159 })`)
is(err, nil)
panic(value)
})
test(`
try {
abc();
}
catch (err) {
error = err;
}
[ error instanceof Error, error.message, error.def ];
`, "false,,3.14159")
})
}
func Test_catchPanic(t *testing.T) {
tt(t, func() {
vm := New()
_, err := vm.Run(`
A syntax error that
does not define
var;
abc;
`)
is(err, "!=", nil)
_, err = vm.Call(`abc.def`, nil)
is(err, "!=", nil)
})
}
| Godeps/_workspace/src/github.com/obscuren/otto/error_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.00024281209334731102,
0.00018136377912014723,
0.00016662338748574257,
0.00017138956172857434,
0.000025238419766537845
] |
{
"id": 6,
"code_window": [
"\t\treturn msg, err\n",
"\t}\n",
"\tshouldMAC = updateHeaderMAC(rw.ingressMAC, rw.macCipher, rw.ingressMAC.Sum(nil))\n",
"\tif !hmac.Equal(shouldMAC, headbuf) {\n",
"\t\treturn msg, errors.New(\"bad frame MAC\")\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tshouldMAC = updateMAC(rw.ingressMAC, rw.macCipher, fmacseed)\n",
"\tif !hmac.Equal(shouldMAC, headbuf[:16]) {\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 120
} | package otto
import (
"testing"
)
// bind
func Test_underscore_functions_0(t *testing.T) {
tt(t, func() {
test, _ := test_()
test(`
test("bind", function() {
var context = {name : 'moe'};
var func = function(arg) { return "name: " + (this.name || arg); };
var bound = _.bind(func, context);
equal(bound(), 'name: moe', 'can bind a function to a context');
bound = _(func).bind(context);
equal(bound(), 'name: moe', 'can do OO-style binding');
bound = _.bind(func, null, 'curly');
equal(bound(), 'name: curly', 'can bind without specifying a context');
func = function(salutation, name) { return salutation + ': ' + name; };
func = _.bind(func, this, 'hello');
equal(func('moe'), 'hello: moe', 'the function was partially applied in advance');
func = _.bind(func, this, 'curly');
equal(func(), 'hello: curly', 'the function was completely applied in advance');
func = function(salutation, firstname, lastname) { return salutation + ': ' + firstname + ' ' + lastname; };
func = _.bind(func, this, 'hello', 'moe', 'curly');
equal(func(), 'hello: moe curly', 'the function was partially applied in advance and can accept multiple arguments');
func = function(context, message) { equal(this, context, message); };
_.bind(func, 0, 0, 'can bind a function to <0>')();
_.bind(func, '', '', 'can bind a function to an empty string')();
_.bind(func, false, false, 'can bind a function to <false>')();
// These tests are only meaningful when using a browser without a native bind function
// To test this with a modern browser, set underscore's nativeBind to undefined
var F = function () { return this; };
var Boundf = _.bind(F, {hello: "moe curly"});
equal(Boundf().hello, "moe curly", "When called without the new operator, it's OK to be bound to the context");
});
`)
})
}
// partial
func Test_underscore_functions_1(t *testing.T) {
tt(t, func() {
test, _ := test_()
test(`
test("partial", function() {
var obj = {name: 'moe'};
var func = function() { return this.name + ' ' + _.toArray(arguments).join(' '); };
obj.func = _.partial(func, 'a', 'b');
equal(obj.func('c', 'd'), 'moe a b c d', 'can partially apply');
});
`)
})
}
// bindAll
func Test_underscore_functions_2(t *testing.T) {
tt(t, func() {
test, _ := test_()
test(`
test("bindAll", function() {
var curly = {name : 'curly'}, moe = {
name : 'moe',
getName : function() { return 'name: ' + this.name; },
sayHi : function() { return 'hi: ' + this.name; }
};
curly.getName = moe.getName;
_.bindAll(moe, 'getName', 'sayHi');
curly.sayHi = moe.sayHi;
equal(curly.getName(), 'name: curly', 'unbound function is bound to current object');
equal(curly.sayHi(), 'hi: moe', 'bound function is still bound to original object');
curly = {name : 'curly'};
moe = {
name : 'moe',
getName : function() { return 'name: ' + this.name; },
sayHi : function() { return 'hi: ' + this.name; }
};
_.bindAll(moe);
curly.sayHi = moe.sayHi;
equal(curly.sayHi(), 'hi: moe', 'calling bindAll with no arguments binds all functions to the object');
});
`)
})
}
// memoize
func Test_underscore_functions_3(t *testing.T) {
tt(t, func() {
test, _ := test_()
test(`
test("memoize", function() {
var fib = function(n) {
return n < 2 ? n : fib(n - 1) + fib(n - 2);
};
var fastFib = _.memoize(fib);
equal(fib(10), 55, 'a memoized version of fibonacci produces identical results');
equal(fastFib(10), 55, 'a memoized version of fibonacci produces identical results');
var o = function(str) {
return str;
};
var fastO = _.memoize(o);
equal(o('toString'), 'toString', 'checks hasOwnProperty');
equal(fastO('toString'), 'toString', 'checks hasOwnProperty');
});
`)
})
}
// once
func Test_underscore_functions_4(t *testing.T) {
tt(t, func() {
test, _ := test_()
test(`
test("once", function() {
var num = 0;
var increment = _.once(function(){ num++; });
increment();
increment();
equal(num, 1);
});
`)
})
}
// wrap
func Test_underscore_functions_5(t *testing.T) {
tt(t, func() {
test, _ := test_()
test(`
test("wrap", function() {
var greet = function(name){ return "hi: " + name; };
var backwards = _.wrap(greet, function(func, name){ return func(name) + ' ' + name.split('').reverse().join(''); });
equal(backwards('moe'), 'hi: moe eom', 'wrapped the saluation function');
var inner = function(){ return "Hello "; };
var obj = {name : "Moe"};
obj.hi = _.wrap(inner, function(fn){ return fn() + this.name; });
equal(obj.hi(), "Hello Moe");
var noop = function(){};
var wrapped = _.wrap(noop, function(fn){ return Array.prototype.slice.call(arguments, 0); });
var ret = wrapped(['whats', 'your'], 'vector', 'victor');
deepEqual(ret, [noop, ['whats', 'your'], 'vector', 'victor']);
});
`)
})
}
// compose
func Test_underscore_functions_6(t *testing.T) {
tt(t, func() {
test, _ := test_()
test(`
test("compose", function() {
var greet = function(name){ return "hi: " + name; };
var exclaim = function(sentence){ return sentence + '!'; };
var composed = _.compose(exclaim, greet);
equal(composed('moe'), 'hi: moe!', 'can compose a function that takes another');
composed = _.compose(greet, exclaim);
equal(composed('moe'), 'hi: moe!', 'in this case, the functions are also commutative');
});
`)
})
}
// after
func Test_underscore_functions_7(t *testing.T) {
tt(t, func() {
test, _ := test_()
test(`
test("after", function() {
var testAfter = function(afterAmount, timesCalled) {
var afterCalled = 0;
var after = _.after(afterAmount, function() {
afterCalled++;
});
while (timesCalled--) after();
return afterCalled;
};
equal(testAfter(5, 5), 1, "after(N) should fire after being called N times");
equal(testAfter(5, 4), 0, "after(N) should not fire unless called N times");
equal(testAfter(0, 0), 1, "after(0) should fire immediately");
});
`)
})
}
| Godeps/_workspace/src/github.com/obscuren/otto/underscore_functions_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.00017689804371912032,
0.00017344116349704564,
0.00016970564320217818,
0.00017325083899777383,
0.0000018674776356419898
] |
{
"id": 7,
"code_window": [
"\tmsg.Size = uint32(content.Len())\n",
"\tmsg.Payload = content\n",
"\treturn msg, nil\n",
"}\n",
"\n",
"func updateHeaderMAC(mac hash.Hash, block cipher.Block, header []byte) []byte {\n",
"\taesbuf := make([]byte, aes.BlockSize)\n",
"\tblock.Encrypt(aesbuf, mac.Sum(nil))\n",
"\tfor i := range aesbuf {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// updateMAC reseeds the given hash with encrypted seed.\n",
"// it returns the first 16 bytes of the hash sum after seeding.\n",
"func updateMAC(mac hash.Hash, block cipher.Block, seed []byte) []byte {\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 138
} | package p2p
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"errors"
"hash"
"io"
"github.com/ethereum/go-ethereum/rlp"
)
var (
// this is used in place of actual frame header data.
// TODO: replace this when Msg contains the protocol type code.
zeroHeader = []byte{0xC2, 0x80, 0x80}
// sixteen zero bytes
zero16 = make([]byte, 16)
)
type rlpxFrameRW struct {
conn io.ReadWriter
enc cipher.Stream
dec cipher.Stream
macCipher cipher.Block
egressMAC hash.Hash
ingressMAC hash.Hash
}
func newRlpxFrameRW(conn io.ReadWriter, s secrets) *rlpxFrameRW {
macc, err := aes.NewCipher(s.MAC)
if err != nil {
panic("invalid MAC secret: " + err.Error())
}
encc, err := aes.NewCipher(s.AES)
if err != nil {
panic("invalid AES secret: " + err.Error())
}
// we use an all-zeroes IV for AES because the key used
// for encryption is ephemeral.
iv := make([]byte, encc.BlockSize())
return &rlpxFrameRW{
conn: conn,
enc: cipher.NewCTR(encc, iv),
dec: cipher.NewCTR(encc, iv),
macCipher: macc,
egressMAC: s.EgressMAC,
ingressMAC: s.IngressMAC,
}
}
func (rw *rlpxFrameRW) WriteMsg(msg Msg) error {
ptype, _ := rlp.EncodeToBytes(msg.Code)
// write header
headbuf := make([]byte, 32)
fsize := uint32(len(ptype)) + msg.Size
putInt24(fsize, headbuf) // TODO: check overflow
copy(headbuf[3:], zeroHeader)
rw.enc.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now encrypted
copy(headbuf[16:], updateHeaderMAC(rw.egressMAC, rw.macCipher, headbuf[:16]))
if _, err := rw.conn.Write(headbuf); err != nil {
return err
}
// write encrypted frame, updating the egress MAC while writing to conn.
tee := cipher.StreamWriter{S: rw.enc, W: io.MultiWriter(rw.conn, rw.egressMAC)}
if _, err := tee.Write(ptype); err != nil {
return err
}
if _, err := io.Copy(tee, msg.Payload); err != nil {
return err
}
if padding := fsize % 16; padding > 0 {
if _, err := tee.Write(zero16[:16-padding]); err != nil {
return err
}
}
// write packet-mac. egress MAC is up to date because
// frame content was written to it as well.
mac := updateHeaderMAC(rw.egressMAC, rw.macCipher, rw.egressMAC.Sum(nil))
_, err := rw.conn.Write(mac)
return err
}
func (rw *rlpxFrameRW) ReadMsg() (msg Msg, err error) {
// read the header
headbuf := make([]byte, 32)
if _, err := io.ReadFull(rw.conn, headbuf); err != nil {
return msg, err
}
// verify header mac
shouldMAC := updateHeaderMAC(rw.ingressMAC, rw.macCipher, headbuf[:16])
if !hmac.Equal(shouldMAC[:16], headbuf[16:]) {
return msg, errors.New("bad header MAC")
}
rw.dec.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now decrypted
fsize := readInt24(headbuf)
// ignore protocol type for now
// read the frame content
var rsize = fsize // frame size rounded up to 16 byte boundary
if padding := fsize % 16; padding > 0 {
rsize += 16 - padding
}
framebuf := make([]byte, rsize)
if _, err := io.ReadFull(rw.conn, framebuf); err != nil {
return msg, err
}
// read and validate frame MAC. we can re-use headbuf for that.
rw.ingressMAC.Write(framebuf)
if _, err := io.ReadFull(rw.conn, headbuf); err != nil {
return msg, err
}
shouldMAC = updateHeaderMAC(rw.ingressMAC, rw.macCipher, rw.ingressMAC.Sum(nil))
if !hmac.Equal(shouldMAC, headbuf) {
return msg, errors.New("bad frame MAC")
}
// decrypt frame content
rw.dec.XORKeyStream(framebuf, framebuf)
// decode message code
content := bytes.NewReader(framebuf[:fsize])
if err := rlp.Decode(content, &msg.Code); err != nil {
return msg, err
}
msg.Size = uint32(content.Len())
msg.Payload = content
return msg, nil
}
func updateHeaderMAC(mac hash.Hash, block cipher.Block, header []byte) []byte {
aesbuf := make([]byte, aes.BlockSize)
block.Encrypt(aesbuf, mac.Sum(nil))
for i := range aesbuf {
aesbuf[i] ^= header[i]
}
mac.Write(aesbuf)
return mac.Sum(nil)
}
func readInt24(b []byte) uint32 {
return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
}
func putInt24(v uint32, b []byte) {
b[0] = byte(v >> 16)
b[1] = byte(v >> 8)
b[2] = byte(v)
}
| p2p/rlpx.go | 1 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.9992302656173706,
0.39571553468704224,
0.00021116634889040142,
0.06505370885133743,
0.46841955184936523
] |
{
"id": 7,
"code_window": [
"\tmsg.Size = uint32(content.Len())\n",
"\tmsg.Payload = content\n",
"\treturn msg, nil\n",
"}\n",
"\n",
"func updateHeaderMAC(mac hash.Hash, block cipher.Block, header []byte) []byte {\n",
"\taesbuf := make([]byte, aes.BlockSize)\n",
"\tblock.Encrypt(aesbuf, mac.Sum(nil))\n",
"\tfor i := range aesbuf {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// updateMAC reseeds the given hash with encrypted seed.\n",
"// it returns the first 16 bytes of the hash sum after seeding.\n",
"func updateMAC(mac hash.Hash, block cipher.Block, seed []byte) []byte {\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 138
} | def call(axn, axd, ayn, ayd):
if !axn and !ayn:
return([0, 1, 0, 1], 4)
with P = -4294968273:
# No need to add (A, 1) because A = 0 for bitcoin
with mn = mulmod(mulmod(mulmod(axn, axn, P), 3, P), ayd, P):
with md = mulmod(mulmod(axd, axd, P), mulmod(ayn, 2, P), P):
with msqn = mulmod(mn, mn, P):
with msqd = mulmod(md, md, P):
with xn = addmod(mulmod(msqn, axd, P), P - mulmod(msqd, mulmod(axn, 2, P), P), P):
with xd = mulmod(msqd, axd, P):
with mamxn = mulmod(addmod(mulmod(axn, xd, P), P - mulmod(axd, xn, P), P), mn, P):
with mamxd = mulmod(mulmod(axd, xd, P), md, P):
with yn = addmod(mulmod(mamxn, ayd, P), P - mulmod(mamxd, ayn, P), P):
with yd = mulmod(mamxd, ayd, P):
return([xn, xd, yn, yd], 4)
| Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_double.se | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.0007211636984720826,
0.0005519666592590511,
0.00038276964914985,
0.0005519666592590511,
0.0001691970246611163
] |
{
"id": 7,
"code_window": [
"\tmsg.Size = uint32(content.Len())\n",
"\tmsg.Payload = content\n",
"\treturn msg, nil\n",
"}\n",
"\n",
"func updateHeaderMAC(mac hash.Hash, block cipher.Block, header []byte) []byte {\n",
"\taesbuf := make([]byte, aes.BlockSize)\n",
"\tblock.Encrypt(aesbuf, mac.Sum(nil))\n",
"\tfor i := range aesbuf {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// updateMAC reseeds the given hash with encrypted seed.\n",
"// it returns the first 16 bytes of the hash sum after seeding.\n",
"func updateMAC(mac hash.Hash, block cipher.Block, seed []byte) []byte {\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 138
} | package otto
import (
"fmt"
)
func ExampleSynopsis() {
vm := New()
vm.Run(`
abc = 2 + 2;
console.log("The value of abc is " + abc); // 4
`)
value, _ := vm.Get("abc")
{
value, _ := value.ToInteger()
fmt.Println(value)
}
vm.Set("def", 11)
vm.Run(`
console.log("The value of def is " + def);
`)
vm.Set("xyzzy", "Nothing happens.")
vm.Run(`
console.log(xyzzy.length);
`)
value, _ = vm.Run("xyzzy.length")
{
value, _ := value.ToInteger()
fmt.Println(value)
}
value, err := vm.Run("abcdefghijlmnopqrstuvwxyz.length")
fmt.Println(value)
fmt.Println(err)
vm.Set("sayHello", func(call FunctionCall) Value {
fmt.Printf("Hello, %s.\n", call.Argument(0).String())
return UndefinedValue()
})
vm.Set("twoPlus", func(call FunctionCall) Value {
right, _ := call.Argument(0).ToInteger()
result, _ := vm.ToValue(2 + right)
return result
})
value, _ = vm.Run(`
sayHello("Xyzzy");
sayHello();
result = twoPlus(2.0);
`)
fmt.Println(value)
// Output:
// The value of abc is 4
// 4
// The value of def is 11
// 16
// 16
// undefined
// ReferenceError: abcdefghijlmnopqrstuvwxyz is not defined
// Hello, Xyzzy.
// Hello, undefined.
// 4
}
func ExampleConsole() {
vm := New()
console := map[string]interface{}{
"log": func(call FunctionCall) Value {
fmt.Println("console.log:", formatForConsole(call.ArgumentList))
return UndefinedValue()
},
}
err := vm.Set("console", console)
value, err := vm.Run(`
console.log("Hello, World.");
`)
fmt.Println(value)
fmt.Println(err)
// Output:
// console.log: Hello, World.
// undefined
// <nil>
}
| Godeps/_workspace/src/github.com/obscuren/otto/documentation_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.00019217911176383495,
0.00017302737978752702,
0.00016763723397161812,
0.0001708457275526598,
0.000006883086825837381
] |
{
"id": 7,
"code_window": [
"\tmsg.Size = uint32(content.Len())\n",
"\tmsg.Payload = content\n",
"\treturn msg, nil\n",
"}\n",
"\n",
"func updateHeaderMAC(mac hash.Hash, block cipher.Block, header []byte) []byte {\n",
"\taesbuf := make([]byte, aes.BlockSize)\n",
"\tblock.Encrypt(aesbuf, mac.Sum(nil))\n",
"\tfor i := range aesbuf {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// updateMAC reseeds the given hash with encrypted seed.\n",
"// it returns the first 16 bytes of the hash sum after seeding.\n",
"func updateMAC(mac hash.Hash, block cipher.Block, seed []byte) []byte {\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 138
} | package dagger
import (
"math/big"
"testing"
"github.com/ethereum/go-ethereum/ethutil"
)
func BenchmarkDaggerSearch(b *testing.B) {
hash := big.NewInt(0)
diff := ethutil.BigPow(2, 36)
o := big.NewInt(0) // nonce doesn't matter. We're only testing against speed, not validity
// Reset timer so the big generation isn't included in the benchmark
b.ResetTimer()
// Validate
DaggerVerify(hash, diff, o)
}
| pow/dagger/dagger_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.00020761271298397332,
0.0001882862125057727,
0.0001689597120275721,
0.0001882862125057727,
0.000019326500478200614
] |
{
"id": 8,
"code_window": [
"\taesbuf := make([]byte, aes.BlockSize)\n",
"\tblock.Encrypt(aesbuf, mac.Sum(nil))\n",
"\tfor i := range aesbuf {\n",
"\t\taesbuf[i] ^= header[i]\n",
"\t}\n",
"\tmac.Write(aesbuf)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\taesbuf[i] ^= seed[i]\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 142
} | package p2p
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"errors"
"hash"
"io"
"github.com/ethereum/go-ethereum/rlp"
)
var (
// this is used in place of actual frame header data.
// TODO: replace this when Msg contains the protocol type code.
zeroHeader = []byte{0xC2, 0x80, 0x80}
// sixteen zero bytes
zero16 = make([]byte, 16)
)
type rlpxFrameRW struct {
conn io.ReadWriter
enc cipher.Stream
dec cipher.Stream
macCipher cipher.Block
egressMAC hash.Hash
ingressMAC hash.Hash
}
func newRlpxFrameRW(conn io.ReadWriter, s secrets) *rlpxFrameRW {
macc, err := aes.NewCipher(s.MAC)
if err != nil {
panic("invalid MAC secret: " + err.Error())
}
encc, err := aes.NewCipher(s.AES)
if err != nil {
panic("invalid AES secret: " + err.Error())
}
// we use an all-zeroes IV for AES because the key used
// for encryption is ephemeral.
iv := make([]byte, encc.BlockSize())
return &rlpxFrameRW{
conn: conn,
enc: cipher.NewCTR(encc, iv),
dec: cipher.NewCTR(encc, iv),
macCipher: macc,
egressMAC: s.EgressMAC,
ingressMAC: s.IngressMAC,
}
}
func (rw *rlpxFrameRW) WriteMsg(msg Msg) error {
ptype, _ := rlp.EncodeToBytes(msg.Code)
// write header
headbuf := make([]byte, 32)
fsize := uint32(len(ptype)) + msg.Size
putInt24(fsize, headbuf) // TODO: check overflow
copy(headbuf[3:], zeroHeader)
rw.enc.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now encrypted
copy(headbuf[16:], updateHeaderMAC(rw.egressMAC, rw.macCipher, headbuf[:16]))
if _, err := rw.conn.Write(headbuf); err != nil {
return err
}
// write encrypted frame, updating the egress MAC while writing to conn.
tee := cipher.StreamWriter{S: rw.enc, W: io.MultiWriter(rw.conn, rw.egressMAC)}
if _, err := tee.Write(ptype); err != nil {
return err
}
if _, err := io.Copy(tee, msg.Payload); err != nil {
return err
}
if padding := fsize % 16; padding > 0 {
if _, err := tee.Write(zero16[:16-padding]); err != nil {
return err
}
}
// write packet-mac. egress MAC is up to date because
// frame content was written to it as well.
mac := updateHeaderMAC(rw.egressMAC, rw.macCipher, rw.egressMAC.Sum(nil))
_, err := rw.conn.Write(mac)
return err
}
func (rw *rlpxFrameRW) ReadMsg() (msg Msg, err error) {
// read the header
headbuf := make([]byte, 32)
if _, err := io.ReadFull(rw.conn, headbuf); err != nil {
return msg, err
}
// verify header mac
shouldMAC := updateHeaderMAC(rw.ingressMAC, rw.macCipher, headbuf[:16])
if !hmac.Equal(shouldMAC[:16], headbuf[16:]) {
return msg, errors.New("bad header MAC")
}
rw.dec.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now decrypted
fsize := readInt24(headbuf)
// ignore protocol type for now
// read the frame content
var rsize = fsize // frame size rounded up to 16 byte boundary
if padding := fsize % 16; padding > 0 {
rsize += 16 - padding
}
framebuf := make([]byte, rsize)
if _, err := io.ReadFull(rw.conn, framebuf); err != nil {
return msg, err
}
// read and validate frame MAC. we can re-use headbuf for that.
rw.ingressMAC.Write(framebuf)
if _, err := io.ReadFull(rw.conn, headbuf); err != nil {
return msg, err
}
shouldMAC = updateHeaderMAC(rw.ingressMAC, rw.macCipher, rw.ingressMAC.Sum(nil))
if !hmac.Equal(shouldMAC, headbuf) {
return msg, errors.New("bad frame MAC")
}
// decrypt frame content
rw.dec.XORKeyStream(framebuf, framebuf)
// decode message code
content := bytes.NewReader(framebuf[:fsize])
if err := rlp.Decode(content, &msg.Code); err != nil {
return msg, err
}
msg.Size = uint32(content.Len())
msg.Payload = content
return msg, nil
}
func updateHeaderMAC(mac hash.Hash, block cipher.Block, header []byte) []byte {
aesbuf := make([]byte, aes.BlockSize)
block.Encrypt(aesbuf, mac.Sum(nil))
for i := range aesbuf {
aesbuf[i] ^= header[i]
}
mac.Write(aesbuf)
return mac.Sum(nil)
}
func readInt24(b []byte) uint32 {
return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
}
func putInt24(v uint32, b []byte) {
b[0] = byte(v >> 16)
b[1] = byte(v >> 8)
b[2] = byte(v)
}
| p2p/rlpx.go | 1 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.9979782700538635,
0.12557758390903473,
0.00016300182323902845,
0.00040780362905934453,
0.32923218607902527
] |
{
"id": 8,
"code_window": [
"\taesbuf := make([]byte, aes.BlockSize)\n",
"\tblock.Encrypt(aesbuf, mac.Sum(nil))\n",
"\tfor i := range aesbuf {\n",
"\t\taesbuf[i] ^= header[i]\n",
"\t}\n",
"\tmac.Write(aesbuf)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\taesbuf[i] ^= seed[i]\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 142
} | ---
- name: Terminate ec2 instances
local_action: ec2
state=absent
instance_ids={{ ec2_id }}
region={{ region }}
wait=true
| tests/files/ansible/roles/ec2/tasks/terminate.yml | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.00017689332889858633,
0.00017689332889858633,
0.00017689332889858633,
0.00017689332889858633,
0
] |
{
"id": 8,
"code_window": [
"\taesbuf := make([]byte, aes.BlockSize)\n",
"\tblock.Encrypt(aesbuf, mac.Sum(nil))\n",
"\tfor i := range aesbuf {\n",
"\t\taesbuf[i] ^= header[i]\n",
"\t}\n",
"\tmac.Write(aesbuf)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\taesbuf[i] ^= seed[i]\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 142
} | #!/usr/bin/env perl
use strict;
use warnings;
my (%token, @order, @keywords);
{
my $keywords;
my @const;
push @const, <<_END_;
package token
const(
_ Token = iota
_END_
for (split m/\n/, <<_END_) {
ILLEGAL
EOF
COMMENT
KEYWORD
STRING
BOOLEAN
NULL
NUMBER
IDENTIFIER
PLUS +
MINUS -
MULTIPLY *
SLASH /
REMAINDER %
AND &
OR |
EXCLUSIVE_OR ^
SHIFT_LEFT <<
SHIFT_RIGHT >>
UNSIGNED_SHIFT_RIGHT >>>
AND_NOT &^
ADD_ASSIGN +=
SUBTRACT_ASSIGN -=
MULTIPLY_ASSIGN *=
QUOTIENT_ASSIGN /=
REMAINDER_ASSIGN %=
AND_ASSIGN &=
OR_ASSIGN |=
EXCLUSIVE_OR_ASSIGN ^=
SHIFT_LEFT_ASSIGN <<=
SHIFT_RIGHT_ASSIGN >>=
UNSIGNED_SHIFT_RIGHT_ASSIGN >>>=
AND_NOT_ASSIGN &^=
LOGICAL_AND &&
LOGICAL_OR ||
INCREMENT ++
DECREMENT --
EQUAL ==
STRICT_EQUAL ===
LESS <
GREATER >
ASSIGN =
NOT !
BITWISE_NOT ~
NOT_EQUAL !=
STRICT_NOT_EQUAL !==
LESS_OR_EQUAL <=
GREATER_OR_EQUAL >=
LEFT_PARENTHESIS (
LEFT_BRACKET [
LEFT_BRACE {
COMMA ,
PERIOD .
RIGHT_PARENTHESIS )
RIGHT_BRACKET ]
RIGHT_BRACE }
SEMICOLON ;
COLON :
QUESTION_MARK ?
firstKeyword
IF
IN
DO
VAR
FOR
NEW
TRY
THIS
ELSE
CASE
VOID
WITH
WHILE
BREAK
CATCH
THROW
RETURN
TYPEOF
DELETE
SWITCH
DEFAULT
FINALLY
FUNCTION
CONTINUE
DEBUGGER
INSTANCEOF
lastKeyword
_END_
chomp;
next if m/^\s*#/;
my ($name, $symbol) = m/(\w+)\s*(\S+)?/;
if (defined $symbol) {
push @order, $name;
push @const, "$name // $symbol";
$token{$name} = $symbol;
} elsif (defined $name) {
$keywords ||= $name eq 'firstKeyword';
push @const, $name;
#$const[-1] .= " Token = iota" if 2 == @const;
if ($name =~ m/^([A-Z]+)/) {
push @keywords, $name if $keywords;
push @order, $name;
if ($token{SEMICOLON}) {
$token{$name} = lc $1;
} else {
$token{$name} = $name;
}
}
} else {
push @const, "";
}
}
push @const, ")";
print join "\n", @const, "";
}
{
print <<_END_;
var token2string = [...]string{
_END_
for my $name (@order) {
print "$name: \"$token{$name}\",\n";
}
print <<_END_;
}
_END_
print <<_END_;
var keywordTable = map[string]_keyword{
_END_
for my $name (@keywords) {
print <<_END_
"@{[ lc $name ]}": _keyword{
token: $name,
},
_END_
}
for my $name (qw/
const
class
enum
export
extends
import
super
/) {
print <<_END_
"$name": _keyword{
token: KEYWORD,
futureKeyword: true,
},
_END_
}
for my $name (qw/
implements
interface
let
package
private
protected
public
static
/) {
print <<_END_
"$name": _keyword{
token: KEYWORD,
futureKeyword: true,
strict: true,
},
_END_
}
print <<_END_;
}
_END_
}
| Godeps/_workspace/src/github.com/robertkrimen/otto/token/tokenfmt | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.0012586963130161166,
0.0002478509850334376,
0.00016582771786488593,
0.00017350772395730019,
0.0002398684446234256
] |
{
"id": 8,
"code_window": [
"\taesbuf := make([]byte, aes.BlockSize)\n",
"\tblock.Encrypt(aesbuf, mac.Sum(nil))\n",
"\tfor i := range aesbuf {\n",
"\t\taesbuf[i] ^= header[i]\n",
"\t}\n",
"\tmac.Write(aesbuf)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\taesbuf[i] ^= seed[i]\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 142
} | // +build evmjit
package vm
/*
void* evmjit_create();
int evmjit_run(void* _jit, void* _data, void* _env);
void evmjit_destroy(void* _jit);
// Shared library evmjit (e.g. libevmjit.so) is expected to be installed in /usr/local/lib
// More: https://github.com/ethereum/evmjit
#cgo LDFLAGS: -levmjit
*/
import "C"
import (
"bytes"
"errors"
"fmt"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/state"
"math/big"
"unsafe"
)
type JitVm struct {
env Environment
me ContextRef
callerAddr []byte
price *big.Int
data RuntimeData
}
type i256 [32]byte
type RuntimeData struct {
gas int64
gasPrice int64
callData *byte
callDataSize uint64
address i256
caller i256
origin i256
callValue i256
coinBase i256
difficulty i256
gasLimit i256
number uint64
timestamp int64
code *byte
codeSize uint64
codeHash i256
}
func hash2llvm(h []byte) i256 {
var m i256
copy(m[len(m)-len(h):], h) // right aligned copy
return m
}
func llvm2hash(m *i256) []byte {
return C.GoBytes(unsafe.Pointer(m), C.int(len(m)))
}
func llvm2hashRef(m *i256) []byte {
return (*[1 << 30]byte)(unsafe.Pointer(m))[:len(m):len(m)]
}
func address2llvm(addr []byte) i256 {
n := hash2llvm(addr)
bswap(&n)
return n
}
// bswap swaps the bytes of the 256-bit integer on the LLVM side
// TODO: Do not change memory on LLVM side, that can conflict with memory access optimizations
func bswap(m *i256) *i256 {
for i, l := 0, len(m); i < l/2; i++ {
m[i], m[l-i-1] = m[l-i-1], m[i]
}
return m
}
func trim(m []byte) []byte {
skip := 0
for i := 0; i < len(m); i++ {
if m[i] == 0 {
skip++
} else {
break
}
}
return m[skip:]
}
func getDataPtr(m []byte) *byte {
var p *byte
if len(m) > 0 {
p = &m[0]
}
return p
}
func big2llvm(n *big.Int) i256 {
m := hash2llvm(n.Bytes())
bswap(&m)
return m
}
func llvm2big(m *i256) *big.Int {
n := big.NewInt(0)
for i := 0; i < len(m); i++ {
b := big.NewInt(int64(m[i]))
b.Lsh(b, uint(i)*8)
n.Add(n, b)
}
return n
}
// llvm2bytesRef creates a []byte slice that references a byte buffer on the LLVM side (as such it is not controlled by the GC)
// User must ensure that the referenced memory is available to Go until the data is copied or no longer needed
func llvm2bytesRef(data *byte, length uint64) []byte {
if length == 0 {
return nil
}
if data == nil {
panic("Unexpected nil data pointer")
}
return (*[1 << 30]byte)(unsafe.Pointer(data))[:length:length]
}
func untested(condition bool, message string) {
if condition {
panic("Condition `" + message + "` tested. Remove assert.")
}
}
func assert(condition bool, message string) {
if !condition {
panic("Assert `" + message + "` failed!")
}
}
func NewJitVm(env Environment) *JitVm {
return &JitVm{env: env}
}
func (self *JitVm) Run(me, caller ContextRef, code []byte, value, gas, price *big.Int, callData []byte) (ret []byte, err error) {
// TODO: depth is increased but never checked by VM. VM should not know about it at all.
self.env.SetDepth(self.env.Depth() + 1)
// TODO: Move it to Env.Call() or sth
if Precompiled[string(me.Address())] != nil {
// if it's address of precopiled contract
// fallback to standard VM
stdVm := New(self.env)
return stdVm.Run(me, caller, code, value, gas, price, callData)
}
if self.me != nil {
panic("JitVm.Run() can be called only once per JitVm instance")
}
self.me = me
self.callerAddr = caller.Address()
self.price = price
self.data.gas = gas.Int64()
self.data.gasPrice = price.Int64()
self.data.callData = getDataPtr(callData)
self.data.callDataSize = uint64(len(callData))
self.data.address = address2llvm(self.me.Address())
self.data.caller = address2llvm(caller.Address())
self.data.origin = address2llvm(self.env.Origin())
self.data.callValue = big2llvm(value)
self.data.coinBase = address2llvm(self.env.Coinbase())
self.data.difficulty = big2llvm(self.env.Difficulty())
self.data.gasLimit = big2llvm(self.env.GasLimit())
self.data.number = self.env.BlockNumber().Uint64()
self.data.timestamp = self.env.Time()
self.data.code = getDataPtr(code)
self.data.codeSize = uint64(len(code))
self.data.codeHash = hash2llvm(crypto.Sha3(code)) // TODO: Get already computed hash?
jit := C.evmjit_create()
retCode := C.evmjit_run(jit, unsafe.Pointer(&self.data), unsafe.Pointer(self))
if retCode < 0 {
err = errors.New("OOG from JIT")
gas.SetInt64(0) // Set gas to 0, JIT does not bother
} else {
gas.SetInt64(self.data.gas)
if retCode == 1 { // RETURN
ret = C.GoBytes(unsafe.Pointer(self.data.callData), C.int(self.data.callDataSize))
} else if retCode == 2 { // SUICIDE
// TODO: Suicide support logic should be moved to Env to be shared by VM implementations
state := self.Env().State()
receiverAddr := llvm2hashRef(bswap(&self.data.address))
receiver := state.GetOrNewStateObject(receiverAddr)
balance := state.GetBalance(me.Address())
receiver.AddBalance(balance)
state.Delete(me.Address())
}
}
C.evmjit_destroy(jit)
return
}
func (self *JitVm) Printf(format string, v ...interface{}) VirtualMachine {
return self
}
func (self *JitVm) Endl() VirtualMachine {
return self
}
func (self *JitVm) Env() Environment {
return self.env
}
//export env_sha3
func env_sha3(dataPtr *byte, length uint64, resultPtr unsafe.Pointer) {
data := llvm2bytesRef(dataPtr, length)
hash := crypto.Sha3(data)
result := (*i256)(resultPtr)
*result = hash2llvm(hash)
}
//export env_sstore
func env_sstore(vmPtr unsafe.Pointer, indexPtr unsafe.Pointer, valuePtr unsafe.Pointer) {
vm := (*JitVm)(vmPtr)
index := llvm2hash(bswap((*i256)(indexPtr)))
value := llvm2hash(bswap((*i256)(valuePtr)))
value = trim(value)
if len(value) == 0 {
prevValue := vm.env.State().GetState(vm.me.Address(), index)
if len(prevValue) != 0 {
vm.Env().State().Refund(vm.callerAddr, GasSStoreRefund)
}
}
vm.env.State().SetState(vm.me.Address(), index, value)
}
//export env_sload
func env_sload(vmPtr unsafe.Pointer, indexPtr unsafe.Pointer, resultPtr unsafe.Pointer) {
vm := (*JitVm)(vmPtr)
index := llvm2hash(bswap((*i256)(indexPtr)))
value := vm.env.State().GetState(vm.me.Address(), index)
result := (*i256)(resultPtr)
*result = hash2llvm(value)
bswap(result)
}
//export env_balance
func env_balance(_vm unsafe.Pointer, _addr unsafe.Pointer, _result unsafe.Pointer) {
vm := (*JitVm)(_vm)
addr := llvm2hash((*i256)(_addr))
balance := vm.Env().State().GetBalance(addr)
result := (*i256)(_result)
*result = big2llvm(balance)
}
//export env_blockhash
func env_blockhash(_vm unsafe.Pointer, _number unsafe.Pointer, _result unsafe.Pointer) {
vm := (*JitVm)(_vm)
number := llvm2big((*i256)(_number))
result := (*i256)(_result)
currNumber := vm.Env().BlockNumber()
limit := big.NewInt(0).Sub(currNumber, big.NewInt(256))
if number.Cmp(limit) >= 0 && number.Cmp(currNumber) < 0 {
hash := vm.Env().GetHash(uint64(number.Int64()))
*result = hash2llvm(hash)
} else {
*result = i256{}
}
}
//export env_call
func env_call(_vm unsafe.Pointer, _gas *int64, _receiveAddr unsafe.Pointer, _value unsafe.Pointer, inDataPtr unsafe.Pointer, inDataLen uint64, outDataPtr *byte, outDataLen uint64, _codeAddr unsafe.Pointer) bool {
vm := (*JitVm)(_vm)
//fmt.Printf("env_call (depth %d)\n", vm.Env().Depth())
defer func() {
if r := recover(); r != nil {
fmt.Printf("Recovered in env_call (depth %d, out %p %d): %s\n", vm.Env().Depth(), outDataPtr, outDataLen, r)
}
}()
balance := vm.Env().State().GetBalance(vm.me.Address())
value := llvm2big((*i256)(_value))
if balance.Cmp(value) >= 0 {
receiveAddr := llvm2hash((*i256)(_receiveAddr))
inData := C.GoBytes(inDataPtr, C.int(inDataLen))
outData := llvm2bytesRef(outDataPtr, outDataLen)
codeAddr := llvm2hash((*i256)(_codeAddr))
gas := big.NewInt(*_gas)
var out []byte
var err error
if bytes.Equal(codeAddr, receiveAddr) {
out, err = vm.env.Call(vm.me, codeAddr, inData, gas, vm.price, value)
} else {
out, err = vm.env.CallCode(vm.me, codeAddr, inData, gas, vm.price, value)
}
*_gas = gas.Int64()
if err == nil {
copy(outData, out)
return true
}
}
return false
}
//export env_create
func env_create(_vm unsafe.Pointer, _gas *int64, _value unsafe.Pointer, initDataPtr unsafe.Pointer, initDataLen uint64, _result unsafe.Pointer) {
vm := (*JitVm)(_vm)
value := llvm2big((*i256)(_value))
initData := C.GoBytes(initDataPtr, C.int(initDataLen)) // TODO: Unnecessary if low balance
result := (*i256)(_result)
*result = i256{}
gas := big.NewInt(*_gas)
ret, suberr, ref := vm.env.Create(vm.me, nil, initData, gas, vm.price, value)
if suberr == nil {
		dataGas := big.NewInt(int64(len(ret))) // TODO: Not the best design. env.Create can do it, it has the reference to the gas counter
dataGas.Mul(dataGas, GasCreateByte)
gas.Sub(gas, dataGas)
*result = hash2llvm(ref.Address())
}
*_gas = gas.Int64()
}
//export env_log
func env_log(_vm unsafe.Pointer, dataPtr unsafe.Pointer, dataLen uint64, _topic1 unsafe.Pointer, _topic2 unsafe.Pointer, _topic3 unsafe.Pointer, _topic4 unsafe.Pointer) {
vm := (*JitVm)(_vm)
data := C.GoBytes(dataPtr, C.int(dataLen))
topics := make([][]byte, 0, 4)
if _topic1 != nil {
topics = append(topics, llvm2hash((*i256)(_topic1)))
}
if _topic2 != nil {
topics = append(topics, llvm2hash((*i256)(_topic2)))
}
if _topic3 != nil {
topics = append(topics, llvm2hash((*i256)(_topic3)))
}
if _topic4 != nil {
topics = append(topics, llvm2hash((*i256)(_topic4)))
}
vm.Env().AddLog(state.NewLog(vm.me.Address(), topics, data, vm.env.BlockNumber().Uint64()))
}
//export env_extcode
func env_extcode(_vm unsafe.Pointer, _addr unsafe.Pointer, o_size *uint64) *byte {
vm := (*JitVm)(_vm)
addr := llvm2hash((*i256)(_addr))
code := vm.Env().State().GetCode(addr)
*o_size = uint64(len(code))
return getDataPtr(code)
}
| vm/vm_jit.go | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.011969661340117455,
0.0005019020172767341,
0.00016496190801262856,
0.0001709516509436071,
0.0018889948260039091
] |
{
"id": 9,
"code_window": [
"\t}\n",
"\tmac.Write(aesbuf)\n",
"\treturn mac.Sum(nil)\n",
"}\n",
"\n",
"func readInt24(b []byte) uint32 {\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn mac.Sum(nil)[:16]\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 145
} | package p2p
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"errors"
"hash"
"io"
"github.com/ethereum/go-ethereum/rlp"
)
var (
// this is used in place of actual frame header data.
// TODO: replace this when Msg contains the protocol type code.
zeroHeader = []byte{0xC2, 0x80, 0x80}
// sixteen zero bytes
zero16 = make([]byte, 16)
)
type rlpxFrameRW struct {
conn io.ReadWriter
enc cipher.Stream
dec cipher.Stream
macCipher cipher.Block
egressMAC hash.Hash
ingressMAC hash.Hash
}
func newRlpxFrameRW(conn io.ReadWriter, s secrets) *rlpxFrameRW {
macc, err := aes.NewCipher(s.MAC)
if err != nil {
panic("invalid MAC secret: " + err.Error())
}
encc, err := aes.NewCipher(s.AES)
if err != nil {
panic("invalid AES secret: " + err.Error())
}
// we use an all-zeroes IV for AES because the key used
// for encryption is ephemeral.
iv := make([]byte, encc.BlockSize())
return &rlpxFrameRW{
conn: conn,
enc: cipher.NewCTR(encc, iv),
dec: cipher.NewCTR(encc, iv),
macCipher: macc,
egressMAC: s.EgressMAC,
ingressMAC: s.IngressMAC,
}
}
func (rw *rlpxFrameRW) WriteMsg(msg Msg) error {
ptype, _ := rlp.EncodeToBytes(msg.Code)
// write header
headbuf := make([]byte, 32)
fsize := uint32(len(ptype)) + msg.Size
putInt24(fsize, headbuf) // TODO: check overflow
copy(headbuf[3:], zeroHeader)
rw.enc.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now encrypted
copy(headbuf[16:], updateHeaderMAC(rw.egressMAC, rw.macCipher, headbuf[:16]))
if _, err := rw.conn.Write(headbuf); err != nil {
return err
}
// write encrypted frame, updating the egress MAC while writing to conn.
tee := cipher.StreamWriter{S: rw.enc, W: io.MultiWriter(rw.conn, rw.egressMAC)}
if _, err := tee.Write(ptype); err != nil {
return err
}
if _, err := io.Copy(tee, msg.Payload); err != nil {
return err
}
if padding := fsize % 16; padding > 0 {
if _, err := tee.Write(zero16[:16-padding]); err != nil {
return err
}
}
// write packet-mac. egress MAC is up to date because
// frame content was written to it as well.
mac := updateHeaderMAC(rw.egressMAC, rw.macCipher, rw.egressMAC.Sum(nil))
_, err := rw.conn.Write(mac)
return err
}
func (rw *rlpxFrameRW) ReadMsg() (msg Msg, err error) {
// read the header
headbuf := make([]byte, 32)
if _, err := io.ReadFull(rw.conn, headbuf); err != nil {
return msg, err
}
// verify header mac
shouldMAC := updateHeaderMAC(rw.ingressMAC, rw.macCipher, headbuf[:16])
if !hmac.Equal(shouldMAC[:16], headbuf[16:]) {
return msg, errors.New("bad header MAC")
}
rw.dec.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now decrypted
fsize := readInt24(headbuf)
// ignore protocol type for now
// read the frame content
var rsize = fsize // frame size rounded up to 16 byte boundary
if padding := fsize % 16; padding > 0 {
rsize += 16 - padding
}
framebuf := make([]byte, rsize)
if _, err := io.ReadFull(rw.conn, framebuf); err != nil {
return msg, err
}
// read and validate frame MAC. we can re-use headbuf for that.
rw.ingressMAC.Write(framebuf)
if _, err := io.ReadFull(rw.conn, headbuf); err != nil {
return msg, err
}
shouldMAC = updateHeaderMAC(rw.ingressMAC, rw.macCipher, rw.ingressMAC.Sum(nil))
if !hmac.Equal(shouldMAC, headbuf) {
return msg, errors.New("bad frame MAC")
}
// decrypt frame content
rw.dec.XORKeyStream(framebuf, framebuf)
// decode message code
content := bytes.NewReader(framebuf[:fsize])
if err := rlp.Decode(content, &msg.Code); err != nil {
return msg, err
}
msg.Size = uint32(content.Len())
msg.Payload = content
return msg, nil
}
func updateHeaderMAC(mac hash.Hash, block cipher.Block, header []byte) []byte {
aesbuf := make([]byte, aes.BlockSize)
block.Encrypt(aesbuf, mac.Sum(nil))
for i := range aesbuf {
aesbuf[i] ^= header[i]
}
mac.Write(aesbuf)
return mac.Sum(nil)
}
func readInt24(b []byte) uint32 {
return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
}
func putInt24(v uint32, b []byte) {
b[0] = byte(v >> 16)
b[1] = byte(v >> 8)
b[2] = byte(v)
}
| p2p/rlpx.go | 1 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.9980165958404541,
0.11148595064878464,
0.00016707154281903058,
0.003631735220551491,
0.29066362977027893
] |
{
"id": 9,
"code_window": [
"\t}\n",
"\tmac.Write(aesbuf)\n",
"\treturn mac.Sum(nil)\n",
"}\n",
"\n",
"func readInt24(b []byte) uint32 {\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn mac.Sum(nil)[:16]\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 145
} | package qml
import (
"sync"
)
var stats *Statistics
var statsMutex sync.Mutex
func Stats() (snapshot Statistics) {
statsMutex.Lock()
snapshot = *stats
statsMutex.Unlock()
return
}
func CollectStats(enabled bool) {
statsMutex.Lock()
if enabled {
if stats == nil {
stats = &Statistics{}
}
} else {
stats = nil
}
statsMutex.Unlock()
}
func ResetStats() {
statsMutex.Lock()
old := stats
stats = &Statistics{}
// These are absolute values:
stats.EnginesAlive = old.EnginesAlive
stats.ValuesAlive = old.ValuesAlive
statsMutex.Unlock()
return
}
type Statistics struct {
EnginesAlive int
ValuesAlive int
ConnectionsAlive int
}
func (stats *Statistics) enginesAlive(delta int) {
if stats != nil {
statsMutex.Lock()
stats.EnginesAlive += delta
statsMutex.Unlock()
}
}
func (stats *Statistics) valuesAlive(delta int) {
if stats != nil {
statsMutex.Lock()
stats.ValuesAlive += delta
statsMutex.Unlock()
}
}
func (stats *Statistics) connectionsAlive(delta int) {
if stats != nil {
statsMutex.Lock()
stats.ConnectionsAlive += delta
statsMutex.Unlock()
}
}
| Godeps/_workspace/src/github.com/obscuren/qml/stats.go | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.00017559398838784546,
0.00016999314539134502,
0.00016667296586092561,
0.0001686823379714042,
0.00000291059063783905
] |
{
"id": 9,
"code_window": [
"\t}\n",
"\tmac.Write(aesbuf)\n",
"\treturn mac.Sum(nil)\n",
"}\n",
"\n",
"func readInt24(b []byte) uint32 {\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn mac.Sum(nil)[:16]\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 145
} | {
"directory": "example/js/",
"cwd": "./",
"analytics": false
} | cmd/mist/assets/ext/.bowerrc | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.00017393726739101112,
0.00017393726739101112,
0.00017393726739101112,
0.00017393726739101112,
0
] |
{
"id": 9,
"code_window": [
"\t}\n",
"\tmac.Write(aesbuf)\n",
"\treturn mac.Sum(nil)\n",
"}\n",
"\n",
"func readInt24(b []byte) uint32 {\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn mac.Sum(nil)[:16]\n"
],
"file_path": "p2p/rlpx.go",
"type": "replace",
"edit_start_line_idx": 145
} | package ssdp
import (
"errors"
"log"
"net/http"
"net/url"
"strconv"
"time"
"github.com/huin/goupnp/httpu"
)
const (
ssdpDiscover = `"ssdp:discover"`
ntsAlive = `ssdp:alive`
ntsByebye = `ssdp:byebye`
ntsUpdate = `ssdp:update`
ssdpUDP4Addr = "239.255.255.250:1900"
ssdpSearchPort = 1900
methodSearch = "M-SEARCH"
methodNotify = "NOTIFY"
)
// SSDPRawSearch performs a fairly raw SSDP search request, and returns the
// unique response(s) that it receives. Each response has the requested
// searchTarget, a USN, and a valid location. maxWaitSeconds states how long to
// wait for responses in seconds, and must be a minimum of 1 (the
// implementation waits an additional 100ms for responses to arrive), 2 is a
// reasonable value for this. numSends is the number of requests to send - 3 is
// a reasonable value for this.
func SSDPRawSearch(httpu *httpu.HTTPUClient, searchTarget string, maxWaitSeconds int, numSends int) ([]*http.Response, error) {
if maxWaitSeconds < 1 {
return nil, errors.New("ssdp: maxWaitSeconds must be >= 1")
}
seenUsns := make(map[string]bool)
var responses []*http.Response
req := http.Request{
Method: methodSearch,
// TODO: Support both IPv4 and IPv6.
Host: ssdpUDP4Addr,
URL: &url.URL{Opaque: "*"},
Header: http.Header{
// Putting headers in here avoids them being title-cased.
// (The UPnP discovery protocol uses case-sensitive headers)
"HOST": []string{ssdpUDP4Addr},
"MX": []string{strconv.FormatInt(int64(maxWaitSeconds), 10)},
"MAN": []string{ssdpDiscover},
"ST": []string{searchTarget},
},
}
allResponses, err := httpu.Do(&req, time.Duration(maxWaitSeconds)*time.Second+100*time.Millisecond, numSends)
if err != nil {
return nil, err
}
for _, response := range allResponses {
if response.StatusCode != 200 {
log.Printf("ssdp: got response status code %q in search response", response.Status)
continue
}
if st := response.Header.Get("ST"); st != searchTarget {
log.Printf("ssdp: got unexpected search target result %q", st)
continue
}
location, err := response.Location()
if err != nil {
log.Printf("ssdp: no usable location in search response (discarding): %v", err)
continue
}
usn := response.Header.Get("USN")
if usn == "" {
log.Printf("ssdp: empty/missing USN in search response (using location instead): %v", err)
usn = location.String()
}
if _, alreadySeen := seenUsns[usn]; !alreadySeen {
seenUsns[usn] = true
responses = append(responses, response)
}
}
return responses, nil
}
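// A minimal usage sketch for SSDPRawSearch (illustrative only; it assumes the
// NewHTTPUClient constructor exported by this package's httpu dependency):
//
//	client, err := httpu.NewHTTPUClient()
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer client.Close()
//	// 2 seconds of waiting and 3 request sends, as suggested above.
//	responses, err := SSDPRawSearch(client, "upnp:rootdevice", 2, 3)
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, resp := range responses {
//		loc, _ := resp.Location()
//		log.Printf("ssdp: found %s at %v", resp.Header.Get("USN"), loc)
//	}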
| Godeps/_workspace/src/github.com/huin/goupnp/ssdp/ssdp.go | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.0001833204150898382,
0.0001720572472549975,
0.00016644917195662856,
0.00016817275900393724,
0.000006315738573903218
] |
{
"id": 10,
"code_window": [
"01010101010101010101010101010101\n",
"ba628a4ba590cb43f7848f41c4382885\n",
"01010101010101010101010101010101\n",
"01010101010101010101010101010101\n",
"`)\n",
"\n",
"\t// Check WriteMsg. This puts a message into the buffer.\n",
"\tif err := EncodeMsg(rw, 8, []interface{}{1, 2, 3, 4}); err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "p2p/rlpx_test.go",
"type": "replace",
"edit_start_line_idx": 31
} | package p2p
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"errors"
"hash"
"io"
"github.com/ethereum/go-ethereum/rlp"
)
var (
// this is used in place of actual frame header data.
// TODO: replace this when Msg contains the protocol type code.
zeroHeader = []byte{0xC2, 0x80, 0x80}
// sixteen zero bytes
zero16 = make([]byte, 16)
)
type rlpxFrameRW struct {
conn io.ReadWriter
enc cipher.Stream
dec cipher.Stream
macCipher cipher.Block
egressMAC hash.Hash
ingressMAC hash.Hash
}
func newRlpxFrameRW(conn io.ReadWriter, s secrets) *rlpxFrameRW {
macc, err := aes.NewCipher(s.MAC)
if err != nil {
panic("invalid MAC secret: " + err.Error())
}
encc, err := aes.NewCipher(s.AES)
if err != nil {
panic("invalid AES secret: " + err.Error())
}
// we use an all-zeroes IV for AES because the key used
// for encryption is ephemeral.
iv := make([]byte, encc.BlockSize())
return &rlpxFrameRW{
conn: conn,
enc: cipher.NewCTR(encc, iv),
dec: cipher.NewCTR(encc, iv),
macCipher: macc,
egressMAC: s.EgressMAC,
ingressMAC: s.IngressMAC,
}
}
func (rw *rlpxFrameRW) WriteMsg(msg Msg) error {
ptype, _ := rlp.EncodeToBytes(msg.Code)
// write header
headbuf := make([]byte, 32)
fsize := uint32(len(ptype)) + msg.Size
putInt24(fsize, headbuf) // TODO: check overflow
copy(headbuf[3:], zeroHeader)
rw.enc.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now encrypted
copy(headbuf[16:], updateHeaderMAC(rw.egressMAC, rw.macCipher, headbuf[:16]))
if _, err := rw.conn.Write(headbuf); err != nil {
return err
}
// write encrypted frame, updating the egress MAC while writing to conn.
tee := cipher.StreamWriter{S: rw.enc, W: io.MultiWriter(rw.conn, rw.egressMAC)}
if _, err := tee.Write(ptype); err != nil {
return err
}
if _, err := io.Copy(tee, msg.Payload); err != nil {
return err
}
if padding := fsize % 16; padding > 0 {
if _, err := tee.Write(zero16[:16-padding]); err != nil {
return err
}
}
// write packet-mac. egress MAC is up to date because
// frame content was written to it as well.
mac := updateHeaderMAC(rw.egressMAC, rw.macCipher, rw.egressMAC.Sum(nil))
_, err := rw.conn.Write(mac)
return err
}
func (rw *rlpxFrameRW) ReadMsg() (msg Msg, err error) {
// read the header
headbuf := make([]byte, 32)
if _, err := io.ReadFull(rw.conn, headbuf); err != nil {
return msg, err
}
// verify header mac
shouldMAC := updateHeaderMAC(rw.ingressMAC, rw.macCipher, headbuf[:16])
if !hmac.Equal(shouldMAC[:16], headbuf[16:]) {
return msg, errors.New("bad header MAC")
}
rw.dec.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now decrypted
fsize := readInt24(headbuf)
// ignore protocol type for now
// read the frame content
var rsize = fsize // frame size rounded up to 16 byte boundary
if padding := fsize % 16; padding > 0 {
rsize += 16 - padding
}
framebuf := make([]byte, rsize)
if _, err := io.ReadFull(rw.conn, framebuf); err != nil {
return msg, err
}
// read and validate frame MAC. we can re-use headbuf for that.
rw.ingressMAC.Write(framebuf)
if _, err := io.ReadFull(rw.conn, headbuf); err != nil {
return msg, err
}
shouldMAC = updateHeaderMAC(rw.ingressMAC, rw.macCipher, rw.ingressMAC.Sum(nil))
if !hmac.Equal(shouldMAC, headbuf) {
return msg, errors.New("bad frame MAC")
}
// decrypt frame content
rw.dec.XORKeyStream(framebuf, framebuf)
// decode message code
content := bytes.NewReader(framebuf[:fsize])
if err := rlp.Decode(content, &msg.Code); err != nil {
return msg, err
}
msg.Size = uint32(content.Len())
msg.Payload = content
return msg, nil
}
func updateHeaderMAC(mac hash.Hash, block cipher.Block, header []byte) []byte {
aesbuf := make([]byte, aes.BlockSize)
block.Encrypt(aesbuf, mac.Sum(nil))
for i := range aesbuf {
aesbuf[i] ^= header[i]
}
mac.Write(aesbuf)
return mac.Sum(nil)
}
func readInt24(b []byte) uint32 {
return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
}
func putInt24(v uint32, b []byte) {
b[0] = byte(v >> 16)
b[1] = byte(v >> 8)
b[2] = byte(v)
}
| p2p/rlpx.go | 1 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.9817109107971191,
0.09664838016033173,
0.0002419498487142846,
0.010893149301409721,
0.23521213233470917
] |
{
"id": 10,
"code_window": [
"01010101010101010101010101010101\n",
"ba628a4ba590cb43f7848f41c4382885\n",
"01010101010101010101010101010101\n",
"01010101010101010101010101010101\n",
"`)\n",
"\n",
"\t// Check WriteMsg. This puts a message into the buffer.\n",
"\tif err := EncodeMsg(rw, 8, []interface{}{1, 2, 3, 4}); err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "p2p/rlpx_test.go",
"type": "replace",
"edit_start_line_idx": 31
} | package globalconf
import (
"flag"
"io/ioutil"
"os"
"testing"
)
const envTestPrefix = "CONFTEST_"
func TestNewWithOptionsNoFilename(t *testing.T) {
opts := Options{EnvPrefix: envTestPrefix}
os.Setenv(envTestPrefix+"D", "EnvD")
flagD := flag.String("d", "default", "")
flagE := flag.Bool("e", true, "")
conf, err := NewWithOptions(&opts)
if err != nil {
t.Fatal(err)
}
conf.ParseAll()
if *flagD != "EnvD" {
t.Errorf("flagD found %v, expected 'EnvD'", *flagD)
}
if !*flagE {
t.Errorf("flagE found %v, expected true", *flagE)
}
}
func TestParse_Global(t *testing.T) {
resetForTesting("")
os.Setenv(envTestPrefix+"D", "EnvD")
os.Setenv(envTestPrefix+"E", "true")
os.Setenv(envTestPrefix+"F", "5.5")
flagA := flag.Bool("a", false, "")
flagB := flag.Float64("b", 0.0, "")
flagC := flag.String("c", "", "")
flagD := flag.String("d", "", "")
flagE := flag.Bool("e", false, "")
flagF := flag.Float64("f", 0.0, "")
parse(t, "./testdata/global.ini", envTestPrefix)
if !*flagA {
t.Errorf("flagA found %v, expected true", *flagA)
}
if *flagB != 5.6 {
t.Errorf("flagB found %v, expected 5.6", *flagB)
}
if *flagC != "Hello world" {
t.Errorf("flagC found %v, expected 'Hello world'", *flagC)
}
if *flagD != "EnvD" {
t.Errorf("flagD found %v, expected 'EnvD'", *flagD)
}
if !*flagE {
t.Errorf("flagE found %v, expected true", *flagE)
}
if *flagF != 5.5 {
t.Errorf("flagF found %v, expected 5.5", *flagF)
}
}
func TestParse_DashConversion(t *testing.T) {
resetForTesting("")
flagFooBar := flag.String("foo-bar", "", "")
os.Setenv("PREFIX_FOO_BAR", "baz")
opts := Options{EnvPrefix: "PREFIX_"}
conf, err := NewWithOptions(&opts)
if err != nil {
t.Fatal(err)
}
conf.ParseAll()
if *flagFooBar != "baz" {
t.Errorf("flagFooBar found %v, expected 5.5", *flagFooBar)
}
}
func TestParse_GlobalWithDottedFlagname(t *testing.T) {
resetForTesting("")
os.Setenv(envTestPrefix+"SOME_VALUE", "some-value")
flagSomeValue := flag.String("some.value", "", "")
parse(t, "./testdata/global.ini", envTestPrefix)
if *flagSomeValue != "some-value" {
t.Errorf("flagSomeValue found %v, some-value expected", *flagSomeValue)
}
}
func TestParse_GlobalOverwrite(t *testing.T) {
resetForTesting("-b=7.6")
flagB := flag.Float64("b", 0.0, "")
parse(t, "./testdata/global.ini", "")
if *flagB != 7.6 {
t.Errorf("flagB found %v, expected 7.6", *flagB)
}
}
func TestParse_Custom(t *testing.T) {
resetForTesting("")
os.Setenv(envTestPrefix+"CUSTOM_E", "Hello Env")
flagB := flag.Float64("b", 5.0, "")
name := "custom"
custom := flag.NewFlagSet(name, flag.ExitOnError)
flagD := custom.String("d", "dd", "")
flagE := custom.String("e", "ee", "")
Register(name, custom)
parse(t, "./testdata/custom.ini", envTestPrefix)
if *flagB != 5.0 {
t.Errorf("flagB found %v, expected 5.0", *flagB)
}
if *flagD != "Hello d" {
t.Errorf("flagD found %v, expected 'Hello d'", *flagD)
}
if *flagE != "Hello Env" {
t.Errorf("flagE found %v, expected 'Hello Env'", *flagE)
}
}
func TestParse_CustomOverwrite(t *testing.T) {
resetForTesting("-b=6")
flagB := flag.Float64("b", 5.0, "")
name := "custom"
custom := flag.NewFlagSet(name, flag.ExitOnError)
flagD := custom.String("d", "dd", "")
Register(name, custom)
parse(t, "./testdata/custom.ini", "")
if *flagB != 6.0 {
t.Errorf("flagB found %v, expected 6.0", *flagB)
}
if *flagD != "Hello d" {
t.Errorf("flagD found %v, expected 'Hello d'", *flagD)
}
}
func TestParse_GlobalAndCustom(t *testing.T) {
resetForTesting("")
flagA := flag.Bool("a", false, "")
flagB := flag.Float64("b", 0.0, "")
flagC := flag.String("c", "", "")
name := "custom"
custom := flag.NewFlagSet(name, flag.ExitOnError)
flagD := custom.String("d", "", "")
Register(name, custom)
parse(t, "./testdata/globalandcustom.ini", "")
if !*flagA {
t.Errorf("flagA found %v, expected true", *flagA)
}
if *flagB != 5.6 {
t.Errorf("flagB found %v, expected 5.6", *flagB)
}
if *flagC != "Hello world" {
t.Errorf("flagC found %v, expected 'Hello world'", *flagC)
}
if *flagD != "Hello d" {
t.Errorf("flagD found %v, expected 'Hello d'", *flagD)
}
}
func TestParse_GlobalAndCustomOverwrite(t *testing.T) {
resetForTesting("-a=true", "-b=5", "-c=Hello")
flagA := flag.Bool("a", false, "")
flagB := flag.Float64("b", 0.0, "")
flagC := flag.String("c", "", "")
name := "custom"
custom := flag.NewFlagSet(name, flag.ExitOnError)
flagD := custom.String("d", "", "")
Register(name, custom)
parse(t, "./testdata/globalandcustom.ini", "")
if !*flagA {
t.Errorf("flagA found %v, expected true", *flagA)
}
if *flagB != 5.0 {
t.Errorf("flagB found %v, expected 5.0", *flagB)
}
if *flagC != "Hello" {
t.Errorf("flagC found %v, expected 'Hello'", *flagC)
}
if *flagD != "Hello d" {
t.Errorf("flagD found %v, expected 'Hello d'", *flagD)
}
}
func TestSet(t *testing.T) {
resetForTesting()
file, _ := ioutil.TempFile("", "")
conf := parse(t, file.Name(), "")
conf.Set("", &flag.Flag{Name: "a", Value: newFlagValue("test")})
flagA := flag.String("a", "", "")
parse(t, file.Name(), "")
if *flagA != "test" {
t.Errorf("flagA found %v, expected 'test'", *flagA)
}
}
func TestDelete(t *testing.T) {
resetForTesting()
file, _ := ioutil.TempFile("", "")
conf := parse(t, file.Name(), "")
conf.Set("", &flag.Flag{Name: "a", Value: newFlagValue("test")})
conf.Delete("", "a")
flagA := flag.String("a", "", "")
parse(t, file.Name(), "")
if *flagA != "" {
t.Errorf("flagNewA found %v, expected ''", *flagA)
}
}
func parse(t *testing.T, filename, envPrefix string) *GlobalConf {
opts := Options{
Filename: filename,
EnvPrefix: envPrefix,
}
conf, err := NewWithOptions(&opts)
if err != nil {
t.Error(err)
}
conf.ParseAll()
return conf
}
// Resets os.Args and the default flag set.
func resetForTesting(args ...string) {
os.Clearenv()
os.Args = append([]string{"cmd"}, args...)
flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
}
type flagValue struct {
str string
}
func (f *flagValue) String() string {
return f.str
}
func (f *flagValue) Set(value string) error {
f.str = value
return nil
}
func newFlagValue(val string) *flagValue {
return &flagValue{str: val}
}
| Godeps/_workspace/src/github.com/rakyll/globalconf/globalconf_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.019752906635403633,
0.0027731163427233696,
0.0002956356038339436,
0.0009770585456863046,
0.004860993940383196
] |
{
"id": 10,
"code_window": [
"01010101010101010101010101010101\n",
"ba628a4ba590cb43f7848f41c4382885\n",
"01010101010101010101010101010101\n",
"01010101010101010101010101010101\n",
"`)\n",
"\n",
"\t// Check WriteMsg. This puts a message into the buffer.\n",
"\tif err := EncodeMsg(rw, 8, []interface{}{1, 2, 3, 4}); err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "p2p/rlpx_test.go",
"type": "replace",
"edit_start_line_idx": 31
} | package otto
import (
"testing"
)
// map/flatten/reduce
func Test_underscore_chaining_0(t *testing.T) {
tt(t, func() {
test, _ := test_()
test(`
test("map/flatten/reduce", function() {
var lyrics = [
"I'm a lumberjack and I'm okay",
"I sleep all night and I work all day",
"He's a lumberjack and he's okay",
"He sleeps all night and he works all day"
];
var counts = _(lyrics).chain()
.map(function(line) { return line.split(''); })
.flatten()
.reduce(function(hash, l) {
hash[l] = hash[l] || 0;
hash[l]++;
return hash;
}, {}).value();
ok(counts['a'] == 16 && counts['e'] == 10, 'counted all the letters in the song');
});
`)
})
}
// select/reject/sortBy
func Test_underscore_chaining_1(t *testing.T) {
tt(t, func() {
test, _ := test_()
test(`
test("select/reject/sortBy", function() {
var numbers = [1,2,3,4,5,6,7,8,9,10];
numbers = _(numbers).chain().select(function(n) {
return n % 2 == 0;
}).reject(function(n) {
return n % 4 == 0;
}).sortBy(function(n) {
return -n;
}).value();
equal(numbers.join(', '), "10, 6, 2", "filtered and reversed the numbers");
});
`)
})
}
// select/reject/sortBy in functional style
func Test_underscore_chaining_2(t *testing.T) {
tt(t, func() {
test, _ := test_()
test(`
test("select/reject/sortBy in functional style", function() {
var numbers = [1,2,3,4,5,6,7,8,9,10];
numbers = _.chain(numbers).select(function(n) {
return n % 2 == 0;
}).reject(function(n) {
return n % 4 == 0;
}).sortBy(function(n) {
return -n;
}).value();
equal(numbers.join(', '), "10, 6, 2", "filtered and reversed the numbers");
});
`)
})
}
// reverse/concat/unshift/pop/map
func Test_underscore_chaining_3(t *testing.T) {
tt(t, func() {
test, _ := test_()
test(`
test("reverse/concat/unshift/pop/map", function() {
var numbers = [1,2,3,4,5];
numbers = _(numbers).chain()
.reverse()
.concat([5, 5, 5])
.unshift(17)
.pop()
.map(function(n){ return n * 2; })
.value();
equal(numbers.join(', '), "34, 10, 8, 6, 4, 2, 10, 10", 'can chain together array functions.');
});
`)
})
}
| Godeps/_workspace/src/github.com/obscuren/otto/underscore_chaining_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.002537788124755025,
0.00135534827131778,
0.00021914827811997384,
0.0010941673535853624,
0.0009166322997771204
] |
{
"id": 10,
"code_window": [
"01010101010101010101010101010101\n",
"ba628a4ba590cb43f7848f41c4382885\n",
"01010101010101010101010101010101\n",
"01010101010101010101010101010101\n",
"`)\n",
"\n",
"\t// Check WriteMsg. This puts a message into the buffer.\n",
"\tif err := EncodeMsg(rw, 8, []interface{}{1, 2, 3, 4}); err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "p2p/rlpx_test.go",
"type": "replace",
"edit_start_line_idx": 31
} | .PHONY: test test-race test-release release release-check test-262
.PHONY: parser
.PHONY: otto assets underscore
TESTS := \
~
TEST := -v --run
TEST := -v --run Test\($(subst $(eval) ,\|,$(TESTS))\)
TEST := -v
TEST := .
test: parser inline.go
go test -i
go test $(TEST)
@echo PASS
parser:
$(MAKE) -C parser
inline.go: inline
./$< > $@
#################
# release, test #
#################
release: test-race test-release
for package in . parser token ast file underscore registry; do (cd $$package && godocdown --signature > README.markdown); done
@echo \*\*\* make release-check
@echo PASS
release-check: .test
$(MAKE) -C test build test
$(MAKE) -C .test/test262 build test
@echo PASS
test-262: .test
$(MAKE) -C .test/test262 build test
@echo PASS
test-release:
go test -i
go test
test-race:
go test -race -i
go test -race
#################################
# otto, assets, underscore, ... #
#################################
otto:
$(MAKE) -C otto
assets:
mkdir -p .assets
for file in underscore/test/*.js; do tr "\`" "_" < $$file > .assets/`basename $$file`; done
underscore:
$(MAKE) -C $@
| Godeps/_workspace/src/github.com/obscuren/otto/Makefile | 0 | https://github.com/ethereum/go-ethereum/commit/d344054e5a2844241bf0e4f64ccfc4d2ad259718 | [
0.000962199701461941,
0.000349925976479426,
0.00019134061585646123,
0.00025175887276418507,
0.00025460432516410947
] |
{
"id": 0,
"code_window": [
"\t\t// reconsidered if we eventually need to move in-tree storage tests out.\n",
"\t\tsc.Parameters[\"csi.storage.k8s.io/fstype\"] = fsType\n",
"\t}\n",
"\treturn testsuites.GetStorageClass(sc.Provisioner, sc.Parameters, sc.VolumeBindingMode, f.Namespace.Name, \"e2e-sc\")\n",
"}\n",
"\n",
"func loadSnapshotClass(filename string) (*unstructured.Unstructured, error) {\n",
"\tdata, err := ioutil.ReadFile(filename)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn testsuites.CopyStorageClass(sc, f.Namespace.Name, \"e2e-sc\")\n"
],
"file_path": "test/e2e/storage/external/external.go",
"type": "replace",
"edit_start_line_idx": 301
} | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testsuites
import (
"context"
"flag"
"fmt"
"math"
"regexp"
"strings"
"time"
"github.com/onsi/ginkgo"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/component-base/metrics/testutil"
csitrans "k8s.io/csi-translation-lib"
"k8s.io/kubernetes/test/e2e/framework"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/podlogs"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
var (
migratedPlugins *string
minValidSize = "1Ki"
maxValidSize = "10Ei"
)
func init() {
migratedPlugins = flag.String("storage.migratedPlugins", "", "comma separated list of in-tree plugin names of form 'kubernetes.io/{pluginName}' migrated to CSI")
}
type opCounts map[string]int64
// migrationOpCheck validates migrated metrics.
type migrationOpCheck struct {
cs clientset.Interface
pluginName string
skipCheck bool
// The old ops are not set if skipCheck is true.
oldInTreeOps opCounts
oldMigratedOps opCounts
}
// BaseSuites is a list of storage test suites that work for in-tree and CSI drivers
var BaseSuites = []func() TestSuite{
InitVolumesTestSuite,
InitVolumeIOTestSuite,
InitVolumeModeTestSuite,
InitSubPathTestSuite,
InitProvisioningTestSuite,
InitMultiVolumeTestSuite,
InitVolumeExpandTestSuite,
InitDisruptiveTestSuite,
InitVolumeLimitsTestSuite,
InitTopologyTestSuite,
InitVolumeStressTestSuite,
InitFsGroupChangePolicyTestSuite,
}
// CSISuites is a list of storage test suites that work only for CSI drivers
var CSISuites = append(BaseSuites,
InitEphemeralTestSuite,
InitSnapshottableTestSuite,
InitSnapshottableStressTestSuite,
)
// TestSuite represents an interface for a set of tests which works with TestDriver
type TestSuite interface {
// GetTestSuiteInfo returns the TestSuiteInfo for this TestSuite
GetTestSuiteInfo() TestSuiteInfo
// DefineTests defines tests of the testpattern for the driver.
// Called inside a Ginkgo context that reflects the current driver and test pattern,
// so the test suite can define tests directly with ginkgo.It.
DefineTests(TestDriver, testpatterns.TestPattern)
// SkipRedundantSuite will skip the test suite based on the given TestPattern and TestDriver
SkipRedundantSuite(TestDriver, testpatterns.TestPattern)
}
// TestSuiteInfo represents a set of parameters for TestSuite
type TestSuiteInfo struct {
Name string // name of the TestSuite
FeatureTag string // featureTag for the TestSuite
TestPatterns []testpatterns.TestPattern // Slice of TestPattern for the TestSuite
SupportedSizeRange e2evolume.SizeRange // Size range supported by the test suite
}
func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string {
tsInfo := suite.GetTestSuiteInfo()
return fmt.Sprintf("[Testpattern: %s]%s %s%s", pattern.Name, pattern.FeatureTag, tsInfo.Name, tsInfo.FeatureTag)
}
// DefineTestSuite defines tests for all testpatterns and all testSuites for a driver
func DefineTestSuite(driver TestDriver, tsInits []func() TestSuite) {
for _, testSuiteInit := range tsInits {
suite := testSuiteInit()
for _, pattern := range suite.GetTestSuiteInfo().TestPatterns {
p := pattern
ginkgo.Context(getTestNameStr(suite, p), func() {
ginkgo.BeforeEach(func() {
// Skip unsupported tests to avoid unnecessary resource initialization
suite.SkipRedundantSuite(driver, p)
skipUnsupportedTest(driver, p)
})
suite.DefineTests(driver, p)
})
}
}
}
// skipUnsupportedTest will skip tests if the combination of driver and testpattern
// is not suitable to be tested.
// Whether it needs to be skipped is checked by the following steps:
// 1. Check whether SnapshotType is supported by the driver from its interface
// 2. Check whether volType is supported by the driver from its interface
// 3. Check if fsType is supported
// 4. Check with driver specific logic
//
// Test suites can also skip tests inside their own DefineTests function or in
// individual tests.
func skipUnsupportedTest(driver TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
var isSupported bool
// 0. Check with driver specific logic
driver.SkipUnsupportedTest(pattern)
	// 1. Check whether volType is supported by the driver from its interface
switch pattern.VolType {
case testpatterns.InlineVolume:
_, isSupported = driver.(InlineVolumeTestDriver)
case testpatterns.PreprovisionedPV:
_, isSupported = driver.(PreprovisionedPVTestDriver)
case testpatterns.DynamicPV, testpatterns.GenericEphemeralVolume:
_, isSupported = driver.(DynamicPVTestDriver)
case testpatterns.CSIInlineVolume:
_, isSupported = driver.(EphemeralTestDriver)
default:
isSupported = false
}
if !isSupported {
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
}
// 2. Check if fsType is supported
if !dInfo.SupportedFsType.Has(pattern.FsType) {
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.FsType)
}
if pattern.FsType == "xfs" && framework.NodeOSDistroIs("windows") {
e2eskipper.Skipf("Distro doesn't support xfs -- skipping")
}
if pattern.FsType == "ntfs" && !framework.NodeOSDistroIs("windows") {
e2eskipper.Skipf("Distro %s doesn't support ntfs -- skipping", framework.TestContext.NodeOSDistro)
}
}
// VolumeResource is a generic implementation of TestResource that will be able to
// be used in most TestSuites.
// See volume_io.go or volumes.go in test/e2e/storage/testsuites/ for how to use this resource.
// Also, see subpath.go in the same directory for how to extend and use it.
type VolumeResource struct {
Config *PerTestConfig
Pattern testpatterns.TestPattern
VolSource *v1.VolumeSource
Pvc *v1.PersistentVolumeClaim
Pv *v1.PersistentVolume
Sc *storagev1.StorageClass
Volume TestVolume
}
// CreateVolumeResource constructs a VolumeResource for the current test. It knows how to deal with
// different test pattern volume types.
func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, testVolumeSizeRange e2evolume.SizeRange) *VolumeResource {
r := VolumeResource{
Config: config,
Pattern: pattern,
}
dInfo := driver.GetDriverInfo()
f := config.Framework
cs := f.ClientSet
// Create volume for pre-provisioned volume tests
r.Volume = CreateVolume(driver, config, pattern.VolType)
switch pattern.VolType {
case testpatterns.InlineVolume:
framework.Logf("Creating resource for inline volume")
if iDriver, ok := driver.(InlineVolumeTestDriver); ok {
r.VolSource = iDriver.GetVolumeSource(false, pattern.FsType, r.Volume)
}
case testpatterns.PreprovisionedPV:
framework.Logf("Creating resource for pre-provisioned PV")
if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, pattern.FsType, r.Volume)
if pvSource != nil {
r.Pv, r.Pvc = createPVCPV(f, dInfo.Name, pvSource, volumeNodeAffinity, pattern.VolMode, dInfo.RequiredAccessModes)
r.VolSource = createVolumeSource(r.Pvc.Name, false /* readOnly */)
}
}
case testpatterns.DynamicPV, testpatterns.GenericEphemeralVolume:
framework.Logf("Creating resource for dynamic PV")
if dDriver, ok := driver.(DynamicPVTestDriver); ok {
var err error
driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
framework.Logf("Using claimSize:%s, test suite supported size:%v, driver(%s) supported size:%v ", claimSize, testVolumeSizeRange, dDriver.GetDriverInfo().Name, testVolumeSizeRange)
r.Sc = dDriver.GetDynamicProvisionStorageClass(r.Config, pattern.FsType)
if pattern.BindingMode != "" {
r.Sc.VolumeBindingMode = &pattern.BindingMode
}
if pattern.AllowExpansion != false {
r.Sc.AllowVolumeExpansion = &pattern.AllowExpansion
}
ginkgo.By("creating a StorageClass " + r.Sc.Name)
r.Sc, err = cs.StorageV1().StorageClasses().Create(context.TODO(), r.Sc, metav1.CreateOptions{})
framework.ExpectNoError(err)
switch pattern.VolType {
case testpatterns.DynamicPV:
r.Pv, r.Pvc = createPVCPVFromDynamicProvisionSC(
f, dInfo.Name, claimSize, r.Sc, pattern.VolMode, dInfo.RequiredAccessModes)
r.VolSource = createVolumeSource(r.Pvc.Name, false /* readOnly */)
case testpatterns.GenericEphemeralVolume:
driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
r.VolSource = createEphemeralVolumeSource(r.Sc.Name, dInfo.RequiredAccessModes, claimSize, false /* readOnly */)
}
}
case testpatterns.CSIInlineVolume:
framework.Logf("Creating resource for CSI ephemeral inline volume")
if eDriver, ok := driver.(EphemeralTestDriver); ok {
attributes, _, _ := eDriver.GetVolume(config, 0)
r.VolSource = &v1.VolumeSource{
CSI: &v1.CSIVolumeSource{
Driver: eDriver.GetCSIDriverName(config),
VolumeAttributes: attributes,
},
}
}
default:
framework.Failf("VolumeResource doesn't support: %s", pattern.VolType)
}
if r.VolSource == nil {
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
}
return &r
}
func createVolumeSource(pvcName string, readOnly bool) *v1.VolumeSource {
return &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvcName,
ReadOnly: readOnly,
},
}
}
func createEphemeralVolumeSource(scName string, accessModes []v1.PersistentVolumeAccessMode, claimSize string, readOnly bool) *v1.VolumeSource {
if len(accessModes) == 0 {
accessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
}
return &v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{
VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
Spec: v1.PersistentVolumeClaimSpec{
StorageClassName: &scName,
AccessModes: accessModes,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse(claimSize),
},
},
},
},
ReadOnly: readOnly,
},
}
}
// CleanupResource cleans up VolumeResource
func (r *VolumeResource) CleanupResource() error {
f := r.Config.Framework
var cleanUpErrs []error
if r.Pvc != nil || r.Pv != nil {
switch r.Pattern.VolType {
case testpatterns.PreprovisionedPV:
ginkgo.By("Deleting pv and pvc")
if errs := e2epv.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.Pv, r.Pvc); len(errs) != 0 {
framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs))
}
case testpatterns.DynamicPV:
ginkgo.By("Deleting pvc")
// We only delete the PVC so that PV (and disk) can be cleaned up by dynamic provisioner
if r.Pv != nil && r.Pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete {
framework.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v",
r.Pv.Name, v1.PersistentVolumeReclaimDelete)
}
if r.Pvc != nil {
cs := f.ClientSet
pv := r.Pv
if pv == nil && r.Pvc.Name != "" {
// This happens for late binding. Check whether we have a volume now that we need to wait for.
pvc, err := cs.CoreV1().PersistentVolumeClaims(r.Pvc.Namespace).Get(context.TODO(), r.Pvc.Name, metav1.GetOptions{})
switch {
case err == nil:
if pvc.Spec.VolumeName != "" {
pv, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
if err != nil {
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to find PV %v", pvc.Spec.VolumeName))
}
}
case apierrors.IsNotFound(err):
// Without the PVC, we cannot locate the corresponding PV. Let's
// hope that it is gone.
default:
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to find PVC %v", r.Pvc.Name))
}
}
err := e2epv.DeletePersistentVolumeClaim(f.ClientSet, r.Pvc.Name, f.Namespace.Name)
if err != nil {
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to delete PVC %v", r.Pvc.Name))
}
if pv != nil {
err = e2epv.WaitForPersistentVolumeDeleted(f.ClientSet, pv.Name, 5*time.Second, 5*time.Minute)
if err != nil {
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err,
"Persistent Volume %v not deleted by dynamic provisioner", pv.Name))
}
}
}
default:
framework.Failf("Found PVC (%v) or PV (%v) but not running Preprovisioned or Dynamic test pattern", r.Pvc, r.Pv)
}
}
if r.Sc != nil {
ginkgo.By("Deleting sc")
if err := deleteStorageClass(f.ClientSet, r.Sc.Name); err != nil {
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to delete StorageClass %v", r.Sc.Name))
}
}
// Cleanup volume for pre-provisioned volume tests
if r.Volume != nil {
if err := tryFunc(r.Volume.DeleteVolume); err != nil {
cleanUpErrs = append(cleanUpErrs, errors.Wrap(err, "Failed to delete Volume"))
}
}
return utilerrors.NewAggregate(cleanUpErrs)
}
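// createPVCPV creates a pre-provisioned PV from the given source together with a matching PVC and waits for the two to bind.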
func createPVCPV(
f *framework.Framework,
name string,
pvSource *v1.PersistentVolumeSource,
volumeNodeAffinity *v1.VolumeNodeAffinity,
volMode v1.PersistentVolumeMode,
accessModes []v1.PersistentVolumeAccessMode,
) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
pvConfig := e2epv.PersistentVolumeConfig{
NamePrefix: fmt.Sprintf("%s-", name),
StorageClassName: f.Namespace.Name,
PVSource: *pvSource,
NodeAffinity: volumeNodeAffinity,
AccessModes: accessModes,
}
pvcConfig := e2epv.PersistentVolumeClaimConfig{
StorageClassName: &f.Namespace.Name,
AccessModes: accessModes,
}
if volMode != "" {
pvConfig.VolumeMode = &volMode
pvcConfig.VolumeMode = &volMode
}
framework.Logf("Creating PVC and PV")
pv, pvc, err := e2epv.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false)
framework.ExpectNoError(err, "PVC, PV creation failed")
err = e2epv.WaitOnPVandPVC(f.ClientSet, f.Namespace.Name, pv, pvc)
framework.ExpectNoError(err, "PVC, PV failed to bind")
return pv, pvc
}
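// createPVCPVFromDynamicProvisionSC creates a PVC from the given StorageClass and, unless the class uses delayed binding, waits for the claim to be bound and returns the dynamically provisioned PV with it.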
func createPVCPVFromDynamicProvisionSC(
f *framework.Framework,
name string,
claimSize string,
sc *storagev1.StorageClass,
volMode v1.PersistentVolumeMode,
accessModes []v1.PersistentVolumeAccessMode,
) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
cs := f.ClientSet
ns := f.Namespace.Name
ginkgo.By("creating a claim")
pvcCfg := e2epv.PersistentVolumeClaimConfig{
NamePrefix: name,
ClaimSize: claimSize,
StorageClassName: &(sc.Name),
AccessModes: accessModes,
VolumeMode: &volMode,
}
pvc := e2epv.MakePersistentVolumeClaim(pvcCfg, ns)
var err error
pvc, err = e2epv.CreatePVC(cs, ns, pvc)
framework.ExpectNoError(err)
if !isDelayedBinding(sc) {
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
}
pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
var pv *v1.PersistentVolume
if !isDelayedBinding(sc) {
pv, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
framework.ExpectNoError(err)
}
return pv, pvc
}
func isDelayedBinding(sc *storagev1.StorageClass) bool {
if sc.VolumeBindingMode != nil {
return *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer
}
return false
}
// deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"
func deleteStorageClass(cs clientset.Interface, className string) error {
err := cs.StorageV1().StorageClasses().Delete(context.TODO(), className, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return err
}
return nil
}
// convertTestConfig returns a framework test config with the
// parameters specified for the testsuite or (if available) the
// dynamically created config for the volume server.
//
// This is done because TestConfig is the public API for
// the testsuites package whereas volume.TestConfig is merely
// an implementation detail. It contains fields that have no effect,
// which makes it unsuitable for use in the testsuites public API.
func convertTestConfig(in *PerTestConfig) e2evolume.TestConfig {
if in.ServerConfig != nil {
return *in.ServerConfig
}
return e2evolume.TestConfig{
Namespace: in.Framework.Namespace.Name,
Prefix: in.Prefix,
ClientNodeSelection: in.ClientNodeSelection,
}
}
// getSizeRangesIntersection takes two instances of storage size ranges and determines the
// intersection of the intervals (if it exists) and returns the minimum of the intersection
// to be used as the claim size for the test.
// If a value is not set, there is no minimum or maximum size limitation and we set a default size for it.
func getSizeRangesIntersection(first e2evolume.SizeRange, second e2evolume.SizeRange) (string, error) {
var firstMin, firstMax, secondMin, secondMax resource.Quantity
var err error
//if SizeRange is not set, assign a minimum or maximum size
if len(first.Min) == 0 {
first.Min = minValidSize
}
if len(first.Max) == 0 {
first.Max = maxValidSize
}
if len(second.Min) == 0 {
second.Min = minValidSize
}
if len(second.Max) == 0 {
second.Max = maxValidSize
}
if firstMin, err = resource.ParseQuantity(first.Min); err != nil {
return "", err
}
if firstMax, err = resource.ParseQuantity(first.Max); err != nil {
return "", err
}
if secondMin, err = resource.ParseQuantity(second.Min); err != nil {
return "", err
}
if secondMax, err = resource.ParseQuantity(second.Max); err != nil {
return "", err
}
interSectionStart := math.Max(float64(firstMin.Value()), float64(secondMin.Value()))
intersectionEnd := math.Min(float64(firstMax.Value()), float64(secondMax.Value()))
// the minimum of the intersection shall be returned as the claim size
var intersectionMin resource.Quantity
if intersectionEnd-interSectionStart >= 0 { //have intersection
intersectionMin = *resource.NewQuantity(int64(interSectionStart), "BinarySI") //convert value to BinarySI format. E.g. 5Gi
// return the minimum of the intersection as the claim size
return intersectionMin.String(), nil
}
return "", fmt.Errorf("intersection of size ranges %+v, %+v is null", first, second)
}
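// getSnapshot returns an unstructured VolumeSnapshot object in the given namespace that uses the given snapshot class and the named PVC as its source.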
func getSnapshot(claimName string, ns, snapshotClassName string) *unstructured.Unstructured {
snapshot := &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": "VolumeSnapshot",
"apiVersion": snapshotAPIVersion,
"metadata": map[string]interface{}{
"generateName": "snapshot-",
"namespace": ns,
},
"spec": map[string]interface{}{
"volumeSnapshotClassName": snapshotClassName,
"source": map[string]interface{}{
"persistentVolumeClaimName": claimName,
},
},
},
}
return snapshot
}
func getPreProvisionedSnapshot(snapName, ns, snapshotContentName string) *unstructured.Unstructured {
snapshot := &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": "VolumeSnapshot",
"apiVersion": snapshotAPIVersion,
"metadata": map[string]interface{}{
"name": snapName,
"namespace": ns,
},
"spec": map[string]interface{}{
"source": map[string]interface{}{
"volumeSnapshotContentName": snapshotContentName,
},
},
},
}
return snapshot
}
func getPreProvisionedSnapshotContent(snapcontentName, snapshotName, snapshotNamespace, snapshotHandle, deletionPolicy, csiDriverName string) *unstructured.Unstructured {
snapshotContent := &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": "VolumeSnapshotContent",
"apiVersion": snapshotAPIVersion,
"metadata": map[string]interface{}{
"name": snapcontentName,
},
"spec": map[string]interface{}{
"source": map[string]interface{}{
"snapshotHandle": snapshotHandle,
},
"volumeSnapshotRef": map[string]interface{}{
"name": snapshotName,
"namespace": snapshotNamespace,
},
"driver": csiDriverName,
"deletionPolicy": deletionPolicy,
},
},
}
return snapshotContent
}
func getPreProvisionedSnapshotContentName(uuid types.UID) string {
return fmt.Sprintf("pre-provisioned-snapcontent-%s", string(uuid))
}
func getPreProvisionedSnapshotName(uuid types.UID) string {
return fmt.Sprintf("pre-provisioned-snapshot-%s", string(uuid))
}
// StartPodLogs begins capturing log output and events from current
// and future pods running in the namespace of the framework. That
// ends when the returned cleanup function is called.
//
// The output goes to log files (when using --report-dir, as in the
// CI) or the output stream (otherwise).
func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func() {
ctx, cancel := context.WithCancel(context.Background())
cs := f.ClientSet
ns := driverNamespace.Name
to := podlogs.LogOutput{
StatusWriter: ginkgo.GinkgoWriter,
}
if framework.TestContext.ReportDir == "" {
to.LogWriter = ginkgo.GinkgoWriter
} else {
test := ginkgo.CurrentGinkgoTestDescription()
// Clean up each individual component text such that
// it contains only characters that are valid as file
// name.
reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
var components []string
for _, component := range test.ComponentTexts {
components = append(components, reg.ReplaceAllString(component, "_"))
}
// We end the prefix with a slash to ensure that all logs
// end up in a directory named after the current test.
//
// Each component name maps to a directory. This
// avoids cluttering the root artifact directory and
// keeps each directory name smaller (the full test
// name at one point exceeded 256 characters, which was
// too much for some filesystems).
to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
strings.Join(components, "/") + "/"
}
podlogs.CopyAllLogs(ctx, cs, ns, to)
// pod events are something that the framework already collects itself
// after a failed test. Logging them live is only useful for interactive
// debugging, not when we collect reports.
if framework.TestContext.ReportDir == "" {
podlogs.WatchPods(ctx, cs, ns, ginkgo.GinkgoWriter)
}
return cancel
}
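// getVolumeOpsFromMetricsForPlugin extracts per-operation counts for the given volume plugin from storage_operation_status_count metric samples.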
func getVolumeOpsFromMetricsForPlugin(ms testutil.Metrics, pluginName string) opCounts {
totOps := opCounts{}
for method, samples := range ms {
switch method {
case "storage_operation_status_count":
for _, sample := range samples {
plugin := string(sample.Metric["volume_plugin"])
if pluginName != plugin {
continue
}
opName := string(sample.Metric["operation_name"])
if opName == "verify_controller_attached_volume" {
// We ignore verify_controller_attached_volume because it does not call into
// the plugin. It only watches Node API and updates Actual State of World cache
continue
}
totOps[opName] = totOps[opName] + int64(sample.Value)
}
}
}
return totOps
}
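// getVolumeOpCounts collects volume operation counts for the given plugin from the controller-manager and, on clusters small enough, from each kubelet; it returns an empty map when the provider does not expose these metrics.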
func getVolumeOpCounts(c clientset.Interface, pluginName string) opCounts {
if !framework.ProviderIs("gce", "gke", "aws") {
return opCounts{}
}
nodeLimit := 25
metricsGrabber, err := e2emetrics.NewMetricsGrabber(c, nil, true, false, true, false, false)
if err != nil {
framework.ExpectNoError(err, "Error creating metrics grabber: %v", err)
}
if !metricsGrabber.HasControlPlanePods() {
framework.Logf("Warning: Environment does not support getting controller-manager metrics")
return opCounts{}
}
controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
framework.ExpectNoError(err, "Error getting c-m metrics : %v", err)
totOps := getVolumeOpsFromMetricsForPlugin(testutil.Metrics(controllerMetrics), pluginName)
framework.Logf("Node name not specified for getVolumeOpCounts, falling back to listing nodes from API Server")
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "Error listing nodes: %v", err)
if len(nodes.Items) <= nodeLimit {
// For large clusters with > nodeLimit nodes it is too time consuming to
// gather metrics from all nodes. We just ignore the node metrics
// for those clusters
for _, node := range nodes.Items {
nodeMetrics, err := metricsGrabber.GrabFromKubelet(node.GetName())
framework.ExpectNoError(err, "Error getting Kubelet %v metrics: %v", node.GetName(), err)
totOps = addOpCounts(totOps, getVolumeOpsFromMetricsForPlugin(testutil.Metrics(nodeMetrics), pluginName))
}
} else {
framework.Logf("Skipping operation metrics gathering from nodes in getVolumeOpCounts, greater than %v nodes", nodeLimit)
}
return totOps
}
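// addOpCounts merges two opCounts maps, summing the counts per operation name.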
func addOpCounts(o1 opCounts, o2 opCounts) opCounts {
totOps := opCounts{}
seen := sets.NewString()
for op, count := range o1 {
seen.Insert(op)
totOps[op] = totOps[op] + count + o2[op]
}
for op, count := range o2 {
if !seen.Has(op) {
totOps[op] = totOps[op] + count
}
}
return totOps
}
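// getMigrationVolumeOpCounts returns the operation counts for the given in-tree plugin and for its migrated CSI counterpart; for native CSI drivers both maps are empty.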
func getMigrationVolumeOpCounts(cs clientset.Interface, pluginName string) (opCounts, opCounts) {
if len(pluginName) > 0 {
var migratedOps opCounts
l := csitrans.New()
csiName, err := l.GetCSINameFromInTreeName(pluginName)
if err != nil {
framework.Logf("Could not find CSI Name for in-tree plugin %v", pluginName)
migratedOps = opCounts{}
} else {
csiName = "kubernetes.io/csi:" + csiName
migratedOps = getVolumeOpCounts(cs, csiName)
}
return getVolumeOpCounts(cs, pluginName), migratedOps
}
// Not an in-tree driver
framework.Logf("Test running for native CSI Driver, not checking metrics")
return opCounts{}, opCounts{}
}
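// newMigrationOpCheck snapshots the current in-tree and migrated operation counts for later validation; the check is skipped for native CSI drivers and for plugins that are not migrated.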
func newMigrationOpCheck(cs clientset.Interface, pluginName string) *migrationOpCheck {
moc := migrationOpCheck{
cs: cs,
pluginName: pluginName,
}
if len(pluginName) == 0 {
// This is a native CSI Driver and we don't check ops
moc.skipCheck = true
return &moc
}
if !sets.NewString(strings.Split(*migratedPlugins, ",")...).Has(pluginName) {
// In-tree plugin is not migrated
framework.Logf("In-tree plugin %v is not migrated, not validating any metrics", pluginName)
// We don't check in-tree plugin metrics because some negative test
// cases may not do any volume operations and therefore not emit any
// metrics
// We don't check counts for the Migrated version of the driver because
// if tests are running in parallel a test could be using the CSI Driver
// natively and increase the metrics count
// TODO(dyzz): Add a dimension to OperationGenerator metrics for
// "migrated"->true/false so that we can disambiguate migrated metrics
// and native CSI Driver metrics. This way we can check the counts for
// migrated version of the driver for stronger negative test case
// guarantees (as well as more informative metrics).
moc.skipCheck = true
return &moc
}
moc.oldInTreeOps, moc.oldMigratedOps = getMigrationVolumeOpCounts(cs, pluginName)
return &moc
}
func (moc *migrationOpCheck) validateMigrationVolumeOpCounts() {
if moc.skipCheck {
return
}
newInTreeOps, _ := getMigrationVolumeOpCounts(moc.cs, moc.pluginName)
for op, count := range newInTreeOps {
if count != moc.oldInTreeOps[op] {
framework.Failf("In-tree plugin %v migrated to CSI Driver, however found %v %v metrics for in-tree plugin", moc.pluginName, count-moc.oldInTreeOps[op], op)
}
}
// We don't check for migrated metrics because some negative test cases
// may not do any volume operations and therefore not emit any metrics
}
// skipVolTypePatterns skips the given volume type patterns if the driver supports dynamic provisioning.
func skipVolTypePatterns(pattern testpatterns.TestPattern, driver TestDriver, skipVolTypes map[testpatterns.TestVolType]bool) {
_, supportsProvisioning := driver.(DynamicPVTestDriver)
if supportsProvisioning && skipVolTypes[pattern.VolType] {
e2eskipper.Skipf("Driver supports dynamic provisioning, skipping %s pattern", pattern.VolType)
}
}
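// tryFunc runs f and converts a panic inside f into a returned error so that callers can continue cleanup.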
func tryFunc(f func()) error {
var err error
if f == nil {
return nil
}
defer func() {
if recoverError := recover(); recoverError != nil {
err = fmt.Errorf("%v", recoverError)
}
}()
f()
return err
}
| test/e2e/storage/testsuites/base.go | 1 | https://github.com/kubernetes/kubernetes/commit/8c175344294cccb2c06342605e3f2cd6a91d60c7 | [
0.026468545198440552,
0.0011047673178836703,
0.00016121435328386724,
0.00017244491027668118,
0.0036670775152742863
] |
{
"id": 0,
"code_window": [
"\t\t// reconsidered if we eventually need to move in-tree storage tests out.\n",
"\t\tsc.Parameters[\"csi.storage.k8s.io/fstype\"] = fsType\n",
"\t}\n",
"\treturn testsuites.GetStorageClass(sc.Provisioner, sc.Parameters, sc.VolumeBindingMode, f.Namespace.Name, \"e2e-sc\")\n",
"}\n",
"\n",
"func loadSnapshotClass(filename string) (*unstructured.Unstructured, error) {\n",
"\tdata, err := ioutil.ReadFile(filename)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn testsuites.CopyStorageClass(sc, f.Namespace.Name, \"e2e-sc\")\n"
],
"file_path": "test/e2e/storage/external/external.go",
"type": "replace",
"edit_start_line_idx": 301
} | package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = [
"cache_test.go",
"cached_token_authenticator_test.go",
],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/audit:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
"//vendor/github.com/google/go-cmp/cmp:go_default_library",
"//vendor/github.com/google/uuid:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"cache_simple.go",
"cache_striped.go",
"cached_token_authenticator.go",
"stats.go",
],
importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/authentication/token/cache",
importpath = "k8s.io/apiserver/pkg/authentication/token/cache",
deps = [
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/cache:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/audit:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
"//staging/src/k8s.io/component-base/metrics:go_default_library",
"//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library",
"//vendor/golang.org/x/sync/singleflight:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
| staging/src/k8s.io/apiserver/pkg/authentication/token/cache/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/8c175344294cccb2c06342605e3f2cd6a91d60c7 | [
0.0001750830706441775,
0.0001713903620839119,
0.00016690244956407696,
0.000170716448337771,
0.0000028319618650129996
] |
{
"id": 0,
"code_window": [
"\t\t// reconsidered if we eventually need to move in-tree storage tests out.\n",
"\t\tsc.Parameters[\"csi.storage.k8s.io/fstype\"] = fsType\n",
"\t}\n",
"\treturn testsuites.GetStorageClass(sc.Provisioner, sc.Parameters, sc.VolumeBindingMode, f.Namespace.Name, \"e2e-sc\")\n",
"}\n",
"\n",
"func loadSnapshotClass(filename string) (*unstructured.Unstructured, error) {\n",
"\tdata, err := ioutil.ReadFile(filename)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn testsuites.CopyStorageClass(sc, f.Namespace.Name, \"e2e-sc\")\n"
],
"file_path": "test/e2e/storage/external/external.go",
"type": "replace",
"edit_start_line_idx": 301
} | /*
Copyright (c) 2014 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package progress
// Tee works like Unix tee; it forwards all progress reports it receives to the
// specified sinks.
func Tee(s1, s2 Sinker) Sinker {
fn := func() chan<- Report {
d1 := s1.Sink()
d2 := s2.Sink()
u := make(chan Report)
go tee(u, d1, d2)
return u
}
return SinkFunc(fn)
}
func tee(u <-chan Report, d1, d2 chan<- Report) {
defer close(d1)
defer close(d2)
for r := range u {
d1 <- r
d2 <- r
}
}
| vendor/github.com/vmware/govmomi/vim25/progress/tee.go | 0 | https://github.com/kubernetes/kubernetes/commit/8c175344294cccb2c06342605e3f2cd6a91d60c7 | [
0.00017801669309847057,
0.00017626781482249498,
0.00017484267300460488,
0.00017573886725585908,
0.0000013290958804645925
] |
{
"id": 0,
"code_window": [
"\t\t// reconsidered if we eventually need to move in-tree storage tests out.\n",
"\t\tsc.Parameters[\"csi.storage.k8s.io/fstype\"] = fsType\n",
"\t}\n",
"\treturn testsuites.GetStorageClass(sc.Provisioner, sc.Parameters, sc.VolumeBindingMode, f.Namespace.Name, \"e2e-sc\")\n",
"}\n",
"\n",
"func loadSnapshotClass(filename string) (*unstructured.Unstructured, error) {\n",
"\tdata, err := ioutil.ReadFile(filename)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn testsuites.CopyStorageClass(sc, f.Namespace.Name, \"e2e-sc\")\n"
],
"file_path": "test/e2e/storage/external/external.go",
"type": "replace",
"edit_start_line_idx": 301
} | apiVersion: v1
kind: ServiceAccount
metadata:
name: ip-masq-agent
namespace: kube-system
labels:
k8s-app: ip-masq-agent
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: ip-masq-agent
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: ip-masq-agent
template:
metadata:
labels:
k8s-app: ip-masq-agent
spec:
priorityClassName: system-node-critical
serviceAccountName: ip-masq-agent
hostNetwork: true
containers:
- name: ip-masq-agent
image: k8s.gcr.io/networking/ip-masq-agent-amd64:v2.6.0
args:
- --masq-chain=IP-MASQ
- --nomasq-all-reserved-ranges
resources:
requests:
cpu: 10m
memory: 16Mi
securityContext:
privileged: true
volumeMounts:
- name: config
mountPath: /etc/config
nodeSelector:
kubernetes.io/os: linux
node.kubernetes.io/masq-agent-ds-ready: "true"
volumes:
- name: config
configMap:
# Note this ConfigMap must be created in the same namespace as the daemon pods - this spec uses kube-system
name: ip-masq-agent
optional: true
items:
# The daemon looks for its config in a YAML file at /etc/config/ip-masq-agent
- key: config
path: ip-masq-agent
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
- key: "CriticalAddonsOnly"
operator: "Exists"
| cluster/addons/ip-masq-agent/ip-masq-agent.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/8c175344294cccb2c06342605e3f2cd6a91d60c7 | [
0.0001770446979207918,
0.00017204562027473003,
0.00016671921184752136,
0.00017317057063337415,
0.000003266703743065591
] |
{
"id": 1,
"code_window": [
"\t\t\tr.Sc = dDriver.GetDynamicProvisionStorageClass(r.Config, pattern.FsType)\n",
"\n",
"\t\t\tif pattern.BindingMode != \"\" {\n",
"\t\t\t\tr.Sc.VolumeBindingMode = &pattern.BindingMode\n",
"\t\t\t}\n",
"\t\t\tif pattern.AllowExpansion != false {\n",
"\t\t\t\tr.Sc.AllowVolumeExpansion = &pattern.AllowExpansion\n",
"\t\t\t}\n",
"\n",
"\t\t\tginkgo.By(\"creating a StorageClass \" + r.Sc.Name)\n",
"\n",
"\t\t\tr.Sc, err = cs.StorageV1().StorageClasses().Create(context.TODO(), r.Sc, metav1.CreateOptions{})\n",
"\t\t\tframework.ExpectNoError(err)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tr.Sc.AllowVolumeExpansion = &pattern.AllowExpansion\n"
],
"file_path": "test/e2e/storage/testsuites/base.go",
"type": "replace",
"edit_start_line_idx": 244
} | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testsuites
import (
"context"
"flag"
"fmt"
"math"
"regexp"
"strings"
"time"
"github.com/onsi/ginkgo"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/component-base/metrics/testutil"
csitrans "k8s.io/csi-translation-lib"
"k8s.io/kubernetes/test/e2e/framework"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/podlogs"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
var (
migratedPlugins *string
minValidSize = "1Ki"
maxValidSize = "10Ei"
)
func init() {
migratedPlugins = flag.String("storage.migratedPlugins", "", "comma separated list of in-tree plugin names of form 'kubernetes.io/{pluginName}' migrated to CSI")
}
type opCounts map[string]int64
// migrationOpCheck validates migrated metrics.
type migrationOpCheck struct {
cs clientset.Interface
pluginName string
skipCheck bool
// The old ops are not set if skipCheck is true.
oldInTreeOps opCounts
oldMigratedOps opCounts
}
// BaseSuites is a list of storage test suites that work for in-tree and CSI drivers
var BaseSuites = []func() TestSuite{
InitVolumesTestSuite,
InitVolumeIOTestSuite,
InitVolumeModeTestSuite,
InitSubPathTestSuite,
InitProvisioningTestSuite,
InitMultiVolumeTestSuite,
InitVolumeExpandTestSuite,
InitDisruptiveTestSuite,
InitVolumeLimitsTestSuite,
InitTopologyTestSuite,
InitVolumeStressTestSuite,
InitFsGroupChangePolicyTestSuite,
}
// CSISuites is a list of storage test suites that work only for CSI drivers
var CSISuites = append(BaseSuites,
InitEphemeralTestSuite,
InitSnapshottableTestSuite,
InitSnapshottableStressTestSuite,
)
// TestSuite represents an interface for a set of tests which works with TestDriver
type TestSuite interface {
// GetTestSuiteInfo returns the TestSuiteInfo for this TestSuite
GetTestSuiteInfo() TestSuiteInfo
// DefineTests defines tests of the testpattern for the driver.
// Called inside a Ginkgo context that reflects the current driver and test pattern,
// so the test suite can define tests directly with ginkgo.It.
DefineTests(TestDriver, testpatterns.TestPattern)
// SkipRedundantSuite will skip the test suite based on the given TestPattern and TestDriver
SkipRedundantSuite(TestDriver, testpatterns.TestPattern)
}
// TestSuiteInfo represents a set of parameters for TestSuite
type TestSuiteInfo struct {
Name string // name of the TestSuite
FeatureTag string // featureTag for the TestSuite
TestPatterns []testpatterns.TestPattern // Slice of TestPattern for the TestSuite
SupportedSizeRange e2evolume.SizeRange // Size range supported by the test suite
}
func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string {
tsInfo := suite.GetTestSuiteInfo()
return fmt.Sprintf("[Testpattern: %s]%s %s%s", pattern.Name, pattern.FeatureTag, tsInfo.Name, tsInfo.FeatureTag)
}
// DefineTestSuite defines tests for all testpatterns and all testSuites for a driver
func DefineTestSuite(driver TestDriver, tsInits []func() TestSuite) {
for _, testSuiteInit := range tsInits {
suite := testSuiteInit()
for _, pattern := range suite.GetTestSuiteInfo().TestPatterns {
p := pattern
ginkgo.Context(getTestNameStr(suite, p), func() {
ginkgo.BeforeEach(func() {
// Skip unsupported tests to avoid unnecessary resource initialization
suite.SkipRedundantSuite(driver, p)
skipUnsupportedTest(driver, p)
})
suite.DefineTests(driver, p)
})
}
}
}
// skipUnsupportedTest will skip tests if the combination of driver and testpattern
// is not suitable to be tested.
// Whether it needs to be skipped is checked by the following steps:
// 1. Check whether SnapshotType is supported by the driver from its interface
// 2. Check whether volType is supported by the driver from its interface
// 3. Check if fsType is supported
// 4. Check with driver specific logic
//
// Test suites can also skip tests inside their own DefineTests function or in
// individual tests.
func skipUnsupportedTest(driver TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
var isSupported bool
// 0. Check with driver specific logic
driver.SkipUnsupportedTest(pattern)
	// 1. Check whether volType is supported by the driver from its interface
switch pattern.VolType {
case testpatterns.InlineVolume:
_, isSupported = driver.(InlineVolumeTestDriver)
case testpatterns.PreprovisionedPV:
_, isSupported = driver.(PreprovisionedPVTestDriver)
case testpatterns.DynamicPV, testpatterns.GenericEphemeralVolume:
_, isSupported = driver.(DynamicPVTestDriver)
case testpatterns.CSIInlineVolume:
_, isSupported = driver.(EphemeralTestDriver)
default:
isSupported = false
}
if !isSupported {
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
}
// 2. Check if fsType is supported
if !dInfo.SupportedFsType.Has(pattern.FsType) {
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.FsType)
}
if pattern.FsType == "xfs" && framework.NodeOSDistroIs("windows") {
e2eskipper.Skipf("Distro doesn't support xfs -- skipping")
}
if pattern.FsType == "ntfs" && !framework.NodeOSDistroIs("windows") {
e2eskipper.Skipf("Distro %s doesn't support ntfs -- skipping", framework.TestContext.NodeOSDistro)
}
}
// VolumeResource is a generic implementation of TestResource that will be able to
// be used in most TestSuites.
// See volume_io.go or volumes.go in test/e2e/storage/testsuites/ for how to use this resource.
// Also, see subpath.go in the same directory for how to extend and use it.
type VolumeResource struct {
Config *PerTestConfig
Pattern testpatterns.TestPattern
VolSource *v1.VolumeSource
Pvc *v1.PersistentVolumeClaim
Pv *v1.PersistentVolume
Sc *storagev1.StorageClass
Volume TestVolume
}
// CreateVolumeResource constructs a VolumeResource for the current test. It knows how to deal with
// different test pattern volume types.
func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, testVolumeSizeRange e2evolume.SizeRange) *VolumeResource {
r := VolumeResource{
Config: config,
Pattern: pattern,
}
dInfo := driver.GetDriverInfo()
f := config.Framework
cs := f.ClientSet
// Create volume for pre-provisioned volume tests
r.Volume = CreateVolume(driver, config, pattern.VolType)
switch pattern.VolType {
case testpatterns.InlineVolume:
framework.Logf("Creating resource for inline volume")
if iDriver, ok := driver.(InlineVolumeTestDriver); ok {
r.VolSource = iDriver.GetVolumeSource(false, pattern.FsType, r.Volume)
}
case testpatterns.PreprovisionedPV:
framework.Logf("Creating resource for pre-provisioned PV")
if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, pattern.FsType, r.Volume)
if pvSource != nil {
r.Pv, r.Pvc = createPVCPV(f, dInfo.Name, pvSource, volumeNodeAffinity, pattern.VolMode, dInfo.RequiredAccessModes)
r.VolSource = createVolumeSource(r.Pvc.Name, false /* readOnly */)
}
}
case testpatterns.DynamicPV, testpatterns.GenericEphemeralVolume:
framework.Logf("Creating resource for dynamic PV")
if dDriver, ok := driver.(DynamicPVTestDriver); ok {
var err error
driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
framework.Logf("Using claimSize:%s, test suite supported size:%v, driver(%s) supported size:%v ", claimSize, testVolumeSizeRange, dDriver.GetDriverInfo().Name, testVolumeSizeRange)
r.Sc = dDriver.GetDynamicProvisionStorageClass(r.Config, pattern.FsType)
if pattern.BindingMode != "" {
r.Sc.VolumeBindingMode = &pattern.BindingMode
}
if pattern.AllowExpansion != false {
r.Sc.AllowVolumeExpansion = &pattern.AllowExpansion
}
ginkgo.By("creating a StorageClass " + r.Sc.Name)
r.Sc, err = cs.StorageV1().StorageClasses().Create(context.TODO(), r.Sc, metav1.CreateOptions{})
framework.ExpectNoError(err)
switch pattern.VolType {
case testpatterns.DynamicPV:
r.Pv, r.Pvc = createPVCPVFromDynamicProvisionSC(
f, dInfo.Name, claimSize, r.Sc, pattern.VolMode, dInfo.RequiredAccessModes)
r.VolSource = createVolumeSource(r.Pvc.Name, false /* readOnly */)
case testpatterns.GenericEphemeralVolume:
driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
r.VolSource = createEphemeralVolumeSource(r.Sc.Name, dInfo.RequiredAccessModes, claimSize, false /* readOnly */)
}
}
case testpatterns.CSIInlineVolume:
framework.Logf("Creating resource for CSI ephemeral inline volume")
if eDriver, ok := driver.(EphemeralTestDriver); ok {
attributes, _, _ := eDriver.GetVolume(config, 0)
r.VolSource = &v1.VolumeSource{
CSI: &v1.CSIVolumeSource{
Driver: eDriver.GetCSIDriverName(config),
VolumeAttributes: attributes,
},
}
}
default:
framework.Failf("VolumeResource doesn't support: %s", pattern.VolType)
}
if r.VolSource == nil {
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
}
return &r
}
func createVolumeSource(pvcName string, readOnly bool) *v1.VolumeSource {
return &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvcName,
ReadOnly: readOnly,
},
}
}
func createEphemeralVolumeSource(scName string, accessModes []v1.PersistentVolumeAccessMode, claimSize string, readOnly bool) *v1.VolumeSource {
if len(accessModes) == 0 {
accessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
}
return &v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{
VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
Spec: v1.PersistentVolumeClaimSpec{
StorageClassName: &scName,
AccessModes: accessModes,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse(claimSize),
},
},
},
},
ReadOnly: readOnly,
},
}
}
// CleanupResource cleans up VolumeResource
func (r *VolumeResource) CleanupResource() error {
f := r.Config.Framework
var cleanUpErrs []error
if r.Pvc != nil || r.Pv != nil {
switch r.Pattern.VolType {
case testpatterns.PreprovisionedPV:
ginkgo.By("Deleting pv and pvc")
if errs := e2epv.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.Pv, r.Pvc); len(errs) != 0 {
framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs))
}
case testpatterns.DynamicPV:
ginkgo.By("Deleting pvc")
// We only delete the PVC so that PV (and disk) can be cleaned up by dynamic provisioner
if r.Pv != nil && r.Pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete {
framework.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v",
r.Pv.Name, v1.PersistentVolumeReclaimDelete)
}
if r.Pvc != nil {
cs := f.ClientSet
pv := r.Pv
if pv == nil && r.Pvc.Name != "" {
// This happens for late binding. Check whether we have a volume now that we need to wait for.
pvc, err := cs.CoreV1().PersistentVolumeClaims(r.Pvc.Namespace).Get(context.TODO(), r.Pvc.Name, metav1.GetOptions{})
switch {
case err == nil:
if pvc.Spec.VolumeName != "" {
pv, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
if err != nil {
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to find PV %v", pvc.Spec.VolumeName))
}
}
case apierrors.IsNotFound(err):
// Without the PVC, we cannot locate the corresponding PV. Let's
// hope that it is gone.
default:
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to find PVC %v", r.Pvc.Name))
}
}
err := e2epv.DeletePersistentVolumeClaim(f.ClientSet, r.Pvc.Name, f.Namespace.Name)
if err != nil {
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to delete PVC %v", r.Pvc.Name))
}
if pv != nil {
err = e2epv.WaitForPersistentVolumeDeleted(f.ClientSet, pv.Name, 5*time.Second, 5*time.Minute)
if err != nil {
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err,
"Persistent Volume %v not deleted by dynamic provisioner", pv.Name))
}
}
}
default:
framework.Failf("Found PVC (%v) or PV (%v) but not running Preprovisioned or Dynamic test pattern", r.Pvc, r.Pv)
}
}
if r.Sc != nil {
ginkgo.By("Deleting sc")
if err := deleteStorageClass(f.ClientSet, r.Sc.Name); err != nil {
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to delete StorageClass %v", r.Sc.Name))
}
}
// Cleanup volume for pre-provisioned volume tests
if r.Volume != nil {
if err := tryFunc(r.Volume.DeleteVolume); err != nil {
cleanUpErrs = append(cleanUpErrs, errors.Wrap(err, "Failed to delete Volume"))
}
}
return utilerrors.NewAggregate(cleanUpErrs)
}
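// createPVCPV creates a pre-provisioned PV from the given source together with a matching PVC and waits for the two to bind.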
func createPVCPV(
f *framework.Framework,
name string,
pvSource *v1.PersistentVolumeSource,
volumeNodeAffinity *v1.VolumeNodeAffinity,
volMode v1.PersistentVolumeMode,
accessModes []v1.PersistentVolumeAccessMode,
) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
pvConfig := e2epv.PersistentVolumeConfig{
NamePrefix: fmt.Sprintf("%s-", name),
StorageClassName: f.Namespace.Name,
PVSource: *pvSource,
NodeAffinity: volumeNodeAffinity,
AccessModes: accessModes,
}
pvcConfig := e2epv.PersistentVolumeClaimConfig{
StorageClassName: &f.Namespace.Name,
AccessModes: accessModes,
}
if volMode != "" {
pvConfig.VolumeMode = &volMode
pvcConfig.VolumeMode = &volMode
}
framework.Logf("Creating PVC and PV")
pv, pvc, err := e2epv.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false)
framework.ExpectNoError(err, "PVC, PV creation failed")
err = e2epv.WaitOnPVandPVC(f.ClientSet, f.Namespace.Name, pv, pvc)
framework.ExpectNoError(err, "PVC, PV failed to bind")
return pv, pvc
}
func createPVCPVFromDynamicProvisionSC(
f *framework.Framework,
name string,
claimSize string,
sc *storagev1.StorageClass,
volMode v1.PersistentVolumeMode,
accessModes []v1.PersistentVolumeAccessMode,
) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
cs := f.ClientSet
ns := f.Namespace.Name
ginkgo.By("creating a claim")
pvcCfg := e2epv.PersistentVolumeClaimConfig{
NamePrefix: name,
ClaimSize: claimSize,
StorageClassName: &(sc.Name),
AccessModes: accessModes,
VolumeMode: &volMode,
}
pvc := e2epv.MakePersistentVolumeClaim(pvcCfg, ns)
var err error
pvc, err = e2epv.CreatePVC(cs, ns, pvc)
framework.ExpectNoError(err)
if !isDelayedBinding(sc) {
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
}
pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
var pv *v1.PersistentVolume
if !isDelayedBinding(sc) {
pv, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
framework.ExpectNoError(err)
}
return pv, pvc
}
func isDelayedBinding(sc *storagev1.StorageClass) bool {
if sc.VolumeBindingMode != nil {
return *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer
}
return false
}
// deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"
func deleteStorageClass(cs clientset.Interface, className string) error {
err := cs.StorageV1().StorageClasses().Delete(context.TODO(), className, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return err
}
return nil
}
// convertTestConfig returns a framework test config with the
// parameters specified for the testsuite or (if available) the
// dynamically created config for the volume server.
//
// This is done because TestConfig is the public API for
// the testsuites package whereas volume.TestConfig is merely
// an implementation detail. It contains fields that have no effect,
// which makes it unsuitable for use in the testsuites public API.
func convertTestConfig(in *PerTestConfig) e2evolume.TestConfig {
if in.ServerConfig != nil {
return *in.ServerConfig
}
return e2evolume.TestConfig{
Namespace: in.Framework.Namespace.Name,
Prefix: in.Prefix,
ClientNodeSelection: in.ClientNodeSelection,
}
}
// getSizeRangesIntersection takes two instances of storage size ranges and determines the
// intersection of the intervals (if it exists) and returns the minimum of the intersection
// to be used as the claim size for the test.
// If a bound is not set, there is no minimum or maximum size limitation and the default size is used for it.
func getSizeRangesIntersection(first e2evolume.SizeRange, second e2evolume.SizeRange) (string, error) {
var firstMin, firstMax, secondMin, secondMax resource.Quantity
var err error
	// if a SizeRange bound is not set, assign the default minimum or maximum size
if len(first.Min) == 0 {
first.Min = minValidSize
}
if len(first.Max) == 0 {
first.Max = maxValidSize
}
if len(second.Min) == 0 {
second.Min = minValidSize
}
if len(second.Max) == 0 {
second.Max = maxValidSize
}
if firstMin, err = resource.ParseQuantity(first.Min); err != nil {
return "", err
}
if firstMax, err = resource.ParseQuantity(first.Max); err != nil {
return "", err
}
if secondMin, err = resource.ParseQuantity(second.Min); err != nil {
return "", err
}
if secondMax, err = resource.ParseQuantity(second.Max); err != nil {
return "", err
}
	intersectionStart := math.Max(float64(firstMin.Value()), float64(secondMin.Value()))
	intersectionEnd := math.Min(float64(firstMax.Value()), float64(secondMax.Value()))
	// the minimum of the intersection shall be returned as the claim size
	var intersectionMin resource.Quantity
	if intersectionEnd-intersectionStart >= 0 { // the ranges intersect
		intersectionMin = *resource.NewQuantity(int64(intersectionStart), "BinarySI") // convert the value to BinarySI format, e.g. 5Gi
		// return the minimum of the intersection as the claim size
		return intersectionMin.String(), nil
	}
return "", fmt.Errorf("intersection of size ranges %+v, %+v is null", first, second)
}
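// Illustrative example (not part of the original suite; the variable names are
// assumptions): with a driver range of [1Gi, 10Gi] and a test range whose Min
// is 5Gi (Max unset, so it falls back to maxValidSize), the intersection is
// [5Gi, 10Gi] and the returned claim size is "5Gi".
//
//	driverRange := e2evolume.SizeRange{Min: "1Gi", Max: "10Gi"}
//	testRange := e2evolume.SizeRange{Min: "5Gi"}
//	claimSize, err := getSizeRangesIntersection(driverRange, testRange) // "5Gi", nil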
func getSnapshot(claimName string, ns, snapshotClassName string) *unstructured.Unstructured {
snapshot := &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": "VolumeSnapshot",
"apiVersion": snapshotAPIVersion,
"metadata": map[string]interface{}{
"generateName": "snapshot-",
"namespace": ns,
},
"spec": map[string]interface{}{
"volumeSnapshotClassName": snapshotClassName,
"source": map[string]interface{}{
"persistentVolumeClaimName": claimName,
},
},
},
}
return snapshot
}
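// For reference, the unstructured object built above corresponds to a manifest
// roughly of the following shape (apiVersion comes from snapshotAPIVersion and
// the bracketed values are placeholders):
//
//	kind: VolumeSnapshot
//	metadata:
//	  generateName: snapshot-
//	  namespace: <ns>
//	spec:
//	  volumeSnapshotClassName: <snapshotClassName>
//	  source:
//	    persistentVolumeClaimName: <claimName>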
func getPreProvisionedSnapshot(snapName, ns, snapshotContentName string) *unstructured.Unstructured {
snapshot := &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": "VolumeSnapshot",
"apiVersion": snapshotAPIVersion,
"metadata": map[string]interface{}{
"name": snapName,
"namespace": ns,
},
"spec": map[string]interface{}{
"source": map[string]interface{}{
"volumeSnapshotContentName": snapshotContentName,
},
},
},
}
return snapshot
}
func getPreProvisionedSnapshotContent(snapcontentName, snapshotName, snapshotNamespace, snapshotHandle, deletionPolicy, csiDriverName string) *unstructured.Unstructured {
snapshotContent := &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": "VolumeSnapshotContent",
"apiVersion": snapshotAPIVersion,
"metadata": map[string]interface{}{
"name": snapcontentName,
},
"spec": map[string]interface{}{
"source": map[string]interface{}{
"snapshotHandle": snapshotHandle,
},
"volumeSnapshotRef": map[string]interface{}{
"name": snapshotName,
"namespace": snapshotNamespace,
},
"driver": csiDriverName,
"deletionPolicy": deletionPolicy,
},
},
}
return snapshotContent
}
func getPreProvisionedSnapshotContentName(uuid types.UID) string {
return fmt.Sprintf("pre-provisioned-snapcontent-%s", string(uuid))
}
func getPreProvisionedSnapshotName(uuid types.UID) string {
return fmt.Sprintf("pre-provisioned-snapshot-%s", string(uuid))
}
// StartPodLogs begins capturing log output and events from current
// and future pods running in the namespace of the framework. That
// ends when the returned cleanup function is called.
//
// The output goes to log files (when using --report-dir, as in the
// CI) or the output stream (otherwise).
func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func() {
ctx, cancel := context.WithCancel(context.Background())
cs := f.ClientSet
ns := driverNamespace.Name
to := podlogs.LogOutput{
StatusWriter: ginkgo.GinkgoWriter,
}
if framework.TestContext.ReportDir == "" {
to.LogWriter = ginkgo.GinkgoWriter
} else {
test := ginkgo.CurrentGinkgoTestDescription()
		// Clean up each individual component text so that it
		// contains only characters that are valid in a file
		// name.
reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
var components []string
for _, component := range test.ComponentTexts {
components = append(components, reg.ReplaceAllString(component, "_"))
}
// We end the prefix with a slash to ensure that all logs
// end up in a directory named after the current test.
//
// Each component name maps to a directory. This
// avoids cluttering the root artifact directory and
// keeps each directory name smaller (the full test
// name at one point exceeded 256 characters, which was
// too much for some filesystems).
to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
strings.Join(components, "/") + "/"
}
podlogs.CopyAllLogs(ctx, cs, ns, to)
// pod events are something that the framework already collects itself
// after a failed test. Logging them live is only useful for interactive
// debugging, not when we collect reports.
if framework.TestContext.ReportDir == "" {
podlogs.WatchPods(ctx, cs, ns, ginkgo.GinkgoWriter)
}
return cancel
}
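// Minimal usage sketch (assuming a framework instance f and a driver namespace
// already exist); the returned function stops the log capture:
//
//	cancelLogging := StartPodLogs(f, driverNamespace)
//	defer cancelLogging()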
func getVolumeOpsFromMetricsForPlugin(ms testutil.Metrics, pluginName string) opCounts {
totOps := opCounts{}
for method, samples := range ms {
switch method {
case "storage_operation_status_count":
for _, sample := range samples {
plugin := string(sample.Metric["volume_plugin"])
if pluginName != plugin {
continue
}
opName := string(sample.Metric["operation_name"])
if opName == "verify_controller_attached_volume" {
// We ignore verify_controller_attached_volume because it does not call into
// the plugin. It only watches Node API and updates Actual State of World cache
continue
}
totOps[opName] = totOps[opName] + int64(sample.Value)
}
}
}
return totOps
}
func getVolumeOpCounts(c clientset.Interface, pluginName string) opCounts {
if !framework.ProviderIs("gce", "gke", "aws") {
return opCounts{}
}
nodeLimit := 25
metricsGrabber, err := e2emetrics.NewMetricsGrabber(c, nil, true, false, true, false, false)
if err != nil {
framework.ExpectNoError(err, "Error creating metrics grabber: %v", err)
}
if !metricsGrabber.HasControlPlanePods() {
framework.Logf("Warning: Environment does not support getting controller-manager metrics")
return opCounts{}
}
controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
framework.ExpectNoError(err, "Error getting c-m metrics : %v", err)
totOps := getVolumeOpsFromMetricsForPlugin(testutil.Metrics(controllerMetrics), pluginName)
framework.Logf("Node name not specified for getVolumeOpCounts, falling back to listing nodes from API Server")
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "Error listing nodes: %v", err)
if len(nodes.Items) <= nodeLimit {
// For large clusters with > nodeLimit nodes it is too time consuming to
// gather metrics from all nodes. We just ignore the node metrics
// for those clusters
for _, node := range nodes.Items {
nodeMetrics, err := metricsGrabber.GrabFromKubelet(node.GetName())
framework.ExpectNoError(err, "Error getting Kubelet %v metrics: %v", node.GetName(), err)
totOps = addOpCounts(totOps, getVolumeOpsFromMetricsForPlugin(testutil.Metrics(nodeMetrics), pluginName))
}
} else {
framework.Logf("Skipping operation metrics gathering from nodes in getVolumeOpCounts, greater than %v nodes", nodeLimit)
}
return totOps
}
func addOpCounts(o1 opCounts, o2 opCounts) opCounts {
totOps := opCounts{}
seen := sets.NewString()
for op, count := range o1 {
seen.Insert(op)
totOps[op] = totOps[op] + count + o2[op]
}
for op, count := range o2 {
if !seen.Has(op) {
totOps[op] = totOps[op] + count
}
}
return totOps
}
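// Worked example with hypothetical operation names:
// addOpCounts(opCounts{"volume_attach": 2}, opCounts{"volume_attach": 1, "volume_detach": 3})
// returns opCounts{"volume_attach": 3, "volume_detach": 3}.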
func getMigrationVolumeOpCounts(cs clientset.Interface, pluginName string) (opCounts, opCounts) {
if len(pluginName) > 0 {
var migratedOps opCounts
l := csitrans.New()
csiName, err := l.GetCSINameFromInTreeName(pluginName)
if err != nil {
framework.Logf("Could not find CSI Name for in-tree plugin %v", pluginName)
migratedOps = opCounts{}
} else {
csiName = "kubernetes.io/csi:" + csiName
migratedOps = getVolumeOpCounts(cs, csiName)
}
return getVolumeOpCounts(cs, pluginName), migratedOps
}
// Not an in-tree driver
framework.Logf("Test running for native CSI Driver, not checking metrics")
return opCounts{}, opCounts{}
}
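// As an illustration: for the in-tree plugin name "kubernetes.io/gce-pd" the
// CSI translation library reports the driver "pd.csi.storage.gke.io", so the
// migrated counts are read for "kubernetes.io/csi:pd.csi.storage.gke.io" while
// the in-tree counts are still read for "kubernetes.io/gce-pd".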
func newMigrationOpCheck(cs clientset.Interface, pluginName string) *migrationOpCheck {
moc := migrationOpCheck{
cs: cs,
pluginName: pluginName,
}
if len(pluginName) == 0 {
// This is a native CSI Driver and we don't check ops
moc.skipCheck = true
return &moc
}
if !sets.NewString(strings.Split(*migratedPlugins, ",")...).Has(pluginName) {
// In-tree plugin is not migrated
framework.Logf("In-tree plugin %v is not migrated, not validating any metrics", pluginName)
// We don't check in-tree plugin metrics because some negative test
// cases may not do any volume operations and therefore not emit any
// metrics
// We don't check counts for the Migrated version of the driver because
// if tests are running in parallel a test could be using the CSI Driver
// natively and increase the metrics count
// TODO(dyzz): Add a dimension to OperationGenerator metrics for
// "migrated"->true/false so that we can disambiguate migrated metrics
// and native CSI Driver metrics. This way we can check the counts for
// migrated version of the driver for stronger negative test case
// guarantees (as well as more informative metrics).
moc.skipCheck = true
return &moc
}
moc.oldInTreeOps, moc.oldMigratedOps = getMigrationVolumeOpCounts(cs, pluginName)
return &moc
}
func (moc *migrationOpCheck) validateMigrationVolumeOpCounts() {
if moc.skipCheck {
return
}
newInTreeOps, _ := getMigrationVolumeOpCounts(moc.cs, moc.pluginName)
for op, count := range newInTreeOps {
if count != moc.oldInTreeOps[op] {
framework.Failf("In-tree plugin %v migrated to CSI Driver, however found %v %v metrics for in-tree plugin", moc.pluginName, count-moc.oldInTreeOps[op], op)
}
}
// We don't check for migrated metrics because some negative test cases
// may not do any volume operations and therefore not emit any metrics
}
// skipVolTypePatterns skips the given test pattern if the driver supports dynamic provisioning and the pattern's volume type is listed in skipVolTypes.
func skipVolTypePatterns(pattern testpatterns.TestPattern, driver TestDriver, skipVolTypes map[testpatterns.TestVolType]bool) {
_, supportsProvisioning := driver.(DynamicPVTestDriver)
if supportsProvisioning && skipVolTypes[pattern.VolType] {
e2eskipper.Skipf("Driver supports dynamic provisioning, skipping %s pattern", pattern.VolType)
}
}
func tryFunc(f func()) error {
var err error
if f == nil {
return nil
}
defer func() {
if recoverError := recover(); recoverError != nil {
err = fmt.Errorf("%v", recoverError)
}
}()
f()
return err
}
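// Usage sketch: wrapping a cleanup callback so that a panic inside it (for
// example from a failed assertion) surfaces as an error instead of unwinding
// the caller, as done for r.Volume.DeleteVolume above:
//
//	if err := tryFunc(volume.DeleteVolume); err != nil {
//		// aggregate or log err
//	}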
| test/e2e/storage/testsuites/base.go | 1 | https://github.com/kubernetes/kubernetes/commit/8c175344294cccb2c06342605e3f2cd6a91d60c7 | [
0.9965962767601013,
0.02374259941279888,
0.00015999229799490422,
0.000174635075381957,
0.14920708537101746
] |
{
"id": 1,
"code_window": [
"\t\t\tr.Sc = dDriver.GetDynamicProvisionStorageClass(r.Config, pattern.FsType)\n",
"\n",
"\t\t\tif pattern.BindingMode != \"\" {\n",
"\t\t\t\tr.Sc.VolumeBindingMode = &pattern.BindingMode\n",
"\t\t\t}\n",
"\t\t\tif pattern.AllowExpansion != false {\n",
"\t\t\t\tr.Sc.AllowVolumeExpansion = &pattern.AllowExpansion\n",
"\t\t\t}\n",
"\n",
"\t\t\tginkgo.By(\"creating a StorageClass \" + r.Sc.Name)\n",
"\n",
"\t\t\tr.Sc, err = cs.StorageV1().StorageClasses().Create(context.TODO(), r.Sc, metav1.CreateOptions{})\n",
"\t\t\tframework.ExpectNoError(err)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tr.Sc.AllowVolumeExpansion = &pattern.AllowExpansion\n"
],
"file_path": "test/e2e/storage/testsuites/base.go",
"type": "replace",
"edit_start_line_idx": 244
} | package assert
import (
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"strings"
)
// httpCode is a helper that returns HTTP code of the response. It returns -1 and
// an error if building a new request fails.
func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
w := httptest.NewRecorder()
req, err := http.NewRequest(method, url, nil)
if err != nil {
return -1, err
}
req.URL.RawQuery = values.Encode()
handler(w, req)
return w.Code, nil
}
// HTTPSuccess asserts that a specified handler returns a success status code.
//
// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
}
isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
if !isSuccessCode {
Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
}
return isSuccessCode
}
// HTTPRedirect asserts that a specified handler returns a redirect status code.
//
// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
}
isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
if !isRedirectCode {
Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
}
return isRedirectCode
}
// HTTPError asserts that a specified handler returns an error status code.
//
// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
}
isErrorCode := code >= http.StatusBadRequest
if !isErrorCode {
Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
}
return isErrorCode
}
// HTTPStatusCode asserts that a specified handler returns a specified status code.
//
// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
}
successful := code == statuscode
if !successful {
Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code))
}
return successful
}
// HTTPBody is a helper that returns HTTP body of the response. It returns
// empty string if building a new request fails.
func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
w := httptest.NewRecorder()
req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
if err != nil {
return ""
}
handler(w, req)
return w.Body.String()
}
// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
//
// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
body := HTTPBody(handler, method, url, values)
contains := strings.Contains(body, fmt.Sprint(str))
if !contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
}
return contains
}
// HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string.
//
// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
body := HTTPBody(handler, method, url, values)
contains := strings.Contains(body, fmt.Sprint(str))
if contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
}
return !contains
}
| vendor/github.com/stretchr/testify/assert/http_assertions.go | 0 | https://github.com/kubernetes/kubernetes/commit/8c175344294cccb2c06342605e3f2cd6a91d60c7 | [
0.00032554566860198975,
0.00017944088904187083,
0.00016231814515776932,
0.00017197347187902778,
0.00003688586730277166
] |
{
"id": 1,
"code_window": [
"\t\t\tr.Sc = dDriver.GetDynamicProvisionStorageClass(r.Config, pattern.FsType)\n",
"\n",
"\t\t\tif pattern.BindingMode != \"\" {\n",
"\t\t\t\tr.Sc.VolumeBindingMode = &pattern.BindingMode\n",
"\t\t\t}\n",
"\t\t\tif pattern.AllowExpansion != false {\n",
"\t\t\t\tr.Sc.AllowVolumeExpansion = &pattern.AllowExpansion\n",
"\t\t\t}\n",
"\n",
"\t\t\tginkgo.By(\"creating a StorageClass \" + r.Sc.Name)\n",
"\n",
"\t\t\tr.Sc, err = cs.StorageV1().StorageClasses().Create(context.TODO(), r.Sc, metav1.CreateOptions{})\n",
"\t\t\tframework.ExpectNoError(err)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tr.Sc.AllowVolumeExpansion = &pattern.AllowExpansion\n"
],
"file_path": "test/e2e/storage/testsuites/base.go",
"type": "replace",
"edit_start_line_idx": 244
} | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// AppendFunc is used to add a matching item to whatever list the caller is using
type AppendFunc func(interface{})
// ListAll calls appendFn with each value retrieved from store which matches the selector.
func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
selectAll := selector.Empty()
for _, m := range store.List() {
if selectAll {
// Avoid computing labels of the objects to speed up common flows
// of listing all objects.
appendFn(m)
continue
}
metadata, err := meta.Accessor(m)
if err != nil {
return err
}
if selector.Matches(labels.Set(metadata.GetLabels())) {
appendFn(m)
}
}
return nil
}
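// Illustrative usage (the store contents and element type are assumptions):
//
//	var names []string
//	_ = ListAll(store, labels.Everything(), func(obj interface{}) {
//		if m, err := meta.Accessor(obj); err == nil {
//			names = append(names, m.GetName())
//		}
//	})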
// ListAllByNamespace lists items in the given namespace from the Indexer and calls appendFn with each value that matches the selector.
func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error {
selectAll := selector.Empty()
if namespace == metav1.NamespaceAll {
for _, m := range indexer.List() {
if selectAll {
// Avoid computing labels of the objects to speed up common flows
// of listing all objects.
appendFn(m)
continue
}
metadata, err := meta.Accessor(m)
if err != nil {
return err
}
if selector.Matches(labels.Set(metadata.GetLabels())) {
appendFn(m)
}
}
return nil
}
items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace})
if err != nil {
// Ignore error; do slow search without index.
klog.Warningf("can not retrieve list of objects using index : %v", err)
for _, m := range indexer.List() {
metadata, err := meta.Accessor(m)
if err != nil {
return err
}
if metadata.GetNamespace() == namespace && selector.Matches(labels.Set(metadata.GetLabels())) {
appendFn(m)
}
}
return nil
}
for _, m := range items {
if selectAll {
// Avoid computing labels of the objects to speed up common flows
// of listing all objects.
appendFn(m)
continue
}
metadata, err := meta.Accessor(m)
if err != nil {
return err
}
if selector.Matches(labels.Set(metadata.GetLabels())) {
appendFn(m)
}
}
return nil
}
// GenericLister is a lister skin on a generic Indexer
type GenericLister interface {
// List will return all objects across namespaces
List(selector labels.Selector) (ret []runtime.Object, err error)
// Get will attempt to retrieve assuming that name==key
Get(name string) (runtime.Object, error)
// ByNamespace will give you a GenericNamespaceLister for one namespace
ByNamespace(namespace string) GenericNamespaceLister
}
// GenericNamespaceLister is a lister skin on a generic Indexer
type GenericNamespaceLister interface {
// List will return all objects in this namespace
List(selector labels.Selector) (ret []runtime.Object, err error)
// Get will attempt to retrieve by namespace and name
Get(name string) (runtime.Object, error)
}
// NewGenericLister creates a new instance for the genericLister.
func NewGenericLister(indexer Indexer, resource schema.GroupResource) GenericLister {
return &genericLister{indexer: indexer, resource: resource}
}
type genericLister struct {
indexer Indexer
resource schema.GroupResource
}
func (s *genericLister) List(selector labels.Selector) (ret []runtime.Object, err error) {
err = ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(runtime.Object))
})
return ret, err
}
func (s *genericLister) ByNamespace(namespace string) GenericNamespaceLister {
return &genericNamespaceLister{indexer: s.indexer, namespace: namespace, resource: s.resource}
}
func (s *genericLister) Get(name string) (runtime.Object, error) {
obj, exists, err := s.indexer.GetByKey(name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(s.resource, name)
}
return obj.(runtime.Object), nil
}
type genericNamespaceLister struct {
indexer Indexer
namespace string
resource schema.GroupResource
}
func (s *genericNamespaceLister) List(selector labels.Selector) (ret []runtime.Object, err error) {
err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(runtime.Object))
})
return ret, err
}
func (s *genericNamespaceLister) Get(name string) (runtime.Object, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(s.resource, name)
}
return obj.(runtime.Object), nil
}
| staging/src/k8s.io/client-go/tools/cache/listers.go | 0 | https://github.com/kubernetes/kubernetes/commit/8c175344294cccb2c06342605e3f2cd6a91d60c7 | [
0.00017923257837537676,
0.00017032292089425027,
0.0001598349481355399,
0.00017129055049736053,
0.000005302279078023275
] |
{
"id": 1,
"code_window": [
"\t\t\tr.Sc = dDriver.GetDynamicProvisionStorageClass(r.Config, pattern.FsType)\n",
"\n",
"\t\t\tif pattern.BindingMode != \"\" {\n",
"\t\t\t\tr.Sc.VolumeBindingMode = &pattern.BindingMode\n",
"\t\t\t}\n",
"\t\t\tif pattern.AllowExpansion != false {\n",
"\t\t\t\tr.Sc.AllowVolumeExpansion = &pattern.AllowExpansion\n",
"\t\t\t}\n",
"\n",
"\t\t\tginkgo.By(\"creating a StorageClass \" + r.Sc.Name)\n",
"\n",
"\t\t\tr.Sc, err = cs.StorageV1().StorageClasses().Create(context.TODO(), r.Sc, metav1.CreateOptions{})\n",
"\t\t\tframework.ExpectNoError(err)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tr.Sc.AllowVolumeExpansion = &pattern.AllowExpansion\n"
],
"file_path": "test/e2e/storage/testsuites/base.go",
"type": "replace",
"edit_start_line_idx": 244
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-generated-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_CertificateSigningRequest = map[string]string{
"": "Describes a certificate signing request",
"spec": "The certificate request itself and any additional information.",
"status": "Derived information about the request.",
}
func (CertificateSigningRequest) SwaggerDoc() map[string]string {
return map_CertificateSigningRequest
}
var map_CertificateSigningRequestCondition = map[string]string{
"type": "type of the condition. Known conditions include \"Approved\", \"Denied\", and \"Failed\".",
"status": "Status of the condition, one of True, False, Unknown. Approved, Denied, and Failed conditions may not be \"False\" or \"Unknown\". Defaults to \"True\". If unset, should be treated as \"True\".",
"reason": "brief reason for the request state",
"message": "human readable message with details about the request state",
"lastUpdateTime": "timestamp for the last update to this condition",
"lastTransitionTime": "lastTransitionTime is the time the condition last transitioned from one status to another. If unset, when a new condition type is added or an existing condition's status is changed, the server defaults this to the current time.",
}
func (CertificateSigningRequestCondition) SwaggerDoc() map[string]string {
return map_CertificateSigningRequestCondition
}
var map_CertificateSigningRequestSpec = map[string]string{
"": "This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users.",
"request": "Base64-encoded PKCS#10 CSR data",
"signerName": "Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:\n 1. If it's a kubelet client certificate, it is assigned\n \"kubernetes.io/kube-apiserver-client-kubelet\".\n 2. If it's a kubelet serving certificate, it is assigned\n \"kubernetes.io/kubelet-serving\".\n 3. Otherwise, it is assigned \"kubernetes.io/legacy-unknown\".\nDistribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.",
"usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12\nValid values are:\n \"signing\",\n \"digital signature\",\n \"content commitment\",\n \"key encipherment\",\n \"key agreement\",\n \"data encipherment\",\n \"cert sign\",\n \"crl sign\",\n \"encipher only\",\n \"decipher only\",\n \"any\",\n \"server auth\",\n \"client auth\",\n \"code signing\",\n \"email protection\",\n \"s/mime\",\n \"ipsec end system\",\n \"ipsec tunnel\",\n \"ipsec user\",\n \"timestamping\",\n \"ocsp signing\",\n \"microsoft sgc\",\n \"netscape sgc\"",
"username": "Information about the requesting user. See user.Info interface for details.",
"uid": "UID information about the requesting user. See user.Info interface for details.",
"groups": "Group information about the requesting user. See user.Info interface for details.",
"extra": "Extra information about the requesting user. See user.Info interface for details.",
}
func (CertificateSigningRequestSpec) SwaggerDoc() map[string]string {
return map_CertificateSigningRequestSpec
}
var map_CertificateSigningRequestStatus = map[string]string{
"conditions": "Conditions applied to the request, such as approval or denial.",
"certificate": "If request was approved, the controller will place the issued certificate here.",
}
func (CertificateSigningRequestStatus) SwaggerDoc() map[string]string {
return map_CertificateSigningRequestStatus
}
// AUTO-GENERATED FUNCTIONS END HERE
| staging/src/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go | 0 | https://github.com/kubernetes/kubernetes/commit/8c175344294cccb2c06342605e3f2cd6a91d60c7 | [
0.00017907148867379874,
0.00017015289631672204,
0.00016306850011460483,
0.00016828786465339363,
0.000005259435511106858
] |
{
"id": 2,
"code_window": [
"\treturn nil\n",
"}\n",
"\n",
"// GetStorageClass constructs a new StorageClass instance\n",
"// with a unique name that is based on namespace + suffix.\n",
"func GetStorageClass(\n",
"\tprovisioner string,\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// CopyStorageClass constructs a new StorageClass instance\n",
"// with a unique name that is based on namespace + suffix\n",
"// using the same storageclass setting from the parameter\n",
"func CopyStorageClass(sc *storagev1.StorageClass, ns string, suffix string) *storagev1.StorageClass {\n",
"\tcopy := sc.DeepCopy()\n",
"\tcopy.ObjectMeta.Name = names.SimpleNameGenerator.GenerateName(ns + \"-\" + suffix)\n",
"\tcopy.ResourceVersion = \"\"\n",
"\treturn copy\n",
"}\n",
"\n"
],
"file_path": "test/e2e/storage/testsuites/driveroperations.go",
"type": "add",
"edit_start_line_idx": 56
} | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package external
import (
"context"
"flag"
"io/ioutil"
"github.com/pkg/errors"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/kubernetes/test/e2e/framework"
e2econfig "k8s.io/kubernetes/test/e2e/framework/config"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
"github.com/onsi/ginkgo"
)
// DriverDefinition needs to be filled in via a .yaml or .json
// file. Its methods then implement the TestDriver interface, using
// nothing but the information in this struct.
type driverDefinition struct {
// DriverInfo is the static information that the storage testsuite
// expects from a test driver. See test/e2e/storage/testsuites/testdriver.go
// for details. The only field with a non-zero default is the list of
// supported file systems (SupportedFsType): it is set so that tests using
// the default file system are enabled.
DriverInfo testsuites.DriverInfo
// StorageClass must be set to enable dynamic provisioning tests.
// The default is to not run those tests.
StorageClass struct {
// FromName set to true enables the usage of a storage
// class with DriverInfo.Name as provisioner and no
// parameters.
FromName bool
// FromFile is used only when FromName is false. It
// loads a storage class from the given .yaml or .json
// file. File names are resolved by the
// framework.testfiles package, which typically means
// that they can be absolute or relative to the test
// suite's --repo-root parameter.
//
// This can be used when the storage class is meant to have
// additional parameters.
FromFile string
// FromExistingClassName specifies the name of a pre-installed
// StorageClass that will be copied and used for the tests.
FromExistingClassName string
}
// SnapshotClass must be set to enable snapshotting tests.
// The default is to not run those tests.
SnapshotClass struct {
// FromName set to true enables the usage of a
// snapshotter class with DriverInfo.Name as provisioner.
FromName bool
// FromFile is used only when FromName is false. It
// loads a snapshot class from the given .yaml or .json
// file. File names are resolved by the
// framework.testfiles package, which typically means
// that they can be absolute or relative to the test
// suite's --repo-root parameter.
//
// This can be used when the snapshot class is meant to have
// additional parameters.
FromFile string
// FromExistingClassName specifies the name of a pre-installed
// SnapshotClass that will be copied and used for the tests.
FromExistingClassName string
}
// InlineVolumes defines one or more volumes for use as inline
// ephemeral volumes. At least one such volume has to be
// defined to enable testing of inline ephemeral volumes. If
// a test needs more volumes than defined, some of the defined
// volumes will be used multiple times.
//
// DriverInfo.Name is used as name of the driver in the inline volume.
InlineVolumes []struct {
// Attributes are passed as NodePublishVolumeReq.volume_context.
// Can be empty.
Attributes map[string]string
// Shared defines whether the resulting volume is
// shared between different pods (i.e. changes made
// in one pod are visible in another)
Shared bool
// ReadOnly must be set to true if the driver does not
// support mounting as read/write.
ReadOnly bool
}
// SupportedSizeRange defines the desired size of dynamically
// provisioned volumes.
SupportedSizeRange e2evolume.SizeRange
// ClientNodeName selects a specific node for scheduling test pods.
// Can be left empty. Most drivers should not need this and instead
// use topology to ensure that pods land on the right node(s).
ClientNodeName string
}
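// A minimal definition file could look roughly like this (illustrative only;
// real driver definitions usually set more DriverInfo capabilities):
//
//	StorageClass:
//	  FromName: true
//	DriverInfo:
//	  Name: hostpath.csi.k8s.io
//	SupportedSizeRange:
//	  Min: 1Gi
//	  Max: 16Gi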
func init() {
e2econfig.Flags.Var(testDriverParameter{}, "storage.testdriver", "name of a .yaml or .json file that defines a driver for storage testing, can be used more than once")
}
// testDriverParameter is used to hook loading of the driver
// definition file and test instantiation into argument parsing: for
// each of potentially many parameters, Set is called and then does
// both immediately. There is no other code location between argument
// parsing and starting of the test suite where those test could be
// defined.
type testDriverParameter struct {
}
var _ flag.Value = testDriverParameter{}
func (t testDriverParameter) String() string {
return "<.yaml or .json file>"
}
func (t testDriverParameter) Set(filename string) error {
return AddDriverDefinition(filename)
}
// AddDriverDefinition defines ginkgo tests for CSI driver definition file.
// Either --storage.testdriver cmdline argument or AddDriverDefinition can be used
// to define the tests.
func AddDriverDefinition(filename string) error {
driver, err := loadDriverDefinition(filename)
if err != nil {
return err
}
if driver.DriverInfo.Name == "" {
return errors.Errorf("%q: DriverInfo.Name not set", filename)
}
description := "External Storage " + testsuites.GetDriverNameWithFeatureTags(driver)
ginkgo.Describe(description, func() {
testsuites.DefineTestSuite(driver, testsuites.CSISuites)
})
return nil
}
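// Typical usage sketch from a test binary's setup code (the file path is a
// placeholder):
//
//	if err := external.AddDriverDefinition("/path/to/test-driver.yaml"); err != nil {
//		panic(err)
//	}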
func loadDriverDefinition(filename string) (*driverDefinition, error) {
if filename == "" {
return nil, errors.New("missing file name")
}
data, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
// Some reasonable defaults follow.
driver := &driverDefinition{
DriverInfo: testsuites.DriverInfo{
SupportedFsType: sets.NewString(
"", // Default fsType
),
},
SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi",
},
}
// TODO: strict checking of the file content once https://github.com/kubernetes/kubernetes/pull/71589
// or something similar is merged.
if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), data, driver); err != nil {
return nil, errors.Wrap(err, filename)
}
return driver, nil
}
var _ testsuites.TestDriver = &driverDefinition{}
// We have to implement the interface because dynamic PV may or may
// not be supported. driverDefinition.SkipUnsupportedTest checks that
// based on the actual driver definition.
var _ testsuites.DynamicPVTestDriver = &driverDefinition{}
// Same for snapshotting.
var _ testsuites.SnapshottableTestDriver = &driverDefinition{}
// And for ephemeral volumes.
var _ testsuites.EphemeralTestDriver = &driverDefinition{}
// runtime.DecodeInto needs a runtime.Object but doesn't do any
// deserialization of it and therefore none of the methods below need
// an implementation.
var _ runtime.Object = &driverDefinition{}
func (d *driverDefinition) DeepCopyObject() runtime.Object {
return nil
}
func (d *driverDefinition) GetObjectKind() schema.ObjectKind {
return nil
}
func (d *driverDefinition) GetDriverInfo() *testsuites.DriverInfo {
return &d.DriverInfo
}
func (d *driverDefinition) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
supported := false
// TODO (?): add support for more volume types
switch pattern.VolType {
case "":
supported = true
case testpatterns.DynamicPV:
if d.StorageClass.FromName || d.StorageClass.FromFile != "" || d.StorageClass.FromExistingClassName != "" {
supported = true
}
case testpatterns.CSIInlineVolume:
supported = len(d.InlineVolumes) != 0
}
if !supported {
e2eskipper.Skipf("Driver %q does not support volume type %q - skipping", d.DriverInfo.Name, pattern.VolType)
}
supported = false
switch pattern.SnapshotType {
case "":
supported = true
case testpatterns.DynamicCreatedSnapshot, testpatterns.PreprovisionedCreatedSnapshot:
if d.SnapshotClass.FromName || d.SnapshotClass.FromFile != "" || d.SnapshotClass.FromExistingClassName != "" {
supported = true
}
}
if !supported {
e2eskipper.Skipf("Driver %q does not support snapshot type %q - skipping", d.DriverInfo.Name, pattern.SnapshotType)
}
}
func (d *driverDefinition) GetDynamicProvisionStorageClass(e2econfig *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
var (
sc *storagev1.StorageClass
err error
)
f := e2econfig.Framework
switch {
case d.StorageClass.FromName:
sc = &storagev1.StorageClass{Provisioner: d.DriverInfo.Name}
case d.StorageClass.FromExistingClassName != "":
sc, err = f.ClientSet.StorageV1().StorageClasses().Get(context.TODO(), d.StorageClass.FromExistingClassName, metav1.GetOptions{})
framework.ExpectNoError(err, "getting storage class %s", d.StorageClass.FromExistingClassName)
case d.StorageClass.FromFile != "":
var ok bool
items, err := utils.LoadFromManifests(d.StorageClass.FromFile)
framework.ExpectNoError(err, "load storage class from %s", d.StorageClass.FromFile)
framework.ExpectEqual(len(items), 1, "exactly one item from %s", d.StorageClass.FromFile)
err = utils.PatchItems(f, f.Namespace, items...)
framework.ExpectNoError(err, "patch items")
sc, ok = items[0].(*storagev1.StorageClass)
framework.ExpectEqual(ok, true, "storage class from %s", d.StorageClass.FromFile)
}
	framework.ExpectNotEqual(sc, nil, "storage class is unexpectedly nil")
if fsType != "" {
if sc.Parameters == nil {
sc.Parameters = map[string]string{}
}
// This limits the external storage test suite to only CSI drivers, which may need to be
// reconsidered if we eventually need to move in-tree storage tests out.
sc.Parameters["csi.storage.k8s.io/fstype"] = fsType
}
return testsuites.GetStorageClass(sc.Provisioner, sc.Parameters, sc.VolumeBindingMode, f.Namespace.Name, "e2e-sc")
}
func loadSnapshotClass(filename string) (*unstructured.Unstructured, error) {
data, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
snapshotClass := &unstructured.Unstructured{}
if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), data, snapshotClass); err != nil {
return nil, errors.Wrap(err, filename)
}
return snapshotClass, nil
}
func (d *driverDefinition) GetSnapshotClass(e2econfig *testsuites.PerTestConfig) *unstructured.Unstructured {
if !d.SnapshotClass.FromName && d.SnapshotClass.FromFile == "" && d.SnapshotClass.FromExistingClassName == "" {
e2eskipper.Skipf("Driver %q does not support snapshotting - skipping", d.DriverInfo.Name)
}
f := e2econfig.Framework
snapshotter := d.DriverInfo.Name
parameters := map[string]string{}
ns := e2econfig.Framework.Namespace.Name
suffix := "vsc"
switch {
case d.SnapshotClass.FromName:
// Do nothing (just use empty parameters)
case d.SnapshotClass.FromExistingClassName != "":
snapshotClass, err := f.DynamicClient.Resource(testsuites.SnapshotClassGVR).Get(context.TODO(), d.SnapshotClass.FromExistingClassName, metav1.GetOptions{})
framework.ExpectNoError(err, "getting snapshot class %s", d.SnapshotClass.FromExistingClassName)
if params, ok := snapshotClass.Object["parameters"].(map[string]interface{}); ok {
for k, v := range params {
parameters[k] = v.(string)
}
}
if snapshotProvider, ok := snapshotClass.Object["driver"]; ok {
snapshotter = snapshotProvider.(string)
}
case d.SnapshotClass.FromFile != "":
snapshotClass, err := loadSnapshotClass(d.SnapshotClass.FromFile)
framework.ExpectNoError(err, "load snapshot class from %s", d.SnapshotClass.FromFile)
if params, ok := snapshotClass.Object["parameters"].(map[string]interface{}); ok {
for k, v := range params {
parameters[k] = v.(string)
}
}
if snapshotProvider, ok := snapshotClass.Object["driver"]; ok {
snapshotter = snapshotProvider.(string)
}
}
return testsuites.GetSnapshotClass(snapshotter, parameters, ns, suffix)
}
func (d *driverDefinition) GetVolume(e2econfig *testsuites.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) {
if len(d.InlineVolumes) == 0 {
e2eskipper.Skipf("%s does not have any InlineVolumeAttributes defined", d.DriverInfo.Name)
}
e2evolume := d.InlineVolumes[volumeNumber%len(d.InlineVolumes)]
return e2evolume.Attributes, e2evolume.Shared, e2evolume.ReadOnly
}
func (d *driverDefinition) GetCSIDriverName(e2econfig *testsuites.PerTestConfig) string {
return d.DriverInfo.Name
}
func (d *driverDefinition) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
e2econfig := &testsuites.PerTestConfig{
Driver: d,
Prefix: "external",
Framework: f,
ClientNodeSelection: e2epod.NodeSelection{Name: d.ClientNodeName},
}
return e2econfig, func() {}
}
| test/e2e/storage/external/external.go | 1 | https://github.com/kubernetes/kubernetes/commit/8c175344294cccb2c06342605e3f2cd6a91d60c7 | [
0.9990842342376709,
0.12306497991085052,
0.0001627706951694563,
0.0002650829264894128,
0.31336697936058044
] |
{
"id": 2,
"code_window": [
"\treturn nil\n",
"}\n",
"\n",
"// GetStorageClass constructs a new StorageClass instance\n",
"// with a unique name that is based on namespace + suffix.\n",
"func GetStorageClass(\n",
"\tprovisioner string,\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// CopyStorageClass constructs a new StorageClass instance\n",
"// with a unique name that is based on namespace + suffix\n",
"// using the same storageclass setting from the parameter\n",
"func CopyStorageClass(sc *storagev1.StorageClass, ns string, suffix string) *storagev1.StorageClass {\n",
"\tcopy := sc.DeepCopy()\n",
"\tcopy.ObjectMeta.Name = names.SimpleNameGenerator.GenerateName(ns + \"-\" + suffix)\n",
"\tcopy.ResourceVersion = \"\"\n",
"\treturn copy\n",
"}\n",
"\n"
],
"file_path": "test/e2e/storage/testsuites/driveroperations.go",
"type": "add",
"edit_start_line_idx": 56
} | package time // import "github.com/docker/docker/api/types/time"
import (
"strconv"
"time"
)
// DurationToSecondsString converts the specified duration to the number
// seconds it represents, formatted as a string.
func DurationToSecondsString(duration time.Duration) string {
return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64)
}
| vendor/github.com/docker/docker/api/types/time/duration_convert.go | 0 | https://github.com/kubernetes/kubernetes/commit/8c175344294cccb2c06342605e3f2cd6a91d60c7 | [
0.0007687675533816218,
0.0004694105591624975,
0.00017005357949528843,
0.0004694105591624975,
0.0002993569942191243
] |
{
"id": 2,
"code_window": [
"\treturn nil\n",
"}\n",
"\n",
"// GetStorageClass constructs a new StorageClass instance\n",
"// with a unique name that is based on namespace + suffix.\n",
"func GetStorageClass(\n",
"\tprovisioner string,\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// CopyStorageClass constructs a new StorageClass instance\n",
"// with a unique name that is based on namespace + suffix\n",
"// using the same storageclass setting from the parameter\n",
"func CopyStorageClass(sc *storagev1.StorageClass, ns string, suffix string) *storagev1.StorageClass {\n",
"\tcopy := sc.DeepCopy()\n",
"\tcopy.ObjectMeta.Name = names.SimpleNameGenerator.GenerateName(ns + \"-\" + suffix)\n",
"\tcopy.ResourceVersion = \"\"\n",
"\treturn copy\n",
"}\n",
"\n"
],
"file_path": "test/e2e/storage/testsuites/driveroperations.go",
"type": "add",
"edit_start_line_idx": 56
} | package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = ["framer_test.go"],
embed = [":go_default_library"],
)
go_library(
name = "go_default_library",
srcs = ["framer.go"],
importmap = "k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/framer",
importpath = "k8s.io/apimachinery/pkg/util/framer",
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
| staging/src/k8s.io/apimachinery/pkg/util/framer/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/8c175344294cccb2c06342605e3f2cd6a91d60c7 | [
0.00017399730859324336,
0.00017192562518175691,
0.00017032717005349696,
0.00017168899648822844,
0.0000014908652019585134
] |
{
"id": 2,
"code_window": [
"\treturn nil\n",
"}\n",
"\n",
"// GetStorageClass constructs a new StorageClass instance\n",
"// with a unique name that is based on namespace + suffix.\n",
"func GetStorageClass(\n",
"\tprovisioner string,\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// CopyStorageClass constructs a new StorageClass instance\n",
"// with a unique name that is based on namespace + suffix\n",
"// using the same storageclass setting from the parameter\n",
"func CopyStorageClass(sc *storagev1.StorageClass, ns string, suffix string) *storagev1.StorageClass {\n",
"\tcopy := sc.DeepCopy()\n",
"\tcopy.ObjectMeta.Name = names.SimpleNameGenerator.GenerateName(ns + \"-\" + suffix)\n",
"\tcopy.ResourceVersion = \"\"\n",
"\treturn copy\n",
"}\n",
"\n"
],
"file_path": "test/e2e/storage/testsuites/driveroperations.go",
"type": "add",
"edit_start_line_idx": 56
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["helpers.go"],
importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/util/apihelpers",
importpath = "k8s.io/apiserver/pkg/util/apihelpers",
visibility = ["//visibility:public"],
deps = ["//staging/src/k8s.io/api/flowcontrol/v1beta1:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
| staging/src/k8s.io/apiserver/pkg/util/apihelpers/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/8c175344294cccb2c06342605e3f2cd6a91d60c7 | [
0.0001733739482006058,
0.00017073475464712828,
0.0001685031020315364,
0.00017032717005349696,
0.0000020092902559554204
] |
{
"id": 3,
"code_window": [
"\t\tginkgo.It(\"should not allow expansion of pvcs without AllowVolumeExpansion property\", func() {\n",
"\t\t\tinit()\n",
"\t\t\tdefer cleanup()\n",
"\n",
"\t\t\tvar err error\n",
"\t\t\tgomega.Expect(l.resource.Sc.AllowVolumeExpansion).To(gomega.BeNil())\n",
"\t\t\tginkgo.By(\"Expanding non-expandable pvc\")\n",
"\t\t\tcurrentPvcSize := l.resource.Pvc.Spec.Resources.Requests[v1.ResourceStorage]\n",
"\t\t\tnewSize := currentPvcSize.DeepCopy()\n",
"\t\t\tnewSize.Add(resource.MustParse(\"1Gi\"))\n",
"\t\t\tframework.Logf(\"currentPvcSize %v, newSize %v\", currentPvcSize, newSize)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tgomega.Expect(l.resource.Sc.AllowVolumeExpansion).NotTo(gomega.BeNil())\n",
"\t\t\tallowVolumeExpansion := *l.resource.Sc.AllowVolumeExpansion\n",
"\t\t\tgomega.Expect(allowVolumeExpansion).To(gomega.BeFalse())\n"
],
"file_path": "test/e2e/storage/testsuites/volume_expand.go",
"type": "replace",
"edit_start_line_idx": 158
} | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package external
import (
"context"
"flag"
"io/ioutil"
"github.com/pkg/errors"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/kubernetes/test/e2e/framework"
e2econfig "k8s.io/kubernetes/test/e2e/framework/config"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
"github.com/onsi/ginkgo"
)
// DriverDefinition needs to be filled in via a .yaml or .json
// file. Its methods then implement the TestDriver interface, using
// nothing but the information in this struct.
type driverDefinition struct {
// DriverInfo is the static information that the storage testsuite
// expects from a test driver. See test/e2e/storage/testsuites/testdriver.go
// for details. The only field with a non-zero default is the list of
// supported file systems (SupportedFsType): it is set so that tests using
// the default file system are enabled.
DriverInfo testsuites.DriverInfo
// StorageClass must be set to enable dynamic provisioning tests.
// The default is to not run those tests.
StorageClass struct {
// FromName set to true enables the usage of a storage
// class with DriverInfo.Name as provisioner and no
// parameters.
FromName bool
// FromFile is used only when FromName is false. It
// loads a storage class from the given .yaml or .json
// file. File names are resolved by the
// framework.testfiles package, which typically means
// that they can be absolute or relative to the test
// suite's --repo-root parameter.
//
// This can be used when the storage class is meant to have
// additional parameters.
FromFile string
// FromExistingClassName specifies the name of a pre-installed
// StorageClass that will be copied and used for the tests.
FromExistingClassName string
}
// SnapshotClass must be set to enable snapshotting tests.
// The default is to not run those tests.
SnapshotClass struct {
// FromName set to true enables the usage of a
// snapshotter class with DriverInfo.Name as provisioner.
FromName bool
// FromFile is used only when FromName is false. It
// loads a snapshot class from the given .yaml or .json
// file. File names are resolved by the
// framework.testfiles package, which typically means
// that they can be absolute or relative to the test
// suite's --repo-root parameter.
//
// This can be used when the snapshot class is meant to have
// additional parameters.
FromFile string
// FromExistingClassName specifies the name of a pre-installed
// SnapshotClass that will be copied and used for the tests.
FromExistingClassName string
}
// InlineVolumes defines one or more volumes for use as inline
// ephemeral volumes. At least one such volume has to be
// defined to enable testing of inline ephemeral volumes. If
// a test needs more volumes than defined, some of the defined
// volumes will be used multiple times.
//
// DriverInfo.Name is used as name of the driver in the inline volume.
InlineVolumes []struct {
// Attributes are passed as NodePublishVolumeReq.volume_context.
// Can be empty.
Attributes map[string]string
// Shared defines whether the resulting volume is
// shared between different pods (i.e. changes made
// in one pod are visible in another)
Shared bool
// ReadOnly must be set to true if the driver does not
// support mounting as read/write.
ReadOnly bool
}
// SupportedSizeRange defines the desired size of dynamically
// provisioned volumes.
SupportedSizeRange e2evolume.SizeRange
// ClientNodeName selects a specific node for scheduling test pods.
// Can be left empty. Most drivers should not need this and instead
// use topology to ensure that pods land on the right node(s).
ClientNodeName string
}
func init() {
e2econfig.Flags.Var(testDriverParameter{}, "storage.testdriver", "name of a .yaml or .json file that defines a driver for storage testing, can be used more than once")
}
// testDriverParameter is used to hook loading of the driver
// definition file and test instantiation into argument parsing: for
// each of potentially many parameters, Set is called and then does
// both immediately. There is no other code location between argument
// parsing and starting of the test suite where those tests could be
// defined.
type testDriverParameter struct {
}
var _ flag.Value = testDriverParameter{}
func (t testDriverParameter) String() string {
return "<.yaml or .json file>"
}
func (t testDriverParameter) Set(filename string) error {
return AddDriverDefinition(filename)
}
// AddDriverDefinition defines ginkgo tests for CSI driver definition file.
// Either --storage.testdriver cmdline argument or AddDriverDefinition can be used
// to define the tests.
func AddDriverDefinition(filename string) error {
driver, err := loadDriverDefinition(filename)
if err != nil {
return err
}
if driver.DriverInfo.Name == "" {
return errors.Errorf("%q: DriverInfo.Name not set", filename)
}
description := "External Storage " + testsuites.GetDriverNameWithFeatureTags(driver)
ginkgo.Describe(description, func() {
testsuites.DefineTestSuite(driver, testsuites.CSISuites)
})
return nil
}
func loadDriverDefinition(filename string) (*driverDefinition, error) {
if filename == "" {
return nil, errors.New("missing file name")
}
data, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
// Some reasonable defaults follow.
driver := &driverDefinition{
DriverInfo: testsuites.DriverInfo{
SupportedFsType: sets.NewString(
"", // Default fsType
),
},
SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi",
},
}
// TODO: strict checking of the file content once https://github.com/kubernetes/kubernetes/pull/71589
// or something similar is merged.
if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), data, driver); err != nil {
return nil, errors.Wrap(err, filename)
}
return driver, nil
}
var _ testsuites.TestDriver = &driverDefinition{}
// We have to implement the interface because dynamic PV may or may
// not be supported. driverDefinition.SkipUnsupportedTest checks that
// based on the actual driver definition.
var _ testsuites.DynamicPVTestDriver = &driverDefinition{}
// Same for snapshotting.
var _ testsuites.SnapshottableTestDriver = &driverDefinition{}
// And for ephemeral volumes.
var _ testsuites.EphemeralTestDriver = &driverDefinition{}
// runtime.DecodeInto needs a runtime.Object but doesn't do any
// deserialization of it and therefore none of the methods below need
// an implementation.
var _ runtime.Object = &driverDefinition{}
func (d *driverDefinition) DeepCopyObject() runtime.Object {
return nil
}
func (d *driverDefinition) GetObjectKind() schema.ObjectKind {
return nil
}
func (d *driverDefinition) GetDriverInfo() *testsuites.DriverInfo {
return &d.DriverInfo
}
func (d *driverDefinition) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
supported := false
// TODO (?): add support for more volume types
switch pattern.VolType {
case "":
supported = true
case testpatterns.DynamicPV:
if d.StorageClass.FromName || d.StorageClass.FromFile != "" || d.StorageClass.FromExistingClassName != "" {
supported = true
}
case testpatterns.CSIInlineVolume:
supported = len(d.InlineVolumes) != 0
}
if !supported {
e2eskipper.Skipf("Driver %q does not support volume type %q - skipping", d.DriverInfo.Name, pattern.VolType)
}
supported = false
switch pattern.SnapshotType {
case "":
supported = true
case testpatterns.DynamicCreatedSnapshot, testpatterns.PreprovisionedCreatedSnapshot:
if d.SnapshotClass.FromName || d.SnapshotClass.FromFile != "" || d.SnapshotClass.FromExistingClassName != "" {
supported = true
}
}
if !supported {
e2eskipper.Skipf("Driver %q does not support snapshot type %q - skipping", d.DriverInfo.Name, pattern.SnapshotType)
}
}
func (d *driverDefinition) GetDynamicProvisionStorageClass(e2econfig *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
var (
sc *storagev1.StorageClass
err error
)
f := e2econfig.Framework
switch {
case d.StorageClass.FromName:
sc = &storagev1.StorageClass{Provisioner: d.DriverInfo.Name}
case d.StorageClass.FromExistingClassName != "":
sc, err = f.ClientSet.StorageV1().StorageClasses().Get(context.TODO(), d.StorageClass.FromExistingClassName, metav1.GetOptions{})
framework.ExpectNoError(err, "getting storage class %s", d.StorageClass.FromExistingClassName)
case d.StorageClass.FromFile != "":
var ok bool
items, err := utils.LoadFromManifests(d.StorageClass.FromFile)
framework.ExpectNoError(err, "load storage class from %s", d.StorageClass.FromFile)
framework.ExpectEqual(len(items), 1, "exactly one item from %s", d.StorageClass.FromFile)
err = utils.PatchItems(f, f.Namespace, items...)
framework.ExpectNoError(err, "patch items")
sc, ok = items[0].(*storagev1.StorageClass)
framework.ExpectEqual(ok, true, "storage class from %s", d.StorageClass.FromFile)
}
	framework.ExpectNotEqual(sc, nil, "storage class is unexpectedly nil")
if fsType != "" {
if sc.Parameters == nil {
sc.Parameters = map[string]string{}
}
// This limits the external storage test suite to only CSI drivers, which may need to be
// reconsidered if we eventually need to move in-tree storage tests out.
sc.Parameters["csi.storage.k8s.io/fstype"] = fsType
}
return testsuites.GetStorageClass(sc.Provisioner, sc.Parameters, sc.VolumeBindingMode, f.Namespace.Name, "e2e-sc")
}
func loadSnapshotClass(filename string) (*unstructured.Unstructured, error) {
data, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
snapshotClass := &unstructured.Unstructured{}
if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), data, snapshotClass); err != nil {
return nil, errors.Wrap(err, filename)
}
return snapshotClass, nil
}
func (d *driverDefinition) GetSnapshotClass(e2econfig *testsuites.PerTestConfig) *unstructured.Unstructured {
if !d.SnapshotClass.FromName && d.SnapshotClass.FromFile == "" && d.SnapshotClass.FromExistingClassName == "" {
e2eskipper.Skipf("Driver %q does not support snapshotting - skipping", d.DriverInfo.Name)
}
f := e2econfig.Framework
snapshotter := d.DriverInfo.Name
parameters := map[string]string{}
ns := e2econfig.Framework.Namespace.Name
suffix := "vsc"
switch {
case d.SnapshotClass.FromName:
// Do nothing (just use empty parameters)
case d.SnapshotClass.FromExistingClassName != "":
snapshotClass, err := f.DynamicClient.Resource(testsuites.SnapshotClassGVR).Get(context.TODO(), d.SnapshotClass.FromExistingClassName, metav1.GetOptions{})
framework.ExpectNoError(err, "getting snapshot class %s", d.SnapshotClass.FromExistingClassName)
if params, ok := snapshotClass.Object["parameters"].(map[string]interface{}); ok {
for k, v := range params {
parameters[k] = v.(string)
}
}
if snapshotProvider, ok := snapshotClass.Object["driver"]; ok {
snapshotter = snapshotProvider.(string)
}
case d.SnapshotClass.FromFile != "":
snapshotClass, err := loadSnapshotClass(d.SnapshotClass.FromFile)
framework.ExpectNoError(err, "load snapshot class from %s", d.SnapshotClass.FromFile)
if params, ok := snapshotClass.Object["parameters"].(map[string]interface{}); ok {
for k, v := range params {
parameters[k] = v.(string)
}
}
if snapshotProvider, ok := snapshotClass.Object["driver"]; ok {
snapshotter = snapshotProvider.(string)
}
}
return testsuites.GetSnapshotClass(snapshotter, parameters, ns, suffix)
}
func (d *driverDefinition) GetVolume(e2econfig *testsuites.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) {
if len(d.InlineVolumes) == 0 {
e2eskipper.Skipf("%s does not have any InlineVolumeAttributes defined", d.DriverInfo.Name)
}
e2evolume := d.InlineVolumes[volumeNumber%len(d.InlineVolumes)]
return e2evolume.Attributes, e2evolume.Shared, e2evolume.ReadOnly
}
func (d *driverDefinition) GetCSIDriverName(e2econfig *testsuites.PerTestConfig) string {
return d.DriverInfo.Name
}
func (d *driverDefinition) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
e2econfig := &testsuites.PerTestConfig{
Driver: d,
Prefix: "external",
Framework: f,
ClientNodeSelection: e2epod.NodeSelection{Name: d.ClientNodeName},
}
return e2econfig, func() {}
}
| test/e2e/storage/external/external.go | 1 | https://github.com/kubernetes/kubernetes/commit/8c175344294cccb2c06342605e3f2cd6a91d60c7 | [
0.0008462745463475585,
0.0001893292646855116,
0.00015869369963184,
0.00016953921294771135,
0.000107242536614649
] |
{
"id": 3,
"code_window": [
"\t\tginkgo.It(\"should not allow expansion of pvcs without AllowVolumeExpansion property\", func() {\n",
"\t\t\tinit()\n",
"\t\t\tdefer cleanup()\n",
"\n",
"\t\t\tvar err error\n",
"\t\t\tgomega.Expect(l.resource.Sc.AllowVolumeExpansion).To(gomega.BeNil())\n",
"\t\t\tginkgo.By(\"Expanding non-expandable pvc\")\n",
"\t\t\tcurrentPvcSize := l.resource.Pvc.Spec.Resources.Requests[v1.ResourceStorage]\n",
"\t\t\tnewSize := currentPvcSize.DeepCopy()\n",
"\t\t\tnewSize.Add(resource.MustParse(\"1Gi\"))\n",
"\t\t\tframework.Logf(\"currentPvcSize %v, newSize %v\", currentPvcSize, newSize)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tgomega.Expect(l.resource.Sc.AllowVolumeExpansion).NotTo(gomega.BeNil())\n",
"\t\t\tallowVolumeExpansion := *l.resource.Sc.AllowVolumeExpansion\n",
"\t\t\tgomega.Expect(allowVolumeExpansion).To(gomega.BeFalse())\n"
],
"file_path": "test/e2e/storage/testsuites/volume_expand.go",
"type": "replace",
"edit_start_line_idx": 158
} | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hostpath
import (
"fmt"
"os"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
utilpath "k8s.io/utils/path"
)
func newHostPathType(pathType string) *v1.HostPathType {
hostPathType := new(v1.HostPathType)
*hostPathType = v1.HostPathType(pathType)
return hostPathType
}
func newHostPathTypeList(pathType ...string) []*v1.HostPathType {
typeList := []*v1.HostPathType{}
for _, ele := range pathType {
typeList = append(typeList, newHostPathType(ele))
}
return typeList
}
func TestCanSupport(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeVolumeHost(t, "fake", nil, nil))
plug, err := plugMgr.FindPluginByName(hostPathPluginName)
if err != nil {
t.Fatal("Can't find the plugin by name")
}
if plug.GetPluginName() != hostPathPluginName {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
}
func TestGetAccessModes(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeVolumeHost(t, "/tmp/fake", nil, nil))
plug, err := plugMgr.FindPersistentPluginByName(hostPathPluginName)
if err != nil {
t.Fatal("Can't find the plugin by name")
}
if len(plug.GetAccessModes()) != 1 || plug.GetAccessModes()[0] != v1.ReadWriteOnce {
t.Errorf("Expected %s PersistentVolumeAccessMode", v1.ReadWriteOnce)
}
}
func TestRecycler(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
pluginHost := volumetest.NewFakeVolumeHost(t, "/tmp/fake", nil, nil)
plugMgr.InitPlugins([]volume.VolumePlugin{&hostPathPlugin{nil, volume.VolumeConfig{}}}, nil, pluginHost)
spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/foo"}}}}}
_, err := plugMgr.FindRecyclablePluginBySpec(spec)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
}
func TestDeleter(t *testing.T) {
// Deleter has a hard-coded regex for "/tmp".
tempPath := fmt.Sprintf("/tmp/hostpath.%s", uuid.NewUUID())
err := os.MkdirAll(tempPath, 0750)
if err != nil {
t.Fatalf("Failed to create tmp directory for deleter: %v", err)
}
defer os.RemoveAll(tempPath)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeVolumeHost(t, "/tmp/fake", nil, nil))
spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: tempPath}}}}}
plug, err := plugMgr.FindDeletablePluginBySpec(spec)
if err != nil {
t.Fatal("Can't find the plugin by name")
}
deleter, err := plug.NewDeleter(spec)
if err != nil {
t.Errorf("Failed to make a new Deleter: %v", err)
}
if deleter.GetPath() != tempPath {
t.Errorf("Expected %s but got %s", tempPath, deleter.GetPath())
}
if err := deleter.Delete(); err != nil {
t.Errorf("Mock Recycler expected to return nil but got %s", err)
}
if exists, _ := utilpath.Exists(utilpath.CheckFollowSymlink, tempPath); exists {
t.Errorf("Temp path expected to be deleted, but was found at %s", tempPath)
}
}
func TestDeleterTempDir(t *testing.T) {
tests := map[string]struct {
expectedFailure bool
path string
}{
"just-tmp": {true, "/tmp"},
"not-tmp": {true, "/nottmp"},
"good-tmp": {false, "/tmp/scratch"},
}
for name, test := range tests {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeVolumeHost(t, "/tmp/fake", nil, nil))
spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: test.path}}}}}
plug, _ := plugMgr.FindDeletablePluginBySpec(spec)
deleter, _ := plug.NewDeleter(spec)
err := deleter.Delete()
if err == nil && test.expectedFailure {
t.Errorf("Expected failure for test '%s' but got nil err", name)
}
if err != nil && !test.expectedFailure {
t.Errorf("Unexpected failure for test '%s': %v", name, err)
}
}
}
func TestProvisioner(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{ProvisioningEnabled: true}),
nil,
volumetest.NewFakeVolumeHost(t, "/tmp/fake", nil, nil))
spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: fmt.Sprintf("/tmp/hostpath.%s", uuid.NewUUID())}}}}}
plug, err := plugMgr.FindCreatablePluginBySpec(spec)
if err != nil {
t.Fatalf("Can't find the plugin by name")
}
options := volume.VolumeOptions{
PVC: volumetest.CreateTestPVC("1Gi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
creator, err := plug.NewProvisioner(options)
if err != nil {
t.Fatalf("Failed to make a new Provisioner: %v", err)
}
hostPathCreator, ok := creator.(*hostPathProvisioner)
if !ok {
t.Fatal("Not a hostPathProvisioner")
}
hostPathCreator.basePath = fmt.Sprintf("%s.%s", "hostPath_pv", uuid.NewUUID())
pv, err := hostPathCreator.Provision(nil, nil)
if err != nil {
t.Errorf("Unexpected error creating volume: %v", err)
}
if pv.Spec.HostPath.Path == "" {
t.Errorf("Expected pv.Spec.HostPath.Path to not be empty: %#v", pv)
}
expectedCapacity := resource.NewQuantity(1*1024*1024*1024, resource.BinarySI)
actualCapacity := pv.Spec.Capacity[v1.ResourceStorage]
expectedAmt := expectedCapacity.Value()
actualAmt := actualCapacity.Value()
if expectedAmt != actualAmt {
t.Errorf("Expected capacity %+v but got %+v", expectedAmt, actualAmt)
}
if pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete {
t.Errorf("Expected reclaim policy %+v but got %+v", v1.PersistentVolumeReclaimDelete, pv.Spec.PersistentVolumeReclaimPolicy)
}
os.RemoveAll(hostPathCreator.basePath)
}
func TestInvalidHostPath(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeVolumeHost(t, "fake", nil, nil))
plug, err := plugMgr.FindPluginByName(hostPathPluginName)
if err != nil {
t.Fatalf("Unable to find plugin %s by name: %v", hostPathPluginName, err)
}
spec := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/no/backsteps/allowed/.."}},
}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
if err != nil {
t.Fatal(err)
}
err = mounter.SetUp(volume.MounterArgs{})
expectedMsg := "invalid HostPath `/no/backsteps/allowed/..`: must not contain '..'"
if err.Error() != expectedMsg {
t.Fatalf("expected error `%s` but got `%s`", expectedMsg, err)
}
}
func TestPlugin(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeVolumeHost(t, "fake", nil, nil))
plug, err := plugMgr.FindPluginByName(hostPathPluginName)
if err != nil {
t.Fatal("Can't find the plugin by name")
}
volPath := "/tmp/vol1"
spec := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: volPath, Type: newHostPathType(string(v1.HostPathDirectoryOrCreate))}},
}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
defer os.RemoveAll(volPath)
mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatalf("Got a nil Mounter")
}
path := mounter.GetPath()
if path != volPath {
t.Errorf("Got unexpected path: %s", path)
}
if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
t.Errorf("Expected success, got: %v", err)
}
unmounter, err := plug.NewUnmounter("vol1", types.UID("poduid"))
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Fatalf("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "foo", Type: newHostPathType(string(v1.HostPathDirectoryOrCreate))},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
defer os.RemoveAll("foo")
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
client := fake.NewSimpleClientset(pv, claim)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeVolumeHost(t, "/tmp/fake", client, nil))
plug, _ := plugMgr.FindPluginByName(hostPathPluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if mounter == nil {
t.Fatalf("Got a nil Mounter")
}
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}
func setUp() error {
err := os.MkdirAll("/tmp/ExistingFolder", os.FileMode(0755))
if err != nil {
return err
}
f, err := os.OpenFile("/tmp/ExistingFolder/foo", os.O_CREATE, os.FileMode(0644))
if err != nil {
return err
}
defer f.Close()
return nil
}
func tearDown() {
os.RemoveAll("/tmp/ExistingFolder")
}
func TestOSFileTypeChecker(t *testing.T) {
err := setUp()
if err != nil {
t.Error(err)
}
defer tearDown()
testCases := []struct {
name string
path string
desiredType string
isDir bool
isFile bool
isSocket bool
isBlock bool
isChar bool
}{
{
name: "Existing Folder",
path: "/tmp/ExistingFolder",
desiredType: string(hostutil.FileTypeDirectory),
isDir: true,
},
{
name: "Existing File",
path: "/tmp/ExistingFolder/foo",
desiredType: string(hostutil.FileTypeFile),
isFile: true,
},
{
name: "Existing Socket File",
path: "/tmp/ExistingFolder/foo",
desiredType: string(v1.HostPathSocket),
isSocket: true,
},
{
name: "Existing Character Device",
path: "/tmp/ExistingFolder/foo",
desiredType: string(v1.HostPathCharDev),
isChar: true,
},
{
name: "Existing Block Device",
path: "/tmp/ExistingFolder/foo",
desiredType: string(v1.HostPathBlockDev),
isBlock: true,
},
}
for i, tc := range testCases {
fakeFTC := hostutil.NewFakeHostUtil(
map[string]hostutil.FileType{
tc.path: hostutil.FileType(tc.desiredType),
})
oftc := newFileTypeChecker(tc.path, fakeFTC)
path := oftc.GetPath()
if path != tc.path {
t.Errorf("[%d: %q] got unexpected path: %s", i, tc.name, path)
}
exist := oftc.Exists()
if !exist {
t.Errorf("[%d: %q] path: %s does not exist", i, tc.name, path)
}
if tc.isDir {
if !oftc.IsDir() {
t.Errorf("[%d: %q] expected folder, got unexpected: %s", i, tc.name, path)
}
if oftc.IsFile() {
t.Errorf("[%d: %q] expected folder, got unexpected file: %s", i, tc.name, path)
}
if oftc.IsSocket() {
t.Errorf("[%d: %q] expected folder, got unexpected socket file: %s", i, tc.name, path)
}
if oftc.IsBlock() {
t.Errorf("[%d: %q] expected folder, got unexpected block device: %s", i, tc.name, path)
}
if oftc.IsChar() {
t.Errorf("[%d: %q] expected folder, got unexpected character device: %s", i, tc.name, path)
}
}
if tc.isFile {
if !oftc.IsFile() {
t.Errorf("[%d: %q] expected file, got unexpected: %s", i, tc.name, path)
}
if oftc.IsDir() {
t.Errorf("[%d: %q] expected file, got unexpected folder: %s", i, tc.name, path)
}
if oftc.IsSocket() {
t.Errorf("[%d: %q] expected file, got unexpected socket file: %s", i, tc.name, path)
}
if oftc.IsBlock() {
t.Errorf("[%d: %q] expected file, got unexpected block device: %s", i, tc.name, path)
}
if oftc.IsChar() {
t.Errorf("[%d: %q] expected file, got unexpected character device: %s", i, tc.name, path)
}
}
if tc.isSocket {
if !oftc.IsSocket() {
t.Errorf("[%d: %q] expected socket file, got unexpected: %s", i, tc.name, path)
}
if oftc.IsDir() {
t.Errorf("[%d: %q] expected socket file, got unexpected folder: %s", i, tc.name, path)
}
if oftc.IsFile() {
t.Errorf("[%d: %q] expected socket file, got unexpected file: %s", i, tc.name, path)
}
if oftc.IsBlock() {
t.Errorf("[%d: %q] expected socket file, got unexpected block device: %s", i, tc.name, path)
}
if oftc.IsChar() {
t.Errorf("[%d: %q] expected socket file, got unexpected character device: %s", i, tc.name, path)
}
}
if tc.isChar {
if !oftc.IsChar() {
t.Errorf("[%d: %q] expected character device, got unexpected: %s", i, tc.name, path)
}
if oftc.IsDir() {
t.Errorf("[%d: %q] expected character device, got unexpected folder: %s", i, tc.name, path)
}
if oftc.IsFile() {
t.Errorf("[%d: %q] expected character device, got unexpected file: %s", i, tc.name, path)
}
if oftc.IsSocket() {
t.Errorf("[%d: %q] expected character device, got unexpected socket file: %s", i, tc.name, path)
}
if oftc.IsBlock() {
t.Errorf("[%d: %q] expected character device, got unexpected block device: %s", i, tc.name, path)
}
}
if tc.isBlock {
if !oftc.IsBlock() {
t.Errorf("[%d: %q] expected block device, got unexpected: %s", i, tc.name, path)
}
if oftc.IsDir() {
t.Errorf("[%d: %q] expected block device, got unexpected folder: %s", i, tc.name, path)
}
if oftc.IsFile() {
t.Errorf("[%d: %q] expected block device, got unexpected file: %s", i, tc.name, path)
}
if oftc.IsSocket() {
t.Errorf("[%d: %q] expected block device, got unexpected socket file: %s", i, tc.name, path)
}
if oftc.IsChar() {
t.Errorf("[%d: %q] expected block device, got unexpected character device: %s", i, tc.name, path)
}
}
}
}
type fakeHostPathTypeChecker struct {
name string
path string
exists bool
isDir bool
isFile bool
isSocket bool
isBlock bool
isChar bool
validpathType []*v1.HostPathType
invalidpathType []*v1.HostPathType
}
func (ftc *fakeHostPathTypeChecker) MakeFile() error { return nil }
func (ftc *fakeHostPathTypeChecker) MakeDir() error { return nil }
func (ftc *fakeHostPathTypeChecker) Exists() bool { return ftc.exists }
func (ftc *fakeHostPathTypeChecker) IsFile() bool { return ftc.isFile }
func (ftc *fakeHostPathTypeChecker) IsDir() bool { return ftc.isDir }
func (ftc *fakeHostPathTypeChecker) IsBlock() bool { return ftc.isBlock }
func (ftc *fakeHostPathTypeChecker) IsChar() bool { return ftc.isChar }
func (ftc *fakeHostPathTypeChecker) IsSocket() bool { return ftc.isSocket }
func (ftc *fakeHostPathTypeChecker) GetPath() string { return ftc.path }
func TestHostPathTypeCheckerInternal(t *testing.T) {
testCases := []fakeHostPathTypeChecker{
{
name: "Existing Folder",
path: "/existingFolder",
isDir: true,
exists: true,
validpathType: newHostPathTypeList(string(v1.HostPathDirectoryOrCreate), string(v1.HostPathDirectory)),
invalidpathType: newHostPathTypeList(string(v1.HostPathFileOrCreate), string(v1.HostPathFile),
string(v1.HostPathSocket), string(v1.HostPathCharDev), string(v1.HostPathBlockDev)),
},
{
name: "New Folder",
path: "/newFolder",
isDir: false,
exists: false,
validpathType: newHostPathTypeList(string(v1.HostPathDirectoryOrCreate)),
invalidpathType: newHostPathTypeList(string(v1.HostPathDirectory), string(v1.HostPathFile),
string(v1.HostPathSocket), string(v1.HostPathCharDev), string(v1.HostPathBlockDev)),
},
{
name: "Existing File",
path: "/existingFile",
isFile: true,
exists: true,
validpathType: newHostPathTypeList(string(v1.HostPathFileOrCreate), string(v1.HostPathFile)),
invalidpathType: newHostPathTypeList(string(v1.HostPathDirectoryOrCreate), string(v1.HostPathDirectory),
string(v1.HostPathSocket), string(v1.HostPathCharDev), string(v1.HostPathBlockDev)),
},
{
name: "New File",
path: "/newFile",
isFile: false,
exists: false,
validpathType: newHostPathTypeList(string(v1.HostPathFileOrCreate)),
invalidpathType: newHostPathTypeList(string(v1.HostPathDirectory),
string(v1.HostPathSocket), string(v1.HostPathCharDev), string(v1.HostPathBlockDev)),
},
{
name: "Existing Socket",
path: "/existing.socket",
isSocket: true,
isFile: true,
exists: true,
validpathType: newHostPathTypeList(string(v1.HostPathSocket), string(v1.HostPathFileOrCreate), string(v1.HostPathFile)),
invalidpathType: newHostPathTypeList(string(v1.HostPathDirectoryOrCreate), string(v1.HostPathDirectory),
string(v1.HostPathCharDev), string(v1.HostPathBlockDev)),
},
{
name: "Existing Character Device",
path: "/existing.char",
isChar: true,
isFile: true,
exists: true,
validpathType: newHostPathTypeList(string(v1.HostPathCharDev), string(v1.HostPathFileOrCreate), string(v1.HostPathFile)),
invalidpathType: newHostPathTypeList(string(v1.HostPathDirectoryOrCreate), string(v1.HostPathDirectory),
string(v1.HostPathSocket), string(v1.HostPathBlockDev)),
},
{
name: "Existing Block Device",
path: "/existing.block",
isBlock: true,
isFile: true,
exists: true,
validpathType: newHostPathTypeList(string(v1.HostPathBlockDev), string(v1.HostPathFileOrCreate), string(v1.HostPathFile)),
invalidpathType: newHostPathTypeList(string(v1.HostPathDirectoryOrCreate), string(v1.HostPathDirectory),
string(v1.HostPathSocket), string(v1.HostPathCharDev)),
},
}
for i, tc := range testCases {
for _, pathType := range tc.validpathType {
err := checkTypeInternal(&tc, pathType)
if err != nil {
t.Errorf("[%d: %q] [%q] expected nil, got %v", i, tc.name, string(*pathType), err)
}
}
for _, pathType := range tc.invalidpathType {
checkResult := checkTypeInternal(&tc, pathType)
if checkResult == nil {
t.Errorf("[%d: %q] [%q] expected error, got nil", i, tc.name, string(*pathType))
}
}
}
}
| pkg/volume/hostpath/host_path_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/8c175344294cccb2c06342605e3f2cd6a91d60c7 | [
0.0016920657362788916,
0.00020062610565219074,
0.00016228377353399992,
0.0001724970352370292,
0.0001951049198396504
] |
{
"id": 3,
"code_window": [
"\t\tginkgo.It(\"should not allow expansion of pvcs without AllowVolumeExpansion property\", func() {\n",
"\t\t\tinit()\n",
"\t\t\tdefer cleanup()\n",
"\n",
"\t\t\tvar err error\n",
"\t\t\tgomega.Expect(l.resource.Sc.AllowVolumeExpansion).To(gomega.BeNil())\n",
"\t\t\tginkgo.By(\"Expanding non-expandable pvc\")\n",
"\t\t\tcurrentPvcSize := l.resource.Pvc.Spec.Resources.Requests[v1.ResourceStorage]\n",
"\t\t\tnewSize := currentPvcSize.DeepCopy()\n",
"\t\t\tnewSize.Add(resource.MustParse(\"1Gi\"))\n",
"\t\t\tframework.Logf(\"currentPvcSize %v, newSize %v\", currentPvcSize, newSize)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tgomega.Expect(l.resource.Sc.AllowVolumeExpansion).NotTo(gomega.BeNil())\n",
"\t\t\tallowVolumeExpansion := *l.resource.Sc.AllowVolumeExpansion\n",
"\t\t\tgomega.Expect(allowVolumeExpansion).To(gomega.BeFalse())\n"
],
"file_path": "test/e2e/storage/testsuites/volume_expand.go",
"type": "replace",
"edit_start_line_idx": 158
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"error.go",
"validation.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/Azure/go-autorest/autorest/validation",
importpath = "github.com/Azure/go-autorest/autorest/validation",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
| vendor/github.com/Azure/go-autorest/autorest/validation/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/8c175344294cccb2c06342605e3f2cd6a91d60c7 | [
0.00017656270938459784,
0.00017566945462021977,
0.0001748810027493164,
0.00017556468083057553,
6.905403893142648e-7
] |
{
"id": 3,
"code_window": [
"\t\tginkgo.It(\"should not allow expansion of pvcs without AllowVolumeExpansion property\", func() {\n",
"\t\t\tinit()\n",
"\t\t\tdefer cleanup()\n",
"\n",
"\t\t\tvar err error\n",
"\t\t\tgomega.Expect(l.resource.Sc.AllowVolumeExpansion).To(gomega.BeNil())\n",
"\t\t\tginkgo.By(\"Expanding non-expandable pvc\")\n",
"\t\t\tcurrentPvcSize := l.resource.Pvc.Spec.Resources.Requests[v1.ResourceStorage]\n",
"\t\t\tnewSize := currentPvcSize.DeepCopy()\n",
"\t\t\tnewSize.Add(resource.MustParse(\"1Gi\"))\n",
"\t\t\tframework.Logf(\"currentPvcSize %v, newSize %v\", currentPvcSize, newSize)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tgomega.Expect(l.resource.Sc.AllowVolumeExpansion).NotTo(gomega.BeNil())\n",
"\t\t\tallowVolumeExpansion := *l.resource.Sc.AllowVolumeExpansion\n",
"\t\t\tgomega.Expect(allowVolumeExpansion).To(gomega.BeFalse())\n"
],
"file_path": "test/e2e/storage/testsuites/volume_expand.go",
"type": "replace",
"edit_start_line_idx": 158
} | package wclayer
import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/longpath"
"github.com/Microsoft/hcsshim/internal/safefile"
)
var errorIterationCanceled = errors.New("")
var mutatedUtilityVMFiles = map[string]bool{
`EFI\Microsoft\Boot\BCD`: true,
`EFI\Microsoft\Boot\BCD.LOG`: true,
`EFI\Microsoft\Boot\BCD.LOG1`: true,
`EFI\Microsoft\Boot\BCD.LOG2`: true,
}
const (
filesPath = `Files`
hivesPath = `Hives`
utilityVMPath = `UtilityVM`
utilityVMFilesPath = `UtilityVM\Files`
)
func openFileOrDir(path string, mode uint32, createDisposition uint32) (file *os.File, err error) {
return winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createDisposition)
}
func hasPathPrefix(p, prefix string) bool {
return strings.HasPrefix(p, prefix) && len(p) > len(prefix) && p[len(prefix)] == '\\'
}
type fileEntry struct {
path string
fi os.FileInfo
err error
}
type legacyLayerReader struct {
root string
result chan *fileEntry
proceed chan bool
currentFile *os.File
backupReader *winio.BackupFileReader
}
// newLegacyLayerReader returns a new LayerReader that can read the Windows
// container layer transport format from disk.
func newLegacyLayerReader(root string) *legacyLayerReader {
r := &legacyLayerReader{
root: root,
result: make(chan *fileEntry),
proceed: make(chan bool),
}
go r.walk()
return r
}
func readTombstones(path string) (map[string]([]string), error) {
tf, err := os.Open(filepath.Join(path, "tombstones.txt"))
if err != nil {
return nil, err
}
defer tf.Close()
s := bufio.NewScanner(tf)
if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" {
return nil, errors.New("Invalid tombstones file")
}
ts := make(map[string]([]string))
for s.Scan() {
t := filepath.Join(filesPath, s.Text()[1:]) // skip leading `\`
dir := filepath.Dir(t)
ts[dir] = append(ts[dir], t)
}
if err = s.Err(); err != nil {
return nil, err
}
return ts, nil
}
func (r *legacyLayerReader) walkUntilCancelled() error {
root, err := longpath.LongAbs(r.root)
if err != nil {
return err
}
r.root = root
ts, err := readTombstones(r.root)
if err != nil {
return err
}
err = filepath.Walk(r.root, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// Indirect fix for https://github.com/moby/moby/issues/32838#issuecomment-343610048.
// Handle failure from what may be a golang bug in the conversion of
// UTF16 to UTF8 in files which are left in the recycle bin. Os.Lstat
// which is called by filepath.Walk will fail when a filename contains
// unicode characters. Skip the recycle bin regardless which is goodness.
if strings.EqualFold(path, filepath.Join(r.root, `Files\$Recycle.Bin`)) && info.IsDir() {
return filepath.SkipDir
}
if path == r.root || path == filepath.Join(r.root, "tombstones.txt") || strings.HasSuffix(path, ".$wcidirs$") {
return nil
}
r.result <- &fileEntry{path, info, nil}
if !<-r.proceed {
return errorIterationCanceled
}
// List all the tombstones.
if info.IsDir() {
relPath, err := filepath.Rel(r.root, path)
if err != nil {
return err
}
if dts, ok := ts[relPath]; ok {
for _, t := range dts {
r.result <- &fileEntry{filepath.Join(r.root, t), nil, nil}
if !<-r.proceed {
return errorIterationCanceled
}
}
}
}
return nil
})
if err == errorIterationCanceled {
return nil
}
if err == nil {
return io.EOF
}
return err
}
func (r *legacyLayerReader) walk() {
defer close(r.result)
if !<-r.proceed {
return
}
err := r.walkUntilCancelled()
if err != nil {
for {
r.result <- &fileEntry{err: err}
if !<-r.proceed {
return
}
}
}
}
func (r *legacyLayerReader) reset() {
if r.backupReader != nil {
r.backupReader.Close()
r.backupReader = nil
}
if r.currentFile != nil {
r.currentFile.Close()
r.currentFile = nil
}
}
func findBackupStreamSize(r io.Reader) (int64, error) {
br := winio.NewBackupStreamReader(r)
for {
hdr, err := br.Next()
if err != nil {
if err == io.EOF {
err = nil
}
return 0, err
}
if hdr.Id == winio.BackupData {
return hdr.Size, nil
}
}
}
func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.FileBasicInfo, err error) {
r.reset()
r.proceed <- true
fe := <-r.result
if fe == nil {
err = errors.New("LegacyLayerReader closed")
return
}
if fe.err != nil {
err = fe.err
return
}
path, err = filepath.Rel(r.root, fe.path)
if err != nil {
return
}
if fe.fi == nil {
// This is a tombstone. Return a nil fileInfo.
return
}
if fe.fi.IsDir() && hasPathPrefix(path, filesPath) {
fe.path += ".$wcidirs$"
}
f, err := openFileOrDir(fe.path, syscall.GENERIC_READ, syscall.OPEN_EXISTING)
if err != nil {
return
}
defer func() {
if f != nil {
f.Close()
}
}()
fileInfo, err = winio.GetFileBasicInfo(f)
if err != nil {
return
}
if !hasPathPrefix(path, filesPath) {
size = fe.fi.Size()
r.backupReader = winio.NewBackupFileReader(f, false)
if path == hivesPath || path == filesPath {
// The Hives directory has a non-deterministic file time because of the
// nature of the import process. Use the times from System_Delta.
var g *os.File
g, err = os.Open(filepath.Join(r.root, hivesPath, `System_Delta`))
if err != nil {
return
}
attr := fileInfo.FileAttributes
fileInfo, err = winio.GetFileBasicInfo(g)
g.Close()
if err != nil {
return
}
fileInfo.FileAttributes = attr
}
// The creation time and access time get reset for files outside of the Files path.
fileInfo.CreationTime = fileInfo.LastWriteTime
fileInfo.LastAccessTime = fileInfo.LastWriteTime
} else {
// The file attributes are written before the backup stream.
var attr uint32
err = binary.Read(f, binary.LittleEndian, &attr)
if err != nil {
return
}
fileInfo.FileAttributes = attr
beginning := int64(4)
// Find the accurate file size.
if !fe.fi.IsDir() {
size, err = findBackupStreamSize(f)
if err != nil {
err = &os.PathError{Op: "findBackupStreamSize", Path: fe.path, Err: err}
return
}
}
// Return back to the beginning of the backup stream.
_, err = f.Seek(beginning, 0)
if err != nil {
return
}
}
r.currentFile = f
f = nil
return
}
func (r *legacyLayerReader) Read(b []byte) (int, error) {
if r.backupReader == nil {
if r.currentFile == nil {
return 0, io.EOF
}
return r.currentFile.Read(b)
}
return r.backupReader.Read(b)
}
func (r *legacyLayerReader) Seek(offset int64, whence int) (int64, error) {
if r.backupReader == nil {
if r.currentFile == nil {
return 0, errors.New("no current file")
}
return r.currentFile.Seek(offset, whence)
}
return 0, errors.New("seek not supported on this stream")
}
func (r *legacyLayerReader) Close() error {
r.proceed <- false
<-r.result
r.reset()
return nil
}
type pendingLink struct {
Path, Target string
TargetRoot *os.File
}
type pendingDir struct {
Path string
Root *os.File
}
type legacyLayerWriter struct {
root *os.File
destRoot *os.File
parentRoots []*os.File
currentFile *os.File
bufWriter *bufio.Writer
currentFileName string
currentFileRoot *os.File
backupWriter *winio.BackupFileWriter
Tombstones []string
HasUtilityVM bool
changedDi []dirInfo
addedFiles map[string]bool
PendingLinks []pendingLink
pendingDirs []pendingDir
currentIsDir bool
}
// newLegacyLayerWriter returns a LayerWriter that can write the container layer
// transport format to disk.
func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) (w *legacyLayerWriter, err error) {
w = &legacyLayerWriter{
addedFiles: make(map[string]bool),
}
defer func() {
if err != nil {
w.CloseRoots()
w = nil
}
}()
w.root, err = safefile.OpenRoot(root)
if err != nil {
return
}
w.destRoot, err = safefile.OpenRoot(destRoot)
if err != nil {
return
}
for _, r := range parentRoots {
f, err := safefile.OpenRoot(r)
if err != nil {
return w, err
}
w.parentRoots = append(w.parentRoots, f)
}
w.bufWriter = bufio.NewWriterSize(ioutil.Discard, 65536)
return
}
func (w *legacyLayerWriter) CloseRoots() {
if w.root != nil {
w.root.Close()
w.root = nil
}
if w.destRoot != nil {
w.destRoot.Close()
w.destRoot = nil
}
for i := range w.parentRoots {
w.parentRoots[i].Close()
}
w.parentRoots = nil
}
func (w *legacyLayerWriter) initUtilityVM() error {
if !w.HasUtilityVM {
err := safefile.MkdirRelative(utilityVMPath, w.destRoot)
if err != nil {
return err
}
// Server 2016 does not support multiple layers for the utility VM, so
// clone the utility VM from the parent layer into this layer. Use hard
// links to avoid unnecessary copying, since most of the files are
// immutable.
err = cloneTree(w.parentRoots[0], w.destRoot, utilityVMFilesPath, mutatedUtilityVMFiles)
if err != nil {
return fmt.Errorf("cloning the parent utility VM image failed: %s", err)
}
w.HasUtilityVM = true
}
return nil
}
func (w *legacyLayerWriter) reset() error {
err := w.bufWriter.Flush()
if err != nil {
return err
}
w.bufWriter.Reset(ioutil.Discard)
if w.currentIsDir {
r := w.currentFile
br := winio.NewBackupStreamReader(r)
// Seek to the beginning of the backup stream, skipping the fileattrs
if _, err := r.Seek(4, io.SeekStart); err != nil {
return err
}
for {
bhdr, err := br.Next()
if err == io.EOF {
// end of backupstream data
break
}
if err != nil {
return err
}
switch bhdr.Id {
case winio.BackupReparseData:
// The current file is a `.$wcidirs$` metadata file that
// describes a directory reparse point. Delete the placeholder
// directory to prevent future files being added into the
// destination of the reparse point during the ImportLayer call
if err := safefile.RemoveRelative(w.currentFileName, w.currentFileRoot); err != nil {
return err
}
w.pendingDirs = append(w.pendingDirs, pendingDir{Path: w.currentFileName, Root: w.currentFileRoot})
default:
// ignore all other stream types, as we only care about directory reparse points
}
}
w.currentIsDir = false
}
if w.backupWriter != nil {
w.backupWriter.Close()
w.backupWriter = nil
}
if w.currentFile != nil {
w.currentFile.Close()
w.currentFile = nil
w.currentFileName = ""
w.currentFileRoot = nil
}
return nil
}
// copyFileWithMetadata copies a file using the backup/restore APIs in order to preserve metadata
func copyFileWithMetadata(srcRoot, destRoot *os.File, subPath string, isDir bool) (fileInfo *winio.FileBasicInfo, err error) {
src, err := safefile.OpenRelative(
subPath,
srcRoot,
syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY,
syscall.FILE_SHARE_READ,
safefile.FILE_OPEN,
safefile.FILE_OPEN_REPARSE_POINT)
if err != nil {
return nil, err
}
defer src.Close()
srcr := winio.NewBackupFileReader(src, true)
defer srcr.Close()
fileInfo, err = winio.GetFileBasicInfo(src)
if err != nil {
return nil, err
}
extraFlags := uint32(0)
if isDir {
extraFlags |= safefile.FILE_DIRECTORY_FILE
}
dest, err := safefile.OpenRelative(
subPath,
destRoot,
syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY,
syscall.FILE_SHARE_READ,
safefile.FILE_CREATE,
extraFlags)
if err != nil {
return nil, err
}
defer dest.Close()
err = winio.SetFileBasicInfo(dest, fileInfo)
if err != nil {
return nil, err
}
destw := winio.NewBackupFileWriter(dest, true)
defer func() {
cerr := destw.Close()
if err == nil {
err = cerr
}
}()
_, err = io.Copy(destw, srcr)
if err != nil {
return nil, err
}
return fileInfo, nil
}
// cloneTree clones a directory tree using hard links. It skips hard links for
// the file names in the provided map and just copies those files.
func cloneTree(srcRoot *os.File, destRoot *os.File, subPath string, mutatedFiles map[string]bool) error {
var di []dirInfo
err := safefile.EnsureNotReparsePointRelative(subPath, srcRoot)
if err != nil {
return err
}
err = filepath.Walk(filepath.Join(srcRoot.Name(), subPath), func(srcFilePath string, info os.FileInfo, err error) error {
if err != nil {
return err
}
relPath, err := filepath.Rel(srcRoot.Name(), srcFilePath)
if err != nil {
return err
}
fileAttributes := info.Sys().(*syscall.Win32FileAttributeData).FileAttributes
// Directories, reparse points, and files that will be mutated during
// utility VM import must be copied. All other files can be hard linked.
isReparsePoint := fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0
// In go1.9, FileInfo.IsDir() returns false if the directory is also a symlink.
// See: https://github.com/golang/go/commit/1989921aef60c83e6f9127a8448fb5ede10e9acc
// Fixes the problem by checking syscall.FILE_ATTRIBUTE_DIRECTORY directly
isDir := fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0
if isDir || isReparsePoint || mutatedFiles[relPath] {
fi, err := copyFileWithMetadata(srcRoot, destRoot, relPath, isDir)
if err != nil {
return err
}
if isDir {
di = append(di, dirInfo{path: relPath, fileInfo: *fi})
}
} else {
err = safefile.LinkRelative(relPath, srcRoot, relPath, destRoot)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
return reapplyDirectoryTimes(destRoot, di)
}
func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error {
if err := w.reset(); err != nil {
return err
}
if name == utilityVMPath {
return w.initUtilityVM()
}
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
w.changedDi = append(w.changedDi, dirInfo{path: name, fileInfo: *fileInfo})
}
name = filepath.Clean(name)
if hasPathPrefix(name, utilityVMPath) {
if !w.HasUtilityVM {
return errors.New("missing UtilityVM directory")
}
if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath {
return errors.New("invalid UtilityVM layer")
}
createDisposition := uint32(safefile.FILE_OPEN)
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
st, err := safefile.LstatRelative(name, w.destRoot)
if err != nil && !os.IsNotExist(err) {
return err
}
if st != nil {
// Delete the existing file/directory if it is not the same type as this directory.
existingAttr := st.Sys().(*syscall.Win32FileAttributeData).FileAttributes
if (uint32(fileInfo.FileAttributes)^existingAttr)&(syscall.FILE_ATTRIBUTE_DIRECTORY|syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 {
if err = safefile.RemoveAllRelative(name, w.destRoot); err != nil {
return err
}
st = nil
}
}
if st == nil {
if err = safefile.MkdirRelative(name, w.destRoot); err != nil {
return err
}
}
} else {
// Overwrite any existing hard link.
err := safefile.RemoveRelative(name, w.destRoot)
if err != nil && !os.IsNotExist(err) {
return err
}
createDisposition = safefile.FILE_CREATE
}
f, err := safefile.OpenRelative(
name,
w.destRoot,
syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY,
syscall.FILE_SHARE_READ,
createDisposition,
safefile.FILE_OPEN_REPARSE_POINT,
)
if err != nil {
return err
}
defer func() {
if f != nil {
f.Close()
safefile.RemoveRelative(name, w.destRoot)
}
}()
err = winio.SetFileBasicInfo(f, fileInfo)
if err != nil {
return err
}
w.backupWriter = winio.NewBackupFileWriter(f, true)
w.bufWriter.Reset(w.backupWriter)
w.currentFile = f
w.currentFileName = name
w.currentFileRoot = w.destRoot
w.addedFiles[name] = true
f = nil
return nil
}
fname := name
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
err := safefile.MkdirRelative(name, w.root)
if err != nil {
return err
}
fname += ".$wcidirs$"
w.currentIsDir = true
}
f, err := safefile.OpenRelative(fname, w.root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, safefile.FILE_CREATE, 0)
if err != nil {
return err
}
defer func() {
if f != nil {
f.Close()
safefile.RemoveRelative(fname, w.root)
}
}()
strippedFi := *fileInfo
strippedFi.FileAttributes = 0
err = winio.SetFileBasicInfo(f, &strippedFi)
if err != nil {
return err
}
if hasPathPrefix(name, hivesPath) {
w.backupWriter = winio.NewBackupFileWriter(f, false)
w.bufWriter.Reset(w.backupWriter)
} else {
w.bufWriter.Reset(f)
// The file attributes are written before the stream.
err = binary.Write(w.bufWriter, binary.LittleEndian, uint32(fileInfo.FileAttributes))
if err != nil {
w.bufWriter.Reset(ioutil.Discard)
return err
}
}
w.currentFile = f
w.currentFileName = name
w.currentFileRoot = w.root
w.addedFiles[name] = true
f = nil
return nil
}
func (w *legacyLayerWriter) AddLink(name string, target string) error {
if err := w.reset(); err != nil {
return err
}
target = filepath.Clean(target)
var roots []*os.File
if hasPathPrefix(target, filesPath) {
// Look for cross-layer hard link targets in the parent layers, since
// nothing is in the destination path yet.
roots = w.parentRoots
} else if hasPathPrefix(target, utilityVMFilesPath) {
// Since the utility VM is fully cloned into the destination path
// already, look for cross-layer hard link targets directly in the
// destination path.
roots = []*os.File{w.destRoot}
}
if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, utilityVMFilesPath)) {
return errors.New("invalid hard link in layer")
}
	// Try to find the target of the link in a previously added file. If that
// fails, search in parent layers.
var selectedRoot *os.File
if _, ok := w.addedFiles[target]; ok {
selectedRoot = w.destRoot
} else {
for _, r := range roots {
if _, err := safefile.LstatRelative(target, r); err != nil {
if !os.IsNotExist(err) {
return err
}
} else {
selectedRoot = r
break
}
}
if selectedRoot == nil {
return fmt.Errorf("failed to find link target for '%s' -> '%s'", name, target)
}
}
// The link can't be written until after the ImportLayer call.
w.PendingLinks = append(w.PendingLinks, pendingLink{
Path: name,
Target: target,
TargetRoot: selectedRoot,
})
w.addedFiles[name] = true
return nil
}
func (w *legacyLayerWriter) Remove(name string) error {
name = filepath.Clean(name)
if hasPathPrefix(name, filesPath) {
w.Tombstones = append(w.Tombstones, name)
} else if hasPathPrefix(name, utilityVMFilesPath) {
err := w.initUtilityVM()
if err != nil {
return err
}
// Make sure the path exists; os.RemoveAll will not fail if the file is
// already gone, and this needs to be a fatal error for diagnostics
// purposes.
if _, err := safefile.LstatRelative(name, w.destRoot); err != nil {
return err
}
err = safefile.RemoveAllRelative(name, w.destRoot)
if err != nil {
return err
}
} else {
return fmt.Errorf("invalid tombstone %s", name)
}
return nil
}
func (w *legacyLayerWriter) Write(b []byte) (int, error) {
if w.backupWriter == nil && w.currentFile == nil {
return 0, errors.New("closed")
}
return w.bufWriter.Write(b)
}
func (w *legacyLayerWriter) Close() error {
if err := w.reset(); err != nil {
return err
}
if err := safefile.RemoveRelative("tombstones.txt", w.root); err != nil && !os.IsNotExist(err) {
return err
}
for _, pd := range w.pendingDirs {
err := safefile.MkdirRelative(pd.Path, pd.Root)
if err != nil {
return err
}
}
return nil
}
| vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go | 0 | https://github.com/kubernetes/kubernetes/commit/8c175344294cccb2c06342605e3f2cd6a91d60c7 | [
0.0015707029961049557,
0.00019425222126301378,
0.00015768806042615324,
0.00016833690460771322,
0.0001694286911515519
] |
{
"id": 0,
"code_window": [
"\treturn err\n",
"}\n",
"\n",
"// MirrorsIterate iterates all mirror repositories.\n",
"func MirrorsIterate(limit int, f func(idx int, bean interface{}) error) error {\n",
"\treturn db.GetEngine(db.DefaultContext).\n",
"\t\tWhere(\"next_update_unix<=?\", time.Now().Unix()).\n",
"\t\tAnd(\"next_update_unix!=0\").\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tsess := db.GetEngine(db.DefaultContext).\n"
],
"file_path": "models/repo/mirror.go",
"type": "replace",
"edit_start_line_idx": 109
} | // Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repo
import (
"context"
"errors"
"time"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/timeutil"
"xorm.io/builder"
)
// ErrPushMirrorNotExist mirror does not exist error
var ErrPushMirrorNotExist = errors.New("PushMirror does not exist")
// PushMirror represents mirror information of a repository.
type PushMirror struct {
ID int64 `xorm:"pk autoincr"`
RepoID int64 `xorm:"INDEX"`
Repo *Repository `xorm:"-"`
RemoteName string
SyncOnCommit bool `xorm:"NOT NULL DEFAULT true"`
Interval time.Duration
CreatedUnix timeutil.TimeStamp `xorm:"created"`
LastUpdateUnix timeutil.TimeStamp `xorm:"INDEX last_update"`
LastError string `xorm:"text"`
}
type PushMirrorOptions struct {
ID int64
RepoID int64
RemoteName string
}
func (opts *PushMirrorOptions) toConds() builder.Cond {
cond := builder.NewCond()
if opts.RepoID > 0 {
cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
}
if opts.RemoteName != "" {
cond = cond.And(builder.Eq{"remote_name": opts.RemoteName})
}
if opts.ID > 0 {
cond = cond.And(builder.Eq{"id": opts.ID})
}
return cond
}
func init() {
db.RegisterModel(new(PushMirror))
}
// GetRepository returns the repository of the mirror.
func (m *PushMirror) GetRepository() *Repository {
if m.Repo != nil {
return m.Repo
}
var err error
m.Repo, err = GetRepositoryByIDCtx(db.DefaultContext, m.RepoID)
if err != nil {
log.Error("getRepositoryByID[%d]: %v", m.ID, err)
}
return m.Repo
}
// GetRemoteName returns the name of the remote.
func (m *PushMirror) GetRemoteName() string {
return m.RemoteName
}
// InsertPushMirror inserts a push-mirror to database
func InsertPushMirror(ctx context.Context, m *PushMirror) error {
_, err := db.GetEngine(ctx).Insert(m)
return err
}
// UpdatePushMirror updates the push-mirror
func UpdatePushMirror(ctx context.Context, m *PushMirror) error {
_, err := db.GetEngine(ctx).ID(m.ID).AllCols().Update(m)
return err
}
func DeletePushMirrors(ctx context.Context, opts PushMirrorOptions) error {
if opts.RepoID > 0 {
_, err := db.GetEngine(ctx).Where(opts.toConds()).Delete(&PushMirror{})
return err
}
return errors.New("repoID required and must be set")
}
func GetPushMirror(ctx context.Context, opts PushMirrorOptions) (*PushMirror, error) {
mirror := &PushMirror{}
exist, err := db.GetEngine(ctx).Where(opts.toConds()).Get(mirror)
if err != nil {
return nil, err
} else if !exist {
return nil, ErrPushMirrorNotExist
}
return mirror, nil
}
// GetPushMirrorsByRepoID returns push-mirror information of a repository.
func GetPushMirrorsByRepoID(ctx context.Context, repoID int64, listOptions db.ListOptions) ([]*PushMirror, int64, error) {
sess := db.GetEngine(ctx).Where("repo_id = ?", repoID)
if listOptions.Page != 0 {
sess = db.SetSessionPagination(sess, &listOptions)
mirrors := make([]*PushMirror, 0, listOptions.PageSize)
count, err := sess.FindAndCount(&mirrors)
return mirrors, count, err
}
mirrors := make([]*PushMirror, 0, 10)
count, err := sess.FindAndCount(&mirrors)
return mirrors, count, err
}
// GetPushMirrorsSyncedOnCommit returns push-mirrors for this repo that should be updated by new commits
func GetPushMirrorsSyncedOnCommit(repoID int64) ([]*PushMirror, error) {
mirrors := make([]*PushMirror, 0, 10)
return mirrors, db.GetEngine(db.DefaultContext).
Where("repo_id=? AND sync_on_commit=?", repoID, true).
Find(&mirrors)
}
// PushMirrorsIterate iterates all push-mirror repositories.
func PushMirrorsIterate(ctx context.Context, limit int, f func(idx int, bean interface{}) error) error {
return db.GetEngine(ctx).
Where("last_update + (`interval` / ?) <= ?", time.Second, time.Now().Unix()).
And("`interval` != 0").
OrderBy("last_update ASC").
Limit(limit).
Iterate(new(PushMirror), f)
}
| models/repo/pushmirror.go | 1 | https://github.com/go-gitea/gitea/commit/fc4680ea712fdf89065db882cd9d67946d004500 | [
0.9922010898590088,
0.1507386714220047,
0.00017794626182876527,
0.007095096632838249,
0.341377317905426
] |
{
"id": 0,
"code_window": [
"\treturn err\n",
"}\n",
"\n",
"// MirrorsIterate iterates all mirror repositories.\n",
"func MirrorsIterate(limit int, f func(idx int, bean interface{}) error) error {\n",
"\treturn db.GetEngine(db.DefaultContext).\n",
"\t\tWhere(\"next_update_unix<=?\", time.Now().Unix()).\n",
"\t\tAnd(\"next_update_unix!=0\").\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tsess := db.GetEngine(db.DefaultContext).\n"
],
"file_path": "models/repo/mirror.go",
"type": "replace",
"edit_start_line_idx": 109
} | // Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package lfs
import (
"path"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
func TestStringContent(t *testing.T) {
p := Pointer{Oid: "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393", Size: 1234}
expected := "version https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize 1234\n"
assert.Equal(t, expected, p.StringContent())
}
func TestRelativePath(t *testing.T) {
p := Pointer{Oid: "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393"}
expected := path.Join("4d", "7a", "214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393")
assert.Equal(t, expected, p.RelativePath())
p2 := Pointer{Oid: "4d7a"}
assert.Equal(t, "4d7a", p2.RelativePath())
}
func TestIsValid(t *testing.T) {
p := Pointer{}
assert.False(t, p.IsValid())
p = Pointer{Oid: "123"}
assert.False(t, p.IsValid())
p = Pointer{Oid: "z4cb57646c54a297c9807697e80a30946f79a4b82cb079d2606847825b1812cc"}
assert.False(t, p.IsValid())
p = Pointer{Oid: "94cb57646c54a297c9807697e80a30946f79a4b82cb079d2606847825b1812cc"}
assert.True(t, p.IsValid())
p = Pointer{Oid: "94cb57646c54a297c9807697e80a30946f79a4b82cb079d2606847825b1812cc", Size: -1}
assert.False(t, p.IsValid())
}
func TestGeneratePointer(t *testing.T) {
p, err := GeneratePointer(strings.NewReader("Gitea"))
assert.NoError(t, err)
assert.True(t, p.IsValid())
assert.Equal(t, "94cb57646c54a297c9807697e80a30946f79a4b82cb079d2606847825b1812cc", p.Oid)
assert.Equal(t, int64(5), p.Size)
}
func TestReadPointerFromBuffer(t *testing.T) {
p, err := ReadPointerFromBuffer([]byte{})
assert.ErrorIs(t, err, ErrMissingPrefix)
assert.False(t, p.IsValid())
p, err = ReadPointerFromBuffer([]byte("test"))
assert.ErrorIs(t, err, ErrMissingPrefix)
assert.False(t, p.IsValid())
p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\n"))
assert.ErrorIs(t, err, ErrInvalidStructure)
assert.False(t, p.IsValid())
p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a\nsize 1234\n"))
assert.ErrorIs(t, err, ErrInvalidOIDFormat)
assert.False(t, p.IsValid())
p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a2146z4ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize 1234\n"))
assert.ErrorIs(t, err, ErrInvalidOIDFormat)
assert.False(t, p.IsValid())
p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\ntest 1234\n"))
assert.Error(t, err)
assert.False(t, p.IsValid())
p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize test\n"))
assert.Error(t, err)
assert.False(t, p.IsValid())
p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize 1234\n"))
assert.NoError(t, err)
assert.True(t, p.IsValid())
assert.Equal(t, "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393", p.Oid)
assert.Equal(t, int64(1234), p.Size)
p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize 1234\ntest"))
assert.NoError(t, err)
assert.True(t, p.IsValid())
assert.Equal(t, "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393", p.Oid)
assert.Equal(t, int64(1234), p.Size)
}
func TestReadPointer(t *testing.T) {
p, err := ReadPointer(strings.NewReader("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize 1234\n"))
assert.NoError(t, err)
assert.True(t, p.IsValid())
assert.Equal(t, "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393", p.Oid)
assert.Equal(t, int64(1234), p.Size)
}
| modules/lfs/pointer_test.go | 0 | https://github.com/go-gitea/gitea/commit/fc4680ea712fdf89065db882cd9d67946d004500 | [
0.0013457444729283452,
0.00033341094967909157,
0.00015860158600844443,
0.0001725212059682235,
0.0003614421293605119
] |
{
"id": 0,
"code_window": [
"\treturn err\n",
"}\n",
"\n",
"// MirrorsIterate iterates all mirror repositories.\n",
"func MirrorsIterate(limit int, f func(idx int, bean interface{}) error) error {\n",
"\treturn db.GetEngine(db.DefaultContext).\n",
"\t\tWhere(\"next_update_unix<=?\", time.Now().Unix()).\n",
"\t\tAnd(\"next_update_unix!=0\").\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tsess := db.GetEngine(db.DefaultContext).\n"
],
"file_path": "models/repo/mirror.go",
"type": "replace",
"edit_start_line_idx": 109
} | // Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package util
import "testing"
func TestShellEscape(t *testing.T) {
tests := []struct {
name string
toEscape string
want string
}{
{
"Simplest case - nothing to escape",
"a/b/c/d",
"a/b/c/d",
}, {
"Prefixed tilde - with normal stuff - should not escape",
"~/src/go/gitea/gitea",
"~/src/go/gitea/gitea",
}, {
"Typical windows path with spaces - should get doublequote escaped",
`C:\Program Files\Gitea v1.13 - I like lots of spaces\gitea`,
`"C:\\Program Files\\Gitea v1.13 - I like lots of spaces\\gitea"`,
}, {
"Forward-slashed windows path with spaces - should get doublequote escaped",
"C:/Program Files/Gitea v1.13 - I like lots of spaces/gitea",
`"C:/Program Files/Gitea v1.13 - I like lots of spaces/gitea"`,
}, {
"Prefixed tilde - but then a space filled path",
"~git/Gitea v1.13/gitea",
`~git/"Gitea v1.13/gitea"`,
}, {
"Bangs are unfortunately not predictable so need to be singlequoted",
"C:/Program Files/Gitea!/gitea",
`'C:/Program Files/Gitea!/gitea'`,
}, {
"Newlines are just irritating",
"/home/git/Gitea\n\nWHY-WOULD-YOU-DO-THIS\n\nGitea/gitea",
"'/home/git/Gitea\n\nWHY-WOULD-YOU-DO-THIS\n\nGitea/gitea'",
}, {
"Similarly we should nicely handle multiple single quotes if we have to single-quote",
"'!''!'''!''!'!'",
`\''!'\'\''!'\'\'\''!'\'\''!'\''!'\'`,
}, {
"Double quote < ...",
"~/<gitea",
"~/\"<gitea\"",
}, {
"Double quote > ...",
"~/gitea>",
"~/\"gitea>\"",
}, {
"Double quote and escape $ ...",
"~/$gitea",
"~/\"\\$gitea\"",
}, {
"Double quote {...",
"~/{gitea",
"~/\"{gitea\"",
}, {
"Double quote }...",
"~/gitea}",
"~/\"gitea}\"",
}, {
"Double quote ()...",
"~/(gitea)",
"~/\"(gitea)\"",
}, {
"Double quote and escape `...",
"~/gitea`",
"~/\"gitea\\`\"",
}, {
"Double quotes can handle a number of things without having to escape them but not everything ...",
"~/<gitea> ${gitea} `gitea` [gitea] (gitea) \"gitea\" \\gitea\\ 'gitea'",
"~/\"<gitea> \\${gitea} \\`gitea\\` [gitea] (gitea) \\\"gitea\\\" \\\\gitea\\\\ 'gitea'\"",
}, {
"Single quotes don't need to escape except for '...",
"~/<gitea> ${gitea} `gitea` (gitea) !gitea! \"gitea\" \\gitea\\ 'gitea'",
"~/'<gitea> ${gitea} `gitea` (gitea) !gitea! \"gitea\" \\gitea\\ '\\''gitea'\\'",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := ShellEscape(tt.toEscape); got != tt.want {
t.Errorf("ShellEscape(%q):\nGot: %s\nWanted: %s", tt.toEscape, got, tt.want)
}
})
}
}
| modules/util/shellquote_test.go | 0 | https://github.com/go-gitea/gitea/commit/fc4680ea712fdf89065db882cd9d67946d004500 | [
0.000176854882738553,
0.00017259508604183793,
0.00016935518942773342,
0.00017212801321875304,
0.0000023450527351087658
] |
{
"id": 0,
"code_window": [
"\treturn err\n",
"}\n",
"\n",
"// MirrorsIterate iterates all mirror repositories.\n",
"func MirrorsIterate(limit int, f func(idx int, bean interface{}) error) error {\n",
"\treturn db.GetEngine(db.DefaultContext).\n",
"\t\tWhere(\"next_update_unix<=?\", time.Now().Unix()).\n",
"\t\tAnd(\"next_update_unix!=0\").\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tsess := db.GetEngine(db.DefaultContext).\n"
],
"file_path": "models/repo/mirror.go",
"type": "replace",
"edit_start_line_idx": 109
} | // Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package asymkey
import (
"testing"
asymkey_model "code.gitea.io/gitea/models/asymkey"
"code.gitea.io/gitea/models/auth"
"code.gitea.io/gitea/models/unittest"
user_model "code.gitea.io/gitea/models/user"
"github.com/stretchr/testify/assert"
)
func TestAddLdapSSHPublicKeys(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
s := &auth.Source{ID: 1}
testCases := []struct {
keyString string
number int
keyContents []string
}{
{
keyString: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC4cn+iXnA4KvcQYSV88vGn0Yi91vG47t1P7okprVmhNTkipNRIHWr6WdCO4VDr/cvsRkuVJAsLO2enwjGWWueOO6BodiBgyAOZ/5t5nJNMCNuLGT5UIo/RI1b0WRQwxEZTRjt6mFNw6lH14wRd8ulsr9toSWBPMOGWoYs1PDeDL0JuTjL+tr1SZi/EyxCngpYszKdXllJEHyI79KQgeD0Vt3pTrkbNVTOEcCNqZePSVmUH8X8Vhugz3bnE0/iE9Pb5fkWO9c4AnM1FgI/8Bvp27Fw2ShryIXuR6kKvUqhVMTuOSDHwu6A8jLE5Owt3GAYugDpDYuwTVNGrHLXKpPzrGGPE/jPmaLCMZcsdkec95dYeU3zKODEm8UQZFhmJmDeWVJ36nGrGZHL4J5aTTaeFUJmmXDaJYiJ+K2/ioKgXqnXvltu0A9R8/LGy4nrTJRr4JMLuJFoUXvGm1gXQ70w2LSpk6yl71RNC0hCtsBe8BP8IhYCM0EP5jh7eCMQZNvM= nocomment\n",
number: 1,
keyContents: []string{
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC4cn+iXnA4KvcQYSV88vGn0Yi91vG47t1P7okprVmhNTkipNRIHWr6WdCO4VDr/cvsRkuVJAsLO2enwjGWWueOO6BodiBgyAOZ/5t5nJNMCNuLGT5UIo/RI1b0WRQwxEZTRjt6mFNw6lH14wRd8ulsr9toSWBPMOGWoYs1PDeDL0JuTjL+tr1SZi/EyxCngpYszKdXllJEHyI79KQgeD0Vt3pTrkbNVTOEcCNqZePSVmUH8X8Vhugz3bnE0/iE9Pb5fkWO9c4AnM1FgI/8Bvp27Fw2ShryIXuR6kKvUqhVMTuOSDHwu6A8jLE5Owt3GAYugDpDYuwTVNGrHLXKpPzrGGPE/jPmaLCMZcsdkec95dYeU3zKODEm8UQZFhmJmDeWVJ36nGrGZHL4J5aTTaeFUJmmXDaJYiJ+K2/ioKgXqnXvltu0A9R8/LGy4nrTJRr4JMLuJFoUXvGm1gXQ70w2LSpk6yl71RNC0hCtsBe8BP8IhYCM0EP5jh7eCMQZNvM=",
},
},
{
keyString: `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC4cn+iXnA4KvcQYSV88vGn0Yi91vG47t1P7okprVmhNTkipNRIHWr6WdCO4VDr/cvsRkuVJAsLO2enwjGWWueOO6BodiBgyAOZ/5t5nJNMCNuLGT5UIo/RI1b0WRQwxEZTRjt6mFNw6lH14wRd8ulsr9toSWBPMOGWoYs1PDeDL0JuTjL+tr1SZi/EyxCngpYszKdXllJEHyI79KQgeD0Vt3pTrkbNVTOEcCNqZePSVmUH8X8Vhugz3bnE0/iE9Pb5fkWO9c4AnM1FgI/8Bvp27Fw2ShryIXuR6kKvUqhVMTuOSDHwu6A8jLE5Owt3GAYugDpDYuwTVNGrHLXKpPzrGGPE/jPmaLCMZcsdkec95dYeU3zKODEm8UQZFhmJmDeWVJ36nGrGZHL4J5aTTaeFUJmmXDaJYiJ+K2/ioKgXqnXvltu0A9R8/LGy4nrTJRr4JMLuJFoUXvGm1gXQ70w2LSpk6yl71RNC0hCtsBe8BP8IhYCM0EP5jh7eCMQZNvM= nocomment
ssh-dss AAAAB3NzaC1kc3MAAACBAOChCC7lf6Uo9n7BmZ6M8St19PZf4Tn59NriyboW2x/DZuYAz3ibZ2OkQ3S0SqDIa0HXSEJ1zaExQdmbO+Ux/wsytWZmCczWOVsaszBZSl90q8UnWlSH6P+/YA+RWJm5SFtuV9PtGIhyZgoNuz5kBQ7K139wuQsecdKktISwTakzAAAAFQCzKsO2JhNKlL+wwwLGOcLffoAmkwAAAIBpK7/3xvduajLBD/9vASqBQIHrgK2J+wiQnIb/Wzy0UsVmvfn8A+udRbBo+csM8xrSnlnlJnjkJS3qiM5g+eTwsLIV1IdKPEwmwB+VcP53Cw6lSyWyJcvhFb0N6s08NZysLzvj0N+ZC/FnhKTLzIyMtkHf/IrPCwlM+pV/M/96YgAAAIEAqQcGn9CKgzgPaguIZooTAOQdvBLMI5y0bQjOW6734XOpqQGf/Kra90wpoasLKZjSYKNPjE+FRUOrStLrxcNs4BeVKhy2PYTRnybfYVk1/dmKgH6P1YSRONsGKvTsH6c5IyCRG0ncCgYeF8tXppyd642982daopE7zQ/NPAnJfag= nocomment`,
number: 2,
keyContents: []string{
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC4cn+iXnA4KvcQYSV88vGn0Yi91vG47t1P7okprVmhNTkipNRIHWr6WdCO4VDr/cvsRkuVJAsLO2enwjGWWueOO6BodiBgyAOZ/5t5nJNMCNuLGT5UIo/RI1b0WRQwxEZTRjt6mFNw6lH14wRd8ulsr9toSWBPMOGWoYs1PDeDL0JuTjL+tr1SZi/EyxCngpYszKdXllJEHyI79KQgeD0Vt3pTrkbNVTOEcCNqZePSVmUH8X8Vhugz3bnE0/iE9Pb5fkWO9c4AnM1FgI/8Bvp27Fw2ShryIXuR6kKvUqhVMTuOSDHwu6A8jLE5Owt3GAYugDpDYuwTVNGrHLXKpPzrGGPE/jPmaLCMZcsdkec95dYeU3zKODEm8UQZFhmJmDeWVJ36nGrGZHL4J5aTTaeFUJmmXDaJYiJ+K2/ioKgXqnXvltu0A9R8/LGy4nrTJRr4JMLuJFoUXvGm1gXQ70w2LSpk6yl71RNC0hCtsBe8BP8IhYCM0EP5jh7eCMQZNvM=",
"ssh-dss AAAAB3NzaC1kc3MAAACBAOChCC7lf6Uo9n7BmZ6M8St19PZf4Tn59NriyboW2x/DZuYAz3ibZ2OkQ3S0SqDIa0HXSEJ1zaExQdmbO+Ux/wsytWZmCczWOVsaszBZSl90q8UnWlSH6P+/YA+RWJm5SFtuV9PtGIhyZgoNuz5kBQ7K139wuQsecdKktISwTakzAAAAFQCzKsO2JhNKlL+wwwLGOcLffoAmkwAAAIBpK7/3xvduajLBD/9vASqBQIHrgK2J+wiQnIb/Wzy0UsVmvfn8A+udRbBo+csM8xrSnlnlJnjkJS3qiM5g+eTwsLIV1IdKPEwmwB+VcP53Cw6lSyWyJcvhFb0N6s08NZysLzvj0N+ZC/FnhKTLzIyMtkHf/IrPCwlM+pV/M/96YgAAAIEAqQcGn9CKgzgPaguIZooTAOQdvBLMI5y0bQjOW6734XOpqQGf/Kra90wpoasLKZjSYKNPjE+FRUOrStLrxcNs4BeVKhy2PYTRnybfYVk1/dmKgH6P1YSRONsGKvTsH6c5IyCRG0ncCgYeF8tXppyd642982daopE7zQ/NPAnJfag=",
},
},
{
keyString: `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC4cn+iXnA4KvcQYSV88vGn0Yi91vG47t1P7okprVmhNTkipNRIHWr6WdCO4VDr/cvsRkuVJAsLO2enwjGWWueOO6BodiBgyAOZ/5t5nJNMCNuLGT5UIo/RI1b0WRQwxEZTRjt6mFNw6lH14wRd8ulsr9toSWBPMOGWoYs1PDeDL0JuTjL+tr1SZi/EyxCngpYszKdXllJEHyI79KQgeD0Vt3pTrkbNVTOEcCNqZePSVmUH8X8Vhugz3bnE0/iE9Pb5fkWO9c4AnM1FgI/8Bvp27Fw2ShryIXuR6kKvUqhVMTuOSDHwu6A8jLE5Owt3GAYugDpDYuwTVNGrHLXKpPzrGGPE/jPmaLCMZcsdkec95dYeU3zKODEm8UQZFhmJmDeWVJ36nGrGZHL4J5aTTaeFUJmmXDaJYiJ+K2/ioKgXqnXvltu0A9R8/LGy4nrTJRr4JMLuJFoUXvGm1gXQ70w2LSpk6yl71RNC0hCtsBe8BP8IhYCM0EP5jh7eCMQZNvM= nocomment
# comment asmdna,ndp
ssh-dss AAAAB3NzaC1kc3MAAACBAOChCC7lf6Uo9n7BmZ6M8St19PZf4Tn59NriyboW2x/DZuYAz3ibZ2OkQ3S0SqDIa0HXSEJ1zaExQdmbO+Ux/wsytWZmCczWOVsaszBZSl90q8UnWlSH6P+/YA+RWJm5SFtuV9PtGIhyZgoNuz5kBQ7K139wuQsecdKktISwTakzAAAAFQCzKsO2JhNKlL+wwwLGOcLffoAmkwAAAIBpK7/3xvduajLBD/9vASqBQIHrgK2J+wiQnIb/Wzy0UsVmvfn8A+udRbBo+csM8xrSnlnlJnjkJS3qiM5g+eTwsLIV1IdKPEwmwB+VcP53Cw6lSyWyJcvhFb0N6s08NZysLzvj0N+ZC/FnhKTLzIyMtkHf/IrPCwlM+pV/M/96YgAAAIEAqQcGn9CKgzgPaguIZooTAOQdvBLMI5y0bQjOW6734XOpqQGf/Kra90wpoasLKZjSYKNPjE+FRUOrStLrxcNs4BeVKhy2PYTRnybfYVk1/dmKgH6P1YSRONsGKvTsH6c5IyCRG0ncCgYeF8tXppyd642982daopE7zQ/NPAnJfag= nocomment`,
number: 2,
keyContents: []string{
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC4cn+iXnA4KvcQYSV88vGn0Yi91vG47t1P7okprVmhNTkipNRIHWr6WdCO4VDr/cvsRkuVJAsLO2enwjGWWueOO6BodiBgyAOZ/5t5nJNMCNuLGT5UIo/RI1b0WRQwxEZTRjt6mFNw6lH14wRd8ulsr9toSWBPMOGWoYs1PDeDL0JuTjL+tr1SZi/EyxCngpYszKdXllJEHyI79KQgeD0Vt3pTrkbNVTOEcCNqZePSVmUH8X8Vhugz3bnE0/iE9Pb5fkWO9c4AnM1FgI/8Bvp27Fw2ShryIXuR6kKvUqhVMTuOSDHwu6A8jLE5Owt3GAYugDpDYuwTVNGrHLXKpPzrGGPE/jPmaLCMZcsdkec95dYeU3zKODEm8UQZFhmJmDeWVJ36nGrGZHL4J5aTTaeFUJmmXDaJYiJ+K2/ioKgXqnXvltu0A9R8/LGy4nrTJRr4JMLuJFoUXvGm1gXQ70w2LSpk6yl71RNC0hCtsBe8BP8IhYCM0EP5jh7eCMQZNvM=",
"ssh-dss AAAAB3NzaC1kc3MAAACBAOChCC7lf6Uo9n7BmZ6M8St19PZf4Tn59NriyboW2x/DZuYAz3ibZ2OkQ3S0SqDIa0HXSEJ1zaExQdmbO+Ux/wsytWZmCczWOVsaszBZSl90q8UnWlSH6P+/YA+RWJm5SFtuV9PtGIhyZgoNuz5kBQ7K139wuQsecdKktISwTakzAAAAFQCzKsO2JhNKlL+wwwLGOcLffoAmkwAAAIBpK7/3xvduajLBD/9vASqBQIHrgK2J+wiQnIb/Wzy0UsVmvfn8A+udRbBo+csM8xrSnlnlJnjkJS3qiM5g+eTwsLIV1IdKPEwmwB+VcP53Cw6lSyWyJcvhFb0N6s08NZysLzvj0N+ZC/FnhKTLzIyMtkHf/IrPCwlM+pV/M/96YgAAAIEAqQcGn9CKgzgPaguIZooTAOQdvBLMI5y0bQjOW6734XOpqQGf/Kra90wpoasLKZjSYKNPjE+FRUOrStLrxcNs4BeVKhy2PYTRnybfYVk1/dmKgH6P1YSRONsGKvTsH6c5IyCRG0ncCgYeF8tXppyd642982daopE7zQ/NPAnJfag=",
},
},
{
keyString: `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC4cn+iXnA4KvcQYSV88vGn0Yi91vG47t1P7okprVmhNTkipNRIHWr6WdCO4VDr/cvsRkuVJAsLO2enwjGWWueOO6BodiBgyAOZ/5t5nJNMCNuLGT5UIo/RI1b0WRQwxEZTRjt6mFNw6lH14wRd8ulsr9toSWBPMOGWoYs1PDeDL0JuTjL+tr1SZi/EyxCngpYszKdXllJEHyI79KQgeD0Vt3pTrkbNVTOEcCNqZePSVmUH8X8Vhugz3bnE0/iE9Pb5fkWO9c4AnM1FgI/8Bvp27Fw2ShryIXuR6kKvUqhVMTuOSDHwu6A8jLE5Owt3GAYugDpDYuwTVNGrHLXKpPzrGGPE/jPmaLCMZcsdkec95dYeU3zKODEm8UQZFhmJmDeWVJ36nGrGZHL4J5aTTaeFUJmmXDaJYiJ+K2/ioKgXqnXvltu0A9R8/LGy4nrTJRr4JMLuJFoUXvGm1gXQ70w2LSpk6yl71RNC0hCtsBe8BP8IhYCM0EP5jh7eCMQZNvM= nocomment
382488320jasdj1lasmva/vasodifipi4193-fksma.cm
ssh-dss AAAAB3NzaC1kc3MAAACBAOChCC7lf6Uo9n7BmZ6M8St19PZf4Tn59NriyboW2x/DZuYAz3ibZ2OkQ3S0SqDIa0HXSEJ1zaExQdmbO+Ux/wsytWZmCczWOVsaszBZSl90q8UnWlSH6P+/YA+RWJm5SFtuV9PtGIhyZgoNuz5kBQ7K139wuQsecdKktISwTakzAAAAFQCzKsO2JhNKlL+wwwLGOcLffoAmkwAAAIBpK7/3xvduajLBD/9vASqBQIHrgK2J+wiQnIb/Wzy0UsVmvfn8A+udRbBo+csM8xrSnlnlJnjkJS3qiM5g+eTwsLIV1IdKPEwmwB+VcP53Cw6lSyWyJcvhFb0N6s08NZysLzvj0N+ZC/FnhKTLzIyMtkHf/IrPCwlM+pV/M/96YgAAAIEAqQcGn9CKgzgPaguIZooTAOQdvBLMI5y0bQjOW6734XOpqQGf/Kra90wpoasLKZjSYKNPjE+FRUOrStLrxcNs4BeVKhy2PYTRnybfYVk1/dmKgH6P1YSRONsGKvTsH6c5IyCRG0ncCgYeF8tXppyd642982daopE7zQ/NPAnJfag= nocomment`,
number: 2,
keyContents: []string{
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC4cn+iXnA4KvcQYSV88vGn0Yi91vG47t1P7okprVmhNTkipNRIHWr6WdCO4VDr/cvsRkuVJAsLO2enwjGWWueOO6BodiBgyAOZ/5t5nJNMCNuLGT5UIo/RI1b0WRQwxEZTRjt6mFNw6lH14wRd8ulsr9toSWBPMOGWoYs1PDeDL0JuTjL+tr1SZi/EyxCngpYszKdXllJEHyI79KQgeD0Vt3pTrkbNVTOEcCNqZePSVmUH8X8Vhugz3bnE0/iE9Pb5fkWO9c4AnM1FgI/8Bvp27Fw2ShryIXuR6kKvUqhVMTuOSDHwu6A8jLE5Owt3GAYugDpDYuwTVNGrHLXKpPzrGGPE/jPmaLCMZcsdkec95dYeU3zKODEm8UQZFhmJmDeWVJ36nGrGZHL4J5aTTaeFUJmmXDaJYiJ+K2/ioKgXqnXvltu0A9R8/LGy4nrTJRr4JMLuJFoUXvGm1gXQ70w2LSpk6yl71RNC0hCtsBe8BP8IhYCM0EP5jh7eCMQZNvM=",
"ssh-dss AAAAB3NzaC1kc3MAAACBAOChCC7lf6Uo9n7BmZ6M8St19PZf4Tn59NriyboW2x/DZuYAz3ibZ2OkQ3S0SqDIa0HXSEJ1zaExQdmbO+Ux/wsytWZmCczWOVsaszBZSl90q8UnWlSH6P+/YA+RWJm5SFtuV9PtGIhyZgoNuz5kBQ7K139wuQsecdKktISwTakzAAAAFQCzKsO2JhNKlL+wwwLGOcLffoAmkwAAAIBpK7/3xvduajLBD/9vASqBQIHrgK2J+wiQnIb/Wzy0UsVmvfn8A+udRbBo+csM8xrSnlnlJnjkJS3qiM5g+eTwsLIV1IdKPEwmwB+VcP53Cw6lSyWyJcvhFb0N6s08NZysLzvj0N+ZC/FnhKTLzIyMtkHf/IrPCwlM+pV/M/96YgAAAIEAqQcGn9CKgzgPaguIZooTAOQdvBLMI5y0bQjOW6734XOpqQGf/Kra90wpoasLKZjSYKNPjE+FRUOrStLrxcNs4BeVKhy2PYTRnybfYVk1/dmKgH6P1YSRONsGKvTsH6c5IyCRG0ncCgYeF8tXppyd642982daopE7zQ/NPAnJfag=",
},
},
}
for i, kase := range testCases {
s.ID = int64(i) + 20
asymkey_model.AddPublicKeysBySource(user, s, []string{kase.keyString})
keys, err := asymkey_model.ListPublicKeysBySource(user.ID, s.ID)
assert.NoError(t, err)
if err != nil {
continue
}
assert.Len(t, keys, kase.number)
for _, key := range keys {
assert.Contains(t, kase.keyContents, key.Content)
}
for _, key := range keys {
DeletePublicKey(user, key.ID)
}
}
}
| services/asymkey/ssh_key_test.go | 0 | https://github.com/go-gitea/gitea/commit/fc4680ea712fdf89065db882cd9d67946d004500 | [
0.000452404870884493,
0.00028686533914878964,
0.00016770820366218686,
0.0003080953611060977,
0.00010726073378464207
] |
{
"id": 1,
"code_window": [
"\t\tWhere(\"next_update_unix<=?\", time.Now().Unix()).\n",
"\t\tAnd(\"next_update_unix!=0\").\n",
"\t\tOrderBy(\"updated_unix ASC\").\n",
"\t\tLimit(limit).\n",
"\t\tIterate(new(Mirror), f)\n",
"}\n",
"\n",
"// InsertMirror inserts a mirror to database\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tOrderBy(\"updated_unix ASC\")\n",
"\tif limit > 0 {\n",
"\t\tsess = sess.Limit(limit)\n",
"\t}\n",
"\treturn sess.Iterate(new(Mirror), f)\n"
],
"file_path": "models/repo/mirror.go",
"type": "replace",
"edit_start_line_idx": 112
} | // Copyright 2016 The Gogs Authors. All rights reserved.
// Copyright 2018 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repo
import (
"context"
"errors"
"time"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/timeutil"
)
// ErrMirrorNotExist mirror does not exist error
var ErrMirrorNotExist = errors.New("Mirror does not exist")
// Mirror represents mirror information of a repository.
type Mirror struct {
ID int64 `xorm:"pk autoincr"`
RepoID int64 `xorm:"INDEX"`
Repo *Repository `xorm:"-"`
Interval time.Duration
EnablePrune bool `xorm:"NOT NULL DEFAULT true"`
UpdatedUnix timeutil.TimeStamp `xorm:"INDEX"`
NextUpdateUnix timeutil.TimeStamp `xorm:"INDEX"`
LFS bool `xorm:"lfs_enabled NOT NULL DEFAULT false"`
LFSEndpoint string `xorm:"lfs_endpoint TEXT"`
Address string `xorm:"-"`
}
func init() {
db.RegisterModel(new(Mirror))
}
// BeforeInsert will be invoked by XORM before inserting a record
func (m *Mirror) BeforeInsert() {
if m != nil {
m.UpdatedUnix = timeutil.TimeStampNow()
m.NextUpdateUnix = timeutil.TimeStampNow()
}
}
// GetRepository returns the repository.
func (m *Mirror) GetRepository() *Repository {
if m.Repo != nil {
return m.Repo
}
var err error
m.Repo, err = GetRepositoryByIDCtx(db.DefaultContext, m.RepoID)
if err != nil {
log.Error("getRepositoryByID[%d]: %v", m.ID, err)
}
return m.Repo
}
// GetRemoteName returns the name of the remote.
func (m *Mirror) GetRemoteName() string {
return "origin"
}
// ScheduleNextUpdate calculates and sets next update time.
func (m *Mirror) ScheduleNextUpdate() {
if m.Interval != 0 {
m.NextUpdateUnix = timeutil.TimeStampNow().AddDuration(m.Interval)
} else {
m.NextUpdateUnix = 0
}
}
// GetMirrorByRepoID returns mirror information of a repository.
func GetMirrorByRepoID(ctx context.Context, repoID int64) (*Mirror, error) {
m := &Mirror{RepoID: repoID}
has, err := db.GetEngine(ctx).Get(m)
if err != nil {
return nil, err
} else if !has {
return nil, ErrMirrorNotExist
}
return m, nil
}
// UpdateMirror updates the mirror
func UpdateMirror(ctx context.Context, m *Mirror) error {
_, err := db.GetEngine(ctx).ID(m.ID).AllCols().Update(m)
return err
}
// TouchMirror updates the mirror updatedUnix
func TouchMirror(ctx context.Context, m *Mirror) error {
m.UpdatedUnix = timeutil.TimeStampNow()
_, err := db.GetEngine(ctx).ID(m.ID).Cols("updated_unix").Update(m)
return err
}
// DeleteMirrorByRepoID deletes a mirror by repoID
func DeleteMirrorByRepoID(repoID int64) error {
_, err := db.GetEngine(db.DefaultContext).Delete(&Mirror{RepoID: repoID})
return err
}
// MirrorsIterate iterates all mirror repositories.
func MirrorsIterate(limit int, f func(idx int, bean interface{}) error) error {
return db.GetEngine(db.DefaultContext).
Where("next_update_unix<=?", time.Now().Unix()).
And("next_update_unix!=0").
OrderBy("updated_unix ASC").
Limit(limit).
Iterate(new(Mirror), f)
}
// InsertMirror inserts a mirror to database
func InsertMirror(ctx context.Context, mirror *Mirror) error {
_, err := db.GetEngine(ctx).Insert(mirror)
return err
}
| models/repo/mirror.go | 1 | https://github.com/go-gitea/gitea/commit/fc4680ea712fdf89065db882cd9d67946d004500 | [
0.9903140664100647,
0.07901991158723831,
0.0001741813903208822,
0.0018469837959855795,
0.2630833089351654
] |
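Editor's note: the hunks with id 0 and id 1 above both target MirrorsIterate in models/repo/mirror.go. Stitching their `keep` lines together with their `after_edit` lines gives one plausible post-edit version of the function, sketched below; it is reconstructed from the fragments in these records rather than copied from the upstream commit, so surrounding context may differ slightly.

// MirrorsIterate iterates all mirror repositories.
func MirrorsIterate(limit int, f func(idx int, bean interface{}) error) error {
	sess := db.GetEngine(db.DefaultContext).
		Where("next_update_unix<=?", time.Now().Unix()).
		And("next_update_unix!=0").
		OrderBy("updated_unix ASC")
	// Apply the limit only when it is positive, so a non-positive limit iterates everything that is due.
	if limit > 0 {
		sess = sess.Limit(limit)
	}
	return sess.Iterate(new(Mirror), f)
}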
{
"id": 1,
"code_window": [
"\t\tWhere(\"next_update_unix<=?\", time.Now().Unix()).\n",
"\t\tAnd(\"next_update_unix!=0\").\n",
"\t\tOrderBy(\"updated_unix ASC\").\n",
"\t\tLimit(limit).\n",
"\t\tIterate(new(Mirror), f)\n",
"}\n",
"\n",
"// InsertMirror inserts a mirror to database\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tOrderBy(\"updated_unix ASC\")\n",
"\tif limit > 0 {\n",
"\t\tsess = sess.Limit(limit)\n",
"\t}\n",
"\treturn sess.Iterate(new(Mirror), f)\n"
],
"file_path": "models/repo/mirror.go",
"type": "replace",
"edit_start_line_idx": 112
} | {{if eq .PackageDescriptor.Package.Type "composer"}}
{{range .PackageDescriptor.Metadata.Authors}}<div class="item" title="{{$.locale.Tr "packages.details.author"}}">{{svg "octicon-person" 16 "mr-3"}} {{.Name}}</div>{{end}}
{{if .PackageDescriptor.Metadata.Homepage}}<div class="item">{{svg "octicon-link-external" 16 "mr-3"}} <a href="{{.PackageDescriptor.Metadata.Homepage}}" target="_blank" rel="noopener noreferrer me">{{.locale.Tr "packages.details.project_site"}}</a></div>{{end}}
{{range .PackageDescriptor.Metadata.License}}<div class="item" title="{{$.locale.Tr "packages.details.license"}}">{{svg "octicon-law" 16 "mr-3"}} {{.}}</div>{{end}}
{{end}}
| templates/package/metadata/composer.tmpl | 0 | https://github.com/go-gitea/gitea/commit/fc4680ea712fdf89065db882cd9d67946d004500 | [
0.00017375922470819205,
0.00017375922470819205,
0.00017375922470819205,
0.00017375922470819205,
0
] |
{
"id": 1,
"code_window": [
"\t\tWhere(\"next_update_unix<=?\", time.Now().Unix()).\n",
"\t\tAnd(\"next_update_unix!=0\").\n",
"\t\tOrderBy(\"updated_unix ASC\").\n",
"\t\tLimit(limit).\n",
"\t\tIterate(new(Mirror), f)\n",
"}\n",
"\n",
"// InsertMirror inserts a mirror to database\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tOrderBy(\"updated_unix ASC\")\n",
"\tif limit > 0 {\n",
"\t\tsess = sess.Limit(limit)\n",
"\t}\n",
"\treturn sess.Iterate(new(Mirror), f)\n"
],
"file_path": "models/repo/mirror.go",
"type": "replace",
"edit_start_line_idx": 112
} | Additional permission under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or combining it with [name of library] (or a modified version of that library), containing parts covered by the terms of [name of library's license], the licensors of this Program grant you additional permission to convey the resulting work. Corresponding Source for a non-source form of such a combination shall include the source code for the parts of [name of library] used as well as that of the covered work.
| options/license/GPL-3.0-linking-source-exception | 0 | https://github.com/go-gitea/gitea/commit/fc4680ea712fdf89065db882cd9d67946d004500 | [
0.0001743515458656475,
0.0001743515458656475,
0.0001743515458656475,
0.0001743515458656475,
0
] |
{
"id": 1,
"code_window": [
"\t\tWhere(\"next_update_unix<=?\", time.Now().Unix()).\n",
"\t\tAnd(\"next_update_unix!=0\").\n",
"\t\tOrderBy(\"updated_unix ASC\").\n",
"\t\tLimit(limit).\n",
"\t\tIterate(new(Mirror), f)\n",
"}\n",
"\n",
"// InsertMirror inserts a mirror to database\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tOrderBy(\"updated_unix ASC\")\n",
"\tif limit > 0 {\n",
"\t\tsess = sess.Limit(limit)\n",
"\t}\n",
"\treturn sess.Iterate(new(Mirror), f)\n"
],
"file_path": "models/repo/mirror.go",
"type": "replace",
"edit_start_line_idx": 112
} | // Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package util
import (
"sort"
"strings"
)
// Int64Slice attaches the methods of Interface to []int64, sorting in increasing order.
type Int64Slice []int64
func (p Int64Slice) Len() int { return len(p) }
func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// IsSliceInt64Eq returns if the two slice has the same elements but different sequences.
func IsSliceInt64Eq(a, b []int64) bool {
if len(a) != len(b) {
return false
}
sort.Sort(Int64Slice(a))
sort.Sort(Int64Slice(b))
for i := 0; i < len(a); i++ {
if a[i] != b[i] {
return false
}
}
return true
}
// ExistsInSlice returns true if string exists in slice.
func ExistsInSlice(target string, slice []string) bool {
i := sort.Search(len(slice),
func(i int) bool { return slice[i] == target })
return i < len(slice)
}
// IsStringInSlice sequential searches if string exists in slice.
func IsStringInSlice(target string, slice []string, insensitive ...bool) bool {
caseInsensitive := false
if len(insensitive) != 0 && insensitive[0] {
caseInsensitive = true
target = strings.ToLower(target)
}
for i := 0; i < len(slice); i++ {
if caseInsensitive {
if strings.ToLower(slice[i]) == target {
return true
}
} else {
if slice[i] == target {
return true
}
}
}
return false
}
// IsInt64InSlice sequential searches if int64 exists in slice.
func IsInt64InSlice(target int64, slice []int64) bool {
for i := 0; i < len(slice); i++ {
if slice[i] == target {
return true
}
}
return false
}
// IsEqualSlice returns true if slices are equal.
func IsEqualSlice(target, source []string) bool {
if len(target) != len(source) {
return false
}
if (target == nil) != (source == nil) {
return false
}
sort.Strings(target)
sort.Strings(source)
for i, v := range target {
if v != source[i] {
return false
}
}
return true
}
| modules/util/compare.go | 0 | https://github.com/go-gitea/gitea/commit/fc4680ea712fdf89065db882cd9d67946d004500 | [
0.0001797489239834249,
0.00017411554290447384,
0.0001680490095168352,
0.00017528588068671525,
0.0000040520662878407165
] |
{
"id": 2,
"code_window": [
"\t\tFind(&mirrors)\n",
"}\n",
"\n",
"// PushMirrorsIterate iterates all push-mirror repositories.\n",
"func PushMirrorsIterate(ctx context.Context, limit int, f func(idx int, bean interface{}) error) error {\n",
"\treturn db.GetEngine(ctx).\n",
"\t\tWhere(\"last_update + (`interval` / ?) <= ?\", time.Second, time.Now().Unix()).\n",
"\t\tAnd(\"`interval` != 0\").\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tsess := db.GetEngine(ctx).\n"
],
"file_path": "models/repo/pushmirror.go",
"type": "replace",
"edit_start_line_idx": 131
} | // Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repo
import (
"context"
"errors"
"time"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/timeutil"
"xorm.io/builder"
)
// ErrPushMirrorNotExist mirror does not exist error
var ErrPushMirrorNotExist = errors.New("PushMirror does not exist")
// PushMirror represents mirror information of a repository.
type PushMirror struct {
ID int64 `xorm:"pk autoincr"`
RepoID int64 `xorm:"INDEX"`
Repo *Repository `xorm:"-"`
RemoteName string
SyncOnCommit bool `xorm:"NOT NULL DEFAULT true"`
Interval time.Duration
CreatedUnix timeutil.TimeStamp `xorm:"created"`
LastUpdateUnix timeutil.TimeStamp `xorm:"INDEX last_update"`
LastError string `xorm:"text"`
}
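// PushMirrorOptions holds the options used to select push-mirrors by ID, repository or remote name.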
type PushMirrorOptions struct {
ID int64
RepoID int64
RemoteName string
}
func (opts *PushMirrorOptions) toConds() builder.Cond {
cond := builder.NewCond()
if opts.RepoID > 0 {
cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
}
if opts.RemoteName != "" {
cond = cond.And(builder.Eq{"remote_name": opts.RemoteName})
}
if opts.ID > 0 {
cond = cond.And(builder.Eq{"id": opts.ID})
}
return cond
}
func init() {
db.RegisterModel(new(PushMirror))
}
// GetRepository returns the repository.
func (m *PushMirror) GetRepository() *Repository {
if m.Repo != nil {
return m.Repo
}
var err error
m.Repo, err = GetRepositoryByIDCtx(db.DefaultContext, m.RepoID)
if err != nil {
log.Error("getRepositoryByID[%d]: %v", m.ID, err)
}
return m.Repo
}
// GetRemoteName returns the name of the remote.
func (m *PushMirror) GetRemoteName() string {
return m.RemoteName
}
// InsertPushMirror inserts a push-mirror to database
func InsertPushMirror(ctx context.Context, m *PushMirror) error {
_, err := db.GetEngine(ctx).Insert(m)
return err
}
// UpdatePushMirror updates the push-mirror
func UpdatePushMirror(ctx context.Context, m *PushMirror) error {
_, err := db.GetEngine(ctx).ID(m.ID).AllCols().Update(m)
return err
}
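// DeletePushMirrors deletes the push-mirrors of a repository that match the given options; opts.RepoID must be set.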
func DeletePushMirrors(ctx context.Context, opts PushMirrorOptions) error {
if opts.RepoID > 0 {
_, err := db.GetEngine(ctx).Where(opts.toConds()).Delete(&PushMirror{})
return err
}
return errors.New("repoID required and must be set")
}
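// GetPushMirror returns the push-mirror matching the given options, or ErrPushMirrorNotExist if none is found.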
func GetPushMirror(ctx context.Context, opts PushMirrorOptions) (*PushMirror, error) {
mirror := &PushMirror{}
exist, err := db.GetEngine(ctx).Where(opts.toConds()).Get(mirror)
if err != nil {
return nil, err
} else if !exist {
return nil, ErrPushMirrorNotExist
}
return mirror, nil
}
// GetPushMirrorsByRepoID returns push-mirror information of a repository.
func GetPushMirrorsByRepoID(ctx context.Context, repoID int64, listOptions db.ListOptions) ([]*PushMirror, int64, error) {
sess := db.GetEngine(ctx).Where("repo_id = ?", repoID)
if listOptions.Page != 0 {
sess = db.SetSessionPagination(sess, &listOptions)
mirrors := make([]*PushMirror, 0, listOptions.PageSize)
count, err := sess.FindAndCount(&mirrors)
return mirrors, count, err
}
mirrors := make([]*PushMirror, 0, 10)
count, err := sess.FindAndCount(&mirrors)
return mirrors, count, err
}
// GetPushMirrorsSyncedOnCommit returns push-mirrors for this repo that should be updated by new commits
func GetPushMirrorsSyncedOnCommit(repoID int64) ([]*PushMirror, error) {
mirrors := make([]*PushMirror, 0, 10)
return mirrors, db.GetEngine(db.DefaultContext).
Where("repo_id=? AND sync_on_commit=?", repoID, true).
Find(&mirrors)
}
// PushMirrorsIterate iterates all push-mirror repositories.
func PushMirrorsIterate(ctx context.Context, limit int, f func(idx int, bean interface{}) error) error {
return db.GetEngine(ctx).
Where("last_update + (`interval` / ?) <= ?", time.Second, time.Now().Unix()).
And("`interval` != 0").
OrderBy("last_update ASC").
Limit(limit).
Iterate(new(PushMirror), f)
}
| models/repo/pushmirror.go | 1 | https://github.com/go-gitea/gitea/commit/fc4680ea712fdf89065db882cd9d67946d004500 | [
0.9814366698265076,
0.1248847022652626,
0.00017015202320180833,
0.013151504099369049,
0.26551422476768494
] |
{
"id": 2,
"code_window": [
"\t\tFind(&mirrors)\n",
"}\n",
"\n",
"// PushMirrorsIterate iterates all push-mirror repositories.\n",
"func PushMirrorsIterate(ctx context.Context, limit int, f func(idx int, bean interface{}) error) error {\n",
"\treturn db.GetEngine(ctx).\n",
"\t\tWhere(\"last_update + (`interval` / ?) <= ?\", time.Second, time.Now().Unix()).\n",
"\t\tAnd(\"`interval` != 0\").\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tsess := db.GetEngine(ctx).\n"
],
"file_path": "models/repo/pushmirror.go",
"type": "replace",
"edit_start_line_idx": 131
} | // Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package migrations
import (
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/util"
"xorm.io/builder"
"xorm.io/xorm"
)
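// deleteMigrationCredentials strips stored credentials from the payloads of finished repository-migration tasks.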
func deleteMigrationCredentials(x *xorm.Engine) (err error) {
// Task represents a task
type Task struct {
ID int64
DoerID int64 `xorm:"index"` // operator
OwnerID int64 `xorm:"index"` // repo owner id, when creating, the repoID maybe zero
RepoID int64 `xorm:"index"`
Type int
Status int `xorm:"index"`
StartTime int64
EndTime int64
PayloadContent string `xorm:"TEXT"`
Errors string `xorm:"TEXT"` // if task failed, saved the error reason
Created int64 `xorm:"created"`
}
const TaskTypeMigrateRepo = 0
const TaskStatusStopped = 2
const batchSize = 100
// only match migration tasks, that are not pending or running
cond := builder.Eq{
"type": TaskTypeMigrateRepo,
}.And(builder.Gte{
"status": TaskStatusStopped,
})
sess := x.NewSession()
defer sess.Close()
for start := 0; ; start += batchSize {
tasks := make([]*Task, 0, batchSize)
if err = sess.Limit(batchSize, start).Where(cond, 0).Find(&tasks); err != nil {
return
}
if len(tasks) == 0 {
break
}
if err = sess.Begin(); err != nil {
return
}
for _, t := range tasks {
if t.PayloadContent, err = removeCredentials(t.PayloadContent); err != nil {
return
}
if _, err = sess.ID(t.ID).Cols("payload_content").Update(t); err != nil {
return
}
}
if err = sess.Commit(); err != nil {
return
}
}
return err
}
func removeCredentials(payload string) (string, error) {
// MigrateOptions defines the way a repository gets migrated
// this is for internal usage by migrations module and func who interact with it
type MigrateOptions struct {
// required: true
CloneAddr string `json:"clone_addr" binding:"Required"`
CloneAddrEncrypted string `json:"clone_addr_encrypted,omitempty"`
AuthUsername string `json:"auth_username"`
AuthPassword string `json:"-"`
AuthPasswordEncrypted string `json:"auth_password_encrypted,omitempty"`
AuthToken string `json:"-"`
AuthTokenEncrypted string `json:"auth_token_encrypted,omitempty"`
// required: true
UID int `json:"uid" binding:"Required"`
// required: true
RepoName string `json:"repo_name" binding:"Required"`
Mirror bool `json:"mirror"`
LFS bool `json:"lfs"`
LFSEndpoint string `json:"lfs_endpoint"`
Private bool `json:"private"`
Description string `json:"description"`
OriginalURL string
GitServiceType int
Wiki bool
Issues bool
Milestones bool
Labels bool
Releases bool
Comments bool
PullRequests bool
ReleaseAssets bool
MigrateToRepoID int64
MirrorInterval string `json:"mirror_interval"`
}
var opts MigrateOptions
err := json.Unmarshal([]byte(payload), &opts)
if err != nil {
return "", err
}
opts.AuthPassword = ""
opts.AuthToken = ""
opts.CloneAddr = util.SanitizeCredentialURLs(opts.CloneAddr)
confBytes, err := json.Marshal(opts)
if err != nil {
return "", err
}
return string(confBytes), nil
}
| models/migrations/v180.go | 0 | https://github.com/go-gitea/gitea/commit/fc4680ea712fdf89065db882cd9d67946d004500 | [
0.0022223894484341145,
0.0003860941797029227,
0.00016488721303176135,
0.00017138269322458655,
0.0005612423992715776
] |
{
"id": 2,
"code_window": [
"\t\tFind(&mirrors)\n",
"}\n",
"\n",
"// PushMirrorsIterate iterates all push-mirror repositories.\n",
"func PushMirrorsIterate(ctx context.Context, limit int, f func(idx int, bean interface{}) error) error {\n",
"\treturn db.GetEngine(ctx).\n",
"\t\tWhere(\"last_update + (`interval` / ?) <= ?\", time.Second, time.Now().Unix()).\n",
"\t\tAnd(\"`interval` != 0\").\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tsess := db.GetEngine(ctx).\n"
],
"file_path": "models/repo/pushmirror.go",
"type": "replace",
"edit_start_line_idx": 131
} | // Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package integrations
import (
"context"
"fmt"
"os"
"runtime"
"strings"
"sync"
"testing"
"time"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/queue"
)
var (
prefix string
slowTest = 10 * time.Second
slowFlush = 5 * time.Second
)
// TestLogger is a logger which will write to the testing log
type TestLogger struct {
log.WriterLogger
}
var writerCloser = &testLoggerWriterCloser{}
type testLoggerWriterCloser struct {
sync.RWMutex
t []*testing.TB
}
func (w *testLoggerWriterCloser) setT(t *testing.TB) {
w.Lock()
w.t = append(w.t, t)
w.Unlock()
}
func (w *testLoggerWriterCloser) Write(p []byte) (int, error) {
w.RLock()
var t *testing.TB
if len(w.t) > 0 {
t = w.t[len(w.t)-1]
}
w.RUnlock()
if t != nil && *t != nil {
if len(p) > 0 && p[len(p)-1] == '\n' {
p = p[:len(p)-1]
}
defer func() {
err := recover()
if err == nil {
return
}
var errString string
errErr, ok := err.(error)
if ok {
errString = errErr.Error()
} else {
errString, ok = err.(string)
}
if !ok {
panic(err)
}
if !strings.HasPrefix(errString, "Log in goroutine after ") {
panic(err)
}
}()
(*t).Log(string(p))
return len(p), nil
}
return len(p), nil
}
func (w *testLoggerWriterCloser) Close() error {
w.Lock()
if len(w.t) > 0 {
w.t = w.t[:len(w.t)-1]
}
w.Unlock()
return nil
}
func (w *testLoggerWriterCloser) Reset() {
w.Lock()
if len(w.t) > 0 {
for _, t := range w.t {
if t == nil {
continue
}
fmt.Fprintf(os.Stdout, "Unclosed logger writer in test: %s", (*t).Name())
(*t).Errorf("Unclosed logger writer in test: %s", (*t).Name())
}
w.t = nil
}
w.Unlock()
}
// PrintCurrentTest prints the current test to os.Stdout
func PrintCurrentTest(t testing.TB, skip ...int) func() {
start := time.Now()
actualSkip := 1
if len(skip) > 0 {
actualSkip = skip[0]
}
_, filename, line, _ := runtime.Caller(actualSkip)
if log.CanColorStdout {
fmt.Fprintf(os.Stdout, "=== %s (%s:%d)\n", fmt.Formatter(log.NewColoredValue(t.Name())), strings.TrimPrefix(filename, prefix), line)
} else {
fmt.Fprintf(os.Stdout, "=== %s (%s:%d)\n", t.Name(), strings.TrimPrefix(filename, prefix), line)
}
writerCloser.setT(&t)
return func() {
took := time.Since(start)
if took > slowTest {
if log.CanColorStdout {
fmt.Fprintf(os.Stdout, "+++ %s is a slow test (took %v)\n", fmt.Formatter(log.NewColoredValue(t.Name(), log.Bold, log.FgYellow)), fmt.Formatter(log.NewColoredValue(took, log.Bold, log.FgYellow)))
} else {
fmt.Fprintf(os.Stdout, "+++ %s is a slow test (took %v)\n", t.Name(), took)
}
}
timer := time.AfterFunc(slowFlush, func() {
if log.CanColorStdout {
fmt.Fprintf(os.Stdout, "+++ %s ... still flushing after %v ...\n", fmt.Formatter(log.NewColoredValue(t.Name(), log.Bold, log.FgRed)), slowFlush)
} else {
fmt.Fprintf(os.Stdout, "+++ %s ... still flushing after %v ...\n", t.Name(), slowFlush)
}
})
if err := queue.GetManager().FlushAll(context.Background(), 2*time.Minute); err != nil {
t.Errorf("Flushing queues failed with error %v", err)
}
timer.Stop()
flushTook := time.Since(start) - took
if flushTook > slowFlush {
if log.CanColorStdout {
fmt.Fprintf(os.Stdout, "+++ %s had a slow clean-up flush (took %v)\n", fmt.Formatter(log.NewColoredValue(t.Name(), log.Bold, log.FgRed)), fmt.Formatter(log.NewColoredValue(flushTook, log.Bold, log.FgRed)))
} else {
fmt.Fprintf(os.Stdout, "+++ %s had a slow clean-up flush (took %v)\n", t.Name(), flushTook)
}
}
_ = writerCloser.Close()
}
}
// Printf takes a format and args and prints the string to os.Stdout
func Printf(format string, args ...interface{}) {
if log.CanColorStdout {
for i := 0; i < len(args); i++ {
args[i] = log.NewColoredValue(args[i])
}
}
fmt.Fprintf(os.Stdout, "\t"+format, args...)
}
// NewTestLogger creates a TestLogger as a log.LoggerProvider
func NewTestLogger() log.LoggerProvider {
logger := &TestLogger{}
logger.Colorize = log.CanColorStdout
logger.Level = log.TRACE
return logger
}
// Init inits connection writer with json config.
// json config only need key "level".
func (log *TestLogger) Init(config string) error {
err := json.Unmarshal([]byte(config), log)
if err != nil {
return err
}
log.NewWriterLogger(writerCloser)
return nil
}
// Content returns the content accumulated in the content provider
func (log *TestLogger) Content() (string, error) {
return "", fmt.Errorf("not supported")
}
// Flush when log should be flushed
func (log *TestLogger) Flush() {
}
// ReleaseReopen does nothing
func (log *TestLogger) ReleaseReopen() error {
return nil
}
// GetName returns the default name for this implementation
func (log *TestLogger) GetName() string {
return "test"
}
func init() {
log.Register("test", NewTestLogger)
_, filename, _, _ := runtime.Caller(0)
prefix = strings.TrimSuffix(filename, "integrations/testlogger.go")
}
| integrations/testlogger.go | 0 | https://github.com/go-gitea/gitea/commit/fc4680ea712fdf89065db882cd9d67946d004500 | [
0.0003003765596076846,
0.0001843842474045232,
0.0001646532618906349,
0.00017234610277228057,
0.00003244167237426154
] |
{
"id": 2,
"code_window": [
"\t\tFind(&mirrors)\n",
"}\n",
"\n",
"// PushMirrorsIterate iterates all push-mirror repositories.\n",
"func PushMirrorsIterate(ctx context.Context, limit int, f func(idx int, bean interface{}) error) error {\n",
"\treturn db.GetEngine(ctx).\n",
"\t\tWhere(\"last_update + (`interval` / ?) <= ?\", time.Second, time.Now().Unix()).\n",
"\t\tAnd(\"`interval` != 0\").\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tsess := db.GetEngine(ctx).\n"
],
"file_path": "models/repo/pushmirror.go",
"type": "replace",
"edit_start_line_idx": 131
} | boot.php
index.php
install.php
/config/*
!/config/config.php
/controllers/*
/init/*
/logs/*
/phproad/*
/temp/*
/uploaded/*
/installer_files/*
/modules/backend/*
/modules/blog/*
/modules/cms/*
/modules/core/*
/modules/session/*
/modules/shop/*
/modules/system/*
/modules/users/*
# add content_*.php if you don't want erase client changes to content
| options/gitignore/LemonStand | 0 | https://github.com/go-gitea/gitea/commit/fc4680ea712fdf89065db882cd9d67946d004500 | [
0.00017133807705249637,
0.00016868261445779353,
0.0001648487086640671,
0.00016986105765681714,
0.000002777231657091761
] |
{
"id": 3,
"code_window": [
"\t\tWhere(\"last_update + (`interval` / ?) <= ?\", time.Second, time.Now().Unix()).\n",
"\t\tAnd(\"`interval` != 0\").\n",
"\t\tOrderBy(\"last_update ASC\").\n",
"\t\tLimit(limit).\n",
"\t\tIterate(new(PushMirror), f)\n",
"}"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"\t\tOrderBy(\"last_update ASC\")\n",
"\tif limit > 0 {\n",
"\t\tsess = sess.Limit(limit)\n",
"\t}\n",
"\treturn sess.Iterate(new(PushMirror), f)\n"
],
"file_path": "models/repo/pushmirror.go",
"type": "replace",
"edit_start_line_idx": 134
} | // Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repo
import (
"context"
"errors"
"time"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/timeutil"
"xorm.io/builder"
)
// ErrPushMirrorNotExist mirror does not exist error
var ErrPushMirrorNotExist = errors.New("PushMirror does not exist")
// PushMirror represents mirror information of a repository.
type PushMirror struct {
ID int64 `xorm:"pk autoincr"`
RepoID int64 `xorm:"INDEX"`
Repo *Repository `xorm:"-"`
RemoteName string
SyncOnCommit bool `xorm:"NOT NULL DEFAULT true"`
Interval time.Duration
CreatedUnix timeutil.TimeStamp `xorm:"created"`
LastUpdateUnix timeutil.TimeStamp `xorm:"INDEX last_update"`
LastError string `xorm:"text"`
}
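// PushMirrorOptions holds the options used to select push-mirrors by ID, repository or remote name.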
type PushMirrorOptions struct {
ID int64
RepoID int64
RemoteName string
}
func (opts *PushMirrorOptions) toConds() builder.Cond {
cond := builder.NewCond()
if opts.RepoID > 0 {
cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
}
if opts.RemoteName != "" {
cond = cond.And(builder.Eq{"remote_name": opts.RemoteName})
}
if opts.ID > 0 {
cond = cond.And(builder.Eq{"id": opts.ID})
}
return cond
}
func init() {
db.RegisterModel(new(PushMirror))
}
// GetRepository returns the repository.
func (m *PushMirror) GetRepository() *Repository {
if m.Repo != nil {
return m.Repo
}
var err error
m.Repo, err = GetRepositoryByIDCtx(db.DefaultContext, m.RepoID)
if err != nil {
log.Error("getRepositoryByID[%d]: %v", m.ID, err)
}
return m.Repo
}
// GetRemoteName returns the name of the remote.
func (m *PushMirror) GetRemoteName() string {
return m.RemoteName
}
// InsertPushMirror inserts a push-mirror to database
func InsertPushMirror(ctx context.Context, m *PushMirror) error {
_, err := db.GetEngine(ctx).Insert(m)
return err
}
// UpdatePushMirror updates the push-mirror
func UpdatePushMirror(ctx context.Context, m *PushMirror) error {
_, err := db.GetEngine(ctx).ID(m.ID).AllCols().Update(m)
return err
}
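// DeletePushMirrors deletes the push-mirrors of a repository that match the given options; opts.RepoID must be set.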
func DeletePushMirrors(ctx context.Context, opts PushMirrorOptions) error {
if opts.RepoID > 0 {
_, err := db.GetEngine(ctx).Where(opts.toConds()).Delete(&PushMirror{})
return err
}
return errors.New("repoID required and must be set")
}
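// GetPushMirror returns the push-mirror matching the given options, or ErrPushMirrorNotExist if none is found.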
func GetPushMirror(ctx context.Context, opts PushMirrorOptions) (*PushMirror, error) {
mirror := &PushMirror{}
exist, err := db.GetEngine(ctx).Where(opts.toConds()).Get(mirror)
if err != nil {
return nil, err
} else if !exist {
return nil, ErrPushMirrorNotExist
}
return mirror, nil
}
// GetPushMirrorsByRepoID returns push-mirror information of a repository.
func GetPushMirrorsByRepoID(ctx context.Context, repoID int64, listOptions db.ListOptions) ([]*PushMirror, int64, error) {
sess := db.GetEngine(ctx).Where("repo_id = ?", repoID)
if listOptions.Page != 0 {
sess = db.SetSessionPagination(sess, &listOptions)
mirrors := make([]*PushMirror, 0, listOptions.PageSize)
count, err := sess.FindAndCount(&mirrors)
return mirrors, count, err
}
mirrors := make([]*PushMirror, 0, 10)
count, err := sess.FindAndCount(&mirrors)
return mirrors, count, err
}
// GetPushMirrorsSyncedOnCommit returns push-mirrors for this repo that should be updated by new commits
func GetPushMirrorsSyncedOnCommit(repoID int64) ([]*PushMirror, error) {
mirrors := make([]*PushMirror, 0, 10)
return mirrors, db.GetEngine(db.DefaultContext).
Where("repo_id=? AND sync_on_commit=?", repoID, true).
Find(&mirrors)
}
// PushMirrorsIterate iterates all push-mirror repositories.
func PushMirrorsIterate(ctx context.Context, limit int, f func(idx int, bean interface{}) error) error {
return db.GetEngine(ctx).
Where("last_update + (`interval` / ?) <= ?", time.Second, time.Now().Unix()).
And("`interval` != 0").
OrderBy("last_update ASC").
Limit(limit).
Iterate(new(PushMirror), f)
}
| models/repo/pushmirror.go | 1 | https://github.com/go-gitea/gitea/commit/fc4680ea712fdf89065db882cd9d67946d004500 | [
0.9708544015884399,
0.07345384359359741,
0.0001755879638949409,
0.0022469935938715935,
0.24897293746471405
] |
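Editor's note: hunk id 2 and hunk id 3 above edit adjacent lines of PushMirrorsIterate in models/repo/pushmirror.go. Combining their `keep` lines with their `after_edit` lines yields the sketch below of how the edited function would read; this is an inferred reconstruction from the hunk fragments, not text taken from the upstream commit.

// PushMirrorsIterate iterates all push-mirror repositories.
func PushMirrorsIterate(ctx context.Context, limit int, f func(idx int, bean interface{}) error) error {
	sess := db.GetEngine(ctx).
		Where("last_update + (`interval` / ?) <= ?", time.Second, time.Now().Unix()).
		And("`interval` != 0").
		OrderBy("last_update ASC")
	// Apply the limit only when it is positive; otherwise iterate every push-mirror that is due.
	if limit > 0 {
		sess = sess.Limit(limit)
	}
	return sess.Iterate(new(PushMirror), f)
}

The same guard appears in the MirrorsIterate hunks earlier in this set, which suggests the intent of the change is to let callers pass limit <= 0 to mean "no limit" instead of producing a LIMIT 0 query.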
{
"id": 3,
"code_window": [
"\t\tWhere(\"last_update + (`interval` / ?) <= ?\", time.Second, time.Now().Unix()).\n",
"\t\tAnd(\"`interval` != 0\").\n",
"\t\tOrderBy(\"last_update ASC\").\n",
"\t\tLimit(limit).\n",
"\t\tIterate(new(PushMirror), f)\n",
"}"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"\t\tOrderBy(\"last_update ASC\")\n",
"\tif limit > 0 {\n",
"\t\tsess = sess.Limit(limit)\n",
"\t}\n",
"\treturn sess.Iterate(new(PushMirror), f)\n"
],
"file_path": "models/repo/pushmirror.go",
"type": "replace",
"edit_start_line_idx": 134
} | // Copyright 2015 The Gogs Authors. All rights reserved.
// Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
//go:build !gogit
package git
import (
"bufio"
"context"
"errors"
"path/filepath"
"code.gitea.io/gitea/modules/log"
)
// Repository represents a Git repository.
type Repository struct {
Path string
tagCache *ObjectCache
gpgSettings *GPGSettings
batchCancel context.CancelFunc
batchReader *bufio.Reader
batchWriter WriteCloserError
checkCancel context.CancelFunc
checkReader *bufio.Reader
checkWriter WriteCloserError
Ctx context.Context
LastCommitCache *LastCommitCache
}
// openRepositoryWithDefaultContext opens the repository at the given path with DefaultContext.
func openRepositoryWithDefaultContext(repoPath string) (*Repository, error) {
return OpenRepository(DefaultContext, repoPath)
}
// OpenRepository opens the repository at the given path with the provided context.
func OpenRepository(ctx context.Context, repoPath string) (*Repository, error) {
repoPath, err := filepath.Abs(repoPath)
if err != nil {
return nil, err
} else if !isDir(repoPath) {
return nil, errors.New("no such file or directory")
}
// Now because of some insanity with git cat-file not immediately failing if not run in a valid git directory we need to run git rev-parse first!
if err := EnsureValidGitRepository(ctx, repoPath); err != nil {
return nil, err
}
repo := &Repository{
Path: repoPath,
tagCache: newObjectCache(),
Ctx: ctx,
}
repo.batchWriter, repo.batchReader, repo.batchCancel = CatFileBatch(ctx, repoPath)
repo.checkWriter, repo.checkReader, repo.checkCancel = CatFileBatchCheck(ctx, repo.Path)
return repo, nil
}
// CatFileBatch obtains a CatFileBatch for this repository
func (repo *Repository) CatFileBatch(ctx context.Context) (WriteCloserError, *bufio.Reader, func()) {
if repo.batchCancel == nil || repo.batchReader.Buffered() > 0 {
log.Debug("Opening temporary cat file batch for: %s", repo.Path)
return CatFileBatch(ctx, repo.Path)
}
return repo.batchWriter, repo.batchReader, func() {}
}
// CatFileBatchCheck obtains a CatFileBatchCheck for this repository
func (repo *Repository) CatFileBatchCheck(ctx context.Context) (WriteCloserError, *bufio.Reader, func()) {
if repo.checkCancel == nil || repo.checkReader.Buffered() > 0 {
log.Debug("Opening temporary cat file batch-check: %s", repo.Path)
return CatFileBatchCheck(ctx, repo.Path)
}
return repo.checkWriter, repo.checkReader, func() {}
}
// Close this repository, in particular close the underlying gogitStorage if this is not nil
func (repo *Repository) Close() (err error) {
if repo == nil {
return
}
if repo.batchCancel != nil {
repo.batchCancel()
repo.batchReader = nil
repo.batchWriter = nil
repo.batchCancel = nil
}
if repo.checkCancel != nil {
repo.checkCancel()
repo.checkCancel = nil
repo.checkReader = nil
repo.checkWriter = nil
}
repo.LastCommitCache = nil
repo.tagCache = nil
return err
}
| modules/git/repo_base_nogogit.go | 0 | https://github.com/go-gitea/gitea/commit/fc4680ea712fdf89065db882cd9d67946d004500 | [
0.00017920677782967687,
0.0001722187444102019,
0.00016576670168433338,
0.00017083401326090097,
0.000003958783509006025
] |
{
"id": 3,
"code_window": [
"\t\tWhere(\"last_update + (`interval` / ?) <= ?\", time.Second, time.Now().Unix()).\n",
"\t\tAnd(\"`interval` != 0\").\n",
"\t\tOrderBy(\"last_update ASC\").\n",
"\t\tLimit(limit).\n",
"\t\tIterate(new(PushMirror), f)\n",
"}"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"\t\tOrderBy(\"last_update ASC\")\n",
"\tif limit > 0 {\n",
"\t\tsess = sess.Limit(limit)\n",
"\t}\n",
"\treturn sess.Iterate(new(PushMirror), f)\n"
],
"file_path": "models/repo/pushmirror.go",
"type": "replace",
"edit_start_line_idx": 134
} | // Copyright 2022 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package conan
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestNewRecipeReference(t *testing.T) {
cases := []struct {
Name string
Version string
User string
Channel string
Revision string
IsValid bool
}{
{"", "", "", "", "", false},
{"name", "", "", "", "", false},
{"", "1.0", "", "", "", false},
{"", "", "user", "", "", false},
{"", "", "", "channel", "", false},
{"", "", "", "", "0", false},
{"name", "1.0", "", "", "", true},
{"name", "1.0", "user", "", "", false},
{"name", "1.0", "", "channel", "", false},
{"name", "1.0", "user", "channel", "", true},
{"name", "1.0", "_", "", "", true},
{"name", "1.0", "", "_", "", true},
{"name", "1.0", "_", "_", "", true},
{"name", "1.0", "_", "_", "0", true},
{"name", "1.0", "", "", "0", true},
{"name", "1.0.0q", "", "", "0", true},
{"name", "1.0", "", "", "000000000000000000000000000000000000000000000000000000000000", false},
}
for i, c := range cases {
rref, err := NewRecipeReference(c.Name, c.Version, c.User, c.Channel, c.Revision)
if c.IsValid {
assert.NoError(t, err, "case %d, should be invalid", i)
assert.NotNil(t, rref, "case %d, should not be nil", i)
} else {
assert.Error(t, err, "case %d, should be valid", i)
}
}
}
func TestRecipeReferenceRevisionOrDefault(t *testing.T) {
rref, err := NewRecipeReference("name", "1.0", "", "", "")
assert.NoError(t, err)
assert.Equal(t, DefaultRevision, rref.RevisionOrDefault())
rref, err = NewRecipeReference("name", "1.0", "", "", DefaultRevision)
assert.NoError(t, err)
assert.Equal(t, DefaultRevision, rref.RevisionOrDefault())
rref, err = NewRecipeReference("name", "1.0", "", "", "Az09")
assert.NoError(t, err)
assert.Equal(t, "Az09", rref.RevisionOrDefault())
}
func TestRecipeReferenceString(t *testing.T) {
rref, err := NewRecipeReference("name", "1.0", "", "", "")
assert.NoError(t, err)
assert.Equal(t, "name/1.0", rref.String())
rref, err = NewRecipeReference("name", "1.0", "user", "channel", "")
assert.NoError(t, err)
assert.Equal(t, "name/1.0@user/channel", rref.String())
rref, err = NewRecipeReference("name", "1.0", "user", "channel", "Az09")
assert.NoError(t, err)
assert.Equal(t, "name/1.0@user/channel#Az09", rref.String())
}
func TestRecipeReferenceLinkName(t *testing.T) {
rref, err := NewRecipeReference("name", "1.0", "", "", "")
assert.NoError(t, err)
assert.Equal(t, "name/1.0/_/_/0", rref.LinkName())
rref, err = NewRecipeReference("name", "1.0", "user", "channel", "")
assert.NoError(t, err)
assert.Equal(t, "name/1.0/user/channel/0", rref.LinkName())
rref, err = NewRecipeReference("name", "1.0", "user", "channel", "Az09")
assert.NoError(t, err)
assert.Equal(t, "name/1.0/user/channel/Az09", rref.LinkName())
}
func TestNewPackageReference(t *testing.T) {
rref, _ := NewRecipeReference("name", "1.0", "", "", "")
cases := []struct {
Recipe *RecipeReference
Reference string
Revision string
IsValid bool
}{
{nil, "", "", false},
{rref, "", "", false},
{nil, "aZ09", "", false},
{rref, "aZ09", "", true},
{rref, "", "Az09", false},
{rref, "aZ09", "Az09", true},
}
for i, c := range cases {
pref, err := NewPackageReference(c.Recipe, c.Reference, c.Revision)
if c.IsValid {
assert.NoError(t, err, "case %d, should be invalid", i)
assert.NotNil(t, pref, "case %d, should not be nil", i)
} else {
assert.Error(t, err, "case %d, should be valid", i)
}
}
}
func TestPackageReferenceRevisionOrDefault(t *testing.T) {
rref, _ := NewRecipeReference("name", "1.0", "", "", "")
pref, err := NewPackageReference(rref, "ref", "")
assert.NoError(t, err)
assert.Equal(t, DefaultRevision, pref.RevisionOrDefault())
pref, err = NewPackageReference(rref, "ref", DefaultRevision)
assert.NoError(t, err)
assert.Equal(t, DefaultRevision, pref.RevisionOrDefault())
pref, err = NewPackageReference(rref, "ref", "Az09")
assert.NoError(t, err)
assert.Equal(t, "Az09", pref.RevisionOrDefault())
}
func TestPackageReferenceLinkName(t *testing.T) {
rref, _ := NewRecipeReference("name", "1.0", "", "", "")
pref, err := NewPackageReference(rref, "ref", "")
assert.NoError(t, err)
assert.Equal(t, "ref/0", pref.LinkName())
pref, err = NewPackageReference(rref, "ref", "Az09")
assert.NoError(t, err)
assert.Equal(t, "ref/Az09", pref.LinkName())
}
| modules/packages/conan/reference_test.go | 0 | https://github.com/go-gitea/gitea/commit/fc4680ea712fdf89065db882cd9d67946d004500 | [
0.00017780733469408005,
0.00017300024046562612,
0.00016568167484365404,
0.0001740490843076259,
0.000003667793862405233
] |
{
"id": 3,
"code_window": [
"\t\tWhere(\"last_update + (`interval` / ?) <= ?\", time.Second, time.Now().Unix()).\n",
"\t\tAnd(\"`interval` != 0\").\n",
"\t\tOrderBy(\"last_update ASC\").\n",
"\t\tLimit(limit).\n",
"\t\tIterate(new(PushMirror), f)\n",
"}"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"\t\tOrderBy(\"last_update ASC\")\n",
"\tif limit > 0 {\n",
"\t\tsess = sess.Limit(limit)\n",
"\t}\n",
"\treturn sess.Iterate(new(PushMirror), f)\n"
],
"file_path": "models/repo/pushmirror.go",
"type": "replace",
"edit_start_line_idx": 134
} | {{if .diff.EscapeStatus.HasInvisible}}{{.locale.Tr "repo.invisible_runes_line"}} {{end}}{{/*
*/}}{{if .diff.EscapeStatus.HasAmbiguous}}{{.locale.Tr "repo.ambiguous_runes_line"}}{{end}}
| templates/repo/diff/escape_title.tmpl | 0 | https://github.com/go-gitea/gitea/commit/fc4680ea712fdf89065db882cd9d67946d004500 | [
0.0001727658382151276,
0.0001727658382151276,
0.0001727658382151276,
0.0001727658382151276,
0
] |
{
"id": 0,
"code_window": [
"\t// EnableCollectExecutionInfo enables the TiDB to collect execution info.\n",
"\tEnableCollectExecutionInfo bool `toml:\"tidb_enable_collect_execution_info\" json:\"tidb_enable_collect_execution_info\"`\n",
"\tPluginDir string `toml:\"plugin_dir\" json:\"plugin_dir\"`\n",
"\tPluginLoad string `toml:\"plugin_load\" json:\"plugin_load\"`\n",
"\t// MaxConnections is the maximum permitted number of simultaneous client connections.\n",
"\tMaxConnections uint32 `toml:\"max_connections\" json:\"max_connections\"`\n",
"\tTiDBEnableDDL AtomicBool `toml:\"tidb_enable_ddl\" json:\"tidb_enable_ddl\"`\n",
"}\n",
"\n",
"func (l *Log) getDisableTimestamp() bool {\n",
"\tif l.EnableTimestamp == nbUnset && l.DisableTimestamp == nbUnset {\n",
"\t\treturn false\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tMaxConnections uint32 `toml:\"max_connections\" json:\"max_connections\"`\n",
"\tTiDBEnableDDL AtomicBool `toml:\"tidb_enable_ddl\" json:\"tidb_enable_ddl\"`\n",
"\tTiDBRCReadCheckTS bool `toml:\"tidb_rc_read_check_ts\" json:\"tidb_rc_read_check_ts\"`\n"
],
"file_path": "config/config.go",
"type": "replace",
"edit_start_line_idx": 490
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"math"
"os"
"os/user"
"path/filepath"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/BurntSushi/toml"
"github.com/pingcap/errors"
zaplog "github.com/pingcap/log"
logbackupconf "github.com/pingcap/tidb/br/pkg/streamhelper/config"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/tikvutil"
"github.com/pingcap/tidb/util/versioninfo"
tikvcfg "github.com/tikv/client-go/v2/config"
tracing "github.com/uber/jaeger-client-go/config"
atomicutil "go.uber.org/atomic"
"go.uber.org/zap"
)
// Config number limitations
const (
MaxLogFileSize = 4096 // MB
// DefTxnEntrySizeLimit is the default value of TxnEntrySizeLimit.
DefTxnEntrySizeLimit = 6 * 1024 * 1024
// DefTxnTotalSizeLimit is the default value of TxnTxnTotalSizeLimit.
DefTxnTotalSizeLimit = 100 * 1024 * 1024
// DefMaxIndexLength is the maximum index length(in bytes). This value is consistent with MySQL.
DefMaxIndexLength = 3072
// DefMaxOfMaxIndexLength is the maximum index length(in bytes) for TiDB v3.0.7 and previous version.
DefMaxOfMaxIndexLength = 3072 * 4
// DefIndexLimit is the limitation of index on a single table. This value is consistent with MySQL.
DefIndexLimit = 64
// DefMaxOfIndexLimit is the maximum limitation of index on a single table for TiDB.
DefMaxOfIndexLimit = 64 * 8
// DefPort is the default port of TiDB
DefPort = 4000
// DefStatusPort is the default status port of TiDB
DefStatusPort = 10080
// DefHost is the default host of TiDB
DefHost = "0.0.0.0"
// DefStatusHost is the default status host of TiDB
DefStatusHost = "0.0.0.0"
// DefTableColumnCountLimit is limit of the number of columns in a table
DefTableColumnCountLimit = 1017
// DefMaxOfTableColumnCountLimit is maximum limitation of the number of columns in a table
DefMaxOfTableColumnCountLimit = 4096
// DefStatsLoadConcurrencyLimit is limit of the concurrency of stats-load
DefStatsLoadConcurrencyLimit = 1
// DefMaxOfStatsLoadConcurrencyLimit is maximum limitation of the concurrency of stats-load
DefMaxOfStatsLoadConcurrencyLimit = 128
// DefStatsLoadQueueSizeLimit is limit of the size of stats-load request queue
DefStatsLoadQueueSizeLimit = 1
// DefMaxOfStatsLoadQueueSizeLimit is maximum limitation of the size of stats-load request queue
DefMaxOfStatsLoadQueueSizeLimit = 100000
// DefDDLSlowOprThreshold sets log DDL operations whose execution time exceeds the threshold value.
DefDDLSlowOprThreshold = 300
// DefExpensiveQueryTimeThreshold indicates the time threshold of expensive query.
DefExpensiveQueryTimeThreshold = 60
// DefMemoryUsageAlarmRatio is the threshold triggering an alarm which the memory usage of tidb-server instance exceeds.
DefMemoryUsageAlarmRatio = 0.8
// DefTempDir is the default temporary directory path for TiDB.
DefTempDir = "/tmp/tidb"
)
// Valid config maps
var (
ValidStorage = map[string]bool{
"mocktikv": true,
"tikv": true,
"unistore": true,
}
	// CheckTableBeforeDrop enables executing `admin check table` before `drop table`.
CheckTableBeforeDrop = false
// checkBeforeDropLDFlag is a go build flag.
checkBeforeDropLDFlag = "None"
	// tempStorageDirName is the default temporary storage dir name, derived by base64-encoding the string `host:port/statusHost:statusPort`
tempStorageDirName = encodeDefTempStorageDir(os.TempDir(), DefHost, DefStatusHost, DefPort, DefStatusPort)
)
// InstanceConfigSection indicates a config section that has options moved to the [instance] section.
type InstanceConfigSection struct {
// SectionName indicates the origin section name.
SectionName string
// NameMappings maps the origin name to the name in [instance].
NameMappings map[string]string
}
var (
// sectionMovedToInstance records all config section and options that should be moved to [instance].
sectionMovedToInstance = []InstanceConfigSection{
{
"",
map[string]string{
"check-mb4-value-in-utf8": "tidb_check_mb4_value_in_utf8",
"enable-collect-execution-info": "tidb_enable_collect_execution_info",
"max-server-connections": "max_connections",
"run-ddl": "tidb_enable_ddl",
},
},
{
"log",
map[string]string{
"enable-slow-log": "tidb_enable_slow_log",
"slow-threshold": "tidb_slow_log_threshold",
"record-plan-in-slow-log": "tidb_record_plan_in_slow_log",
},
},
{
"performance",
map[string]string{
"force-priority": "tidb_force_priority",
"memory-usage-alarm-ratio": "tidb_memory_usage_alarm_ratio",
},
},
{
"plugin",
map[string]string{
"load": "plugin_load",
"dir": "plugin_dir",
},
},
}
	// ConflictOptions indicates the conflicting config options that exist in both the [instance] section and other sections of the config file.
ConflictOptions []InstanceConfigSection
// DeprecatedOptions indicates the config options existing in some other sections in config file.
// They should be moved to [instance] section.
DeprecatedOptions []InstanceConfigSection
// TikvConfigLock protects against concurrent tikv config refresh
TikvConfigLock sync.Mutex
)
// Config contains configuration options.
type Config struct {
Host string `toml:"host" json:"host"`
AdvertiseAddress string `toml:"advertise-address" json:"advertise-address"`
Port uint `toml:"port" json:"port"`
Cors string `toml:"cors" json:"cors"`
Store string `toml:"store" json:"store"`
Path string `toml:"path" json:"path"`
Socket string `toml:"socket" json:"socket"`
Lease string `toml:"lease" json:"lease"`
SplitTable bool `toml:"split-table" json:"split-table"`
TokenLimit uint `toml:"token-limit" json:"token-limit"`
TempDir string `toml:"temp-dir" json:"temp-dir"`
TempStoragePath string `toml:"tmp-storage-path" json:"tmp-storage-path"`
	// TempStorageQuota describes the temporary storage quota during query execution when TiDBEnableTmpStorageOnOOM is enabled.
	// If the quota exceeds the capacity of the TempStoragePath, the tidb-server exits with a fatal error.
TempStorageQuota int64 `toml:"tmp-storage-quota" json:"tmp-storage-quota"` // Bytes
TxnLocalLatches tikvcfg.TxnLocalLatches `toml:"-" json:"-"`
ServerVersion string `toml:"server-version" json:"server-version"`
VersionComment string `toml:"version-comment" json:"version-comment"`
TiDBEdition string `toml:"tidb-edition" json:"tidb-edition"`
TiDBReleaseVersion string `toml:"tidb-release-version" json:"tidb-release-version"`
Log Log `toml:"log" json:"log"`
Instance Instance `toml:"instance" json:"instance"`
Security Security `toml:"security" json:"security"`
Status Status `toml:"status" json:"status"`
Performance Performance `toml:"performance" json:"performance"`
PreparedPlanCache PreparedPlanCache `toml:"prepared-plan-cache" json:"prepared-plan-cache"`
OpenTracing OpenTracing `toml:"opentracing" json:"opentracing"`
ProxyProtocol ProxyProtocol `toml:"proxy-protocol" json:"proxy-protocol"`
PDClient tikvcfg.PDClient `toml:"pd-client" json:"pd-client"`
TiKVClient tikvcfg.TiKVClient `toml:"tikv-client" json:"tikv-client"`
Binlog Binlog `toml:"binlog" json:"binlog"`
CompatibleKillQuery bool `toml:"compatible-kill-query" json:"compatible-kill-query"`
PessimisticTxn PessimisticTxn `toml:"pessimistic-txn" json:"pessimistic-txn"`
MaxIndexLength int `toml:"max-index-length" json:"max-index-length"`
IndexLimit int `toml:"index-limit" json:"index-limit"`
TableColumnCountLimit uint32 `toml:"table-column-count-limit" json:"table-column-count-limit"`
GracefulWaitBeforeShutdown int `toml:"graceful-wait-before-shutdown" json:"graceful-wait-before-shutdown"`
// AlterPrimaryKey is used to control alter primary key feature.
AlterPrimaryKey bool `toml:"alter-primary-key" json:"alter-primary-key"`
	// TreatOldVersionUTF8AsUTF8MB4 is used to treat old-version table/column UTF8 charset as UTF8MB4. This is for compatibility.
	// Dynamic modification is currently not supported, because it would require reloading all old-version schemas.
TreatOldVersionUTF8AsUTF8MB4 bool `toml:"treat-old-version-utf8-as-utf8mb4" json:"treat-old-version-utf8-as-utf8mb4"`
	// EnableTableLock indicates whether to enable the table lock feature.
// TODO: remove this after table lock features stable.
EnableTableLock bool `toml:"enable-table-lock" json:"enable-table-lock"`
DelayCleanTableLock uint64 `toml:"delay-clean-table-lock" json:"delay-clean-table-lock"`
SplitRegionMaxNum uint64 `toml:"split-region-max-num" json:"split-region-max-num"`
TopSQL TopSQL `toml:"top-sql" json:"top-sql"`
// RepairMode indicates that the TiDB is in the repair mode for table meta.
RepairMode bool `toml:"repair-mode" json:"repair-mode"`
RepairTableList []string `toml:"repair-table-list" json:"repair-table-list"`
// IsolationRead indicates that the TiDB reads data from which isolation level(engine and label).
IsolationRead IsolationRead `toml:"isolation-read" json:"isolation-read"`
// NewCollationsEnabledOnFirstBootstrap indicates if the new collations are enabled, it effects only when a TiDB cluster bootstrapped on the first time.
NewCollationsEnabledOnFirstBootstrap bool `toml:"new_collations_enabled_on_first_bootstrap" json:"new_collations_enabled_on_first_bootstrap"`
// Experimental contains parameters for experimental features.
Experimental Experimental `toml:"experimental" json:"experimental"`
	// SkipRegisterToDashboard tells TiDB not to register itself to the dashboard.
SkipRegisterToDashboard bool `toml:"skip-register-to-dashboard" json:"skip-register-to-dashboard"`
// EnableTelemetry enables the usage data report to PingCAP.
EnableTelemetry bool `toml:"enable-telemetry" json:"enable-telemetry"`
// Labels indicates the labels set for the tidb server. The labels describe some specific properties for the tidb
// server like `zone`/`rack`/`host`. Currently, labels won't affect the tidb server except for some special
// label keys. Now we have following special keys:
	// 1. 'group' is a special label key which should be automatically set by tidb-operator. We don't suggest
	// that users set 'group' in labels.
// 2. 'zone' is a special key that indicates the DC location of this tidb-server. If it is set, the value for this
// key will be the default value of the session variable `txn_scope` for this tidb-server.
Labels map[string]string `toml:"labels" json:"labels"`
// EnableGlobalIndex enables creating global index.
EnableGlobalIndex bool `toml:"enable-global-index" json:"enable-global-index"`
	// DeprecateIntegerDisplayWidth indicates whether to deprecate the max display length for integers.
DeprecateIntegerDisplayWidth bool `toml:"deprecate-integer-display-length" json:"deprecate-integer-display-length"`
// EnableEnumLengthLimit indicates whether the enum/set element length is limited.
// According to MySQL 8.0 Refman:
// The maximum supported length of an individual SET element is M <= 255 and (M x w) <= 1020,
// where M is the element literal length and w is the number of bytes required for the maximum-length character in the character set.
// See https://dev.mysql.com/doc/refman/8.0/en/string-type-syntax.html for more details.
EnableEnumLengthLimit bool `toml:"enable-enum-length-limit" json:"enable-enum-length-limit"`
// StoresRefreshInterval indicates the interval of refreshing stores info, the unit is second.
StoresRefreshInterval uint64 `toml:"stores-refresh-interval" json:"stores-refresh-interval"`
// EnableTCP4Only enables net.Listen("tcp4",...)
	// Note that it can make LVS with TOA work, and thus TiDB can get the real client IP.
EnableTCP4Only bool `toml:"enable-tcp4-only" json:"enable-tcp4-only"`
// The client will forward the requests through the follower
// if one of the following conditions happens:
// 1. there is a network partition problem between TiDB and PD leader.
// 2. there is a network partition problem between TiDB and TiKV leader.
EnableForwarding bool `toml:"enable-forwarding" json:"enable-forwarding"`
	// MaxBallastObjectSize sets the max size of the ballast object, the unit is byte.
// The default value is the smallest of the following two values: 2GB or
// one quarter of the total physical memory in the current system.
MaxBallastObjectSize int `toml:"max-ballast-object-size" json:"max-ballast-object-size"`
	// BallastObjectSize sets the initial size of the ballast object, the unit is byte.
BallastObjectSize int `toml:"ballast-object-size" json:"ballast-object-size"`
// EnableGlobalKill indicates whether to enable global kill.
TrxSummary TrxSummary `toml:"transaction-summary" json:"transaction-summary"`
EnableGlobalKill bool `toml:"enable-global-kill" json:"enable-global-kill"`
// The following items are deprecated. We need to keep them here temporarily
// to support the upgrade process. They can be removed in future.
// EnableBatchDML, MemQuotaQuery, OOMAction unused since bootstrap v90
EnableBatchDML bool `toml:"enable-batch-dml" json:"enable-batch-dml"`
MemQuotaQuery int64 `toml:"mem-quota-query" json:"mem-quota-query"`
OOMAction string `toml:"oom-action" json:"oom-action"`
// OOMUseTmpStorage unused since bootstrap v93
OOMUseTmpStorage bool `toml:"oom-use-tmp-storage" json:"oom-use-tmp-storage"`
// These items are deprecated because they are turned into instance system variables.
CheckMb4ValueInUTF8 AtomicBool `toml:"check-mb4-value-in-utf8" json:"check-mb4-value-in-utf8"`
EnableCollectExecutionInfo bool `toml:"enable-collect-execution-info" json:"enable-collect-execution-info"`
Plugin Plugin `toml:"plugin" json:"plugin"`
MaxServerConnections uint32 `toml:"max-server-connections" json:"max-server-connections"`
RunDDL bool `toml:"run-ddl" json:"run-ddl"`
}
// UpdateTempStoragePath is to update the `TempStoragePath` if port/statusPort was changed
// and the `tmp-storage-path` was not specified in the conf.toml or was specified the same as the default value.
func (c *Config) UpdateTempStoragePath() {
if c.TempStoragePath == tempStorageDirName {
c.TempStoragePath = encodeDefTempStorageDir(os.TempDir(), c.Host, c.Status.StatusHost, c.Port, c.Status.StatusPort)
} else {
c.TempStoragePath = encodeDefTempStorageDir(c.TempStoragePath, c.Host, c.Status.StatusHost, c.Port, c.Status.StatusPort)
}
}
// GetTiKVConfig returns configuration options from tikvcfg
func (c *Config) GetTiKVConfig() *tikvcfg.Config {
return &tikvcfg.Config{
CommitterConcurrency: int(tikvutil.CommitterConcurrency.Load()),
MaxTxnTTL: c.Performance.MaxTxnTTL,
TiKVClient: c.TiKVClient,
Security: c.Security.ClusterSecurity(),
PDClient: c.PDClient,
PessimisticTxn: tikvcfg.PessimisticTxn{MaxRetryCount: c.PessimisticTxn.MaxRetryCount},
TxnLocalLatches: c.TxnLocalLatches,
StoresRefreshInterval: c.StoresRefreshInterval,
OpenTracingEnable: c.OpenTracing.Enable,
Path: c.Path,
EnableForwarding: c.EnableForwarding,
TxnScope: c.Labels["zone"],
}
}
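// encodeDefTempStorageDir derives a per-instance temp-storage path: it base64-encodes
// "host:port/statusHost:statusPort" and nests it under the current OS user's "_tidb" directory,
// so that multiple TiDB instances on one machine do not share spill files.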
func encodeDefTempStorageDir(tempDir string, host, statusHost string, port, statusPort uint) string {
dirName := base64.URLEncoding.EncodeToString([]byte(fmt.Sprintf("%v:%v/%v:%v", host, port, statusHost, statusPort)))
osUID := ""
currentUser, err := user.Current()
if err == nil {
osUID = currentUser.Uid
}
return filepath.Join(tempDir, osUID+"_tidb", dirName, "tmp-storage")
}
// nullableBool defaults unset bool options to unset instead of false, which enables us to know whether the user
// has set two conflicting options at the same time.
type nullableBool struct {
IsValid bool
IsTrue bool
}
var (
nbUnset = nullableBool{false, false}
nbFalse = nullableBool{true, false}
nbTrue = nullableBool{true, true}
)
func (b *nullableBool) toBool() bool {
return b.IsValid && b.IsTrue
}
func (b nullableBool) MarshalJSON() ([]byte, error) {
switch b {
case nbTrue:
return json.Marshal(true)
case nbFalse:
return json.Marshal(false)
default:
return json.Marshal(nil)
}
}
func (b *nullableBool) UnmarshalText(text []byte) error {
str := string(text)
switch str {
case "", "null":
*b = nbUnset
return nil
case "true":
*b = nbTrue
case "false":
*b = nbFalse
default:
*b = nbUnset
return errors.New("Invalid value for bool type: " + str)
}
return nil
}
func (b nullableBool) MarshalText() ([]byte, error) {
if !b.IsValid {
return []byte(""), nil
}
if b.IsTrue {
return []byte("true"), nil
}
return []byte("false"), nil
}
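// UnmarshalJSON implements the json.Unmarshaler interface; any non-bool JSON value is treated as unset.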
func (b *nullableBool) UnmarshalJSON(data []byte) error {
var err error
var v interface{}
if err = json.Unmarshal(data, &v); err != nil {
return err
}
switch raw := v.(type) {
case bool:
*b = nullableBool{true, raw}
default:
*b = nbUnset
}
return err
}
// AtomicBool is a helper type for atomic operations on a boolean value.
type AtomicBool struct {
atomicutil.Bool
}
// NewAtomicBool creates an AtomicBool.
func NewAtomicBool(v bool) *AtomicBool {
return &AtomicBool{*atomicutil.NewBool(v)}
}
// MarshalText implements the encoding.TextMarshaler interface.
func (b AtomicBool) MarshalText() ([]byte, error) {
if b.Load() {
return []byte("true"), nil
}
return []byte("false"), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (b *AtomicBool) UnmarshalText(text []byte) error {
str := string(text)
switch str {
case "", "null":
*b = AtomicBool{*atomicutil.NewBool(false)}
case "true":
*b = AtomicBool{*atomicutil.NewBool(true)}
case "false":
*b = AtomicBool{*atomicutil.NewBool(false)}
default:
*b = AtomicBool{*atomicutil.NewBool(false)}
return errors.New("Invalid value for bool type: " + str)
}
return nil
}
// LogBackup is the config for log backup service.
// For now, it includes the embed advancer.
type LogBackup struct {
Advancer logbackupconf.Config `toml:"advancer" json:"advancer"`
Enabled bool `toml:"enabled" json:"enabled"`
}
// Log is the log section of config.
type Log struct {
// Log level.
Level string `toml:"level" json:"level"`
// Log format, one of json or text.
Format string `toml:"format" json:"format"`
// Disable automatic timestamps in output. Deprecated: use EnableTimestamp instead.
DisableTimestamp nullableBool `toml:"disable-timestamp" json:"disable-timestamp"`
// EnableTimestamp enables automatic timestamps in log output.
EnableTimestamp nullableBool `toml:"enable-timestamp" json:"enable-timestamp"`
// DisableErrorStack stops annotating logs with the full stack error
// message. Deprecated: use EnableErrorStack instead.
DisableErrorStack nullableBool `toml:"disable-error-stack" json:"disable-error-stack"`
// EnableErrorStack enables annotating logs with the full stack error
// message.
EnableErrorStack nullableBool `toml:"enable-error-stack" json:"enable-error-stack"`
// File log config.
File logutil.FileLogConfig `toml:"file" json:"file"`
SlowQueryFile string `toml:"slow-query-file" json:"slow-query-file"`
ExpensiveThreshold uint `toml:"expensive-threshold" json:"expensive-threshold"`
// The following items are deprecated. We need to keep them here temporarily
// to support the upgrade process. They can be removed in future.
// QueryLogMaxLen unused since bootstrap v90
QueryLogMaxLen uint64 `toml:"query-log-max-len" json:"query-log-max-len"`
// EnableSlowLog, SlowThreshold, RecordPlanInSlowLog are deprecated.
EnableSlowLog AtomicBool `toml:"enable-slow-log" json:"enable-slow-log"`
SlowThreshold uint64 `toml:"slow-threshold" json:"slow-threshold"`
RecordPlanInSlowLog uint32 `toml:"record-plan-in-slow-log" json:"record-plan-in-slow-log"`
}
// Instance is the section of instance scope system variables.
type Instance struct {
// These variables only exist in [instance] section.
	// TiDBGeneralLog is used to log every query in the server at info level.
TiDBGeneralLog bool `toml:"tidb_general_log" json:"tidb_general_log"`
	// EnablePProfSQLCPU is used to add the sql label to pprof results.
EnablePProfSQLCPU bool `toml:"tidb_pprof_sql_cpu" json:"tidb_pprof_sql_cpu"`
// DDLSlowOprThreshold sets log DDL operations whose execution time exceeds the threshold value.
DDLSlowOprThreshold uint32 `toml:"ddl_slow_threshold" json:"ddl_slow_threshold"`
// ExpensiveQueryTimeThreshold indicates the time threshold of expensive query.
ExpensiveQueryTimeThreshold uint64 `toml:"tidb_expensive_query_time_threshold" json:"tidb_expensive_query_time_threshold"`
// These variables exist in both 'instance' section and another place.
// The configuration in 'instance' section takes precedence.
EnableSlowLog AtomicBool `toml:"tidb_enable_slow_log" json:"tidb_enable_slow_log"`
SlowThreshold uint64 `toml:"tidb_slow_log_threshold" json:"tidb_slow_log_threshold"`
RecordPlanInSlowLog uint32 `toml:"tidb_record_plan_in_slow_log" json:"tidb_record_plan_in_slow_log"`
CheckMb4ValueInUTF8 AtomicBool `toml:"tidb_check_mb4_value_in_utf8" json:"tidb_check_mb4_value_in_utf8"`
ForcePriority string `toml:"tidb_force_priority" json:"tidb_force_priority"`
MemoryUsageAlarmRatio float64 `toml:"tidb_memory_usage_alarm_ratio" json:"tidb_memory_usage_alarm_ratio"`
// EnableCollectExecutionInfo enables the TiDB to collect execution info.
EnableCollectExecutionInfo bool `toml:"tidb_enable_collect_execution_info" json:"tidb_enable_collect_execution_info"`
PluginDir string `toml:"plugin_dir" json:"plugin_dir"`
PluginLoad string `toml:"plugin_load" json:"plugin_load"`
// MaxConnections is the maximum permitted number of simultaneous client connections.
MaxConnections uint32 `toml:"max_connections" json:"max_connections"`
TiDBEnableDDL AtomicBool `toml:"tidb_enable_ddl" json:"tidb_enable_ddl"`
}
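// getDisableTimestamp resolves the deprecated disable-timestamp option against the newer
// enable-timestamp option: enable-timestamp wins when it is set, otherwise the legacy flag
// is honoured, and timestamps stay enabled when neither option is set.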
func (l *Log) getDisableTimestamp() bool {
if l.EnableTimestamp == nbUnset && l.DisableTimestamp == nbUnset {
return false
}
if l.EnableTimestamp == nbUnset {
return l.DisableTimestamp.toBool()
}
return !l.EnableTimestamp.toBool()
}
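// getDisableErrorStack applies the same precedence for enable-error-stack and the deprecated
// disable-error-stack, except that error stacks are disabled by default when neither option is set.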
func (l *Log) getDisableErrorStack() bool {
if l.EnableErrorStack == nbUnset && l.DisableErrorStack == nbUnset {
return true
}
if l.EnableErrorStack == nbUnset {
return l.DisableErrorStack.toBool()
}
return !l.EnableErrorStack.toBool()
}
// The following constants represents the valid action configurations for Security.SpilledFileEncryptionMethod.
// "plaintext" means encryption is disabled.
// NOTE: Although the values are case-insensitive, we should use lower-case
// strings because the configuration value will be transformed to a lower-case
// string and compared with these constants later on.
const (
SpilledFileEncryptionMethodPlaintext = "plaintext"
SpilledFileEncryptionMethodAES128CTR = "aes128-ctr"
)
// Security is the security section of the config.
type Security struct {
SkipGrantTable bool `toml:"skip-grant-table" json:"skip-grant-table"`
SSLCA string `toml:"ssl-ca" json:"ssl-ca"`
SSLCert string `toml:"ssl-cert" json:"ssl-cert"`
SSLKey string `toml:"ssl-key" json:"ssl-key"`
ClusterSSLCA string `toml:"cluster-ssl-ca" json:"cluster-ssl-ca"`
ClusterSSLCert string `toml:"cluster-ssl-cert" json:"cluster-ssl-cert"`
ClusterSSLKey string `toml:"cluster-ssl-key" json:"cluster-ssl-key"`
ClusterVerifyCN []string `toml:"cluster-verify-cn" json:"cluster-verify-cn"`
// If set to "plaintext", the spilled files will not be encrypted.
SpilledFileEncryptionMethod string `toml:"spilled-file-encryption-method" json:"spilled-file-encryption-method"`
// EnableSEM prevents SUPER users from having full access.
EnableSEM bool `toml:"enable-sem" json:"enable-sem"`
// Allow automatic TLS certificate generation
AutoTLS bool `toml:"auto-tls" json:"auto-tls"`
MinTLSVersion string `toml:"tls-version" json:"tls-version"`
RSAKeySize int `toml:"rsa-key-size" json:"rsa-key-size"`
SecureBootstrap bool `toml:"secure-bootstrap" json:"secure-bootstrap"`
}
// The ErrConfigValidationFailed error is used so that external callers can do a type assertion
// to defer handling of this specific error when someone does not want strict type checking.
// This is needed only because logging hasn't been set up at the time we parse the config file.
// This should all be ripped out once strict config checking is made the default behavior.
type ErrConfigValidationFailed struct {
confFile string
UndecodedItems []string
}
func (e *ErrConfigValidationFailed) Error() string {
return fmt.Sprintf("config file %s contained invalid configuration options: %s; check "+
"TiDB manual to make sure this option has not been deprecated and removed from your TiDB "+
"version if the option does not appear to be a typo", e.confFile, strings.Join(
e.UndecodedItems, ", "))
}
// ErrConfigInstanceSection error is used to warn the user
// about which config options should be moved to the 'instance' section.
type ErrConfigInstanceSection struct {
confFile string
configSections *[]InstanceConfigSection
deprecatedSections *[]InstanceConfigSection
}
func (e *ErrConfigInstanceSection) Error() string {
var builder strings.Builder
if len(*e.configSections) > 0 {
builder.WriteString("Conflict configuration options exists on both [instance] section and some other sections. ")
}
if len(*e.deprecatedSections) > 0 {
builder.WriteString("Some configuration options should be moved to [instance] section. ")
}
builder.WriteString("Please use the latter config options in [instance] instead: ")
for _, configSection := range *e.configSections {
for oldName, newName := range configSection.NameMappings {
builder.WriteString(fmt.Sprintf(" (%s, %s)", oldName, newName))
}
}
for _, configSection := range *e.deprecatedSections {
for oldName, newName := range configSection.NameMappings {
builder.WriteString(fmt.Sprintf(" (%s, %s)", oldName, newName))
}
}
builder.WriteString(".")
return builder.String()
}
// ClusterSecurity returns Security info for cluster
func (s *Security) ClusterSecurity() tikvcfg.Security {
return tikvcfg.NewSecurity(s.ClusterSSLCA, s.ClusterSSLCert, s.ClusterSSLKey, s.ClusterVerifyCN)
}
// Status is the status section of the config.
type Status struct {
StatusHost string `toml:"status-host" json:"status-host"`
MetricsAddr string `toml:"metrics-addr" json:"metrics-addr"`
StatusPort uint `toml:"status-port" json:"status-port"`
MetricsInterval uint `toml:"metrics-interval" json:"metrics-interval"`
ReportStatus bool `toml:"report-status" json:"report-status"`
RecordQPSbyDB bool `toml:"record-db-qps" json:"record-db-qps"`
// After a duration of this time in seconds if the server doesn't see any activity it pings
// the client to see if the transport is still alive.
GRPCKeepAliveTime uint `toml:"grpc-keepalive-time" json:"grpc-keepalive-time"`
// After having pinged for keepalive check, the server waits for a duration of timeout in seconds
// and if no activity is seen even after that the connection is closed.
GRPCKeepAliveTimeout uint `toml:"grpc-keepalive-timeout" json:"grpc-keepalive-timeout"`
// The number of max concurrent streams/requests on a client connection.
GRPCConcurrentStreams uint `toml:"grpc-concurrent-streams" json:"grpc-concurrent-streams"`
// Sets window size for stream. The default value is 2MB.
GRPCInitialWindowSize int `toml:"grpc-initial-window-size" json:"grpc-initial-window-size"`
// Set maximum message length in bytes that gRPC can send. `-1` means unlimited. The default value is 10MB.
GRPCMaxSendMsgSize int `toml:"grpc-max-send-msg-size" json:"grpc-max-send-msg-size"`
}
// Performance is the performance section of the config.
type Performance struct {
MaxProcs uint `toml:"max-procs" json:"max-procs"`
// Deprecated: use ServerMemoryQuota instead
MaxMemory uint64 `toml:"max-memory" json:"max-memory"`
ServerMemoryQuota uint64 `toml:"server-memory-quota" json:"server-memory-quota"`
StatsLease string `toml:"stats-lease" json:"stats-lease"`
StmtCountLimit uint `toml:"stmt-count-limit" json:"stmt-count-limit"`
PseudoEstimateRatio float64 `toml:"pseudo-estimate-ratio" json:"pseudo-estimate-ratio"`
BindInfoLease string `toml:"bind-info-lease" json:"bind-info-lease"`
TxnEntrySizeLimit uint64 `toml:"txn-entry-size-limit" json:"txn-entry-size-limit"`
TxnTotalSizeLimit uint64 `toml:"txn-total-size-limit" json:"txn-total-size-limit"`
TCPKeepAlive bool `toml:"tcp-keep-alive" json:"tcp-keep-alive"`
TCPNoDelay bool `toml:"tcp-no-delay" json:"tcp-no-delay"`
CrossJoin bool `toml:"cross-join" json:"cross-join"`
DistinctAggPushDown bool `toml:"distinct-agg-push-down" json:"distinct-agg-push-down"`
	// Whether to enable projection push-down for coprocessors (both tikv & tiflash), default false.
ProjectionPushDown bool `toml:"projection-push-down" json:"projection-push-down"`
MaxTxnTTL uint64 `toml:"max-txn-ttl" json:"max-txn-ttl"`
// Deprecated
MemProfileInterval string `toml:"-" json:"-"`
IndexUsageSyncLease string `toml:"index-usage-sync-lease" json:"index-usage-sync-lease"`
PlanReplayerGCLease string `toml:"plan-replayer-gc-lease" json:"plan-replayer-gc-lease"`
GOGC int `toml:"gogc" json:"gogc"`
EnforceMPP bool `toml:"enforce-mpp" json:"enforce-mpp"`
StatsLoadConcurrency uint `toml:"stats-load-concurrency" json:"stats-load-concurrency"`
StatsLoadQueueSize uint `toml:"stats-load-queue-size" json:"stats-load-queue-size"`
EnableStatsCacheMemQuota bool `toml:"enable-stats-cache-mem-quota" json:"enable-stats-cache-mem-quota"`
// The following items are deprecated. We need to keep them here temporarily
// to support the upgrade process. They can be removed in future.
// CommitterConcurrency, RunAutoAnalyze unused since bootstrap v90
CommitterConcurrency int `toml:"committer-concurrency" json:"committer-concurrency"`
RunAutoAnalyze bool `toml:"run-auto-analyze" json:"run-auto-analyze"`
// ForcePriority, MemoryUsageAlarmRatio are deprecated.
ForcePriority string `toml:"force-priority" json:"force-priority"`
MemoryUsageAlarmRatio float64 `toml:"memory-usage-alarm-ratio" json:"memory-usage-alarm-ratio"`
EnableLoadFMSketch bool `toml:"enable-load-fmsketch" json:"enable-load-fmsketch"`
}
// PlanCache is the PlanCache section of the config.
type PlanCache struct {
Enabled bool `toml:"enabled" json:"enabled"`
Capacity uint `toml:"capacity" json:"capacity"`
Shards uint `toml:"shards" json:"shards"`
}
// PreparedPlanCache is the PreparedPlanCache section of the config.
type PreparedPlanCache struct {
Enabled bool `toml:"enabled" json:"enabled"`
Capacity uint `toml:"capacity" json:"capacity"`
MemoryGuardRatio float64 `toml:"memory-guard-ratio" json:"memory-guard-ratio"`
}
// OpenTracing is the opentracing section of the config.
type OpenTracing struct {
Enable bool `toml:"enable" json:"enable"`
RPCMetrics bool `toml:"rpc-metrics" json:"rpc-metrics"`
Sampler OpenTracingSampler `toml:"sampler" json:"sampler"`
Reporter OpenTracingReporter `toml:"reporter" json:"reporter"`
}
// OpenTracingSampler is the config for opentracing sampler.
// See https://godoc.org/github.com/uber/jaeger-client-go/config#SamplerConfig
type OpenTracingSampler struct {
Type string `toml:"type" json:"type"`
Param float64 `toml:"param" json:"param"`
SamplingServerURL string `toml:"sampling-server-url" json:"sampling-server-url"`
MaxOperations int `toml:"max-operations" json:"max-operations"`
SamplingRefreshInterval time.Duration `toml:"sampling-refresh-interval" json:"sampling-refresh-interval"`
}
// OpenTracingReporter is the config for opentracing reporter.
// See https://godoc.org/github.com/uber/jaeger-client-go/config#ReporterConfig
type OpenTracingReporter struct {
QueueSize int `toml:"queue-size" json:"queue-size"`
BufferFlushInterval time.Duration `toml:"buffer-flush-interval" json:"buffer-flush-interval"`
LogSpans bool `toml:"log-spans" json:"log-spans"`
LocalAgentHostPort string `toml:"local-agent-host-port" json:"local-agent-host-port"`
}
// ProxyProtocol is the PROXY protocol section of the config.
type ProxyProtocol struct {
// PROXY protocol acceptable client networks.
// Empty string means disable PROXY protocol,
// * means all networks.
Networks string `toml:"networks" json:"networks"`
// PROXY protocol header read timeout, Unit is second.
HeaderTimeout uint `toml:"header-timeout" json:"header-timeout"`
}
// Binlog is the config for binlog.
type Binlog struct {
Enable bool `toml:"enable" json:"enable"`
	// If IgnoreError is true, TiDB would ignore any error encountered while
	// writing binlog.
IgnoreError bool `toml:"ignore-error" json:"ignore-error"`
WriteTimeout string `toml:"write-timeout" json:"write-timeout"`
	// Use a socket file to write binlog, for compatibility with the kafka version of tidb-binlog.
BinlogSocket string `toml:"binlog-socket" json:"binlog-socket"`
// The strategy for sending binlog to pump, value can be "range" or "hash" now.
Strategy string `toml:"strategy" json:"strategy"`
}
// PessimisticTxn is the config for pessimistic transaction.
type PessimisticTxn struct {
// The max count of retry for a single statement in a pessimistic transaction.
MaxRetryCount uint `toml:"max-retry-count" json:"max-retry-count"`
// The max count of deadlock events that will be recorded in the information_schema.deadlocks table.
DeadlockHistoryCapacity uint `toml:"deadlock-history-capacity" json:"deadlock-history-capacity"`
// Whether retryable deadlocks (in-statement deadlocks) are collected to the information_schema.deadlocks table.
DeadlockHistoryCollectRetryable bool `toml:"deadlock-history-collect-retryable" json:"deadlock-history-collect-retryable"`
	// PessimisticAutoCommit, if true, means that auto-commit transactions will run in pessimistic mode.
PessimisticAutoCommit AtomicBool `toml:"pessimistic-auto-commit" json:"pessimistic-auto-commit"`
}
// TrxSummary is the config for transaction summary collecting.
type TrxSummary struct {
	// how many transaction summaries each TiDB node should keep in `transaction_summary`.
TransactionSummaryCapacity uint `toml:"transaction-summary-capacity" json:"transaction-summary-capacity"`
	// how long a transaction must run before it is recorded in `transaction_id_digest`.
TransactionIDDigestMinDuration uint `toml:"transaction-id-digest-min-duration" json:"transaction-id-digest-min-duration"`
}
// Valid validates TrxSummary configs
func (config *TrxSummary) Valid() error {
if config.TransactionSummaryCapacity > 5000 {
return errors.New("transaction-summary.transaction-summary-capacity should not be larger than 5000")
}
return nil
}
// DefaultPessimisticTxn returns the default configuration for PessimisticTxn
func DefaultPessimisticTxn() PessimisticTxn {
return PessimisticTxn{
MaxRetryCount: 256,
DeadlockHistoryCapacity: 10,
DeadlockHistoryCollectRetryable: false,
PessimisticAutoCommit: *NewAtomicBool(false),
}
}
// DefaultTrxSummary returns the default configuration for TrxSummary collector
func DefaultTrxSummary() TrxSummary {
// TrxSummary is not enabled by default before GA
return TrxSummary{
TransactionSummaryCapacity: 500,
TransactionIDDigestMinDuration: 2147483647,
}
}
// Plugin is the config for plugin
type Plugin struct {
Dir string `toml:"dir" json:"dir"`
Load string `toml:"load" json:"load"`
}
// TopSQL is the config for TopSQL.
type TopSQL struct {
// The TopSQL's data receiver address.
ReceiverAddress string `toml:"receiver-address" json:"receiver-address"`
}
// IsolationRead is the config for isolation read.
type IsolationRead struct {
// Engines filters tidb-server access paths by engine type.
Engines []string `toml:"engines" json:"engines"`
}
// Experimental controls the features that are still experimental: their semantics, interfaces are subject to change.
// Using these features in the production environment is not recommended.
type Experimental struct {
	// Whether to enable creating expression indexes.
AllowsExpressionIndex bool `toml:"allow-expression-index" json:"allow-expression-index"`
	// Whether to enable the new charset feature.
EnableNewCharset bool `toml:"enable-new-charset" json:"-"`
}
var defTiKVCfg = tikvcfg.DefaultConfig()
var defaultConf = Config{
Host: DefHost,
AdvertiseAddress: "",
Port: DefPort,
Socket: "/tmp/tidb-{Port}.sock",
Cors: "",
Store: "unistore",
Path: "/tmp/tidb",
RunDDL: true,
SplitTable: true,
Lease: "45s",
TokenLimit: 1000,
OOMUseTmpStorage: true,
TempDir: DefTempDir,
TempStorageQuota: -1,
TempStoragePath: tempStorageDirName,
MemQuotaQuery: 1 << 30,
OOMAction: "cancel",
EnableBatchDML: false,
CheckMb4ValueInUTF8: *NewAtomicBool(true),
MaxIndexLength: 3072,
IndexLimit: 64,
TableColumnCountLimit: 1017,
AlterPrimaryKey: false,
TreatOldVersionUTF8AsUTF8MB4: true,
EnableTableLock: false,
DelayCleanTableLock: 0,
SplitRegionMaxNum: 1000,
RepairMode: false,
RepairTableList: []string{},
MaxServerConnections: 0,
TxnLocalLatches: defTiKVCfg.TxnLocalLatches,
GracefulWaitBeforeShutdown: 0,
ServerVersion: "",
TiDBEdition: "",
VersionComment: "",
TiDBReleaseVersion: "",
Log: Log{
Level: "info",
Format: "text",
File: logutil.NewFileLogConfig(logutil.DefaultLogMaxSize),
SlowQueryFile: "tidb-slow.log",
SlowThreshold: logutil.DefaultSlowThreshold,
ExpensiveThreshold: 10000,
DisableErrorStack: nbUnset,
EnableErrorStack: nbUnset, // If both options are nbUnset, getDisableErrorStack() returns true
EnableTimestamp: nbUnset,
DisableTimestamp: nbUnset, // If both options are nbUnset, getDisableTimestamp() returns false
QueryLogMaxLen: logutil.DefaultQueryLogMaxLen,
RecordPlanInSlowLog: logutil.DefaultRecordPlanInSlowLog,
EnableSlowLog: *NewAtomicBool(logutil.DefaultTiDBEnableSlowLog),
},
Instance: Instance{
TiDBGeneralLog: false,
EnablePProfSQLCPU: false,
DDLSlowOprThreshold: DefDDLSlowOprThreshold,
ExpensiveQueryTimeThreshold: DefExpensiveQueryTimeThreshold,
EnableSlowLog: *NewAtomicBool(logutil.DefaultTiDBEnableSlowLog),
SlowThreshold: logutil.DefaultSlowThreshold,
RecordPlanInSlowLog: logutil.DefaultRecordPlanInSlowLog,
CheckMb4ValueInUTF8: *NewAtomicBool(true),
ForcePriority: "NO_PRIORITY",
MemoryUsageAlarmRatio: DefMemoryUsageAlarmRatio,
EnableCollectExecutionInfo: true,
PluginDir: "/data/deploy/plugin",
PluginLoad: "",
MaxConnections: 0,
TiDBEnableDDL: *NewAtomicBool(true),
},
Status: Status{
ReportStatus: true,
StatusHost: DefStatusHost,
StatusPort: DefStatusPort,
MetricsInterval: 15,
RecordQPSbyDB: false,
GRPCKeepAliveTime: 10,
GRPCKeepAliveTimeout: 3,
GRPCConcurrentStreams: 1024,
GRPCInitialWindowSize: 2 * 1024 * 1024,
GRPCMaxSendMsgSize: math.MaxInt32,
},
Performance: Performance{
MaxMemory: 0,
ServerMemoryQuota: 0,
MemoryUsageAlarmRatio: DefMemoryUsageAlarmRatio,
TCPKeepAlive: true,
TCPNoDelay: true,
CrossJoin: true,
StatsLease: "3s",
StmtCountLimit: 5000,
PseudoEstimateRatio: 0.8,
ForcePriority: "NO_PRIORITY",
BindInfoLease: "3s",
TxnEntrySizeLimit: DefTxnEntrySizeLimit,
TxnTotalSizeLimit: DefTxnTotalSizeLimit,
DistinctAggPushDown: false,
ProjectionPushDown: false,
CommitterConcurrency: defTiKVCfg.CommitterConcurrency,
MaxTxnTTL: defTiKVCfg.MaxTxnTTL, // 1hour
// TODO: set indexUsageSyncLease to 60s.
IndexUsageSyncLease: "0s",
GOGC: 100,
EnforceMPP: false,
PlanReplayerGCLease: "10m",
StatsLoadConcurrency: 5,
StatsLoadQueueSize: 1000,
EnableStatsCacheMemQuota: false,
RunAutoAnalyze: true,
EnableLoadFMSketch: false,
},
ProxyProtocol: ProxyProtocol{
Networks: "",
HeaderTimeout: 5,
},
PreparedPlanCache: PreparedPlanCache{
Enabled: true,
Capacity: 100,
MemoryGuardRatio: 0.1,
},
OpenTracing: OpenTracing{
Enable: false,
Sampler: OpenTracingSampler{
Type: "const",
Param: 1.0,
},
Reporter: OpenTracingReporter{},
},
PDClient: defTiKVCfg.PDClient,
TiKVClient: defTiKVCfg.TiKVClient,
Binlog: Binlog{
WriteTimeout: "15s",
Strategy: "range",
},
Plugin: Plugin{
Dir: "/data/deploy/plugin",
Load: "",
},
PessimisticTxn: DefaultPessimisticTxn(),
IsolationRead: IsolationRead{
Engines: []string{"tikv", "tiflash", "tidb"},
},
Experimental: Experimental{},
EnableCollectExecutionInfo: true,
EnableTelemetry: true,
Labels: make(map[string]string),
EnableGlobalIndex: false,
Security: Security{
SpilledFileEncryptionMethod: SpilledFileEncryptionMethodPlaintext,
EnableSEM: false,
AutoTLS: false,
RSAKeySize: 4096,
},
DeprecateIntegerDisplayWidth: false,
EnableEnumLengthLimit: true,
StoresRefreshInterval: defTiKVCfg.StoresRefreshInterval,
EnableForwarding: defTiKVCfg.EnableForwarding,
NewCollationsEnabledOnFirstBootstrap: true,
EnableGlobalKill: true,
TrxSummary: DefaultTrxSummary(),
}
var (
globalConf atomic.Value
)
// NewConfig creates a new config instance with default value.
func NewConfig() *Config {
conf := defaultConf
return &conf
}
// GetGlobalConfig returns the global configuration for this server.
// It should store configuration from command line and configuration file.
// Other parts of the system can read the global configuration using this function.
func GetGlobalConfig() *Config {
return globalConf.Load().(*Config)
}
// StoreGlobalConfig stores a new config to globalConf. It is mostly used in tests to avoid data races.
func StoreGlobalConfig(config *Config) {
globalConf.Store(config)
TikvConfigLock.Lock()
defer TikvConfigLock.Unlock()
cfg := *config.GetTiKVConfig()
tikvcfg.StoreGlobalConfig(&cfg)
}
// removedConfig contains items that are no longer supported.
// they might still be in the config struct to support import,
// but are not actively used.
var removedConfig = map[string]struct{}{
"pessimistic-txn.ttl": {},
"pessimistic-txn.enable": {},
"log.file.log-rotate": {},
"log.log-slow-query": {},
"txn-local-latches": {},
"txn-local-latches.enabled": {},
"txn-local-latches.capacity": {},
"performance.max-memory": {},
"max-txn-time-use": {},
"experimental.allow-auto-random": {},
"enable-redact-log": {}, // use variable tidb_redact_log instead
"enable-streaming": {},
"performance.mem-profile-interval": {},
"security.require-secure-transport": {},
"lower-case-table-names": {},
"stmt-summary": {},
"stmt-summary.enable": {},
"stmt-summary.enable-internal-query": {},
"stmt-summary.max-stmt-count": {},
"stmt-summary.max-sql-length": {},
"stmt-summary.refresh-interval": {},
"stmt-summary.history-size": {},
"enable-batch-dml": {}, // use tidb_enable_batch_dml
"mem-quota-query": {},
"log.query-log-max-len": {},
"performance.committer-concurrency": {},
"experimental.enable-global-kill": {},
"performance.run-auto-analyze": {}, //use tidb_enable_auto_analyze
// use tidb_enable_prepared_plan_cache, tidb_prepared_plan_cache_size and tidb_prepared_plan_cache_memory_guard_ratio
"prepared-plan-cache.enabled": {},
"prepared-plan-cache.capacity": {},
"prepared-plan-cache.memory-guard-ratio": {},
"oom-action": {},
"check-mb4-value-in-utf8": {}, // use tidb_check_mb4_value_in_utf8
"enable-collect-execution-info": {}, // use tidb_enable_collect_execution_info
"log.enable-slow-log": {}, // use tidb_enable_slow_log
"log.slow-threshold": {}, // use tidb_slow_log_threshold
"log.record-plan-in-slow-log": {}, // use tidb_record_plan_in_slow_log
"performance.force-priority": {}, // use tidb_force_priority
"performance.memory-usage-alarm-ratio": {}, // use tidb_memory_usage_alarm_ratio
"plugin.load": {}, // use plugin_load
"plugin.dir": {}, // use plugin_dir
"performance.feedback-probability": {}, // This feature is deprecated
"performance.query-feedback-limit": {},
"oom-use-tmp-storage": {}, // use tidb_enable_tmp_storage_on_oom
"max-server-connections": {}, // use sysvar max_connections
"run-ddl": {}, // use sysvar tidb_enable_ddl
}
// isAllRemovedConfigItems returns true if all the items that couldn't be validated
// belong to the list of removedConfig items.
func isAllRemovedConfigItems(items []string) bool {
for _, item := range items {
if _, ok := removedConfig[item]; !ok {
return false
}
}
return true
}
// InitializeConfig initializes the global config handler.
// The function enforceCmdArgs is used to merge the config file with command arguments:
// For example, if you start TiDB by the command "./tidb-server --port=3000", the port number should be
// overwritten to 3000 and the port number in the config file ignored.
func InitializeConfig(confPath string, configCheck, configStrict bool, enforceCmdArgs func(*Config)) {
cfg := GetGlobalConfig()
var err error
if confPath != "" {
if err = cfg.Load(confPath); err != nil {
// Unused config item error turns to warnings.
if tmp, ok := err.(*ErrConfigValidationFailed); ok {
// This block is to accommodate an interim situation where strict config checking
// is not the default behavior of TiDB. The warning message must be deferred until
// logging has been set up. After strict config checking is the default behavior,
// This should all be removed.
if (!configCheck && !configStrict) || isAllRemovedConfigItems(tmp.UndecodedItems) {
fmt.Fprintln(os.Stderr, err.Error())
err = nil
}
} else if tmp, ok := err.(*ErrConfigInstanceSection); ok {
logutil.BgLogger().Warn(tmp.Error())
err = nil
}
}
// In configCheck we always print out which options in the config file
// have been removed. This helps users upgrade better.
if configCheck {
err = cfg.RemovedVariableCheck(confPath)
if err != nil {
logutil.BgLogger().Warn(err.Error())
err = nil // treat as warning
}
}
terror.MustNil(err)
} else {
// configCheck should have the config file specified.
if configCheck {
fmt.Fprintln(os.Stderr, "config check failed", errors.New("no config file specified for config-check"))
os.Exit(1)
}
}
enforceCmdArgs(cfg)
if err := cfg.Valid(); err != nil {
if !filepath.IsAbs(confPath) {
if tmp, err := filepath.Abs(confPath); err == nil {
confPath = tmp
}
}
fmt.Fprintln(os.Stderr, "load config file:", confPath)
fmt.Fprintln(os.Stderr, "invalid config", err)
os.Exit(1)
}
if configCheck {
fmt.Println("config check successful")
os.Exit(0)
}
StoreGlobalConfig(cfg)
}
// RemovedVariableCheck checks if the config file contains any items
// which have been removed. These will not take effect any more.
func (c *Config) RemovedVariableCheck(confFile string) error {
metaData, err := toml.DecodeFile(confFile, c)
if err != nil {
return err
}
var removed []string
for item := range removedConfig {
// We need to split the string to account for the top level
// and the section hierarchy of config.
tmp := strings.Split(item, ".")
if len(tmp) == 2 && metaData.IsDefined(tmp[0], tmp[1]) {
removed = append(removed, item)
} else if len(tmp) == 1 && metaData.IsDefined(tmp[0]) {
removed = append(removed, item)
}
}
if len(removed) > 0 {
sort.Strings(removed) // deterministic for tests
return fmt.Errorf("The following configuration options are no longer supported in this version of TiDB. Check the release notes for more information: %s", strings.Join(removed, ", "))
}
return nil
}
// Load loads config options from a toml file.
func (c *Config) Load(confFile string) error {
metaData, err := toml.DecodeFile(confFile, c)
if c.TokenLimit == 0 {
c.TokenLimit = 1000
}
	// If any items in the conf file are not mapped into the Config struct, issue
// an error and stop the server from starting.
undecoded := metaData.Undecoded()
if len(undecoded) > 0 && err == nil {
var undecodedItems []string
for _, item := range undecoded {
undecodedItems = append(undecodedItems, item.String())
}
err = &ErrConfigValidationFailed{confFile, undecodedItems}
}
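	// Record options that appear both in their legacy section and in [instance] as conflicts,
	// and options that appear only in their legacy section as deprecated.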
for _, section := range sectionMovedToInstance {
newConflictSection := InstanceConfigSection{SectionName: section.SectionName, NameMappings: map[string]string{}}
newDeprecatedSection := InstanceConfigSection{SectionName: section.SectionName, NameMappings: map[string]string{}}
for oldName, newName := range section.NameMappings {
if section.SectionName == "" && metaData.IsDefined(oldName) ||
section.SectionName != "" && metaData.IsDefined(section.SectionName, oldName) {
if metaData.IsDefined("instance", newName) {
newConflictSection.NameMappings[oldName] = newName
} else {
newDeprecatedSection.NameMappings[oldName] = newName
}
}
}
if len(newConflictSection.NameMappings) > 0 {
ConflictOptions = append(ConflictOptions, newConflictSection)
}
if len(newDeprecatedSection.NameMappings) > 0 {
DeprecatedOptions = append(DeprecatedOptions, newDeprecatedSection)
}
}
if len(ConflictOptions) > 0 || len(DeprecatedOptions) > 0 {
// Give a warning that the 'instance' section should be used.
err = &ErrConfigInstanceSection{confFile, &ConflictOptions, &DeprecatedOptions}
}
return err
}
// Valid checks if this config is valid.
func (c *Config) Valid() error {
if c.Log.EnableErrorStack == c.Log.DisableErrorStack && c.Log.EnableErrorStack != nbUnset {
logutil.BgLogger().Warn(fmt.Sprintf("\"enable-error-stack\" (%v) conflicts \"disable-error-stack\" (%v). \"disable-error-stack\" is deprecated, please use \"enable-error-stack\" instead. disable-error-stack is ignored.", c.Log.EnableErrorStack, c.Log.DisableErrorStack))
// if two options conflict, we will use the value of EnableErrorStack
c.Log.DisableErrorStack = nbUnset
}
if c.Log.EnableTimestamp == c.Log.DisableTimestamp && c.Log.EnableTimestamp != nbUnset {
logutil.BgLogger().Warn(fmt.Sprintf("\"enable-timestamp\" (%v) conflicts \"disable-timestamp\" (%v). \"disable-timestamp\" is deprecated, please use \"enable-timestamp\" instead", c.Log.EnableTimestamp, c.Log.DisableTimestamp))
// if two options conflict, we will use the value of EnableTimestamp
c.Log.DisableTimestamp = nbUnset
}
if c.Security.SkipGrantTable && !hasRootPrivilege() {
return fmt.Errorf("TiDB run with skip-grant-table need root privilege")
}
if !ValidStorage[c.Store] {
nameList := make([]string, 0, len(ValidStorage))
for k, v := range ValidStorage {
if v {
nameList = append(nameList, k)
}
}
return fmt.Errorf("invalid store=%s, valid storages=%v", c.Store, nameList)
}
if c.Store == "mocktikv" && !c.Instance.TiDBEnableDDL.Load() {
return fmt.Errorf("can't disable DDL on mocktikv")
}
if c.MaxIndexLength < DefMaxIndexLength || c.MaxIndexLength > DefMaxOfMaxIndexLength {
return fmt.Errorf("max-index-length should be [%d, %d]", DefMaxIndexLength, DefMaxOfMaxIndexLength)
}
if c.IndexLimit < DefIndexLimit || c.IndexLimit > DefMaxOfIndexLimit {
return fmt.Errorf("index-limit should be [%d, %d]", DefIndexLimit, DefMaxOfIndexLimit)
}
if c.Log.File.MaxSize > MaxLogFileSize {
return fmt.Errorf("invalid max log file size=%v which is larger than max=%v", c.Log.File.MaxSize, MaxLogFileSize)
}
if c.TableColumnCountLimit < DefTableColumnCountLimit || c.TableColumnCountLimit > DefMaxOfTableColumnCountLimit {
return fmt.Errorf("table-column-limit should be [%d, %d]", DefIndexLimit, DefMaxOfTableColumnCountLimit)
}
// txn-local-latches
if err := c.TxnLocalLatches.Valid(); err != nil {
return err
}
// For tikvclient.
if err := c.TiKVClient.Valid(); err != nil {
return err
}
if err := c.TrxSummary.Valid(); err != nil {
return err
}
if c.Performance.TxnTotalSizeLimit > 1<<40 {
return fmt.Errorf("txn-total-size-limit should be less than %d", 1<<40)
}
if c.Instance.MemoryUsageAlarmRatio > 1 || c.Instance.MemoryUsageAlarmRatio < 0 {
return fmt.Errorf("tidb_memory_usage_alarm_ratio in [Instance] must be greater than or equal to 0 and less than or equal to 1")
}
if len(c.IsolationRead.Engines) < 1 {
return fmt.Errorf("the number of [isolation-read]engines for isolation read should be at least 1")
}
for _, engine := range c.IsolationRead.Engines {
if engine != "tidb" && engine != "tikv" && engine != "tiflash" {
return fmt.Errorf("type of [isolation-read]engines can't be %v should be one of tidb or tikv or tiflash", engine)
}
}
// test security
c.Security.SpilledFileEncryptionMethod = strings.ToLower(c.Security.SpilledFileEncryptionMethod)
switch c.Security.SpilledFileEncryptionMethod {
case SpilledFileEncryptionMethodPlaintext, SpilledFileEncryptionMethodAES128CTR:
default:
return fmt.Errorf("unsupported [security]spilled-file-encryption-method %v, TiDB only supports [%v, %v]",
c.Security.SpilledFileEncryptionMethod, SpilledFileEncryptionMethodPlaintext, SpilledFileEncryptionMethodAES128CTR)
}
// check stats load config
if c.Performance.StatsLoadConcurrency < DefStatsLoadConcurrencyLimit || c.Performance.StatsLoadConcurrency > DefMaxOfStatsLoadConcurrencyLimit {
return fmt.Errorf("stats-load-concurrency should be [%d, %d]", DefStatsLoadConcurrencyLimit, DefMaxOfStatsLoadConcurrencyLimit)
}
if c.Performance.StatsLoadQueueSize < DefStatsLoadQueueSizeLimit || c.Performance.StatsLoadQueueSize > DefMaxOfStatsLoadQueueSizeLimit {
return fmt.Errorf("stats-load-queue-size should be [%d, %d]", DefStatsLoadQueueSizeLimit, DefMaxOfStatsLoadQueueSizeLimit)
}
// test log level
l := zap.NewAtomicLevel()
return l.UnmarshalText([]byte(c.Log.Level))
}
// UpdateGlobal updates the global config, and provide a restore function that can be used to restore to the original.
func UpdateGlobal(f func(conf *Config)) {
g := GetGlobalConfig()
newConf := *g
f(&newConf)
StoreGlobalConfig(&newConf)
}
// RestoreFunc gets a function that restore the config to the current value.
func RestoreFunc() (restore func()) {
g := GetGlobalConfig()
return func() {
StoreGlobalConfig(g)
}
}
func hasRootPrivilege() bool {
return os.Geteuid() == 0
}
// TableLockEnabled checks whether the table lock feature is enabled.
func TableLockEnabled() bool {
return GetGlobalConfig().EnableTableLock
}
// TableLockDelayClean returns the delay before cleaning up table locks.
var TableLockDelayClean = func() uint64 {
return GetGlobalConfig().DelayCleanTableLock
}
// ToLogConfig converts *Log to *logutil.LogConfig.
func (l *Log) ToLogConfig() *logutil.LogConfig {
return logutil.NewLogConfig(l.Level, l.Format, l.SlowQueryFile, l.File, l.getDisableTimestamp(), func(config *zaplog.Config) { config.DisableErrorVerbose = l.getDisableErrorStack() })
}
// ToTracingConfig converts *OpenTracing to *tracing.Configuration.
func (t *OpenTracing) ToTracingConfig() *tracing.Configuration {
ret := &tracing.Configuration{
Disabled: !t.Enable,
RPCMetrics: t.RPCMetrics,
Reporter: &tracing.ReporterConfig{},
Sampler: &tracing.SamplerConfig{},
}
ret.Reporter.QueueSize = t.Reporter.QueueSize
ret.Reporter.BufferFlushInterval = t.Reporter.BufferFlushInterval
ret.Reporter.LogSpans = t.Reporter.LogSpans
ret.Reporter.LocalAgentHostPort = t.Reporter.LocalAgentHostPort
ret.Sampler.Type = t.Sampler.Type
ret.Sampler.Param = t.Sampler.Param
ret.Sampler.SamplingServerURL = t.Sampler.SamplingServerURL
ret.Sampler.MaxOperations = t.Sampler.MaxOperations
ret.Sampler.SamplingRefreshInterval = t.Sampler.SamplingRefreshInterval
return ret
}
func init() {
initByLDFlags(versioninfo.TiDBEdition, checkBeforeDropLDFlag)
}
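// initByLDFlags applies build-time overrides: non-community editions disable telemetry by
// default, and the check-before-drop flag turns on `admin check table` before `drop table`.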
func initByLDFlags(edition, checkBeforeDropLDFlag string) {
if edition != versioninfo.CommunityEdition {
defaultConf.EnableTelemetry = false
}
conf := defaultConf
StoreGlobalConfig(&conf)
if checkBeforeDropLDFlag == "1" {
CheckTableBeforeDrop = true
}
}
// hideConfig is used to filter a single line of config for hiding.
var hideConfig = []string{
"performance.index-usage-sync-lease",
}
// GetJSONConfig returns the config as JSON with hidden items removed.
// It replaces the earlier HideConfig(), which used strings.Split() in
// a way that didn't work for similarly named items (like enable).
func GetJSONConfig() (string, error) {
j, err := json.Marshal(GetGlobalConfig())
if err != nil {
return "", err
}
jsonValue := make(map[string]interface{})
err = json.Unmarshal(j, &jsonValue)
if err != nil {
return "", err
}
removedPaths := make([]string, 0, len(removedConfig)+len(hideConfig))
for removedItem := range removedConfig {
removedPaths = append(removedPaths, removedItem)
}
removedPaths = append(removedPaths, hideConfig...)
for _, path := range removedPaths {
s := strings.Split(path, ".")
curValue := jsonValue
for i, key := range s {
if i == len(s)-1 {
delete(curValue, key)
}
if curValue[key] != nil {
mapValue, ok := curValue[key].(map[string]interface{})
if !ok {
break
}
curValue = mapValue
} else {
break
}
}
}
buf, err := json.Marshal(jsonValue)
if err != nil {
return "", err
}
var resBuf bytes.Buffer
if err = json.Indent(&resBuf, buf, "", "\t"); err != nil {
return "", err
}
return resBuf.String(), nil
}
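// Illustrative call (a sketch): the returned JSON is the indented global config
// with every dotted path in removedConfig and hideConfig deleted, so e.g.
// "index-usage-sync-lease" no longer appears in the output.
//
//	js, err := GetJSONConfig()
//	if err == nil {
//		hidden := strings.Contains(js, "index-usage-sync-lease") // false: the item was removed
//		_ = hidden
//	}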
// ContainHiddenConfig checks whether it contains the configuration that needs to be hidden.
func ContainHiddenConfig(s string) bool {
s = strings.ToLower(s)
for _, hc := range hideConfig {
if strings.Contains(s, hc) {
return true
}
}
for dc := range removedConfig {
if strings.Contains(s, dc) {
return true
}
}
return false
}
| config/config.go | 1 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.9975696206092834,
0.023100337013602257,
0.00016226706793531775,
0.00017159804701805115,
0.14028610289096832
] |
{
"id": 0,
"code_window": [
"\t// EnableCollectExecutionInfo enables the TiDB to collect execution info.\n",
"\tEnableCollectExecutionInfo bool `toml:\"tidb_enable_collect_execution_info\" json:\"tidb_enable_collect_execution_info\"`\n",
"\tPluginDir string `toml:\"plugin_dir\" json:\"plugin_dir\"`\n",
"\tPluginLoad string `toml:\"plugin_load\" json:\"plugin_load\"`\n",
"\t// MaxConnections is the maximum permitted number of simultaneous client connections.\n",
"\tMaxConnections uint32 `toml:\"max_connections\" json:\"max_connections\"`\n",
"\tTiDBEnableDDL AtomicBool `toml:\"tidb_enable_ddl\" json:\"tidb_enable_ddl\"`\n",
"}\n",
"\n",
"func (l *Log) getDisableTimestamp() bool {\n",
"\tif l.EnableTimestamp == nbUnset && l.DisableTimestamp == nbUnset {\n",
"\t\treturn false\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tMaxConnections uint32 `toml:\"max_connections\" json:\"max_connections\"`\n",
"\tTiDBEnableDDL AtomicBool `toml:\"tidb_enable_ddl\" json:\"tidb_enable_ddl\"`\n",
"\tTiDBRCReadCheckTS bool `toml:\"tidb_rc_read_check_ts\" json:\"tidb_rc_read_check_ts\"`\n"
],
"file_path": "config/config.go",
"type": "replace",
"edit_start_line_idx": 490
} | [lightning]
region-concurrency = 1
[checkpoint]
enable = true
schema = "checkpoint_test_parquet"
driver = "mysql"
keep-after-success = true
[tikv-importer]
max-kv-pairs = 32
| br/tests/lightning_checkpoint_parquet/config.toml | 0 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.0001708696800051257,
0.00017015999765135348,
0.00016945032984949648,
0.00017015999765135348,
7.096750778146088e-7
] |
{
"id": 0,
"code_window": [
"\t// EnableCollectExecutionInfo enables the TiDB to collect execution info.\n",
"\tEnableCollectExecutionInfo bool `toml:\"tidb_enable_collect_execution_info\" json:\"tidb_enable_collect_execution_info\"`\n",
"\tPluginDir string `toml:\"plugin_dir\" json:\"plugin_dir\"`\n",
"\tPluginLoad string `toml:\"plugin_load\" json:\"plugin_load\"`\n",
"\t// MaxConnections is the maximum permitted number of simultaneous client connections.\n",
"\tMaxConnections uint32 `toml:\"max_connections\" json:\"max_connections\"`\n",
"\tTiDBEnableDDL AtomicBool `toml:\"tidb_enable_ddl\" json:\"tidb_enable_ddl\"`\n",
"}\n",
"\n",
"func (l *Log) getDisableTimestamp() bool {\n",
"\tif l.EnableTimestamp == nbUnset && l.DisableTimestamp == nbUnset {\n",
"\t\treturn false\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tMaxConnections uint32 `toml:\"max_connections\" json:\"max_connections\"`\n",
"\tTiDBEnableDDL AtomicBool `toml:\"tidb_enable_ddl\" json:\"tidb_enable_ddl\"`\n",
"\tTiDBRCReadCheckTS bool `toml:\"tidb_rc_read_check_ts\" json:\"tidb_rc_read_check_ts\"`\n"
],
"file_path": "config/config.go",
"type": "replace",
"edit_start_line_idx": 490
} | load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "util",
srcs = [
"cpu_posix.go",
"cpu_windows.go",
"errors.go",
"etcd.go",
"gogc.go",
"misc.go",
"prefix_helper.go",
"printer.go",
"processinfo.go",
"security.go",
"tso.go",
"urls.go",
"util.go",
"wait_group_wrapper.go",
],
importpath = "github.com/pingcap/tidb/util",
visibility = ["//visibility:public"],
deps = [
"//config",
"//kv",
"//metrics",
"//parser",
"//parser/model",
"//parser/mysql",
"//parser/terror",
"//session/txninfo",
"//sessionctx/stmtctx",
"//util/collate",
"//util/execdetails",
"//util/logutil",
"//util/tls",
"@com_github_pingcap_errors//:errors",
"@com_github_pingcap_failpoint//:failpoint",
"@com_github_pingcap_log//:log",
"@com_github_pingcap_tipb//go-tipb",
"@com_github_tikv_client_go_v2//oracle",
"@io_etcd_go_etcd_client_v3//:client",
"@io_etcd_go_etcd_client_v3//concurrency",
"@org_golang_google_grpc//:grpc",
"@org_uber_go_zap//:zap",
],
)
go_test(
name = "util_test",
timeout = "short",
srcs = [
"errors_test.go",
"main_test.go",
"misc_test.go",
"prefix_helper_test.go",
"processinfo_test.go",
"security_test.go",
"urls_test.go",
"wait_group_wrapper_test.go",
],
data = glob(["tls_test/**"]),
embed = [":util"],
flaky = True,
shard_count = 50,
deps = [
"//config",
"//kv",
"//parser",
"//parser/model",
"//parser/mysql",
"//parser/terror",
"//sessionctx/stmtctx",
"//store/mockstore",
"//testkit/testsetup",
"//types",
"//util/fastrand",
"//util/memory",
"@com_github_pingcap_errors//:errors",
"@com_github_stretchr_testify//assert",
"@com_github_stretchr_testify//require",
"@org_uber_go_atomic//:atomic",
"@org_uber_go_goleak//:goleak",
],
)
| util/BUILD.bazel | 0 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.0001687001931713894,
0.00016653951024636626,
0.00016498361947014928,
0.0001661231799516827,
0.000001173918121821771
] |
{
"id": 0,
"code_window": [
"\t// EnableCollectExecutionInfo enables the TiDB to collect execution info.\n",
"\tEnableCollectExecutionInfo bool `toml:\"tidb_enable_collect_execution_info\" json:\"tidb_enable_collect_execution_info\"`\n",
"\tPluginDir string `toml:\"plugin_dir\" json:\"plugin_dir\"`\n",
"\tPluginLoad string `toml:\"plugin_load\" json:\"plugin_load\"`\n",
"\t// MaxConnections is the maximum permitted number of simultaneous client connections.\n",
"\tMaxConnections uint32 `toml:\"max_connections\" json:\"max_connections\"`\n",
"\tTiDBEnableDDL AtomicBool `toml:\"tidb_enable_ddl\" json:\"tidb_enable_ddl\"`\n",
"}\n",
"\n",
"func (l *Log) getDisableTimestamp() bool {\n",
"\tif l.EnableTimestamp == nbUnset && l.DisableTimestamp == nbUnset {\n",
"\t\treturn false\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tMaxConnections uint32 `toml:\"max_connections\" json:\"max_connections\"`\n",
"\tTiDBEnableDDL AtomicBool `toml:\"tidb_enable_ddl\" json:\"tidb_enable_ddl\"`\n",
"\tTiDBRCReadCheckTS bool `toml:\"tidb_rc_read_check_ts\" json:\"tidb_rc_read_check_ts\"`\n"
],
"file_path": "config/config.go",
"type": "replace",
"edit_start_line_idx": 490
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package expression
import (
"sync/atomic"
"testing"
"time"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/mock"
"github.com/stretchr/testify/require"
)
var vecBuiltinMiscellaneousCases = map[string][]vecExprBenchCase{
ast.Inet6Aton: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&ipv6StrGener{newDefaultRandGen()}}},
},
ast.IsIPv6: {
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}},
},
ast.Sleep: {
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETReal}, geners: []dataGenerator{
newSelectRealGener([]float64{0, 0.000001}),
}},
},
ast.UUID: {},
ast.Inet6Ntoa: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{
newSelectStringGener(
[]string{
"192.168.0.1",
"2001:db8::68", // ipv6
},
)}},
},
ast.InetAton: {
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&ipv4StrGener{newDefaultRandGen()}}},
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{
newSelectStringGener(
[]string{
"11.11.11.11",
"255.255.255.255",
"127",
".122",
".123.123",
"127.255",
"127.2.1",
},
)}},
},
ast.IsIPv4Mapped: {
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&ipv4MappedByteGener{newDefaultRandGen()}}},
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&ipv6ByteGener{newDefaultRandGen()}}},
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&ipv4ByteGener{newDefaultRandGen()}}},
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{newDefaultGener(1.0, types.ETString)}},
},
ast.IsIPv4Compat: {
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&ipv4CompatByteGener{newDefaultRandGen()}}},
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&ipv6ByteGener{newDefaultRandGen()}}},
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&ipv4ByteGener{newDefaultRandGen()}}},
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{newDefaultGener(1.0, types.ETString)}},
},
ast.InetNtoa: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETInt}},
},
ast.IsIPv4: {
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}},
},
ast.AnyValue: {
{retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETDuration}},
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt}},
{retEvalType: types.ETDecimal, childrenTypes: []types.EvalType{types.ETDecimal}},
{retEvalType: types.ETTimestamp, childrenTypes: []types.EvalType{types.ETTimestamp}},
{retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal}},
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}},
{retEvalType: types.ETJson, childrenTypes: []types.EvalType{types.ETJson}},
},
ast.NameConst: {
{retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString, types.ETDuration}},
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETString}},
{retEvalType: types.ETDecimal, childrenTypes: []types.EvalType{types.ETString, types.ETDecimal}},
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETInt}},
{retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETString, types.ETReal}},
{retEvalType: types.ETJson, childrenTypes: []types.EvalType{types.ETString, types.ETJson}},
{retEvalType: types.ETTimestamp, childrenTypes: []types.EvalType{types.ETString, types.ETTimestamp}},
},
ast.UUIDToBin: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&uuidStrGener{newDefaultRandGen()}}},
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETInt}, geners: []dataGenerator{&uuidStrGener{newDefaultRandGen()}}},
},
ast.BinToUUID: {
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&uuidBinGener{newDefaultRandGen()}}},
{retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETInt}, geners: []dataGenerator{&uuidBinGener{newDefaultRandGen()}}},
},
ast.IsUUID: {
{retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&uuidStrGener{newDefaultRandGen()}}},
},
}
func TestVectorizedBuiltinMiscellaneousEvalOneVec(t *testing.T) {
testVectorizedEvalOneVec(t, vecBuiltinMiscellaneousCases)
}
func TestVectorizedBuiltinMiscellaneousFunc(t *testing.T) {
testVectorizedBuiltinFunc(t, vecBuiltinMiscellaneousCases)
}
func BenchmarkVectorizedBuiltinMiscellaneousEvalOneVec(b *testing.B) {
benchmarkVectorizedEvalOneVec(b, vecBuiltinMiscellaneousCases)
}
func BenchmarkVectorizedBuiltinMiscellaneousFunc(b *testing.B) {
benchmarkVectorizedBuiltinFunc(b, vecBuiltinMiscellaneousCases)
}
type counter struct {
count int
}
func (c *counter) add(diff int) int {
c.count += diff
return c.count
}
func TestSleepVectorized(t *testing.T) {
ctx := mock.NewContext()
sessVars := ctx.GetSessionVars()
fc := funcs[ast.Sleep]
ft := eType2FieldType(types.ETReal)
col0 := &Column{RetType: ft, Index: 0}
f, err := fc.getFunction(ctx, []Expression{col0})
require.NoError(t, err)
input := chunk.NewChunkWithCapacity([]*types.FieldType{ft}, 1024)
result := chunk.NewColumn(ft, 1024)
warnCnt := counter{}
	// non-strict mode
sessVars.StmtCtx.BadNullAsWarning = true
input.AppendFloat64(0, 1)
err = f.vecEvalInt(input, result)
require.NoError(t, err)
require.Equal(t, int64(0), result.GetInt64(0))
require.Equal(t, uint16(warnCnt.add(0)), sessVars.StmtCtx.WarningCount())
input.Reset()
input.AppendFloat64(0, -1)
err = f.vecEvalInt(input, result)
require.NoError(t, err)
require.Equal(t, int64(0), result.GetInt64(0))
require.Equal(t, uint16(warnCnt.add(1)), sessVars.StmtCtx.WarningCount())
input.Reset()
input.AppendNull(0)
err = f.vecEvalInt(input, result)
require.NoError(t, err)
require.Equal(t, int64(0), result.GetInt64(0))
require.Equal(t, uint16(warnCnt.add(1)), sessVars.StmtCtx.WarningCount())
input.Reset()
input.AppendNull(0)
input.AppendFloat64(0, 1)
input.AppendFloat64(0, -1)
err = f.vecEvalInt(input, result)
require.NoError(t, err)
require.Equal(t, int64(0), result.GetInt64(0))
require.Equal(t, int64(0), result.GetInt64(1))
require.Equal(t, int64(0), result.GetInt64(2))
require.Equal(t, uint16(warnCnt.add(2)), sessVars.StmtCtx.WarningCount())
	// for error cases under strict mode
sessVars.StmtCtx.BadNullAsWarning = false
input.Reset()
input.AppendNull(0)
err = f.vecEvalInt(input, result)
require.Error(t, err)
require.Equal(t, int64(0), result.GetInt64(0))
sessVars.StmtCtx.SetWarnings(nil)
input.Reset()
input.AppendFloat64(0, -2.5)
err = f.vecEvalInt(input, result)
require.Error(t, err)
require.Equal(t, int64(0), result.GetInt64(0))
	// strict mode
input.Reset()
input.AppendFloat64(0, 0.5)
start := time.Now()
err = f.vecEvalInt(input, result)
require.NoError(t, err)
require.Equal(t, int64(0), result.GetInt64(0))
sub := time.Since(start)
require.GreaterOrEqual(t, sub.Nanoseconds(), int64(0.5*1e9))
input.Reset()
input.AppendFloat64(0, 0.01)
input.AppendFloat64(0, 2)
input.AppendFloat64(0, 2)
start = time.Now()
go func() {
time.Sleep(1 * time.Second)
atomic.CompareAndSwapUint32(&ctx.GetSessionVars().Killed, 0, 1)
}()
err = f.vecEvalInt(input, result)
sub = time.Since(start)
require.NoError(t, err)
require.Equal(t, int64(0), result.GetInt64(0))
require.Equal(t, int64(1), result.GetInt64(1))
require.Equal(t, int64(1), result.GetInt64(2))
require.LessOrEqual(t, sub.Nanoseconds(), int64(2*1e9))
require.GreaterOrEqual(t, sub.Nanoseconds(), int64(1*1e9))
}
| expression/builtin_miscellaneous_vec_test.go | 0 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.00017483482952229679,
0.00016857092850841582,
0.0001635311054997146,
0.00016894310829229653,
0.0000025571146125002997
] |
{
"id": 2,
"code_window": [
"\t// CostModelVersion is a internal switch to indicates the Cost Model Version.\n",
"\tCostModelVersion int\n",
"\t// BatchPendingTiFlashCount shows the threshold of pending TiFlash tables when batch adding.\n",
"\tBatchPendingTiFlashCount int\n",
"\t// RcReadCheckTS indicates if ts check optimization is enabled for current session.\n",
"\tRcReadCheckTS bool\n",
"\t// RcWriteCheckTS indicates whether some special write statements don't get latest tso from PD at RC\n",
"\tRcWriteCheckTS bool\n",
"\t// RemoveOrderbyInSubquery indicates whether to remove ORDER BY in subquery.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/session.go",
"type": "replace",
"edit_start_line_idx": 1000
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"math"
"os"
"os/user"
"path/filepath"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/BurntSushi/toml"
"github.com/pingcap/errors"
zaplog "github.com/pingcap/log"
logbackupconf "github.com/pingcap/tidb/br/pkg/streamhelper/config"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/tikvutil"
"github.com/pingcap/tidb/util/versioninfo"
tikvcfg "github.com/tikv/client-go/v2/config"
tracing "github.com/uber/jaeger-client-go/config"
atomicutil "go.uber.org/atomic"
"go.uber.org/zap"
)
// Config number limitations
const (
MaxLogFileSize = 4096 // MB
// DefTxnEntrySizeLimit is the default value of TxnEntrySizeLimit.
DefTxnEntrySizeLimit = 6 * 1024 * 1024
	// DefTxnTotalSizeLimit is the default value of TxnTotalSizeLimit.
DefTxnTotalSizeLimit = 100 * 1024 * 1024
// DefMaxIndexLength is the maximum index length(in bytes). This value is consistent with MySQL.
DefMaxIndexLength = 3072
// DefMaxOfMaxIndexLength is the maximum index length(in bytes) for TiDB v3.0.7 and previous version.
DefMaxOfMaxIndexLength = 3072 * 4
// DefIndexLimit is the limitation of index on a single table. This value is consistent with MySQL.
DefIndexLimit = 64
// DefMaxOfIndexLimit is the maximum limitation of index on a single table for TiDB.
DefMaxOfIndexLimit = 64 * 8
// DefPort is the default port of TiDB
DefPort = 4000
// DefStatusPort is the default status port of TiDB
DefStatusPort = 10080
// DefHost is the default host of TiDB
DefHost = "0.0.0.0"
// DefStatusHost is the default status host of TiDB
DefStatusHost = "0.0.0.0"
	// DefTableColumnCountLimit is the limit on the number of columns in a table
DefTableColumnCountLimit = 1017
	// DefMaxOfTableColumnCountLimit is the maximum limit on the number of columns in a table
DefMaxOfTableColumnCountLimit = 4096
	// DefStatsLoadConcurrencyLimit is the limit on the concurrency of stats-load
DefStatsLoadConcurrencyLimit = 1
	// DefMaxOfStatsLoadConcurrencyLimit is the maximum limit on the concurrency of stats-load
DefMaxOfStatsLoadConcurrencyLimit = 128
	// DefStatsLoadQueueSizeLimit is the limit on the size of the stats-load request queue
DefStatsLoadQueueSizeLimit = 1
	// DefMaxOfStatsLoadQueueSizeLimit is the maximum limit on the size of the stats-load request queue
DefMaxOfStatsLoadQueueSizeLimit = 100000
	// DefDDLSlowOprThreshold is the default threshold above which DDL operations are logged as slow.
DefDDLSlowOprThreshold = 300
// DefExpensiveQueryTimeThreshold indicates the time threshold of expensive query.
DefExpensiveQueryTimeThreshold = 60
	// DefMemoryUsageAlarmRatio is the default memory usage ratio of the tidb-server instance that triggers an alarm when exceeded.
DefMemoryUsageAlarmRatio = 0.8
// DefTempDir is the default temporary directory path for TiDB.
DefTempDir = "/tmp/tidb"
)
// Valid config maps
var (
ValidStorage = map[string]bool{
"mocktikv": true,
"tikv": true,
"unistore": true,
}
// CheckTableBeforeDrop enable to execute `admin check table` before `drop table`.
CheckTableBeforeDrop = false
// checkBeforeDropLDFlag is a go build flag.
checkBeforeDropLDFlag = "None"
	// tempStorageDirName is the default temporary storage dir name, derived by base64-encoding the string `host:port/statusHost:statusPort`
tempStorageDirName = encodeDefTempStorageDir(os.TempDir(), DefHost, DefStatusHost, DefPort, DefStatusPort)
)
// InstanceConfigSection indicates a config section that has options moved to the [instance] section.
type InstanceConfigSection struct {
// SectionName indicates the origin section name.
SectionName string
// NameMappings maps the origin name to the name in [instance].
NameMappings map[string]string
}
var (
// sectionMovedToInstance records all config section and options that should be moved to [instance].
sectionMovedToInstance = []InstanceConfigSection{
{
"",
map[string]string{
"check-mb4-value-in-utf8": "tidb_check_mb4_value_in_utf8",
"enable-collect-execution-info": "tidb_enable_collect_execution_info",
"max-server-connections": "max_connections",
"run-ddl": "tidb_enable_ddl",
},
},
{
"log",
map[string]string{
"enable-slow-log": "tidb_enable_slow_log",
"slow-threshold": "tidb_slow_log_threshold",
"record-plan-in-slow-log": "tidb_record_plan_in_slow_log",
},
},
{
"performance",
map[string]string{
"force-priority": "tidb_force_priority",
"memory-usage-alarm-ratio": "tidb_memory_usage_alarm_ratio",
},
},
{
"plugin",
map[string]string{
"load": "plugin_load",
"dir": "plugin_dir",
},
},
}
	// ConflictOptions indicates the conflicting config options that exist in both the [instance] section and other sections of the config file.
ConflictOptions []InstanceConfigSection
	// DeprecatedOptions indicates the config options that exist in some other sections of the config file.
	// They should be moved to the [instance] section.
DeprecatedOptions []InstanceConfigSection
// TikvConfigLock protects against concurrent tikv config refresh
TikvConfigLock sync.Mutex
)
// Config contains configuration options.
type Config struct {
Host string `toml:"host" json:"host"`
AdvertiseAddress string `toml:"advertise-address" json:"advertise-address"`
Port uint `toml:"port" json:"port"`
Cors string `toml:"cors" json:"cors"`
Store string `toml:"store" json:"store"`
Path string `toml:"path" json:"path"`
Socket string `toml:"socket" json:"socket"`
Lease string `toml:"lease" json:"lease"`
SplitTable bool `toml:"split-table" json:"split-table"`
TokenLimit uint `toml:"token-limit" json:"token-limit"`
TempDir string `toml:"temp-dir" json:"temp-dir"`
TempStoragePath string `toml:"tmp-storage-path" json:"tmp-storage-path"`
	// TempStorageQuota describes the temporary storage quota for the query executor when TiDBEnableTmpStorageOnOOM is enabled.
	// If the quota exceeds the capacity of the TempStoragePath, the tidb-server would exit with a fatal error.
TempStorageQuota int64 `toml:"tmp-storage-quota" json:"tmp-storage-quota"` // Bytes
TxnLocalLatches tikvcfg.TxnLocalLatches `toml:"-" json:"-"`
ServerVersion string `toml:"server-version" json:"server-version"`
VersionComment string `toml:"version-comment" json:"version-comment"`
TiDBEdition string `toml:"tidb-edition" json:"tidb-edition"`
TiDBReleaseVersion string `toml:"tidb-release-version" json:"tidb-release-version"`
Log Log `toml:"log" json:"log"`
Instance Instance `toml:"instance" json:"instance"`
Security Security `toml:"security" json:"security"`
Status Status `toml:"status" json:"status"`
Performance Performance `toml:"performance" json:"performance"`
PreparedPlanCache PreparedPlanCache `toml:"prepared-plan-cache" json:"prepared-plan-cache"`
OpenTracing OpenTracing `toml:"opentracing" json:"opentracing"`
ProxyProtocol ProxyProtocol `toml:"proxy-protocol" json:"proxy-protocol"`
PDClient tikvcfg.PDClient `toml:"pd-client" json:"pd-client"`
TiKVClient tikvcfg.TiKVClient `toml:"tikv-client" json:"tikv-client"`
Binlog Binlog `toml:"binlog" json:"binlog"`
CompatibleKillQuery bool `toml:"compatible-kill-query" json:"compatible-kill-query"`
PessimisticTxn PessimisticTxn `toml:"pessimistic-txn" json:"pessimistic-txn"`
MaxIndexLength int `toml:"max-index-length" json:"max-index-length"`
IndexLimit int `toml:"index-limit" json:"index-limit"`
TableColumnCountLimit uint32 `toml:"table-column-count-limit" json:"table-column-count-limit"`
GracefulWaitBeforeShutdown int `toml:"graceful-wait-before-shutdown" json:"graceful-wait-before-shutdown"`
// AlterPrimaryKey is used to control alter primary key feature.
AlterPrimaryKey bool `toml:"alter-primary-key" json:"alter-primary-key"`
	// TreatOldVersionUTF8AsUTF8MB4 is used to treat the UTF8 charset of old-version tables/columns as UTF8MB4. This is for compatibility.
	// Dynamic modification is currently not supported, because it would require reloading all old-version schemas.
TreatOldVersionUTF8AsUTF8MB4 bool `toml:"treat-old-version-utf8-as-utf8mb4" json:"treat-old-version-utf8-as-utf8mb4"`
	// EnableTableLock indicates whether to enable the table lock feature.
	// TODO: remove this after the table lock feature is stable.
EnableTableLock bool `toml:"enable-table-lock" json:"enable-table-lock"`
DelayCleanTableLock uint64 `toml:"delay-clean-table-lock" json:"delay-clean-table-lock"`
SplitRegionMaxNum uint64 `toml:"split-region-max-num" json:"split-region-max-num"`
TopSQL TopSQL `toml:"top-sql" json:"top-sql"`
// RepairMode indicates that the TiDB is in the repair mode for table meta.
RepairMode bool `toml:"repair-mode" json:"repair-mode"`
RepairTableList []string `toml:"repair-table-list" json:"repair-table-list"`
	// IsolationRead indicates which isolation level (engine and label) TiDB reads data from.
IsolationRead IsolationRead `toml:"isolation-read" json:"isolation-read"`
	// NewCollationsEnabledOnFirstBootstrap indicates if the new collations are enabled; it takes effect only when a TiDB cluster is bootstrapped for the first time.
NewCollationsEnabledOnFirstBootstrap bool `toml:"new_collations_enabled_on_first_bootstrap" json:"new_collations_enabled_on_first_bootstrap"`
// Experimental contains parameters for experimental features.
Experimental Experimental `toml:"experimental" json:"experimental"`
// SkipRegisterToDashboard tells TiDB don't register itself to the dashboard.
SkipRegisterToDashboard bool `toml:"skip-register-to-dashboard" json:"skip-register-to-dashboard"`
// EnableTelemetry enables the usage data report to PingCAP.
EnableTelemetry bool `toml:"enable-telemetry" json:"enable-telemetry"`
// Labels indicates the labels set for the tidb server. The labels describe some specific properties for the tidb
// server like `zone`/`rack`/`host`. Currently, labels won't affect the tidb server except for some special
// label keys. Now we have following special keys:
	// 1. 'group' is a special label key which should be automatically set by tidb-operator. We don't suggest
	// that users set 'group' in labels.
// 2. 'zone' is a special key that indicates the DC location of this tidb-server. If it is set, the value for this
// key will be the default value of the session variable `txn_scope` for this tidb-server.
Labels map[string]string `toml:"labels" json:"labels"`
// EnableGlobalIndex enables creating global index.
EnableGlobalIndex bool `toml:"enable-global-index" json:"enable-global-index"`
	// DeprecateIntegerDisplayWidth indicates whether to deprecate the max display width for integer types.
DeprecateIntegerDisplayWidth bool `toml:"deprecate-integer-display-length" json:"deprecate-integer-display-length"`
// EnableEnumLengthLimit indicates whether the enum/set element length is limited.
// According to MySQL 8.0 Refman:
// The maximum supported length of an individual SET element is M <= 255 and (M x w) <= 1020,
// where M is the element literal length and w is the number of bytes required for the maximum-length character in the character set.
// See https://dev.mysql.com/doc/refman/8.0/en/string-type-syntax.html for more details.
EnableEnumLengthLimit bool `toml:"enable-enum-length-limit" json:"enable-enum-length-limit"`
// StoresRefreshInterval indicates the interval of refreshing stores info, the unit is second.
StoresRefreshInterval uint64 `toml:"stores-refresh-interval" json:"stores-refresh-interval"`
// EnableTCP4Only enables net.Listen("tcp4",...)
	// Note: this makes LVS with TOA work so that TiDB can get the real client IP.
EnableTCP4Only bool `toml:"enable-tcp4-only" json:"enable-tcp4-only"`
// The client will forward the requests through the follower
// if one of the following conditions happens:
// 1. there is a network partition problem between TiDB and PD leader.
// 2. there is a network partition problem between TiDB and TiKV leader.
EnableForwarding bool `toml:"enable-forwarding" json:"enable-forwarding"`
	// MaxBallastObjectSize sets the max size of the ballast object; the unit is bytes.
// The default value is the smallest of the following two values: 2GB or
// one quarter of the total physical memory in the current system.
MaxBallastObjectSize int `toml:"max-ballast-object-size" json:"max-ballast-object-size"`
	// BallastObjectSize sets the initial size of the ballast object; the unit is bytes.
BallastObjectSize int `toml:"ballast-object-size" json:"ballast-object-size"`
// EnableGlobalKill indicates whether to enable global kill.
TrxSummary TrxSummary `toml:"transaction-summary" json:"transaction-summary"`
EnableGlobalKill bool `toml:"enable-global-kill" json:"enable-global-kill"`
// The following items are deprecated. We need to keep them here temporarily
// to support the upgrade process. They can be removed in future.
// EnableBatchDML, MemQuotaQuery, OOMAction unused since bootstrap v90
EnableBatchDML bool `toml:"enable-batch-dml" json:"enable-batch-dml"`
MemQuotaQuery int64 `toml:"mem-quota-query" json:"mem-quota-query"`
OOMAction string `toml:"oom-action" json:"oom-action"`
// OOMUseTmpStorage unused since bootstrap v93
OOMUseTmpStorage bool `toml:"oom-use-tmp-storage" json:"oom-use-tmp-storage"`
// These items are deprecated because they are turned into instance system variables.
CheckMb4ValueInUTF8 AtomicBool `toml:"check-mb4-value-in-utf8" json:"check-mb4-value-in-utf8"`
EnableCollectExecutionInfo bool `toml:"enable-collect-execution-info" json:"enable-collect-execution-info"`
Plugin Plugin `toml:"plugin" json:"plugin"`
MaxServerConnections uint32 `toml:"max-server-connections" json:"max-server-connections"`
RunDDL bool `toml:"run-ddl" json:"run-ddl"`
}
// UpdateTempStoragePath updates the `TempStoragePath` if port/statusPort was changed
// and `tmp-storage-path` was either not specified in conf.toml or specified as the default value.
func (c *Config) UpdateTempStoragePath() {
if c.TempStoragePath == tempStorageDirName {
c.TempStoragePath = encodeDefTempStorageDir(os.TempDir(), c.Host, c.Status.StatusHost, c.Port, c.Status.StatusPort)
} else {
c.TempStoragePath = encodeDefTempStorageDir(c.TempStoragePath, c.Host, c.Status.StatusHost, c.Port, c.Status.StatusPort)
}
}
// GetTiKVConfig returns configuration options from tikvcfg
func (c *Config) GetTiKVConfig() *tikvcfg.Config {
return &tikvcfg.Config{
CommitterConcurrency: int(tikvutil.CommitterConcurrency.Load()),
MaxTxnTTL: c.Performance.MaxTxnTTL,
TiKVClient: c.TiKVClient,
Security: c.Security.ClusterSecurity(),
PDClient: c.PDClient,
PessimisticTxn: tikvcfg.PessimisticTxn{MaxRetryCount: c.PessimisticTxn.MaxRetryCount},
TxnLocalLatches: c.TxnLocalLatches,
StoresRefreshInterval: c.StoresRefreshInterval,
OpenTracingEnable: c.OpenTracing.Enable,
Path: c.Path,
EnableForwarding: c.EnableForwarding,
TxnScope: c.Labels["zone"],
}
}
func encodeDefTempStorageDir(tempDir string, host, statusHost string, port, statusPort uint) string {
dirName := base64.URLEncoding.EncodeToString([]byte(fmt.Sprintf("%v:%v/%v:%v", host, port, statusHost, statusPort)))
osUID := ""
currentUser, err := user.Current()
if err == nil {
osUID = currentUser.Uid
}
return filepath.Join(tempDir, osUID+"_tidb", dirName, "tmp-storage")
}
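// Worked example (illustrative): with the defaults DefHost, DefStatusHost,
// DefPort and DefStatusPort, the encoded directory component is
// base64url("0.0.0.0:4000/0.0.0.0:10080"), so the resulting path has the shape
//
//	<tempDir>/<osUID>_tidb/<base64url("0.0.0.0:4000/0.0.0.0:10080")>/tmp-storage
//
// which is exactly how tempStorageDirName above is derived.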
// nullableBool defaults unset bool options to unset instead of false, which enables us to know whether the user has set two
// conflicting options at the same time.
type nullableBool struct {
IsValid bool
IsTrue bool
}
var (
nbUnset = nullableBool{false, false}
nbFalse = nullableBool{true, false}
nbTrue = nullableBool{true, true}
)
func (b *nullableBool) toBool() bool {
return b.IsValid && b.IsTrue
}
func (b nullableBool) MarshalJSON() ([]byte, error) {
switch b {
case nbTrue:
return json.Marshal(true)
case nbFalse:
return json.Marshal(false)
default:
return json.Marshal(nil)
}
}
func (b *nullableBool) UnmarshalText(text []byte) error {
str := string(text)
switch str {
case "", "null":
*b = nbUnset
return nil
case "true":
*b = nbTrue
case "false":
*b = nbFalse
default:
*b = nbUnset
return errors.New("Invalid value for bool type: " + str)
}
return nil
}
func (b nullableBool) MarshalText() ([]byte, error) {
if !b.IsValid {
return []byte(""), nil
}
if b.IsTrue {
return []byte("true"), nil
}
return []byte("false"), nil
}
func (b *nullableBool) UnmarshalJSON(data []byte) error {
var err error
var v interface{}
if err = json.Unmarshal(data, &v); err != nil {
return err
}
switch raw := v.(type) {
case bool:
*b = nullableBool{true, raw}
default:
*b = nbUnset
}
return err
}
// AtomicBool is a helper type for atomic operations on a boolean value.
type AtomicBool struct {
atomicutil.Bool
}
// NewAtomicBool creates an AtomicBool.
func NewAtomicBool(v bool) *AtomicBool {
return &AtomicBool{*atomicutil.NewBool(v)}
}
// MarshalText implements the encoding.TextMarshaler interface.
func (b AtomicBool) MarshalText() ([]byte, error) {
if b.Load() {
return []byte("true"), nil
}
return []byte("false"), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (b *AtomicBool) UnmarshalText(text []byte) error {
str := string(text)
switch str {
case "", "null":
*b = AtomicBool{*atomicutil.NewBool(false)}
case "true":
*b = AtomicBool{*atomicutil.NewBool(true)}
case "false":
*b = AtomicBool{*atomicutil.NewBool(false)}
default:
*b = AtomicBool{*atomicutil.NewBool(false)}
return errors.New("Invalid value for bool type: " + str)
}
return nil
}
// LogBackup is the config for log backup service.
// For now, it includes the embed advancer.
type LogBackup struct {
Advancer logbackupconf.Config `toml:"advancer" json:"advancer"`
Enabled bool `toml:"enabled" json:"enabled"`
}
// Log is the log section of config.
type Log struct {
// Log level.
Level string `toml:"level" json:"level"`
// Log format, one of json or text.
Format string `toml:"format" json:"format"`
// Disable automatic timestamps in output. Deprecated: use EnableTimestamp instead.
DisableTimestamp nullableBool `toml:"disable-timestamp" json:"disable-timestamp"`
// EnableTimestamp enables automatic timestamps in log output.
EnableTimestamp nullableBool `toml:"enable-timestamp" json:"enable-timestamp"`
// DisableErrorStack stops annotating logs with the full stack error
// message. Deprecated: use EnableErrorStack instead.
DisableErrorStack nullableBool `toml:"disable-error-stack" json:"disable-error-stack"`
// EnableErrorStack enables annotating logs with the full stack error
// message.
EnableErrorStack nullableBool `toml:"enable-error-stack" json:"enable-error-stack"`
// File log config.
File logutil.FileLogConfig `toml:"file" json:"file"`
SlowQueryFile string `toml:"slow-query-file" json:"slow-query-file"`
ExpensiveThreshold uint `toml:"expensive-threshold" json:"expensive-threshold"`
// The following items are deprecated. We need to keep them here temporarily
// to support the upgrade process. They can be removed in future.
// QueryLogMaxLen unused since bootstrap v90
QueryLogMaxLen uint64 `toml:"query-log-max-len" json:"query-log-max-len"`
// EnableSlowLog, SlowThreshold, RecordPlanInSlowLog are deprecated.
EnableSlowLog AtomicBool `toml:"enable-slow-log" json:"enable-slow-log"`
SlowThreshold uint64 `toml:"slow-threshold" json:"slow-threshold"`
RecordPlanInSlowLog uint32 `toml:"record-plan-in-slow-log" json:"record-plan-in-slow-log"`
}
// Instance is the section of instance scope system variables.
type Instance struct {
// These variables only exist in [instance] section.
// TiDBGeneralLog is used to log every query in the server in info level.
TiDBGeneralLog bool `toml:"tidb_general_log" json:"tidb_general_log"`
	// EnablePProfSQLCPU is used to add the sql label to pprof results.
EnablePProfSQLCPU bool `toml:"tidb_pprof_sql_cpu" json:"tidb_pprof_sql_cpu"`
	// DDLSlowOprThreshold is the threshold above which DDL operations are logged as slow.
DDLSlowOprThreshold uint32 `toml:"ddl_slow_threshold" json:"ddl_slow_threshold"`
// ExpensiveQueryTimeThreshold indicates the time threshold of expensive query.
ExpensiveQueryTimeThreshold uint64 `toml:"tidb_expensive_query_time_threshold" json:"tidb_expensive_query_time_threshold"`
// These variables exist in both 'instance' section and another place.
// The configuration in 'instance' section takes precedence.
EnableSlowLog AtomicBool `toml:"tidb_enable_slow_log" json:"tidb_enable_slow_log"`
SlowThreshold uint64 `toml:"tidb_slow_log_threshold" json:"tidb_slow_log_threshold"`
RecordPlanInSlowLog uint32 `toml:"tidb_record_plan_in_slow_log" json:"tidb_record_plan_in_slow_log"`
CheckMb4ValueInUTF8 AtomicBool `toml:"tidb_check_mb4_value_in_utf8" json:"tidb_check_mb4_value_in_utf8"`
ForcePriority string `toml:"tidb_force_priority" json:"tidb_force_priority"`
MemoryUsageAlarmRatio float64 `toml:"tidb_memory_usage_alarm_ratio" json:"tidb_memory_usage_alarm_ratio"`
// EnableCollectExecutionInfo enables the TiDB to collect execution info.
EnableCollectExecutionInfo bool `toml:"tidb_enable_collect_execution_info" json:"tidb_enable_collect_execution_info"`
PluginDir string `toml:"plugin_dir" json:"plugin_dir"`
PluginLoad string `toml:"plugin_load" json:"plugin_load"`
// MaxConnections is the maximum permitted number of simultaneous client connections.
MaxConnections uint32 `toml:"max_connections" json:"max_connections"`
TiDBEnableDDL AtomicBool `toml:"tidb_enable_ddl" json:"tidb_enable_ddl"`
}
func (l *Log) getDisableTimestamp() bool {
if l.EnableTimestamp == nbUnset && l.DisableTimestamp == nbUnset {
return false
}
if l.EnableTimestamp == nbUnset {
return l.DisableTimestamp.toBool()
}
return !l.EnableTimestamp.toBool()
}
func (l *Log) getDisableErrorStack() bool {
if l.EnableErrorStack == nbUnset && l.DisableErrorStack == nbUnset {
return true
}
if l.EnableErrorStack == nbUnset {
return l.DisableErrorStack.toBool()
}
return !l.EnableErrorStack.toBool()
}
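// How the paired enable/disable options resolve, as implemented above (a
// summary of existing behavior, not new behavior):
//
//	EnableTimestamp unset, DisableTimestamp unset -> timestamps on  (getDisableTimestamp() == false)
//	EnableTimestamp unset, DisableTimestamp set   -> DisableTimestamp wins
//	EnableTimestamp set                           -> EnableTimestamp wins, DisableTimestamp is ignored
//
//	EnableErrorStack unset, DisableErrorStack unset -> error stacks off (getDisableErrorStack() == true)
//	EnableErrorStack unset, DisableErrorStack set   -> DisableErrorStack wins
//	EnableErrorStack set                            -> EnableErrorStack wins, DisableErrorStack is ignored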
// The following constants represent the valid action configurations for Security.SpilledFileEncryptionMethod.
// "plaintext" means encryption is disabled.
// NOTE: Although the values are case-insensitive, we should use lower-case
// strings because the configuration value will be transformed to a lower-case
// string and compared with these constants in further usage.
const (
SpilledFileEncryptionMethodPlaintext = "plaintext"
SpilledFileEncryptionMethodAES128CTR = "aes128-ctr"
)
// Security is the security section of the config.
type Security struct {
SkipGrantTable bool `toml:"skip-grant-table" json:"skip-grant-table"`
SSLCA string `toml:"ssl-ca" json:"ssl-ca"`
SSLCert string `toml:"ssl-cert" json:"ssl-cert"`
SSLKey string `toml:"ssl-key" json:"ssl-key"`
ClusterSSLCA string `toml:"cluster-ssl-ca" json:"cluster-ssl-ca"`
ClusterSSLCert string `toml:"cluster-ssl-cert" json:"cluster-ssl-cert"`
ClusterSSLKey string `toml:"cluster-ssl-key" json:"cluster-ssl-key"`
ClusterVerifyCN []string `toml:"cluster-verify-cn" json:"cluster-verify-cn"`
// If set to "plaintext", the spilled files will not be encrypted.
SpilledFileEncryptionMethod string `toml:"spilled-file-encryption-method" json:"spilled-file-encryption-method"`
// EnableSEM prevents SUPER users from having full access.
EnableSEM bool `toml:"enable-sem" json:"enable-sem"`
// Allow automatic TLS certificate generation
AutoTLS bool `toml:"auto-tls" json:"auto-tls"`
MinTLSVersion string `toml:"tls-version" json:"tls-version"`
RSAKeySize int `toml:"rsa-key-size" json:"rsa-key-size"`
SecureBootstrap bool `toml:"secure-bootstrap" json:"secure-bootstrap"`
}
// The ErrConfigValidationFailed error is used so that external callers can do a type assertion
// to defer handling of this specific error when someone does not want strict type checking.
// This is needed only because logging hasn't been set up at the time we parse the config file.
// This should all be ripped out once strict config checking is made the default behavior.
type ErrConfigValidationFailed struct {
confFile string
UndecodedItems []string
}
func (e *ErrConfigValidationFailed) Error() string {
return fmt.Sprintf("config file %s contained invalid configuration options: %s; check "+
"TiDB manual to make sure this option has not been deprecated and removed from your TiDB "+
"version if the option does not appear to be a typo", e.confFile, strings.Join(
e.UndecodedItems, ", "))
}
// ErrConfigInstanceSection error is used to warn the user
// which config options should be moved to 'instance'.
type ErrConfigInstanceSection struct {
confFile string
configSections *[]InstanceConfigSection
deprecatedSections *[]InstanceConfigSection
}
func (e *ErrConfigInstanceSection) Error() string {
var builder strings.Builder
if len(*e.configSections) > 0 {
builder.WriteString("Conflict configuration options exists on both [instance] section and some other sections. ")
}
if len(*e.deprecatedSections) > 0 {
builder.WriteString("Some configuration options should be moved to [instance] section. ")
}
builder.WriteString("Please use the latter config options in [instance] instead: ")
for _, configSection := range *e.configSections {
for oldName, newName := range configSection.NameMappings {
builder.WriteString(fmt.Sprintf(" (%s, %s)", oldName, newName))
}
}
for _, configSection := range *e.deprecatedSections {
for oldName, newName := range configSection.NameMappings {
builder.WriteString(fmt.Sprintf(" (%s, %s)", oldName, newName))
}
}
builder.WriteString(".")
return builder.String()
}
// ClusterSecurity returns Security info for cluster
func (s *Security) ClusterSecurity() tikvcfg.Security {
return tikvcfg.NewSecurity(s.ClusterSSLCA, s.ClusterSSLCert, s.ClusterSSLKey, s.ClusterVerifyCN)
}
// Status is the status section of the config.
type Status struct {
StatusHost string `toml:"status-host" json:"status-host"`
MetricsAddr string `toml:"metrics-addr" json:"metrics-addr"`
StatusPort uint `toml:"status-port" json:"status-port"`
MetricsInterval uint `toml:"metrics-interval" json:"metrics-interval"`
ReportStatus bool `toml:"report-status" json:"report-status"`
RecordQPSbyDB bool `toml:"record-db-qps" json:"record-db-qps"`
// After a duration of this time in seconds if the server doesn't see any activity it pings
// the client to see if the transport is still alive.
GRPCKeepAliveTime uint `toml:"grpc-keepalive-time" json:"grpc-keepalive-time"`
// After having pinged for keepalive check, the server waits for a duration of timeout in seconds
// and if no activity is seen even after that the connection is closed.
GRPCKeepAliveTimeout uint `toml:"grpc-keepalive-timeout" json:"grpc-keepalive-timeout"`
// The number of max concurrent streams/requests on a client connection.
GRPCConcurrentStreams uint `toml:"grpc-concurrent-streams" json:"grpc-concurrent-streams"`
// Sets window size for stream. The default value is 2MB.
GRPCInitialWindowSize int `toml:"grpc-initial-window-size" json:"grpc-initial-window-size"`
// Set maximum message length in bytes that gRPC can send. `-1` means unlimited. The default value is 10MB.
GRPCMaxSendMsgSize int `toml:"grpc-max-send-msg-size" json:"grpc-max-send-msg-size"`
}
// Performance is the performance section of the config.
type Performance struct {
MaxProcs uint `toml:"max-procs" json:"max-procs"`
// Deprecated: use ServerMemoryQuota instead
MaxMemory uint64 `toml:"max-memory" json:"max-memory"`
ServerMemoryQuota uint64 `toml:"server-memory-quota" json:"server-memory-quota"`
StatsLease string `toml:"stats-lease" json:"stats-lease"`
StmtCountLimit uint `toml:"stmt-count-limit" json:"stmt-count-limit"`
PseudoEstimateRatio float64 `toml:"pseudo-estimate-ratio" json:"pseudo-estimate-ratio"`
BindInfoLease string `toml:"bind-info-lease" json:"bind-info-lease"`
TxnEntrySizeLimit uint64 `toml:"txn-entry-size-limit" json:"txn-entry-size-limit"`
TxnTotalSizeLimit uint64 `toml:"txn-total-size-limit" json:"txn-total-size-limit"`
TCPKeepAlive bool `toml:"tcp-keep-alive" json:"tcp-keep-alive"`
TCPNoDelay bool `toml:"tcp-no-delay" json:"tcp-no-delay"`
CrossJoin bool `toml:"cross-join" json:"cross-join"`
DistinctAggPushDown bool `toml:"distinct-agg-push-down" json:"distinct-agg-push-down"`
	// Whether to enable projection push-down for coprocessors (both tikv & tiflash); default false.
ProjectionPushDown bool `toml:"projection-push-down" json:"projection-push-down"`
MaxTxnTTL uint64 `toml:"max-txn-ttl" json:"max-txn-ttl"`
// Deprecated
MemProfileInterval string `toml:"-" json:"-"`
IndexUsageSyncLease string `toml:"index-usage-sync-lease" json:"index-usage-sync-lease"`
PlanReplayerGCLease string `toml:"plan-replayer-gc-lease" json:"plan-replayer-gc-lease"`
GOGC int `toml:"gogc" json:"gogc"`
EnforceMPP bool `toml:"enforce-mpp" json:"enforce-mpp"`
StatsLoadConcurrency uint `toml:"stats-load-concurrency" json:"stats-load-concurrency"`
StatsLoadQueueSize uint `toml:"stats-load-queue-size" json:"stats-load-queue-size"`
EnableStatsCacheMemQuota bool `toml:"enable-stats-cache-mem-quota" json:"enable-stats-cache-mem-quota"`
// The following items are deprecated. We need to keep them here temporarily
// to support the upgrade process. They can be removed in future.
// CommitterConcurrency, RunAutoAnalyze unused since bootstrap v90
CommitterConcurrency int `toml:"committer-concurrency" json:"committer-concurrency"`
RunAutoAnalyze bool `toml:"run-auto-analyze" json:"run-auto-analyze"`
// ForcePriority, MemoryUsageAlarmRatio are deprecated.
ForcePriority string `toml:"force-priority" json:"force-priority"`
MemoryUsageAlarmRatio float64 `toml:"memory-usage-alarm-ratio" json:"memory-usage-alarm-ratio"`
EnableLoadFMSketch bool `toml:"enable-load-fmsketch" json:"enable-load-fmsketch"`
}
// PlanCache is the PlanCache section of the config.
type PlanCache struct {
Enabled bool `toml:"enabled" json:"enabled"`
Capacity uint `toml:"capacity" json:"capacity"`
Shards uint `toml:"shards" json:"shards"`
}
// PreparedPlanCache is the PreparedPlanCache section of the config.
type PreparedPlanCache struct {
Enabled bool `toml:"enabled" json:"enabled"`
Capacity uint `toml:"capacity" json:"capacity"`
MemoryGuardRatio float64 `toml:"memory-guard-ratio" json:"memory-guard-ratio"`
}
// OpenTracing is the opentracing section of the config.
type OpenTracing struct {
Enable bool `toml:"enable" json:"enable"`
RPCMetrics bool `toml:"rpc-metrics" json:"rpc-metrics"`
Sampler OpenTracingSampler `toml:"sampler" json:"sampler"`
Reporter OpenTracingReporter `toml:"reporter" json:"reporter"`
}
// OpenTracingSampler is the config for opentracing sampler.
// See https://godoc.org/github.com/uber/jaeger-client-go/config#SamplerConfig
type OpenTracingSampler struct {
Type string `toml:"type" json:"type"`
Param float64 `toml:"param" json:"param"`
SamplingServerURL string `toml:"sampling-server-url" json:"sampling-server-url"`
MaxOperations int `toml:"max-operations" json:"max-operations"`
SamplingRefreshInterval time.Duration `toml:"sampling-refresh-interval" json:"sampling-refresh-interval"`
}
// OpenTracingReporter is the config for opentracing reporter.
// See https://godoc.org/github.com/uber/jaeger-client-go/config#ReporterConfig
type OpenTracingReporter struct {
QueueSize int `toml:"queue-size" json:"queue-size"`
BufferFlushInterval time.Duration `toml:"buffer-flush-interval" json:"buffer-flush-interval"`
LogSpans bool `toml:"log-spans" json:"log-spans"`
LocalAgentHostPort string `toml:"local-agent-host-port" json:"local-agent-host-port"`
}
// ProxyProtocol is the PROXY protocol section of the config.
type ProxyProtocol struct {
// PROXY protocol acceptable client networks.
// Empty string means disable PROXY protocol,
// * means all networks.
Networks string `toml:"networks" json:"networks"`
// PROXY protocol header read timeout, Unit is second.
HeaderTimeout uint `toml:"header-timeout" json:"header-timeout"`
}
// Binlog is the config for binlog.
type Binlog struct {
Enable bool `toml:"enable" json:"enable"`
	// If IgnoreError is true, TiDB would ignore errors encountered while
	// writing binlog.
IgnoreError bool `toml:"ignore-error" json:"ignore-error"`
WriteTimeout string `toml:"write-timeout" json:"write-timeout"`
// Use socket file to write binlog, for compatible with kafka version tidb-binlog.
BinlogSocket string `toml:"binlog-socket" json:"binlog-socket"`
// The strategy for sending binlog to pump, value can be "range" or "hash" now.
Strategy string `toml:"strategy" json:"strategy"`
}
// PessimisticTxn is the config for pessimistic transaction.
type PessimisticTxn struct {
// The max count of retry for a single statement in a pessimistic transaction.
MaxRetryCount uint `toml:"max-retry-count" json:"max-retry-count"`
// The max count of deadlock events that will be recorded in the information_schema.deadlocks table.
DeadlockHistoryCapacity uint `toml:"deadlock-history-capacity" json:"deadlock-history-capacity"`
// Whether retryable deadlocks (in-statement deadlocks) are collected to the information_schema.deadlocks table.
DeadlockHistoryCollectRetryable bool `toml:"deadlock-history-collect-retryable" json:"deadlock-history-collect-retryable"`
	// PessimisticAutoCommit, if true, means that auto-commit transactions will run in pessimistic mode.
PessimisticAutoCommit AtomicBool `toml:"pessimistic-auto-commit" json:"pessimistic-auto-commit"`
}
// TrxSummary is the config for transaction summary collecting.
type TrxSummary struct {
	// how many transaction summaries each TiDB node should keep in `transaction_summary`.
TransactionSummaryCapacity uint `toml:"transaction-summary-capacity" json:"transaction-summary-capacity"`
	// how long a transaction must run before it is recorded in `transaction_id_digest`.
TransactionIDDigestMinDuration uint `toml:"transaction-id-digest-min-duration" json:"transaction-id-digest-min-duration"`
}
// Valid validates the TrxSummary configs.
func (config *TrxSummary) Valid() error {
if config.TransactionSummaryCapacity > 5000 {
return errors.New("transaction-summary.transaction-summary-capacity should not be larger than 5000")
}
return nil
}
// DefaultPessimisticTxn returns the default configuration for PessimisticTxn
func DefaultPessimisticTxn() PessimisticTxn {
return PessimisticTxn{
MaxRetryCount: 256,
DeadlockHistoryCapacity: 10,
DeadlockHistoryCollectRetryable: false,
PessimisticAutoCommit: *NewAtomicBool(false),
}
}
// DefaultTrxSummary returns the default configuration for TrxSummary collector
func DefaultTrxSummary() TrxSummary {
// TrxSummary is not enabled by default before GA
return TrxSummary{
TransactionSummaryCapacity: 500,
TransactionIDDigestMinDuration: 2147483647,
}
}
// Plugin is the config for plugin
type Plugin struct {
Dir string `toml:"dir" json:"dir"`
Load string `toml:"load" json:"load"`
}
// TopSQL is the config for TopSQL.
type TopSQL struct {
// The TopSQL's data receiver address.
ReceiverAddress string `toml:"receiver-address" json:"receiver-address"`
}
// IsolationRead is the config for isolation read.
type IsolationRead struct {
// Engines filters tidb-server access paths by engine type.
Engines []string `toml:"engines" json:"engines"`
}
// Experimental controls the features that are still experimental: their semantics, interfaces are subject to change.
// Using these features in the production environment is not recommended.
type Experimental struct {
	// Whether to enable creating expression indexes.
AllowsExpressionIndex bool `toml:"allow-expression-index" json:"allow-expression-index"`
	// Whether to enable the new charset feature.
EnableNewCharset bool `toml:"enable-new-charset" json:"-"`
}
var defTiKVCfg = tikvcfg.DefaultConfig()
var defaultConf = Config{
Host: DefHost,
AdvertiseAddress: "",
Port: DefPort,
Socket: "/tmp/tidb-{Port}.sock",
Cors: "",
Store: "unistore",
Path: "/tmp/tidb",
RunDDL: true,
SplitTable: true,
Lease: "45s",
TokenLimit: 1000,
OOMUseTmpStorage: true,
TempDir: DefTempDir,
TempStorageQuota: -1,
TempStoragePath: tempStorageDirName,
MemQuotaQuery: 1 << 30,
OOMAction: "cancel",
EnableBatchDML: false,
CheckMb4ValueInUTF8: *NewAtomicBool(true),
MaxIndexLength: 3072,
IndexLimit: 64,
TableColumnCountLimit: 1017,
AlterPrimaryKey: false,
TreatOldVersionUTF8AsUTF8MB4: true,
EnableTableLock: false,
DelayCleanTableLock: 0,
SplitRegionMaxNum: 1000,
RepairMode: false,
RepairTableList: []string{},
MaxServerConnections: 0,
TxnLocalLatches: defTiKVCfg.TxnLocalLatches,
GracefulWaitBeforeShutdown: 0,
ServerVersion: "",
TiDBEdition: "",
VersionComment: "",
TiDBReleaseVersion: "",
Log: Log{
Level: "info",
Format: "text",
File: logutil.NewFileLogConfig(logutil.DefaultLogMaxSize),
SlowQueryFile: "tidb-slow.log",
SlowThreshold: logutil.DefaultSlowThreshold,
ExpensiveThreshold: 10000,
DisableErrorStack: nbUnset,
EnableErrorStack: nbUnset, // If both options are nbUnset, getDisableErrorStack() returns true
EnableTimestamp: nbUnset,
DisableTimestamp: nbUnset, // If both options are nbUnset, getDisableTimestamp() returns false
QueryLogMaxLen: logutil.DefaultQueryLogMaxLen,
RecordPlanInSlowLog: logutil.DefaultRecordPlanInSlowLog,
EnableSlowLog: *NewAtomicBool(logutil.DefaultTiDBEnableSlowLog),
},
Instance: Instance{
TiDBGeneralLog: false,
EnablePProfSQLCPU: false,
DDLSlowOprThreshold: DefDDLSlowOprThreshold,
ExpensiveQueryTimeThreshold: DefExpensiveQueryTimeThreshold,
EnableSlowLog: *NewAtomicBool(logutil.DefaultTiDBEnableSlowLog),
SlowThreshold: logutil.DefaultSlowThreshold,
RecordPlanInSlowLog: logutil.DefaultRecordPlanInSlowLog,
CheckMb4ValueInUTF8: *NewAtomicBool(true),
ForcePriority: "NO_PRIORITY",
MemoryUsageAlarmRatio: DefMemoryUsageAlarmRatio,
EnableCollectExecutionInfo: true,
PluginDir: "/data/deploy/plugin",
PluginLoad: "",
MaxConnections: 0,
TiDBEnableDDL: *NewAtomicBool(true),
},
Status: Status{
ReportStatus: true,
StatusHost: DefStatusHost,
StatusPort: DefStatusPort,
MetricsInterval: 15,
RecordQPSbyDB: false,
GRPCKeepAliveTime: 10,
GRPCKeepAliveTimeout: 3,
GRPCConcurrentStreams: 1024,
GRPCInitialWindowSize: 2 * 1024 * 1024,
GRPCMaxSendMsgSize: math.MaxInt32,
},
Performance: Performance{
MaxMemory: 0,
ServerMemoryQuota: 0,
MemoryUsageAlarmRatio: DefMemoryUsageAlarmRatio,
TCPKeepAlive: true,
TCPNoDelay: true,
CrossJoin: true,
StatsLease: "3s",
StmtCountLimit: 5000,
PseudoEstimateRatio: 0.8,
ForcePriority: "NO_PRIORITY",
BindInfoLease: "3s",
TxnEntrySizeLimit: DefTxnEntrySizeLimit,
TxnTotalSizeLimit: DefTxnTotalSizeLimit,
DistinctAggPushDown: false,
ProjectionPushDown: false,
CommitterConcurrency: defTiKVCfg.CommitterConcurrency,
MaxTxnTTL: defTiKVCfg.MaxTxnTTL, // 1hour
// TODO: set indexUsageSyncLease to 60s.
IndexUsageSyncLease: "0s",
GOGC: 100,
EnforceMPP: false,
PlanReplayerGCLease: "10m",
StatsLoadConcurrency: 5,
StatsLoadQueueSize: 1000,
EnableStatsCacheMemQuota: false,
RunAutoAnalyze: true,
EnableLoadFMSketch: false,
},
ProxyProtocol: ProxyProtocol{
Networks: "",
HeaderTimeout: 5,
},
PreparedPlanCache: PreparedPlanCache{
Enabled: true,
Capacity: 100,
MemoryGuardRatio: 0.1,
},
OpenTracing: OpenTracing{
Enable: false,
Sampler: OpenTracingSampler{
Type: "const",
Param: 1.0,
},
Reporter: OpenTracingReporter{},
},
PDClient: defTiKVCfg.PDClient,
TiKVClient: defTiKVCfg.TiKVClient,
Binlog: Binlog{
WriteTimeout: "15s",
Strategy: "range",
},
Plugin: Plugin{
Dir: "/data/deploy/plugin",
Load: "",
},
PessimisticTxn: DefaultPessimisticTxn(),
IsolationRead: IsolationRead{
Engines: []string{"tikv", "tiflash", "tidb"},
},
Experimental: Experimental{},
EnableCollectExecutionInfo: true,
EnableTelemetry: true,
Labels: make(map[string]string),
EnableGlobalIndex: false,
Security: Security{
SpilledFileEncryptionMethod: SpilledFileEncryptionMethodPlaintext,
EnableSEM: false,
AutoTLS: false,
RSAKeySize: 4096,
},
DeprecateIntegerDisplayWidth: false,
EnableEnumLengthLimit: true,
StoresRefreshInterval: defTiKVCfg.StoresRefreshInterval,
EnableForwarding: defTiKVCfg.EnableForwarding,
NewCollationsEnabledOnFirstBootstrap: true,
EnableGlobalKill: true,
TrxSummary: DefaultTrxSummary(),
}
var (
globalConf atomic.Value
)
// NewConfig creates a new config instance with default values.
func NewConfig() *Config {
conf := defaultConf
return &conf
}
// GetGlobalConfig returns the global configuration for this server.
// It holds the configuration merged from the command line and the configuration file.
// Other parts of the system can read the global configuration through this function.
func GetGlobalConfig() *Config {
return globalConf.Load().(*Config)
}
// StoreGlobalConfig stores a new config to globalConf. It is mostly used in tests to avoid data races.
func StoreGlobalConfig(config *Config) {
globalConf.Store(config)
TikvConfigLock.Lock()
defer TikvConfigLock.Unlock()
cfg := *config.GetTiKVConfig()
tikvcfg.StoreGlobalConfig(&cfg)
}
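// Illustrative sketch, not part of the original file: how NewConfig,
// StoreGlobalConfig and GetGlobalConfig typically fit together. The field
// tweaked below (MaxServerConnections) is only an example.
func exampleGlobalConfigRoundTrip() {
	cfg := NewConfig()             // private copy of defaultConf
	cfg.MaxServerConnections = 512 // adjust whatever fields are needed
	StoreGlobalConfig(cfg)         // atomically publish the new snapshot
	_ = GetGlobalConfig().MaxServerConnections // now observed by all readers
}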
// removedConfig contains items that are no longer supported.
// They might still be present in the config struct to support importing older config files,
// but they are not actively used.
var removedConfig = map[string]struct{}{
"pessimistic-txn.ttl": {},
"pessimistic-txn.enable": {},
"log.file.log-rotate": {},
"log.log-slow-query": {},
"txn-local-latches": {},
"txn-local-latches.enabled": {},
"txn-local-latches.capacity": {},
"performance.max-memory": {},
"max-txn-time-use": {},
"experimental.allow-auto-random": {},
"enable-redact-log": {}, // use variable tidb_redact_log instead
"enable-streaming": {},
"performance.mem-profile-interval": {},
"security.require-secure-transport": {},
"lower-case-table-names": {},
"stmt-summary": {},
"stmt-summary.enable": {},
"stmt-summary.enable-internal-query": {},
"stmt-summary.max-stmt-count": {},
"stmt-summary.max-sql-length": {},
"stmt-summary.refresh-interval": {},
"stmt-summary.history-size": {},
"enable-batch-dml": {}, // use tidb_enable_batch_dml
"mem-quota-query": {},
"log.query-log-max-len": {},
"performance.committer-concurrency": {},
"experimental.enable-global-kill": {},
"performance.run-auto-analyze": {}, //use tidb_enable_auto_analyze
// use tidb_enable_prepared_plan_cache, tidb_prepared_plan_cache_size and tidb_prepared_plan_cache_memory_guard_ratio
"prepared-plan-cache.enabled": {},
"prepared-plan-cache.capacity": {},
"prepared-plan-cache.memory-guard-ratio": {},
"oom-action": {},
"check-mb4-value-in-utf8": {}, // use tidb_check_mb4_value_in_utf8
"enable-collect-execution-info": {}, // use tidb_enable_collect_execution_info
"log.enable-slow-log": {}, // use tidb_enable_slow_log
"log.slow-threshold": {}, // use tidb_slow_log_threshold
"log.record-plan-in-slow-log": {}, // use tidb_record_plan_in_slow_log
"performance.force-priority": {}, // use tidb_force_priority
"performance.memory-usage-alarm-ratio": {}, // use tidb_memory_usage_alarm_ratio
"plugin.load": {}, // use plugin_load
"plugin.dir": {}, // use plugin_dir
"performance.feedback-probability": {}, // This feature is deprecated
"performance.query-feedback-limit": {},
"oom-use-tmp-storage": {}, // use tidb_enable_tmp_storage_on_oom
"max-server-connections": {}, // use sysvar max_connections
"run-ddl": {}, // use sysvar tidb_enable_ddl
}
// isAllRemovedConfigItems returns true if all the items that could not be validated
// belong to the list of removedConfig items.
func isAllRemovedConfigItems(items []string) bool {
for _, item := range items {
if _, ok := removedConfig[item]; !ok {
return false
}
}
return true
}
// InitializeConfig initializes the global config handler.
// The function enforceCmdArgs is used to merge the config file with command arguments:
// For example, if you start TiDB by the command "./tidb-server --port=3000", the port number should be
// overwritten to 3000, ignoring the port number in the config file.
func InitializeConfig(confPath string, configCheck, configStrict bool, enforceCmdArgs func(*Config)) {
cfg := GetGlobalConfig()
var err error
if confPath != "" {
if err = cfg.Load(confPath); err != nil {
			// Unused config item errors are turned into warnings.
if tmp, ok := err.(*ErrConfigValidationFailed); ok {
// This block is to accommodate an interim situation where strict config checking
// is not the default behavior of TiDB. The warning message must be deferred until
				// logging has been set up. Once strict config checking becomes the default behavior,
				// this should all be removed.
if (!configCheck && !configStrict) || isAllRemovedConfigItems(tmp.UndecodedItems) {
fmt.Fprintln(os.Stderr, err.Error())
err = nil
}
} else if tmp, ok := err.(*ErrConfigInstanceSection); ok {
logutil.BgLogger().Warn(tmp.Error())
err = nil
}
}
// In configCheck we always print out which options in the config file
		// have been removed. This helps users upgrade more smoothly.
if configCheck {
err = cfg.RemovedVariableCheck(confPath)
if err != nil {
logutil.BgLogger().Warn(err.Error())
err = nil // treat as warning
}
}
terror.MustNil(err)
} else {
		// configCheck requires a config file to be specified.
if configCheck {
fmt.Fprintln(os.Stderr, "config check failed", errors.New("no config file specified for config-check"))
os.Exit(1)
}
}
enforceCmdArgs(cfg)
if err := cfg.Valid(); err != nil {
if !filepath.IsAbs(confPath) {
if tmp, err := filepath.Abs(confPath); err == nil {
confPath = tmp
}
}
fmt.Fprintln(os.Stderr, "load config file:", confPath)
fmt.Fprintln(os.Stderr, "invalid config", err)
os.Exit(1)
}
if configCheck {
fmt.Println("config check successful")
os.Exit(0)
}
StoreGlobalConfig(cfg)
}
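// Illustrative sketch, not part of the original file: a minimal way a server
// entry point might call InitializeConfig. The parameter names here are
// hypothetical; the closure is where command-line flags override file values.
func exampleInitializeConfig(confPath string, portFromFlag uint) {
	InitializeConfig(confPath, false /*configCheck*/, false /*configStrict*/, func(cfg *Config) {
		if portFromFlag != 0 {
			cfg.Port = portFromFlag // a command-line flag wins over the config file
		}
	})
}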
// RemovedVariableCheck checks if the config file contains any items
// which have been removed. These will not take effect any more.
func (c *Config) RemovedVariableCheck(confFile string) error {
metaData, err := toml.DecodeFile(confFile, c)
if err != nil {
return err
}
var removed []string
for item := range removedConfig {
// We need to split the string to account for the top level
// and the section hierarchy of config.
tmp := strings.Split(item, ".")
if len(tmp) == 2 && metaData.IsDefined(tmp[0], tmp[1]) {
removed = append(removed, item)
} else if len(tmp) == 1 && metaData.IsDefined(tmp[0]) {
removed = append(removed, item)
}
}
if len(removed) > 0 {
sort.Strings(removed) // deterministic for tests
return fmt.Errorf("The following configuration options are no longer supported in this version of TiDB. Check the release notes for more information: %s", strings.Join(removed, ", "))
}
return nil
}
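// Illustrative sketch, not part of the original file: RemovedVariableCheck is
// invoked in config-check mode, and a warning is logged when the file still
// sets removed items; this mirrors the handling inside InitializeConfig.
func exampleWarnOnRemovedItems(confPath string) {
	cfg := NewConfig()
	if err := cfg.RemovedVariableCheck(confPath); err != nil {
		logutil.BgLogger().Warn(err.Error())
	}
}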
// Load loads config options from a toml file.
func (c *Config) Load(confFile string) error {
metaData, err := toml.DecodeFile(confFile, c)
if c.TokenLimit == 0 {
c.TokenLimit = 1000
}
	// If any items in the confFile are not mapped into the Config struct, issue
// an error and stop the server from starting.
undecoded := metaData.Undecoded()
if len(undecoded) > 0 && err == nil {
var undecodedItems []string
for _, item := range undecoded {
undecodedItems = append(undecodedItems, item.String())
}
err = &ErrConfigValidationFailed{confFile, undecodedItems}
}
for _, section := range sectionMovedToInstance {
newConflictSection := InstanceConfigSection{SectionName: section.SectionName, NameMappings: map[string]string{}}
newDeprecatedSection := InstanceConfigSection{SectionName: section.SectionName, NameMappings: map[string]string{}}
for oldName, newName := range section.NameMappings {
if section.SectionName == "" && metaData.IsDefined(oldName) ||
section.SectionName != "" && metaData.IsDefined(section.SectionName, oldName) {
if metaData.IsDefined("instance", newName) {
newConflictSection.NameMappings[oldName] = newName
} else {
newDeprecatedSection.NameMappings[oldName] = newName
}
}
}
if len(newConflictSection.NameMappings) > 0 {
ConflictOptions = append(ConflictOptions, newConflictSection)
}
if len(newDeprecatedSection.NameMappings) > 0 {
DeprecatedOptions = append(DeprecatedOptions, newDeprecatedSection)
}
}
if len(ConflictOptions) > 0 || len(DeprecatedOptions) > 0 {
// Give a warning that the 'instance' section should be used.
err = &ErrConfigInstanceSection{confFile, &ConflictOptions, &DeprecatedOptions}
}
return err
}
// Valid checks if this config is valid.
func (c *Config) Valid() error {
if c.Log.EnableErrorStack == c.Log.DisableErrorStack && c.Log.EnableErrorStack != nbUnset {
logutil.BgLogger().Warn(fmt.Sprintf("\"enable-error-stack\" (%v) conflicts \"disable-error-stack\" (%v). \"disable-error-stack\" is deprecated, please use \"enable-error-stack\" instead. disable-error-stack is ignored.", c.Log.EnableErrorStack, c.Log.DisableErrorStack))
// if two options conflict, we will use the value of EnableErrorStack
c.Log.DisableErrorStack = nbUnset
}
if c.Log.EnableTimestamp == c.Log.DisableTimestamp && c.Log.EnableTimestamp != nbUnset {
logutil.BgLogger().Warn(fmt.Sprintf("\"enable-timestamp\" (%v) conflicts \"disable-timestamp\" (%v). \"disable-timestamp\" is deprecated, please use \"enable-timestamp\" instead", c.Log.EnableTimestamp, c.Log.DisableTimestamp))
// if two options conflict, we will use the value of EnableTimestamp
c.Log.DisableTimestamp = nbUnset
}
if c.Security.SkipGrantTable && !hasRootPrivilege() {
return fmt.Errorf("TiDB run with skip-grant-table need root privilege")
}
if !ValidStorage[c.Store] {
nameList := make([]string, 0, len(ValidStorage))
for k, v := range ValidStorage {
if v {
nameList = append(nameList, k)
}
}
return fmt.Errorf("invalid store=%s, valid storages=%v", c.Store, nameList)
}
if c.Store == "mocktikv" && !c.Instance.TiDBEnableDDL.Load() {
return fmt.Errorf("can't disable DDL on mocktikv")
}
if c.MaxIndexLength < DefMaxIndexLength || c.MaxIndexLength > DefMaxOfMaxIndexLength {
return fmt.Errorf("max-index-length should be [%d, %d]", DefMaxIndexLength, DefMaxOfMaxIndexLength)
}
if c.IndexLimit < DefIndexLimit || c.IndexLimit > DefMaxOfIndexLimit {
return fmt.Errorf("index-limit should be [%d, %d]", DefIndexLimit, DefMaxOfIndexLimit)
}
if c.Log.File.MaxSize > MaxLogFileSize {
return fmt.Errorf("invalid max log file size=%v which is larger than max=%v", c.Log.File.MaxSize, MaxLogFileSize)
}
if c.TableColumnCountLimit < DefTableColumnCountLimit || c.TableColumnCountLimit > DefMaxOfTableColumnCountLimit {
		return fmt.Errorf("table-column-limit should be [%d, %d]", DefTableColumnCountLimit, DefMaxOfTableColumnCountLimit)
}
// txn-local-latches
if err := c.TxnLocalLatches.Valid(); err != nil {
return err
}
// For tikvclient.
if err := c.TiKVClient.Valid(); err != nil {
return err
}
if err := c.TrxSummary.Valid(); err != nil {
return err
}
if c.Performance.TxnTotalSizeLimit > 1<<40 {
return fmt.Errorf("txn-total-size-limit should be less than %d", 1<<40)
}
if c.Instance.MemoryUsageAlarmRatio > 1 || c.Instance.MemoryUsageAlarmRatio < 0 {
return fmt.Errorf("tidb_memory_usage_alarm_ratio in [Instance] must be greater than or equal to 0 and less than or equal to 1")
}
if len(c.IsolationRead.Engines) < 1 {
return fmt.Errorf("the number of [isolation-read]engines for isolation read should be at least 1")
}
for _, engine := range c.IsolationRead.Engines {
if engine != "tidb" && engine != "tikv" && engine != "tiflash" {
return fmt.Errorf("type of [isolation-read]engines can't be %v should be one of tidb or tikv or tiflash", engine)
}
}
// test security
c.Security.SpilledFileEncryptionMethod = strings.ToLower(c.Security.SpilledFileEncryptionMethod)
switch c.Security.SpilledFileEncryptionMethod {
case SpilledFileEncryptionMethodPlaintext, SpilledFileEncryptionMethodAES128CTR:
default:
return fmt.Errorf("unsupported [security]spilled-file-encryption-method %v, TiDB only supports [%v, %v]",
c.Security.SpilledFileEncryptionMethod, SpilledFileEncryptionMethodPlaintext, SpilledFileEncryptionMethodAES128CTR)
}
// check stats load config
if c.Performance.StatsLoadConcurrency < DefStatsLoadConcurrencyLimit || c.Performance.StatsLoadConcurrency > DefMaxOfStatsLoadConcurrencyLimit {
return fmt.Errorf("stats-load-concurrency should be [%d, %d]", DefStatsLoadConcurrencyLimit, DefMaxOfStatsLoadConcurrencyLimit)
}
if c.Performance.StatsLoadQueueSize < DefStatsLoadQueueSizeLimit || c.Performance.StatsLoadQueueSize > DefMaxOfStatsLoadQueueSizeLimit {
return fmt.Errorf("stats-load-queue-size should be [%d, %d]", DefStatsLoadQueueSizeLimit, DefMaxOfStatsLoadQueueSizeLimit)
}
// test log level
l := zap.NewAtomicLevel()
return l.UnmarshalText([]byte(c.Log.Level))
}
// UpdateGlobal applies f to a copy of the global config and stores the result as the new global config.
func UpdateGlobal(f func(conf *Config)) {
g := GetGlobalConfig()
newConf := *g
f(&newConf)
StoreGlobalConfig(&newConf)
}
// RestoreFunc returns a function that restores the config to its current value.
func RestoreFunc() (restore func()) {
g := GetGlobalConfig()
return func() {
StoreGlobalConfig(g)
}
}
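// Illustrative sketch, not part of the original file: the common test pattern
// of tweaking one field with UpdateGlobal and undoing it with RestoreFunc. The
// field used here (EnableTableLock) is only an example.
func exampleUpdateAndRestore() {
	restore := RestoreFunc() // capture the current snapshot
	defer restore()          // put it back when done
	UpdateGlobal(func(conf *Config) {
		conf.EnableTableLock = true
	})
	_ = GetGlobalConfig().EnableTableLock // true until restore() runs
}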
func hasRootPrivilege() bool {
return os.Geteuid() == 0
}
// TableLockEnabled checks whether the table lock feature is enabled.
func TableLockEnabled() bool {
return GetGlobalConfig().EnableTableLock
}
// TableLockDelayClean returns the delay time for cleaning up table locks.
var TableLockDelayClean = func() uint64 {
return GetGlobalConfig().DelayCleanTableLock
}
// ToLogConfig converts *Log to *logutil.LogConfig.
func (l *Log) ToLogConfig() *logutil.LogConfig {
return logutil.NewLogConfig(l.Level, l.Format, l.SlowQueryFile, l.File, l.getDisableTimestamp(), func(config *zaplog.Config) { config.DisableErrorVerbose = l.getDisableErrorStack() })
}
// ToTracingConfig converts *OpenTracing to *tracing.Configuration.
func (t *OpenTracing) ToTracingConfig() *tracing.Configuration {
ret := &tracing.Configuration{
Disabled: !t.Enable,
RPCMetrics: t.RPCMetrics,
Reporter: &tracing.ReporterConfig{},
Sampler: &tracing.SamplerConfig{},
}
ret.Reporter.QueueSize = t.Reporter.QueueSize
ret.Reporter.BufferFlushInterval = t.Reporter.BufferFlushInterval
ret.Reporter.LogSpans = t.Reporter.LogSpans
ret.Reporter.LocalAgentHostPort = t.Reporter.LocalAgentHostPort
ret.Sampler.Type = t.Sampler.Type
ret.Sampler.Param = t.Sampler.Param
ret.Sampler.SamplingServerURL = t.Sampler.SamplingServerURL
ret.Sampler.MaxOperations = t.Sampler.MaxOperations
ret.Sampler.SamplingRefreshInterval = t.Sampler.SamplingRefreshInterval
return ret
}
func init() {
initByLDFlags(versioninfo.TiDBEdition, checkBeforeDropLDFlag)
}
func initByLDFlags(edition, checkBeforeDropLDFlag string) {
if edition != versioninfo.CommunityEdition {
defaultConf.EnableTelemetry = false
}
conf := defaultConf
StoreGlobalConfig(&conf)
if checkBeforeDropLDFlag == "1" {
CheckTableBeforeDrop = true
}
}
// hideConfig is used to filter a single line of config for hiding.
var hideConfig = []string{
"performance.index-usage-sync-lease",
}
// GetJSONConfig returns the config as JSON with hidden items removed.
// It replaces the earlier HideConfig(), which used strings.Split() in
// a way that didn't work for similarly named items (like enable).
func GetJSONConfig() (string, error) {
j, err := json.Marshal(GetGlobalConfig())
if err != nil {
return "", err
}
jsonValue := make(map[string]interface{})
err = json.Unmarshal(j, &jsonValue)
if err != nil {
return "", err
}
removedPaths := make([]string, 0, len(removedConfig)+len(hideConfig))
for removedItem := range removedConfig {
removedPaths = append(removedPaths, removedItem)
}
removedPaths = append(removedPaths, hideConfig...)
for _, path := range removedPaths {
s := strings.Split(path, ".")
curValue := jsonValue
for i, key := range s {
if i == len(s)-1 {
delete(curValue, key)
}
if curValue[key] != nil {
mapValue, ok := curValue[key].(map[string]interface{})
if !ok {
break
}
curValue = mapValue
} else {
break
}
}
}
buf, err := json.Marshal(jsonValue)
if err != nil {
return "", err
}
var resBuf bytes.Buffer
if err = json.Indent(&resBuf, buf, "", "\t"); err != nil {
return "", err
}
return resBuf.String(), nil
}
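// Illustrative sketch, not part of the original file: GetJSONConfig is suited
// to status endpoints because removed and hidden items never appear in its
// output.
func exampleDumpConfigJSON() (string, error) {
	js, err := GetJSONConfig()
	if err != nil {
		return "", err
	}
	return js, nil // an indented JSON document without hidden/removed keys
}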
// ContainHiddenConfig checks whether the given string contains any configuration item that needs to be hidden.
func ContainHiddenConfig(s string) bool {
s = strings.ToLower(s)
for _, hc := range hideConfig {
if strings.Contains(s, hc) {
return true
}
}
for dc := range removedConfig {
if strings.Contains(s, dc) {
return true
}
}
return false
}
| config/config.go | 1 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.0011460705427452922,
0.00021234450105112046,
0.00016015104483813047,
0.00016894230793695897,
0.0001383367198286578
] |
{
"id": 2,
"code_window": [
"\t// CostModelVersion is a internal switch to indicates the Cost Model Version.\n",
"\tCostModelVersion int\n",
"\t// BatchPendingTiFlashCount shows the threshold of pending TiFlash tables when batch adding.\n",
"\tBatchPendingTiFlashCount int\n",
"\t// RcReadCheckTS indicates if ts check optimization is enabled for current session.\n",
"\tRcReadCheckTS bool\n",
"\t// RcWriteCheckTS indicates whether some special write statements don't get latest tso from PD at RC\n",
"\tRcWriteCheckTS bool\n",
"\t// RemoveOrderbyInSubquery indicates whether to remove ORDER BY in subquery.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/session.go",
"type": "replace",
"edit_start_line_idx": 1000
} | CREATE TABLE c(
id INT NOT NULL PRIMARY KEY,
k INT NOT NULL
);
| br/tests/lightning_error_summary/data/error_summary.c-schema.sql | 0 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.00017790909623727202,
0.00017790909623727202,
0.00017790909623727202,
0.00017790909623727202,
0
] |
{
"id": 2,
"code_window": [
"\t// CostModelVersion is a internal switch to indicates the Cost Model Version.\n",
"\tCostModelVersion int\n",
"\t// BatchPendingTiFlashCount shows the threshold of pending TiFlash tables when batch adding.\n",
"\tBatchPendingTiFlashCount int\n",
"\t// RcReadCheckTS indicates if ts check optimization is enabled for current session.\n",
"\tRcReadCheckTS bool\n",
"\t// RcWriteCheckTS indicates whether some special write statements don't get latest tso from PD at RC\n",
"\tRcWriteCheckTS bool\n",
"\t// RemoveOrderbyInSubquery indicates whether to remove ORDER BY in subquery.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/session.go",
"type": "replace",
"edit_start_line_idx": 1000
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunk
import "github.com/pingcap/errors"
// CopySelectedJoinRowsDirect directly copies the selected joined rows from the source Chunk
// to the destination Chunk.
// Return true if at least one joined row was selected.
func CopySelectedJoinRowsDirect(src *Chunk, selected []bool, dst *Chunk) (bool, error) {
if src.NumRows() == 0 {
return false, nil
}
if src.sel != nil || dst.sel != nil {
return false, errors.New(msgErrSelNotNil)
}
if len(src.columns) == 0 {
numSelected := 0
for _, s := range selected {
if s {
numSelected++
}
}
dst.numVirtualRows += numSelected
return numSelected > 0, nil
}
oldLen := dst.columns[0].length
for j, srcCol := range src.columns {
dstCol := dst.columns[j]
if srcCol.isFixed() {
for i := 0; i < len(selected); i++ {
if !selected[i] {
continue
}
dstCol.appendNullBitmap(!srcCol.IsNull(i))
dstCol.length++
elemLen := len(srcCol.elemBuf)
offset := i * elemLen
dstCol.data = append(dstCol.data, srcCol.data[offset:offset+elemLen]...)
}
} else {
for i := 0; i < len(selected); i++ {
if !selected[i] {
continue
}
dstCol.appendNullBitmap(!srcCol.IsNull(i))
dstCol.length++
start, end := srcCol.offsets[i], srcCol.offsets[i+1]
dstCol.data = append(dstCol.data, srcCol.data[start:end]...)
dstCol.offsets = append(dstCol.offsets, int64(len(dstCol.data)))
}
}
}
numSelected := dst.columns[0].length - oldLen
dst.numVirtualRows += numSelected
return numSelected > 0, nil
}
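// Illustrative sketch, not part of the original file: feeding
// CopySelectedJoinRowsDirect with one-column chunks. It assumes the usual
// field-type helpers (types.NewFieldType, mysql.TypeLonglong), whose packages
// are not imported by this file and would need to be added.
func exampleCopySelectedJoinRowsDirect() (bool, error) {
	ft := []*types.FieldType{types.NewFieldType(mysql.TypeLonglong)}
	src, dst := NewChunkWithCapacity(ft, 4), NewChunkWithCapacity(ft, 4)
	for i := 0; i < 4; i++ {
		src.AppendInt64(0, int64(i)) // rows 0..3 in the only column
	}
	selected := []bool{true, false, true, false} // keep rows 0 and 2
	return CopySelectedJoinRowsDirect(src, selected, dst)
}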
// CopySelectedJoinRowsWithSameOuterRows copies the selected joined rows from the source Chunk
// to the destination Chunk.
// Return true if at least one joined row was selected.
//
// NOTE: All the outer rows in the source Chunk should be the same.
func CopySelectedJoinRowsWithSameOuterRows(src *Chunk, innerColOffset, innerColLen, outerColOffset, outerColLen int, selected []bool, dst *Chunk) (bool, error) {
if src.NumRows() == 0 {
return false, nil
}
if src.sel != nil || dst.sel != nil {
return false, errors.New(msgErrSelNotNil)
}
numSelected := copySelectedInnerRows(innerColOffset, innerColLen, src, selected, dst)
copySameOuterRows(outerColOffset, outerColLen, src, numSelected, dst)
dst.numVirtualRows += numSelected
return numSelected > 0, nil
}
// copySelectedInnerRows copies the selected inner rows from the source Chunk
// to the destination Chunk.
// return the number of rows which is selected.
func copySelectedInnerRows(innerColOffset, innerColLen int, src *Chunk, selected []bool, dst *Chunk) int {
srcCols := src.columns[innerColOffset : innerColOffset+innerColLen]
if len(srcCols) == 0 {
numSelected := 0
for _, s := range selected {
if s {
numSelected++
}
}
return numSelected
}
oldLen := dst.columns[innerColOffset].length
for j, srcCol := range srcCols {
dstCol := dst.columns[innerColOffset+j]
if srcCol.isFixed() {
for i := 0; i < len(selected); i++ {
if !selected[i] {
continue
}
dstCol.appendNullBitmap(!srcCol.IsNull(i))
dstCol.length++
elemLen := len(srcCol.elemBuf)
offset := i * elemLen
dstCol.data = append(dstCol.data, srcCol.data[offset:offset+elemLen]...)
}
} else {
for i := 0; i < len(selected); i++ {
if !selected[i] {
continue
}
dstCol.appendNullBitmap(!srcCol.IsNull(i))
dstCol.length++
start, end := srcCol.offsets[i], srcCol.offsets[i+1]
dstCol.data = append(dstCol.data, srcCol.data[start:end]...)
dstCol.offsets = append(dstCol.offsets, int64(len(dstCol.data)))
}
}
}
return dst.columns[innerColOffset].length - oldLen
}
// copySameOuterRows copies the continuous 'numRows' outer rows in the source Chunk
// to the destination Chunk.
func copySameOuterRows(outerColOffset, outerColLen int, src *Chunk, numRows int, dst *Chunk) {
if numRows <= 0 || outerColLen <= 0 {
return
}
row := src.GetRow(0)
srcCols := src.columns[outerColOffset : outerColOffset+outerColLen]
for i, srcCol := range srcCols {
dstCol := dst.columns[outerColOffset+i]
dstCol.appendMultiSameNullBitmap(!srcCol.IsNull(row.idx), numRows)
dstCol.length += numRows
if srcCol.isFixed() {
elemLen := len(srcCol.elemBuf)
start := row.idx * elemLen
end := start + numRows*elemLen
dstCol.data = append(dstCol.data, srcCol.data[start:end]...)
} else {
start, end := srcCol.offsets[row.idx], srcCol.offsets[row.idx+numRows]
dstCol.data = append(dstCol.data, srcCol.data[start:end]...)
offsets := dstCol.offsets
elemLen := srcCol.offsets[row.idx+1] - srcCol.offsets[row.idx]
for j := 0; j < numRows; j++ {
offsets = append(offsets, offsets[len(offsets)-1]+elemLen)
}
dstCol.offsets = offsets
}
}
}
| util/chunk/chunk_util.go | 0 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.00019747608166653663,
0.00017141425632871687,
0.0001653251820243895,
0.0001691816869424656,
0.000007135729447327321
] |
{
"id": 2,
"code_window": [
"\t// CostModelVersion is a internal switch to indicates the Cost Model Version.\n",
"\tCostModelVersion int\n",
"\t// BatchPendingTiFlashCount shows the threshold of pending TiFlash tables when batch adding.\n",
"\tBatchPendingTiFlashCount int\n",
"\t// RcReadCheckTS indicates if ts check optimization is enabled for current session.\n",
"\tRcReadCheckTS bool\n",
"\t// RcWriteCheckTS indicates whether some special write statements don't get latest tso from PD at RC\n",
"\tRcWriteCheckTS bool\n",
"\t// RemoveOrderbyInSubquery indicates whether to remove ORDER BY in subquery.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/session.go",
"type": "replace",
"edit_start_line_idx": 1000
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric_test
import (
"context"
"errors"
"testing"
"github.com/pingcap/tidb/br/pkg/lightning/metric"
"github.com/pingcap/tidb/util/promutil"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestReadCounter(t *testing.T) {
counter := prometheus.NewCounter(prometheus.CounterOpts{})
counter.Add(1256.0)
counter.Add(2214.0)
require.Equal(t, 3470.0, metric.ReadCounter(counter))
}
func TestReadHistogramSum(t *testing.T) {
histogram := prometheus.NewHistogram(prometheus.HistogramOpts{})
histogram.Observe(11131.5)
histogram.Observe(15261.0)
require.Equal(t, 26392.5, metric.ReadHistogramSum(histogram))
}
func TestRecordEngineCount(t *testing.T) {
m := metric.NewMetrics(promutil.NewDefaultFactory())
m.RecordEngineCount("table1", nil)
m.RecordEngineCount("table1", errors.New("mock error"))
successCounter, err := m.ProcessedEngineCounter.GetMetricWithLabelValues("table1", "success")
require.NoError(t, err)
require.Equal(t, 1.0, metric.ReadCounter(successCounter))
failureCount, err := m.ProcessedEngineCounter.GetMetricWithLabelValues("table1", "failure")
require.NoError(t, err)
require.Equal(t, 1.0, metric.ReadCounter(failureCount))
}
func TestMetricsRegister(t *testing.T) {
m := metric.NewMetrics(promutil.NewDefaultFactory())
r := prometheus.NewRegistry()
m.RegisterTo(r)
assert.True(t, r.Unregister(m.ImporterEngineCounter))
assert.True(t, r.Unregister(m.IdleWorkersGauge))
assert.True(t, r.Unregister(m.KvEncoderCounter))
assert.True(t, r.Unregister(m.TableCounter))
assert.True(t, r.Unregister(m.ProcessedEngineCounter))
assert.True(t, r.Unregister(m.ChunkCounter))
assert.True(t, r.Unregister(m.BytesCounter))
assert.True(t, r.Unregister(m.ImportSecondsHistogram))
assert.True(t, r.Unregister(m.ChunkParserReadBlockSecondsHistogram))
assert.True(t, r.Unregister(m.ApplyWorkerSecondsHistogram))
assert.True(t, r.Unregister(m.RowReadSecondsHistogram))
assert.True(t, r.Unregister(m.RowReadBytesHistogram))
assert.True(t, r.Unregister(m.RowEncodeSecondsHistogram))
assert.True(t, r.Unregister(m.RowKVDeliverSecondsHistogram))
assert.True(t, r.Unregister(m.BlockDeliverSecondsHistogram))
assert.True(t, r.Unregister(m.BlockDeliverBytesHistogram))
assert.True(t, r.Unregister(m.BlockDeliverKVPairsHistogram))
assert.True(t, r.Unregister(m.ChecksumSecondsHistogram))
assert.True(t, r.Unregister(m.LocalStorageUsageBytesGauge))
assert.True(t, r.Unregister(m.ProgressGauge))
}
func TestMetricsUnregister(t *testing.T) {
m := metric.NewMetrics(promutil.NewDefaultFactory())
r := prometheus.NewRegistry()
m.RegisterTo(r)
m.UnregisterFrom(r)
assert.False(t, r.Unregister(m.ImporterEngineCounter))
assert.False(t, r.Unregister(m.IdleWorkersGauge))
assert.False(t, r.Unregister(m.KvEncoderCounter))
assert.False(t, r.Unregister(m.TableCounter))
assert.False(t, r.Unregister(m.ProcessedEngineCounter))
assert.False(t, r.Unregister(m.ChunkCounter))
assert.False(t, r.Unregister(m.BytesCounter))
assert.False(t, r.Unregister(m.ImportSecondsHistogram))
assert.False(t, r.Unregister(m.ChunkParserReadBlockSecondsHistogram))
assert.False(t, r.Unregister(m.ApplyWorkerSecondsHistogram))
assert.False(t, r.Unregister(m.RowReadSecondsHistogram))
assert.False(t, r.Unregister(m.RowReadBytesHistogram))
assert.False(t, r.Unregister(m.RowEncodeSecondsHistogram))
assert.False(t, r.Unregister(m.RowKVDeliverSecondsHistogram))
assert.False(t, r.Unregister(m.BlockDeliverSecondsHistogram))
assert.False(t, r.Unregister(m.BlockDeliverBytesHistogram))
assert.False(t, r.Unregister(m.BlockDeliverKVPairsHistogram))
assert.False(t, r.Unregister(m.ChecksumSecondsHistogram))
assert.False(t, r.Unregister(m.LocalStorageUsageBytesGauge))
assert.False(t, r.Unregister(m.ProgressGauge))
}
func TestContext(t *testing.T) {
ctx := metric.NewContext(context.Background(), metric.NewMetrics(promutil.NewDefaultFactory()))
m, ok := metric.FromContext(ctx)
require.True(t, ok)
require.NotNil(t, m)
}
| br/pkg/lightning/metric/metric_test.go | 0 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.0001760120503604412,
0.00017079163808375597,
0.0001630419137654826,
0.00017140631098300219,
0.000003688439619509154
] |
{
"id": 3,
"code_window": [
"\t\t},\n",
"\t\tGetGlobal: func(s *SessionVars) (string, error) {\n",
"\t\t\treturn BoolToOnOff(config.GetGlobalConfig().Instance.TiDBEnableDDL.Load()), nil\n",
"\t\t},\n",
"\t},\n",
"\n",
"\t/* The system variables below have GLOBAL scope */\n",
"\t{Scope: ScopeGlobal, Name: MaxPreparedStmtCount, Value: strconv.FormatInt(DefMaxPreparedStmtCount, 10), Type: TypeInt, MinValue: -1, MaxValue: 1048576},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t{Scope: ScopeInstance, Name: TiDBRCReadCheckTS, Value: BoolToOnOff(DefRCReadCheckTS), Type: TypeBool, SetGlobal: func(s *SessionVars, val string) error {\n",
"\t\tEnableRCReadCheckTS.Store(TiDBOptOn(val))\n",
"\t\treturn nil\n",
"\t}, GetGlobal: func(s *SessionVars) (string, error) {\n",
"\t\treturn BoolToOnOff(EnableRCReadCheckTS.Load()), nil\n",
"\t}},\n"
],
"file_path": "sessionctx/variable/sysvar.go",
"type": "add",
"edit_start_line_idx": 463
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package variable
import (
"encoding/json"
"fmt"
"strconv"
"sync/atomic"
"testing"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/terror"
"github.com/stretchr/testify/require"
)
func TestSQLSelectLimit(t *testing.T) {
sv := GetSysVar(SQLSelectLimit)
vars := NewSessionVars()
val, err := sv.Validate(vars, "-10", ScopeSession)
	require.NoError(t, err) // out-of-range values are auto-converted.
require.Equal(t, "0", val)
val, err = sv.Validate(vars, "9999", ScopeSession)
require.NoError(t, err)
require.Equal(t, "9999", val)
require.Nil(t, sv.SetSessionFromHook(vars, "9999")) // sets
require.Equal(t, uint64(9999), vars.SelectLimit)
}
func TestSQLModeVar(t *testing.T) {
sv := GetSysVar(SQLModeVar)
vars := NewSessionVars()
val, err := sv.Validate(vars, "strict_trans_tabLES ", ScopeSession)
require.NoError(t, err)
require.Equal(t, "STRICT_TRANS_TABLES", val)
_, err = sv.Validate(vars, "strict_trans_tabLES,nonsense_option", ScopeSession)
require.Equal(t, "ERROR 1231 (42000): Variable 'sql_mode' can't be set to the value of 'NONSENSE_OPTION'", err.Error())
val, err = sv.Validate(vars, "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION", ScopeSession)
require.NoError(t, err)
require.Equal(t, "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION", val)
require.Nil(t, sv.SetSessionFromHook(vars, val)) // sets to strict from above
require.True(t, vars.StrictSQLMode)
sqlMode, err := mysql.GetSQLMode(val)
require.NoError(t, err)
require.Equal(t, sqlMode, vars.SQLMode)
// Set it to non strict.
val, err = sv.Validate(vars, "ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION", ScopeSession)
require.NoError(t, err)
require.Equal(t, "ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION", val)
require.Nil(t, sv.SetSessionFromHook(vars, val)) // sets to non-strict from above
require.False(t, vars.StrictSQLMode)
sqlMode, err = mysql.GetSQLMode(val)
require.NoError(t, err)
require.Equal(t, sqlMode, vars.SQLMode)
}
func TestMaxExecutionTime(t *testing.T) {
sv := GetSysVar(MaxExecutionTime)
vars := NewSessionVars()
val, err := sv.Validate(vars, "-10", ScopeSession)
	require.NoError(t, err) // out-of-range values are auto-converted.
require.Equal(t, "0", val)
val, err = sv.Validate(vars, "99999", ScopeSession)
	require.NoError(t, err) // out-of-range values are auto-converted.
require.Equal(t, "99999", val)
require.Nil(t, sv.SetSessionFromHook(vars, "99999")) // sets
require.Equal(t, uint64(99999), vars.MaxExecutionTime)
}
func TestCollationServer(t *testing.T) {
sv := GetSysVar(CollationServer)
vars := NewSessionVars()
val, err := sv.Validate(vars, "LATIN1_bin", ScopeSession)
require.NoError(t, err)
require.Equal(t, "latin1_bin", val) // test normalization
_, err = sv.Validate(vars, "BOGUSCOLLation", ScopeSession)
require.Equal(t, "[ddl:1273]Unknown collation: 'BOGUSCOLLation'", err.Error())
require.Nil(t, sv.SetSessionFromHook(vars, "latin1_bin"))
require.Equal(t, "latin1", vars.systems[CharacterSetServer]) // check it also changes charset.
require.Nil(t, sv.SetSessionFromHook(vars, "utf8mb4_bin"))
require.Equal(t, "utf8mb4", vars.systems[CharacterSetServer]) // check it also changes charset.
}
func TestTimeZone(t *testing.T) {
sv := GetSysVar(TimeZone)
vars := NewSessionVars()
// TiDB uses the Golang TZ library, so TZs are case-sensitive.
// Unfortunately this is not strictly MySQL compatible. i.e.
// This should not fail:
// val, err := sv.Validate(vars, "America/EDMONTON", ScopeSession)
// See: https://github.com/pingcap/tidb/issues/8087
val, err := sv.Validate(vars, "America/Edmonton", ScopeSession)
require.NoError(t, err)
require.Equal(t, "America/Edmonton", val)
val, err = sv.Validate(vars, "+10:00", ScopeSession)
require.NoError(t, err)
require.Equal(t, "+10:00", val)
val, err = sv.Validate(vars, "UTC", ScopeSession)
require.NoError(t, err)
require.Equal(t, "UTC", val)
val, err = sv.Validate(vars, "+00:00", ScopeSession)
require.NoError(t, err)
require.Equal(t, "+00:00", val)
require.Nil(t, sv.SetSessionFromHook(vars, "UTC")) // sets
tz, err := parseTimeZone("UTC")
require.NoError(t, err)
require.Equal(t, tz, vars.TimeZone)
}
func TestTxnIsolation(t *testing.T) {
sv := GetSysVar(TxnIsolation)
vars := NewSessionVars()
_, err := sv.Validate(vars, "on", ScopeSession)
require.Equal(t, "[variable:1231]Variable 'tx_isolation' can't be set to the value of 'on'", err.Error())
val, err := sv.Validate(vars, "read-COMMitted", ScopeSession)
require.NoError(t, err)
require.Equal(t, "READ-COMMITTED", val)
_, err = sv.Validate(vars, "Serializable", ScopeSession)
require.Equal(t, "[variable:8048]The isolation level 'SERIALIZABLE' is not supported. Set tidb_skip_isolation_level_check=1 to skip this error", err.Error())
_, err = sv.Validate(vars, "read-uncommitted", ScopeSession)
require.Equal(t, "[variable:8048]The isolation level 'READ-UNCOMMITTED' is not supported. Set tidb_skip_isolation_level_check=1 to skip this error", err.Error())
	// Enabling the global skip-isolation-level check doesn't affect the current session
require.Nil(t, GetSysVar(TiDBSkipIsolationLevelCheck).SetGlobalFromHook(vars, "ON", true))
_, err = sv.Validate(vars, "Serializable", ScopeSession)
require.Equal(t, "[variable:8048]The isolation level 'SERIALIZABLE' is not supported. Set tidb_skip_isolation_level_check=1 to skip this error", err.Error())
// Enable session skip isolation check
require.Nil(t, GetSysVar(TiDBSkipIsolationLevelCheck).SetSessionFromHook(vars, "ON"))
val, err = sv.Validate(vars, "Serializable", ScopeSession)
require.NoError(t, err)
require.Equal(t, "SERIALIZABLE", val)
	// Init TiDBSkipIsolationLevelCheck the way loadCommonGlobalVariables does
vars = NewSessionVars()
require.NoError(t, vars.SetSystemVarWithRelaxedValidation(TiDBSkipIsolationLevelCheck, "1"))
val, err = sv.Validate(vars, "Serializable", ScopeSession)
require.NoError(t, err)
require.Equal(t, "SERIALIZABLE", val)
}
func TestTiDBMultiStatementMode(t *testing.T) {
sv := GetSysVar(TiDBMultiStatementMode)
vars := NewSessionVars()
val, err := sv.Validate(vars, "on", ScopeSession)
require.NoError(t, err)
require.Equal(t, "ON", val)
require.Nil(t, sv.SetSessionFromHook(vars, val))
require.Equal(t, 1, vars.MultiStatementMode)
val, err = sv.Validate(vars, "0", ScopeSession)
require.NoError(t, err)
require.Equal(t, "OFF", val)
require.Nil(t, sv.SetSessionFromHook(vars, val))
require.Equal(t, 0, vars.MultiStatementMode)
val, err = sv.Validate(vars, "Warn", ScopeSession)
require.NoError(t, err)
require.Equal(t, "WARN", val)
require.Nil(t, sv.SetSessionFromHook(vars, val))
require.Equal(t, 2, vars.MultiStatementMode)
}
func TestReadOnlyNoop(t *testing.T) {
vars := NewSessionVars()
mock := NewMockGlobalAccessor4Tests()
mock.SessionVars = vars
vars.GlobalVarsAccessor = mock
noopFuncs := GetSysVar(TiDBEnableNoopFuncs)
// For session scope
for _, name := range []string{TxReadOnly, TransactionReadOnly} {
sv := GetSysVar(name)
val, err := sv.Validate(vars, "on", ScopeSession)
require.Equal(t, "[variable:1235]function READ ONLY has only noop implementation in tidb now, use tidb_enable_noop_functions to enable these functions", err.Error())
require.Equal(t, "OFF", val)
require.NoError(t, noopFuncs.SetSessionFromHook(vars, "ON"))
_, err = sv.Validate(vars, "on", ScopeSession)
require.NoError(t, err)
require.NoError(t, noopFuncs.SetSessionFromHook(vars, "OFF")) // restore default.
}
// For global scope
for _, name := range []string{TxReadOnly, TransactionReadOnly, OfflineMode, SuperReadOnly, ReadOnly} {
sv := GetSysVar(name)
val, err := sv.Validate(vars, "on", ScopeGlobal)
if name == OfflineMode {
require.Equal(t, "[variable:1235]function OFFLINE MODE has only noop implementation in tidb now, use tidb_enable_noop_functions to enable these functions", err.Error())
} else {
require.Equal(t, "[variable:1235]function READ ONLY has only noop implementation in tidb now, use tidb_enable_noop_functions to enable these functions", err.Error())
}
require.Equal(t, "OFF", val)
require.NoError(t, vars.GlobalVarsAccessor.SetGlobalSysVar(TiDBEnableNoopFuncs, "ON"))
_, err = sv.Validate(vars, "on", ScopeGlobal)
require.NoError(t, err)
require.NoError(t, vars.GlobalVarsAccessor.SetGlobalSysVar(TiDBEnableNoopFuncs, "OFF"))
}
}
func TestSkipInit(t *testing.T) {
sv := SysVar{Scope: ScopeGlobal, Name: "skipinit1", Value: On, Type: TypeBool}
require.True(t, sv.SkipInit())
sv = SysVar{Scope: ScopeGlobal | ScopeSession, Name: "skipinit1", Value: On, Type: TypeBool}
require.False(t, sv.SkipInit())
sv = SysVar{Scope: ScopeSession, Name: "skipinit1", Value: On, Type: TypeBool}
require.False(t, sv.SkipInit())
sv = SysVar{Scope: ScopeSession, Name: "skipinit1", Value: On, Type: TypeBool, skipInit: true}
require.True(t, sv.SkipInit())
}
func TestSessionGetterFuncs(t *testing.T) {
vars := NewSessionVars()
val, err := vars.GetSessionOrGlobalSystemVar(TiDBCurrentTS)
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%d", vars.TxnCtx.StartTS), val)
val, err = vars.GetSessionOrGlobalSystemVar(TiDBLastTxnInfo)
require.NoError(t, err)
require.Equal(t, vars.LastTxnInfo, val)
val, err = vars.GetSessionOrGlobalSystemVar(TiDBLastQueryInfo)
require.NoError(t, err)
info, err := json.Marshal(vars.LastQueryInfo)
require.NoError(t, err)
require.Equal(t, string(info), val)
val, err = vars.GetSessionOrGlobalSystemVar(TiDBFoundInPlanCache)
require.NoError(t, err)
require.Equal(t, BoolToOnOff(vars.PrevFoundInPlanCache), val)
val, err = vars.GetSessionOrGlobalSystemVar(TiDBFoundInBinding)
require.NoError(t, err)
require.Equal(t, BoolToOnOff(vars.PrevFoundInBinding), val)
val, err = vars.GetSessionOrGlobalSystemVar(TiDBTxnScope)
require.NoError(t, err)
require.Equal(t, vars.TxnScope.GetVarValue(), val)
}
func TestInstanceScopedVars(t *testing.T) {
vars := NewSessionVars()
val, err := vars.GetSessionOrGlobalSystemVar(TiDBGeneralLog)
require.NoError(t, err)
require.Equal(t, BoolToOnOff(ProcessGeneralLog.Load()), val)
val, err = vars.GetSessionOrGlobalSystemVar(TiDBPProfSQLCPU)
require.NoError(t, err)
expected := "0"
if EnablePProfSQLCPU.Load() {
expected = "1"
}
require.Equal(t, expected, val)
val, err = vars.GetSessionOrGlobalSystemVar(TiDBExpensiveQueryTimeThreshold)
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%d", atomic.LoadUint64(&ExpensiveQueryTimeThreshold)), val)
val, err = vars.GetSessionOrGlobalSystemVar(TiDBMemoryUsageAlarmRatio)
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%g", MemoryUsageAlarmRatio.Load()), val)
val, err = vars.GetSessionOrGlobalSystemVar(TiDBForcePriority)
require.NoError(t, err)
require.Equal(t, mysql.Priority2Str[mysql.PriorityEnum(atomic.LoadInt32(&ForcePriority))], val)
val, err = vars.GetSessionOrGlobalSystemVar(TiDBDDLSlowOprThreshold)
require.NoError(t, err)
require.Equal(t, strconv.FormatUint(uint64(atomic.LoadUint32(&DDLSlowOprThreshold)), 10), val)
val, err = vars.GetSessionOrGlobalSystemVar(PluginDir)
require.NoError(t, err)
require.Equal(t, config.GetGlobalConfig().Instance.PluginDir, val)
val, err = vars.GetSessionOrGlobalSystemVar(PluginLoad)
require.NoError(t, err)
require.Equal(t, config.GetGlobalConfig().Instance.PluginLoad, val)
val, err = vars.GetSessionOrGlobalSystemVar(TiDBSlowLogThreshold)
require.NoError(t, err)
require.Equal(t, strconv.FormatUint(atomic.LoadUint64(&config.GetGlobalConfig().Instance.SlowThreshold), 10), val)
val, err = vars.GetSessionOrGlobalSystemVar(TiDBRecordPlanInSlowLog)
require.NoError(t, err)
enabled := atomic.LoadUint32(&config.GetGlobalConfig().Instance.RecordPlanInSlowLog) == 1
require.Equal(t, BoolToOnOff(enabled), val)
val, err = vars.GetSessionOrGlobalSystemVar(TiDBEnableSlowLog)
require.NoError(t, err)
require.Equal(t, BoolToOnOff(config.GetGlobalConfig().Instance.EnableSlowLog.Load()), val)
val, err = vars.GetSessionOrGlobalSystemVar(TiDBCheckMb4ValueInUTF8)
require.NoError(t, err)
require.Equal(t, BoolToOnOff(config.GetGlobalConfig().Instance.CheckMb4ValueInUTF8.Load()), val)
val, err = vars.GetSessionOrGlobalSystemVar(TiDBEnableCollectExecutionInfo)
require.NoError(t, err)
require.Equal(t, BoolToOnOff(config.GetGlobalConfig().Instance.EnableCollectExecutionInfo), val)
val, err = vars.GetSessionOrGlobalSystemVar(TiDBConfig)
require.NoError(t, err)
expected, err = config.GetJSONConfig()
require.NoError(t, err)
require.Equal(t, expected, val)
val, err = vars.GetSessionOrGlobalSystemVar(TiDBLogFileMaxDays)
require.NoError(t, err)
require.Equal(t, fmt.Sprint(GlobalLogMaxDays.Load()), val)
}
func TestSecureAuth(t *testing.T) {
sv := GetSysVar(SecureAuth)
vars := NewSessionVars()
_, err := sv.Validate(vars, "OFF", ScopeGlobal)
require.Equal(t, "[variable:1231]Variable 'secure_auth' can't be set to the value of 'OFF'", err.Error())
val, err := sv.Validate(vars, "ON", ScopeGlobal)
require.NoError(t, err)
require.Equal(t, "ON", val)
}
func TestTiDBReplicaRead(t *testing.T) {
sv := GetSysVar(TiDBReplicaRead)
vars := NewSessionVars()
val, err := sv.Validate(vars, "follower", ScopeGlobal)
require.Equal(t, val, "follower")
require.NoError(t, err)
}
func TestSQLAutoIsNull(t *testing.T) {
svSQL, svNoop := GetSysVar(SQLAutoIsNull), GetSysVar(TiDBEnableNoopFuncs)
vars := NewSessionVars()
vars.GlobalVarsAccessor = NewMockGlobalAccessor4Tests()
_, err := svSQL.Validate(vars, "ON", ScopeSession)
require.True(t, terror.ErrorEqual(err, ErrFunctionsNoopImpl))
	// change tidb_enable_noop_functions to 1, then it will succeed
require.NoError(t, svNoop.SetSessionFromHook(vars, "ON"))
_, err = svSQL.Validate(vars, "ON", ScopeSession)
require.NoError(t, err)
require.NoError(t, svSQL.SetSessionFromHook(vars, "ON"))
res, ok := vars.GetSystemVar(SQLAutoIsNull)
require.True(t, ok)
require.Equal(t, "ON", res)
	// restoring tidb_enable_noop_functions to 0 fails, as sql_auto_is_null is 1
_, err = svNoop.Validate(vars, "OFF", ScopeSession)
require.True(t, terror.ErrorEqual(err, errValueNotSupportedWhen))
	// after setting sql_auto_is_null to 0, the restore succeeds
require.NoError(t, svSQL.SetSessionFromHook(vars, "OFF"))
require.NoError(t, svNoop.SetSessionFromHook(vars, "OFF"))
	// Only test Validate, as MockGlobalAccessor does not support SetGlobalSysVar
_, err = svSQL.Validate(vars, "ON", ScopeGlobal)
require.True(t, terror.ErrorEqual(err, ErrFunctionsNoopImpl))
}
func TestLastInsertID(t *testing.T) {
vars := NewSessionVars()
val, err := vars.GetSessionOrGlobalSystemVar(LastInsertID)
require.NoError(t, err)
require.Equal(t, val, "0")
vars.StmtCtx.PrevLastInsertID = 21
val, err = vars.GetSessionOrGlobalSystemVar(LastInsertID)
require.NoError(t, err)
require.Equal(t, val, "21")
}
func TestTimestamp(t *testing.T) {
vars := NewSessionVars()
val, err := vars.GetSessionOrGlobalSystemVar(Timestamp)
require.NoError(t, err)
require.NotEqual(t, "", val)
vars.systems[Timestamp] = "10"
val, err = vars.GetSessionOrGlobalSystemVar(Timestamp)
require.NoError(t, err)
require.Equal(t, "10", val)
vars.systems[Timestamp] = "0" // set to default
val, err = vars.GetSessionOrGlobalSystemVar(Timestamp)
require.NoError(t, err)
require.NotEqual(t, "", val)
require.NotEqual(t, "10", val)
// Test validating a value that less than the minimum one.
sv := GetSysVar(Timestamp)
_, err = sv.Validate(vars, "-5", ScopeSession)
require.NoError(t, err)
warn := vars.StmtCtx.GetWarnings()[0].Err
require.Equal(t, "[variable:1292]Truncated incorrect timestamp value: '-5'", warn.Error())
// Test validating values that larger than the maximum one.
_, err = sv.Validate(vars, "3147483698", ScopeSession)
require.Equal(t, "[variable:1231]Variable 'timestamp' can't be set to the value of '3147483698'", err.Error())
_, err = sv.Validate(vars, "2147483648", ScopeSession)
require.Equal(t, "[variable:1231]Variable 'timestamp' can't be set to the value of '2147483648'", err.Error())
// Test validating the maximum value.
_, err = sv.Validate(vars, "2147483647", ScopeSession)
require.NoError(t, err)
}
func TestIdentity(t *testing.T) {
vars := NewSessionVars()
val, err := vars.GetSessionOrGlobalSystemVar(Identity)
require.NoError(t, err)
require.Equal(t, val, "0")
vars.StmtCtx.PrevLastInsertID = 21
val, err = vars.GetSessionOrGlobalSystemVar(Identity)
require.NoError(t, err)
require.Equal(t, val, "21")
}
func TestLcTimeNamesReadOnly(t *testing.T) {
sv := GetSysVar("lc_time_names")
vars := NewSessionVars()
vars.GlobalVarsAccessor = NewMockGlobalAccessor4Tests()
_, err := sv.Validate(vars, "newvalue", ScopeGlobal)
require.Error(t, err)
}
func TestLcMessagesReadOnly(t *testing.T) {
sv := GetSysVar("lc_messages")
vars := NewSessionVars()
vars.GlobalVarsAccessor = NewMockGlobalAccessor4Tests()
_, err := sv.Validate(vars, "newvalue", ScopeGlobal)
require.Error(t, err)
}
func TestDDLWorkers(t *testing.T) {
svWorkerCount, svBatchSize := GetSysVar(TiDBDDLReorgWorkerCount), GetSysVar(TiDBDDLReorgBatchSize)
vars := NewSessionVars()
vars.GlobalVarsAccessor = NewMockGlobalAccessor4Tests()
val, err := svWorkerCount.Validate(vars, "-100", ScopeGlobal)
require.NoError(t, err)
require.Equal(t, val, "1") // converts it to min value
val, err = svWorkerCount.Validate(vars, "1234", ScopeGlobal)
require.NoError(t, err)
require.Equal(t, val, "256") // converts it to max value
val, err = svWorkerCount.Validate(vars, "100", ScopeGlobal)
require.NoError(t, err)
require.Equal(t, val, "100") // unchanged
val, err = svBatchSize.Validate(vars, "10", ScopeGlobal)
require.NoError(t, err)
require.Equal(t, val, fmt.Sprint(MinDDLReorgBatchSize)) // converts it to min value
val, err = svBatchSize.Validate(vars, "999999", ScopeGlobal)
require.NoError(t, err)
require.Equal(t, val, fmt.Sprint(MaxDDLReorgBatchSize)) // converts it to max value
val, err = svBatchSize.Validate(vars, "100", ScopeGlobal)
require.NoError(t, err)
require.Equal(t, val, "100") // unchanged
}
func TestDefaultCharsetAndCollation(t *testing.T) {
vars := NewSessionVars()
val, err := vars.GetSessionOrGlobalSystemVar(CharacterSetConnection)
require.NoError(t, err)
require.Equal(t, val, mysql.DefaultCharset)
val, err = vars.GetSessionOrGlobalSystemVar(CollationConnection)
require.NoError(t, err)
require.Equal(t, val, mysql.DefaultCollationName)
}
func TestIndexMergeSwitcher(t *testing.T) {
vars := NewSessionVars()
vars.GlobalVarsAccessor = NewMockGlobalAccessor4Tests()
val, err := vars.GetSessionOrGlobalSystemVar(TiDBEnableIndexMerge)
require.NoError(t, err)
require.Equal(t, DefTiDBEnableIndexMerge, true)
require.Equal(t, BoolToOnOff(DefTiDBEnableIndexMerge), val)
}
func TestNetBufferLength(t *testing.T) {
netBufferLength := GetSysVar(NetBufferLength)
vars := NewSessionVars()
vars.GlobalVarsAccessor = NewMockGlobalAccessor4Tests()
val, err := netBufferLength.Validate(vars, "1", ScopeGlobal)
require.NoError(t, err)
require.Equal(t, "1024", val) // converts it to min value
val, err = netBufferLength.Validate(vars, "10485760", ScopeGlobal)
require.NoError(t, err)
require.Equal(t, "1048576", val) // converts it to max value
val, err = netBufferLength.Validate(vars, "524288", ScopeGlobal)
require.NoError(t, err)
require.Equal(t, "524288", val) // unchanged
}
func TestTiDBBatchPendingTiFlashCount(t *testing.T) {
sv := GetSysVar(TiDBBatchPendingTiFlashCount)
vars := NewSessionVars()
val, err := sv.Validate(vars, "-10", ScopeSession)
	require.NoError(t, err) // out-of-range values are auto-converted.
require.Equal(t, "0", val)
val, err = sv.Validate(vars, "9999", ScopeSession)
require.NoError(t, err)
require.Equal(t, "9999", val)
_, err = sv.Validate(vars, "1.5", ScopeSession)
require.Error(t, err)
require.EqualError(t, err, "[variable:1232]Incorrect argument type to variable 'tidb_batch_pending_tiflash_count'")
}
func TestTiDBMemQuotaQuery(t *testing.T) {
sv := GetSysVar(TiDBMemQuotaQuery)
vars := NewSessionVars()
for _, scope := range []ScopeFlag{ScopeGlobal, ScopeSession} {
newVal := 32 * 1024 * 1024
val, err := sv.Validate(vars, fmt.Sprintf("%d", newVal), scope)
require.Equal(t, val, "33554432")
require.NoError(t, err)
// min value out of range
newVal = -2
expected := -1
val, err = sv.Validate(vars, fmt.Sprintf("%d", newVal), scope)
// expected to truncate
require.Equal(t, val, fmt.Sprintf("%d", expected))
require.NoError(t, err)
}
}
func TestTiDBQueryLogMaxLen(t *testing.T) {
sv := GetSysVar(TiDBQueryLogMaxLen)
vars := NewSessionVars()
newVal := 32 * 1024 * 1024
val, err := sv.Validate(vars, fmt.Sprintf("%d", newVal), ScopeGlobal)
require.Equal(t, val, "33554432")
require.NoError(t, err)
// out of range
newVal = 1073741825
expected := 1073741824
val, err = sv.Validate(vars, fmt.Sprintf("%d", newVal), ScopeGlobal)
// expected to truncate
require.Equal(t, val, fmt.Sprintf("%d", expected))
require.NoError(t, err)
// min value out of range
newVal = -2
expected = 0
val, err = sv.Validate(vars, fmt.Sprintf("%d", newVal), ScopeGlobal)
// expected to set to min value
require.Equal(t, val, fmt.Sprintf("%d", expected))
require.NoError(t, err)
}
func TestTiDBCommitterConcurrency(t *testing.T) {
sv := GetSysVar(TiDBCommitterConcurrency)
vars := NewSessionVars()
newVal := 1024
val, err := sv.Validate(vars, fmt.Sprintf("%d", newVal), ScopeGlobal)
require.Equal(t, val, "1024")
require.NoError(t, err)
// out of range
newVal = 10001
expected := 10000
val, err = sv.Validate(vars, fmt.Sprintf("%d", newVal), ScopeGlobal)
// expected to truncate
require.Equal(t, val, fmt.Sprintf("%d", expected))
require.NoError(t, err)
// min value out of range
newVal = 0
expected = 1
val, err = sv.Validate(vars, fmt.Sprintf("%d", newVal), ScopeGlobal)
// expected to set to min value
require.Equal(t, val, fmt.Sprintf("%d", expected))
require.NoError(t, err)
}
func TestTiDBDDLFlashbackConcurrency(t *testing.T) {
sv := GetSysVar(TiDBDDLFlashbackConcurrency)
vars := NewSessionVars()
newVal := 128
val, err := sv.Validate(vars, fmt.Sprintf("%d", newVal), ScopeGlobal)
require.Equal(t, val, "128")
require.NoError(t, err)
// out of range
newVal = MaxConfigurableConcurrency + 1
expected := MaxConfigurableConcurrency
val, err = sv.Validate(vars, fmt.Sprintf("%d", newVal), ScopeGlobal)
// expected to truncate
require.Equal(t, val, fmt.Sprintf("%d", expected))
require.NoError(t, err)
// min value out of range
newVal = 0
expected = 1
val, err = sv.Validate(vars, fmt.Sprintf("%d", newVal), ScopeGlobal)
// expected to set to min value
require.Equal(t, val, fmt.Sprintf("%d", expected))
require.NoError(t, err)
}
func TestDefaultMemoryDebugModeValue(t *testing.T) {
vars := NewSessionVars()
val, err := vars.GetSessionOrGlobalSystemVar(TiDBMemoryDebugModeMinHeapInUse)
require.NoError(t, err)
require.Equal(t, val, "0")
val, err = vars.GetSessionOrGlobalSystemVar(TiDBMemoryDebugModeAlarmRatio)
require.NoError(t, err)
require.Equal(t, val, "0")
}
func TestDefaultPartitionPruneMode(t *testing.T) {
vars := NewSessionVars()
mock := NewMockGlobalAccessor4Tests()
mock.SessionVars = vars
vars.GlobalVarsAccessor = mock
val, err := vars.GetSessionOrGlobalSystemVar(TiDBPartitionPruneMode)
require.NoError(t, err)
require.Equal(t, "dynamic", val)
require.Equal(t, "dynamic", DefTiDBPartitionPruneMode)
}
func TestSetTIDBFastDDL(t *testing.T) {
vars := NewSessionVars()
mock := NewMockGlobalAccessor4Tests()
mock.SessionVars = vars
vars.GlobalVarsAccessor = mock
fastDDL := GetSysVar(TiDBDDLEnableFastReorg)
// Default off
require.Equal(t, fastDDL.Value, Off)
// Set to On
err := mock.SetGlobalSysVar(TiDBDDLEnableFastReorg, On)
require.NoError(t, err)
val, err1 := mock.GetGlobalSysVar(TiDBDDLEnableFastReorg)
require.NoError(t, err1)
require.Equal(t, On, val)
// Set to off
err = mock.SetGlobalSysVar(TiDBDDLEnableFastReorg, Off)
require.NoError(t, err)
val, err1 = mock.GetGlobalSysVar(TiDBDDLEnableFastReorg)
require.NoError(t, err1)
require.Equal(t, Off, val)
}
func TestSetTIDBDiskQuota(t *testing.T) {
vars := NewSessionVars()
mock := NewMockGlobalAccessor4Tests()
mock.SessionVars = vars
vars.GlobalVarsAccessor = mock
diskQuota := GetSysVar(TiDBDDLDiskQuota)
var (
gb int64 = 1024 * 1024 * 1024
pb int64 = 1024 * 1024 * 1024 * 1024 * 1024
err error
val string
)
// Default 100 GB
require.Equal(t, diskQuota.Value, strconv.FormatInt(100*gb, 10))
	// MinValue is 100 GB; setting it to 50 GB is not allowed
err = mock.SetGlobalSysVar(TiDBDDLDiskQuota, strconv.FormatInt(50*gb, 10))
require.NoError(t, err)
val, err = mock.GetGlobalSysVar(TiDBDDLDiskQuota)
require.NoError(t, err)
require.Equal(t, strconv.FormatInt(100*gb, 10), val)
// Set to 100 GB
err = mock.SetGlobalSysVar(TiDBDDLDiskQuota, strconv.FormatInt(100*gb, 10))
require.NoError(t, err)
val, err = mock.GetGlobalSysVar(TiDBDDLDiskQuota)
require.NoError(t, err)
require.Equal(t, strconv.FormatInt(100*gb, 10), val)
// Set to 200 GB
err = mock.SetGlobalSysVar(TiDBDDLDiskQuota, strconv.FormatInt(200*gb, 10))
require.NoError(t, err)
val, err = mock.GetGlobalSysVar(TiDBDDLDiskQuota)
require.NoError(t, err)
require.Equal(t, strconv.FormatInt(200*gb, 10), val)
	// Set to 1 PB
err = mock.SetGlobalSysVar(TiDBDDLDiskQuota, strconv.FormatInt(pb, 10))
require.NoError(t, err)
val, err = mock.GetGlobalSysVar(TiDBDDLDiskQuota)
require.NoError(t, err)
require.Equal(t, strconv.FormatInt(pb, 10), val)
	// MaxValue is 1 PB; setting it to 2 PB is not allowed, so the value is set back to the max allowed 1 PB.
err = mock.SetGlobalSysVar(TiDBDDLDiskQuota, strconv.FormatInt(2*pb, 10))
require.NoError(t, err)
val, err = mock.GetGlobalSysVar(TiDBDDLDiskQuota)
require.NoError(t, err)
require.Equal(t, strconv.FormatInt(pb, 10), val)
}
| sessionctx/variable/sysvar_test.go | 1 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.006812130566686392,
0.0009334358619526029,
0.00016567076090723276,
0.0002829729928635061,
0.001393010257743299
] |
{
"id": 3,
"code_window": [
"\t\t},\n",
"\t\tGetGlobal: func(s *SessionVars) (string, error) {\n",
"\t\t\treturn BoolToOnOff(config.GetGlobalConfig().Instance.TiDBEnableDDL.Load()), nil\n",
"\t\t},\n",
"\t},\n",
"\n",
"\t/* The system variables below have GLOBAL scope */\n",
"\t{Scope: ScopeGlobal, Name: MaxPreparedStmtCount, Value: strconv.FormatInt(DefMaxPreparedStmtCount, 10), Type: TypeInt, MinValue: -1, MaxValue: 1048576},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t{Scope: ScopeInstance, Name: TiDBRCReadCheckTS, Value: BoolToOnOff(DefRCReadCheckTS), Type: TypeBool, SetGlobal: func(s *SessionVars, val string) error {\n",
"\t\tEnableRCReadCheckTS.Store(TiDBOptOn(val))\n",
"\t\treturn nil\n",
"\t}, GetGlobal: func(s *SessionVars) (string, error) {\n",
"\t\treturn BoolToOnOff(EnableRCReadCheckTS.Load()), nil\n",
"\t}},\n"
],
"file_path": "sessionctx/variable/sysvar.go",
"type": "add",
"edit_start_line_idx": 463
} | [lightning]
table-concurrency = 1
[tikv-importer]
backend = "local"
[checkpoint]
enable = true
driver = "file"
[mydumper]
batch-size = 50 # force splitting the data into 4 batches
| br/tests/lightning_checkpoint_engines/config.toml | 0 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.0001734161050990224,
0.00017095920338761061,
0.00016850230167619884,
0.00017095920338761061,
0.000002456901711411774
] |
{
"id": 3,
"code_window": [
"\t\t},\n",
"\t\tGetGlobal: func(s *SessionVars) (string, error) {\n",
"\t\t\treturn BoolToOnOff(config.GetGlobalConfig().Instance.TiDBEnableDDL.Load()), nil\n",
"\t\t},\n",
"\t},\n",
"\n",
"\t/* The system variables below have GLOBAL scope */\n",
"\t{Scope: ScopeGlobal, Name: MaxPreparedStmtCount, Value: strconv.FormatInt(DefMaxPreparedStmtCount, 10), Type: TypeInt, MinValue: -1, MaxValue: 1048576},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t{Scope: ScopeInstance, Name: TiDBRCReadCheckTS, Value: BoolToOnOff(DefRCReadCheckTS), Type: TypeBool, SetGlobal: func(s *SessionVars, val string) error {\n",
"\t\tEnableRCReadCheckTS.Store(TiDBOptOn(val))\n",
"\t\treturn nil\n",
"\t}, GetGlobal: func(s *SessionVars) (string, error) {\n",
"\t\treturn BoolToOnOff(EnableRCReadCheckTS.Load()), nil\n",
"\t}},\n"
],
"file_path": "sessionctx/variable/sysvar.go",
"type": "add",
"edit_start_line_idx": 463
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kv
import (
"context"
"sync"
"github.com/tikv/client-go/v2/tikv"
)
// InjectionConfig is used for fault injections for KV components.
type InjectionConfig struct {
sync.RWMutex
getError error // kv.Get() always return this error.
commitError error // Transaction.Commit() always return this error.
}
// SetGetError injects an error for all kv.Get() methods.
func (c *InjectionConfig) SetGetError(err error) {
c.Lock()
defer c.Unlock()
c.getError = err
}
// SetCommitError injects an error for all Transaction.Commit() methods.
func (c *InjectionConfig) SetCommitError(err error) {
c.Lock()
defer c.Unlock()
c.commitError = err
}
// InjectedStore wraps a Storage with injections.
type InjectedStore struct {
Storage
cfg *InjectionConfig
}
// NewInjectedStore creates a InjectedStore with config.
func NewInjectedStore(store Storage, cfg *InjectionConfig) Storage {
return &InjectedStore{
Storage: store,
cfg: cfg,
}
}
// Begin creates an injected Transaction.
func (s *InjectedStore) Begin(opts ...tikv.TxnOption) (Transaction, error) {
txn, err := s.Storage.Begin(opts...)
return &InjectedTransaction{
Transaction: txn,
cfg: s.cfg,
}, err
}
// GetSnapshot creates an injected Snapshot.
func (s *InjectedStore) GetSnapshot(ver Version) Snapshot {
snapshot := s.Storage.GetSnapshot(ver)
return &InjectedSnapshot{
Snapshot: snapshot,
cfg: s.cfg,
}
}
// InjectedTransaction wraps a Transaction with injections.
type InjectedTransaction struct {
Transaction
cfg *InjectionConfig
}
// Get returns an error if cfg.getError is set.
func (t *InjectedTransaction) Get(ctx context.Context, k Key) ([]byte, error) {
t.cfg.RLock()
defer t.cfg.RUnlock()
if t.cfg.getError != nil {
return nil, t.cfg.getError
}
return t.Transaction.Get(ctx, k)
}
// BatchGet returns an error if cfg.getError is set.
func (t *InjectedTransaction) BatchGet(ctx context.Context, keys []Key) (map[string][]byte, error) {
t.cfg.RLock()
defer t.cfg.RUnlock()
if t.cfg.getError != nil {
return nil, t.cfg.getError
}
return t.Transaction.BatchGet(ctx, keys)
}
// Commit returns an error if cfg.commitError is set.
func (t *InjectedTransaction) Commit(ctx context.Context) error {
t.cfg.RLock()
defer t.cfg.RUnlock()
if t.cfg.commitError != nil {
return t.cfg.commitError
}
return t.Transaction.Commit(ctx)
}
// InjectedSnapshot wraps a Snapshot with injections.
type InjectedSnapshot struct {
Snapshot
cfg *InjectionConfig
}
// Get returns an error if cfg.getError is set.
func (t *InjectedSnapshot) Get(ctx context.Context, k Key) ([]byte, error) {
t.cfg.RLock()
defer t.cfg.RUnlock()
if t.cfg.getError != nil {
return nil, t.cfg.getError
}
return t.Snapshot.Get(ctx, k)
}
// BatchGet returns an error if cfg.getError is set.
func (t *InjectedSnapshot) BatchGet(ctx context.Context, keys []Key) (map[string][]byte, error) {
t.cfg.RLock()
defer t.cfg.RUnlock()
if t.cfg.getError != nil {
return nil, t.cfg.getError
}
return t.Snapshot.BatchGet(ctx, keys)
}
| kv/fault_injection.go | 0 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.0012296400964260101,
0.0002459367970004678,
0.0001619135873625055,
0.00016747263725847006,
0.00027291328296996653
] |
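The fault-injection wrappers above are meant to be driven from tests: wrap a Storage once, then toggle errors per test case. A rough usage sketch follows; newTestStorage is a hypothetical helper standing in for any real kv.Storage, and the test itself is illustrative rather than part of the package.

package kv_test

import (
	"context"
	"errors"
	"testing"

	"github.com/pingcap/tidb/kv"
)

// TestInjectedGetError sketches how the fault-injection wrappers are intended
// to be used. newTestStorage is a hypothetical helper that returns some real
// kv.Storage (e.g. a mock store); it is not defined in the package above.
func TestInjectedGetError(t *testing.T) {
	store := newTestStorage(t) // hypothetical: any kv.Storage works here
	cfg := &kv.InjectionConfig{}
	injected := kv.NewInjectedStore(store, cfg)

	cfg.SetGetError(errors.New("injected get error"))
	txn, err := injected.Begin()
	if err != nil {
		t.Fatal(err)
	}
	// Every Get on the wrapped transaction now fails with the injected error.
	if _, err := txn.Get(context.Background(), kv.Key("k")); err == nil {
		t.Fatal("expected the injected error")
	}
	cfg.SetGetError(nil) // clear the fault so later reads behave normally
}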
{
"id": 3,
"code_window": [
"\t\t},\n",
"\t\tGetGlobal: func(s *SessionVars) (string, error) {\n",
"\t\t\treturn BoolToOnOff(config.GetGlobalConfig().Instance.TiDBEnableDDL.Load()), nil\n",
"\t\t},\n",
"\t},\n",
"\n",
"\t/* The system variables below have GLOBAL scope */\n",
"\t{Scope: ScopeGlobal, Name: MaxPreparedStmtCount, Value: strconv.FormatInt(DefMaxPreparedStmtCount, 10), Type: TypeInt, MinValue: -1, MaxValue: 1048576},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t{Scope: ScopeInstance, Name: TiDBRCReadCheckTS, Value: BoolToOnOff(DefRCReadCheckTS), Type: TypeBool, SetGlobal: func(s *SessionVars, val string) error {\n",
"\t\tEnableRCReadCheckTS.Store(TiDBOptOn(val))\n",
"\t\treturn nil\n",
"\t}, GetGlobal: func(s *SessionVars) (string, error) {\n",
"\t\treturn BoolToOnOff(EnableRCReadCheckTS.Load()), nil\n",
"\t}},\n"
],
"file_path": "sessionctx/variable/sysvar.go",
"type": "add",
"edit_start_line_idx": 463
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor_test
import (
"strings"
"testing"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/testkit/testdata"
"github.com/pingcap/tidb/util/plancodec"
"github.com/stretchr/testify/require"
)
func TestIndexLookupMergeJoinHang(t *testing.T) {
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/IndexMergeJoinMockOOM", `return(true)`))
defer func() {
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/IndexMergeJoinMockOOM"))
}()
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (a int,b int,index idx(a))")
tk.MustExec("create table t2 (a int,b int,index idx(a))")
tk.MustExec("insert into t1 values (1,1),(2,2),(3,3),(2000,2000)")
tk.MustExec("insert into t2 values (1,1),(2,2),(3,3),(2000,2000)")
// Do not hang in index merge join when OOM occurs.
err := tk.QueryToErr("select /*+ INL_MERGE_JOIN(t1, t2) */ * from t1, t2 where t1.a = t2.a")
require.Error(t, err)
require.Equal(t, "OOM test index merge join doesn't hang here.", err.Error())
}
func TestIssue28052(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE `t` (" +
"`col_tinyint_key_signed` tinyint(4) DEFAULT NULL," +
"`col_year_key_signed` year(4) DEFAULT NULL," +
"KEY `col_tinyint_key_signed` (`col_tinyint_key_signed`)," +
"KEY `col_year_key_signed` (`col_year_key_signed`)" +
" ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")
tk.MustExec("insert into t values(-100,NULL);")
tk.MustQuery("select /*+ inl_merge_join(t1, t2) */ count(*) from t t1 right join t t2 on t1. `col_year_key_signed` = t2. `col_tinyint_key_signed`").Check(testkit.Rows("1"))
}
func TestIssue18068(t *testing.T) {
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/testIssue18068", `return(true)`))
defer func() {
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/testIssue18068"))
}()
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t, s")
tk.MustExec("create table t (a int, index idx(a))")
tk.MustExec("create table s (a int, index idx(a))")
tk.MustExec("insert into t values(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1);")
tk.MustExec("insert into s values(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1);")
tk.MustExec("set @@tidb_index_join_batch_size=1")
tk.MustExec("set @@tidb_max_chunk_size=32")
tk.MustExec("set @@tidb_init_chunk_size=1")
tk.MustExec("set @@tidb_index_lookup_join_concurrency=2")
tk.MustExec("select /*+ inl_merge_join(s)*/ 1 from t join s on t.a = s.a limit 1")
	// Do not hang in index merge join on the second and third executions.
tk.MustExec("select /*+ inl_merge_join(s)*/ 1 from t join s on t.a = s.a limit 1")
tk.MustExec("select /*+ inl_merge_join(s)*/ 1 from t join s on t.a = s.a limit 1")
}
func TestIssue18631(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1(a int, b int, c int, d int, primary key(a,b,c))")
tk.MustExec("create table t2(a int, b int, c int, d int, primary key(a,b,c))")
tk.MustExec("insert into t1 values(1,1,1,1),(2,2,2,2),(3,3,3,3)")
tk.MustExec("insert into t2 values(1,1,1,1),(2,2,2,2)")
firstOperator := tk.MustQuery("explain format = 'brief' select /*+ inl_merge_join(t1,t2) */ * from t1 left join t2 on t1.a = t2.a and t1.c = t2.c and t1.b = t2.b order by t1.a desc").Rows()[0][0].(string)
require.Equal(t, 0, strings.Index(firstOperator, plancodec.TypeIndexMergeJoin))
tk.MustQuery("select /*+ inl_merge_join(t1,t2) */ * from t1 left join t2 on t1.a = t2.a and t1.c = t2.c and t1.b = t2.b order by t1.a desc").Check(testkit.Rows(
"3 3 3 3 <nil> <nil> <nil> <nil>",
"2 2 2 2 2 2 2 2",
"1 1 1 1 1 1 1 1"))
}
func TestIssue19408(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (c_int int, primary key(c_int))")
tk.MustExec("create table t2 (c_int int, unique key (c_int)) partition by hash (c_int) partitions 4")
tk.MustExec("insert into t1 values (1), (2), (3), (4), (5)")
tk.MustExec("insert into t2 select * from t1")
tk.MustExec("begin")
tk.MustExec("delete from t1 where c_int = 1")
tk.MustQuery("select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1, t2 where t1.c_int = t2.c_int").Sort().Check(testkit.Rows(
"2 2",
"3 3",
"4 4",
"5 5"))
tk.MustQuery("select /*+ INL_JOIN(t1,t2) */ * from t1, t2 where t1.c_int = t2.c_int").Sort().Check(testkit.Rows(
"2 2",
"3 3",
"4 4",
"5 5"))
tk.MustQuery("select /*+ INL_HASH_JOIN(t1,t2) */ * from t1, t2 where t1.c_int = t2.c_int").Sort().Check(testkit.Rows(
"2 2",
"3 3",
"4 4",
"5 5"))
tk.MustExec("commit")
}
func TestIssue20137(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (id bigint(20) unsigned, primary key(id))")
tk.MustExec("create table t2 (id bigint(20) unsigned)")
tk.MustExec("insert into t1 values (8738875760185212610)")
tk.MustExec("insert into t1 values (9814441339970117597)")
tk.MustExec("insert into t2 values (8738875760185212610)")
tk.MustExec("insert into t2 values (9814441339970117597)")
tk.MustQuery("select /*+ INL_MERGE_JOIN(t1, t2) */ * from t2 left join t1 on t1.id = t2.id order by t1.id").Check(
testkit.Rows("8738875760185212610 8738875760185212610", "9814441339970117597 9814441339970117597"))
}
func TestIndexJoinOnSinglePartitionTable(t *testing.T) {
// For issue 19145
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
for _, val := range []string{string(variable.Static), string(variable.Dynamic)} {
tk.MustExec("set @@tidb_partition_prune_mode= '" + val + "'")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue )")
tk.MustExec("create table t2 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue )")
tk.MustExec("insert into t1 values (1, 'Alice')")
tk.MustExec("insert into t2 values (1, 'Bob')")
sql := "select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str"
tk.MustQuery(sql).Check(testkit.Rows("1 Alice 1 Bob"))
rows := testdata.ConvertRowsToStrings(tk.MustQuery("explain format = 'brief' " + sql).Rows())
		// A partition table can't be the inner side of an index merge join because it can't keep order.
require.Equal(t, -1, strings.Index(rows[0], "IndexMergeJoin"))
require.Equal(t, true, len(tk.MustQuery("show warnings").Rows()) > 0)
sql = "select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str"
tk.MustQuery(sql).Check(testkit.Rows("1 Alice 1 Bob"))
rows = testdata.ConvertRowsToStrings(tk.MustQuery("explain format = 'brief' " + sql).Rows())
require.Equal(t, 0, strings.Index(rows[0], "IndexHashJoin"))
sql = "select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str"
tk.MustQuery(sql).Check(testkit.Rows("1 Alice 1 Bob"))
rows = testdata.ConvertRowsToStrings(tk.MustQuery("explain format = 'brief' " + sql).Rows())
require.Equal(t, 0, strings.Index(rows[0], "IndexJoin"))
}
}
func TestIssue20400(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t, s")
tk.MustExec("create table s(a int, index(a))")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(1)")
tk.MustQuery("select /*+ hash_join(t,s)*/ * from t left join s on t.a=s.a and t.a>1").Check(
testkit.Rows("1 <nil>"))
tk.MustQuery("select /*+ inl_merge_join(t,s)*/ * from t left join s on t.a=s.a and t.a>1").Check(
testkit.Rows("1 <nil>"))
}
func TestIssue20549(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("CREATE TABLE `t1` (`id` bigint(20) NOT NULL AUTO_INCREMENT, `t2id` bigint(20) DEFAULT NULL, PRIMARY KEY (`id`), KEY `t2id` (`t2id`));")
tk.MustExec("INSERT INTO `t1` VALUES (1,NULL);")
tk.MustExec("CREATE TABLE `t2` (`id` bigint(20) NOT NULL AUTO_INCREMENT, PRIMARY KEY (`id`));")
tk.MustQuery("SELECT /*+ INL_MERGE_JOIN(t1,t2) */ 1 from t1 left outer join t2 on t1.t2id=t2.id;").Check(
testkit.Rows("1"))
tk.MustQuery("SELECT /*+ HASH_JOIN(t1,t2) */ 1 from t1 left outer join t2 on t1.t2id=t2.id;\n").Check(
testkit.Rows("1"))
}
func TestIssue24473AndIssue25669(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("drop table if exists x, t2, t3")
tk.MustExec("CREATE TABLE `x` ( `a` enum('y','b','1','x','0','null') DEFAULT NULL, KEY `a` (`a`));")
tk.MustExec("insert into x values(\"x\"),(\"x\"),(\"b\"),(\"y\");")
tk.MustQuery("SELECT /*+ merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a;").Sort().Check(
testkit.Rows("b b", "x x", "x x", "x x", "x x", "y y"))
tk.MustQuery("SELECT /*+ inl_merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a;").Sort().Check(
testkit.Rows("b b", "x x", "x x", "x x", "x x", "y y"))
tk.MustExec("drop table if exists x, t2, t3")
tk.MustExec("CREATE TABLE `x` ( `a` set('y','b','1','x','0','null') DEFAULT NULL, KEY `a` (`a`));")
tk.MustExec("insert into x values(\"x\"),(\"x\"),(\"b\"),(\"y\");")
tk.MustQuery("SELECT /*+ merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a;").Sort().Check(
testkit.Rows("b b", "x x", "x x", "x x", "x x", "y y"))
tk.MustQuery("SELECT /*+ inl_merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a;").Sort().Check(
testkit.Rows("b b", "x x", "x x", "x x", "x x", "y y"))
}
| executor/index_lookup_merge_join_test.go | 0 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.00017534637299831957,
0.00016868948296178132,
0.00016383295587729663,
0.00016837332805152982,
0.0000028638417006732197
] |
{
"id": 4,
"code_window": [
"\t\t\treturn nil\n",
"\t\t},\n",
"\t},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: TiDBRCReadCheckTS, Type: TypeBool, Value: BoolToOnOff(DefRCReadCheckTS), SetSession: func(s *SessionVars, val string) error {\n",
"\t\ts.RcReadCheckTS = TiDBOptOn(val)\n",
"\t\treturn nil\n",
"\t}},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: TiDBRCWriteCheckTs, Type: TypeBool, Value: BoolToOnOff(DefTiDBRcWriteCheckTs), SetSession: func(s *SessionVars, val string) error {\n",
"\t\ts.RcWriteCheckTS = TiDBOptOn(val)\n",
"\t\treturn nil\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/sysvar.go",
"type": "replace",
"edit_start_line_idx": 1723
} | // Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package isolation
import (
"context"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/terror"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/sessiontxn"
"github.com/pingcap/tidb/util/logutil"
tikverr "github.com/tikv/client-go/v2/error"
"github.com/tikv/client-go/v2/oracle"
"go.uber.org/zap"
)
type stmtState struct {
stmtTS uint64
stmtTSFuture oracle.Future
stmtUseStartTS bool
}
func (s *stmtState) prepareStmt(useStartTS bool) error {
*s = stmtState{
stmtUseStartTS: useStartTS,
}
return nil
}
// PessimisticRCTxnContextProvider provides txn context for isolation level read-committed
type PessimisticRCTxnContextProvider struct {
baseTxnContextProvider
stmtState
latestOracleTS uint64
// latestOracleTSValid shows whether we have already fetched a ts from pd and whether the ts we fetched is still valid.
latestOracleTSValid bool
	// checkTSInWriteStmt indicates whether to use RCCheckTS isolation when reading values during a point write.
checkTSInWriteStmt bool
}
// NewPessimisticRCTxnContextProvider returns a new PessimisticRCTxnContextProvider
func NewPessimisticRCTxnContextProvider(sctx sessionctx.Context, causalConsistencyOnly bool) *PessimisticRCTxnContextProvider {
provider := &PessimisticRCTxnContextProvider{
baseTxnContextProvider: baseTxnContextProvider{
sctx: sctx,
causalConsistencyOnly: causalConsistencyOnly,
onInitializeTxnCtx: func(txnCtx *variable.TransactionContext) {
txnCtx.IsPessimistic = true
txnCtx.Isolation = ast.ReadCommitted
},
onTxnActiveFunc: func(txn kv.Transaction, _ sessiontxn.EnterNewTxnType) {
txn.SetOption(kv.Pessimistic, true)
},
},
}
provider.onTxnActiveFunc = func(txn kv.Transaction, _ sessiontxn.EnterNewTxnType) {
txn.SetOption(kv.Pessimistic, true)
provider.latestOracleTS = txn.StartTS()
provider.latestOracleTSValid = true
}
provider.getStmtReadTSFunc = provider.getStmtTS
provider.getStmtForUpdateTSFunc = provider.getStmtTS
return provider
}
// OnStmtStart is the hook that should be called when a new statement started
func (p *PessimisticRCTxnContextProvider) OnStmtStart(ctx context.Context, node ast.StmtNode) error {
if err := p.baseTxnContextProvider.OnStmtStart(ctx, node); err != nil {
return err
}
	// Try to mark the `RCCheckTS` flag for the first execution of in-transaction read requests
	// using the read-committed isolation level.
if node != nil && NeedSetRCCheckTSFlag(p.sctx, node) {
p.sctx.GetSessionVars().StmtCtx.RCCheckTS = true
}
p.checkTSInWriteStmt = false
return p.prepareStmt(!p.isTxnPrepared)
}
// NeedSetRCCheckTSFlag checks whether it's needed to set `RCCheckTS` flag in current stmtctx.
func NeedSetRCCheckTSFlag(ctx sessionctx.Context, node ast.Node) bool {
sessionVars := ctx.GetSessionVars()
if sessionVars.ConnectionID > 0 && sessionVars.RcReadCheckTS && sessionVars.InTxn() &&
!sessionVars.RetryInfo.Retrying && plannercore.IsReadOnly(node, sessionVars) {
return true
}
return false
}
// OnStmtErrorForNextAction is the hook that should be called when a new statement get an error
func (p *PessimisticRCTxnContextProvider) OnStmtErrorForNextAction(point sessiontxn.StmtErrorHandlePoint, err error) (sessiontxn.StmtErrorAction, error) {
switch point {
case sessiontxn.StmtErrAfterQuery:
return p.handleAfterQueryError(err)
case sessiontxn.StmtErrAfterPessimisticLock:
return p.handleAfterPessimisticLockError(err)
default:
return p.baseTxnContextProvider.OnStmtErrorForNextAction(point, err)
}
}
// OnStmtRetry is the hook that should be called when a statement is retried internally.
func (p *PessimisticRCTxnContextProvider) OnStmtRetry(ctx context.Context) error {
if err := p.baseTxnContextProvider.OnStmtRetry(ctx); err != nil {
return err
}
p.checkTSInWriteStmt = false
return p.prepareStmt(false)
}
func (p *PessimisticRCTxnContextProvider) prepareStmtTS() {
if p.stmtTSFuture != nil {
return
}
sessVars := p.sctx.GetSessionVars()
var stmtTSFuture oracle.Future
switch {
case p.stmtUseStartTS:
stmtTSFuture = funcFuture(p.getTxnStartTS)
case p.latestOracleTSValid && sessVars.StmtCtx.RCCheckTS:
stmtTSFuture = sessiontxn.ConstantFuture(p.latestOracleTS)
default:
stmtTSFuture = p.getOracleFuture()
}
p.stmtTSFuture = stmtTSFuture
}
func (p *PessimisticRCTxnContextProvider) getOracleFuture() funcFuture {
txnCtx := p.sctx.GetSessionVars().TxnCtx
future := newOracleFuture(p.ctx, p.sctx, txnCtx.TxnScope)
return func() (ts uint64, err error) {
if ts, err = future.Wait(); err != nil {
return
}
failpoint.Inject("waitTsoOfOracleFuture", func() {
sessiontxn.TsoWaitCountInc(p.sctx)
})
txnCtx.SetForUpdateTS(ts)
ts = txnCtx.GetForUpdateTS()
p.latestOracleTS = ts
p.latestOracleTSValid = true
return
}
}
func (p *PessimisticRCTxnContextProvider) getStmtTS() (ts uint64, err error) {
if p.stmtTS != 0 {
return p.stmtTS, nil
}
var txn kv.Transaction
if txn, err = p.ActivateTxn(); err != nil {
return 0, err
}
p.prepareStmtTS()
if ts, err = p.stmtTSFuture.Wait(); err != nil {
return 0, err
}
txn.SetOption(kv.SnapshotTS, ts)
p.stmtTS = ts
return
}
// handleAfterQueryError will be called when the handle point is `StmtErrAfterQuery`.
// At this point the query will be retried from the beginning.
func (p *PessimisticRCTxnContextProvider) handleAfterQueryError(queryErr error) (sessiontxn.StmtErrorAction, error) {
sessVars := p.sctx.GetSessionVars()
if !errors.ErrorEqual(queryErr, kv.ErrWriteConflict) || !sessVars.StmtCtx.RCCheckTS {
return sessiontxn.NoIdea()
}
p.latestOracleTSValid = false
logutil.Logger(p.ctx).Info("RC read with ts checking has failed, retry RC read",
zap.String("sql", sessVars.StmtCtx.OriginalSQL), zap.Error(queryErr))
return sessiontxn.RetryReady()
}
func (p *PessimisticRCTxnContextProvider) handleAfterPessimisticLockError(lockErr error) (sessiontxn.StmtErrorAction, error) {
p.latestOracleTSValid = false
txnCtx := p.sctx.GetSessionVars().TxnCtx
retryable := false
if deadlock, ok := errors.Cause(lockErr).(*tikverr.ErrDeadlock); ok && deadlock.IsRetryable {
logutil.Logger(p.ctx).Info("single statement deadlock, retry statement",
zap.Uint64("txn", txnCtx.StartTS),
zap.Uint64("lockTS", deadlock.LockTs),
zap.Stringer("lockKey", kv.Key(deadlock.LockKey)),
zap.Uint64("deadlockKeyHash", deadlock.DeadlockKeyHash))
retryable = true
} else if terror.ErrorEqual(kv.ErrWriteConflict, lockErr) {
logutil.Logger(p.ctx).Debug("pessimistic write conflict, retry statement",
zap.Uint64("txn", txnCtx.StartTS),
zap.Uint64("forUpdateTS", txnCtx.GetForUpdateTS()),
zap.String("err", lockErr.Error()))
retryable = true
}
if retryable {
return sessiontxn.RetryReady()
}
return sessiontxn.ErrorAction(lockErr)
}
// AdviseWarmup provides warmup for inner state
func (p *PessimisticRCTxnContextProvider) AdviseWarmup() error {
if err := p.prepareTxn(); err != nil {
return err
}
if !p.isTidbSnapshotEnabled() {
p.prepareStmtTS()
}
return nil
}
// planSkipGetTsoFromPD identifies the plans that don't need to get the newest ts from PD.
func planSkipGetTsoFromPD(sctx sessionctx.Context, plan plannercore.Plan, inLockOrWriteStmt bool) bool {
switch v := plan.(type) {
case *plannercore.PointGetPlan:
return sctx.GetSessionVars().RcWriteCheckTS && (v.Lock || inLockOrWriteStmt)
case plannercore.PhysicalPlan:
if len(v.Children()) == 0 {
return false
}
_, isPhysicalLock := v.(*plannercore.PhysicalLock)
for _, p := range v.Children() {
if !planSkipGetTsoFromPD(sctx, p, isPhysicalLock || inLockOrWriteStmt) {
return false
}
}
return true
case *plannercore.Update:
return planSkipGetTsoFromPD(sctx, v.SelectPlan, true)
case *plannercore.Delete:
return planSkipGetTsoFromPD(sctx, v.SelectPlan, true)
case *plannercore.Insert:
return v.SelectPlan == nil && len(v.OnDuplicate) == 0 && !v.IsReplace
}
return false
}
// AdviseOptimizeWithPlan in read-committed covers as many cases as repeatable-read.
// We do not fetch the latest ts immediately in the following cases:
// 1. A query of the form "SELECT ... FOR UPDATE" whose execution plan is "PointGet".
// 2. An INSERT statement without a "SELECT" subquery.
// 3. An UPDATE statement whose sub execution plan is "PointGet".
// 4. A DELETE statement whose sub execution plan is "PointGet".
func (p *PessimisticRCTxnContextProvider) AdviseOptimizeWithPlan(val interface{}) (err error) {
if p.isTidbSnapshotEnabled() || p.isBeginStmtWithStaleRead() {
return nil
}
if p.stmtUseStartTS || !p.latestOracleTSValid {
return nil
}
plan, ok := val.(plannercore.Plan)
if !ok {
return nil
}
if execute, ok := plan.(*plannercore.Execute); ok {
plan = execute.Plan
}
useLastOracleTS := false
if !p.sctx.GetSessionVars().RetryInfo.Retrying {
useLastOracleTS = planSkipGetTsoFromPD(p.sctx, plan, false)
}
if useLastOracleTS {
failpoint.Inject("tsoUseConstantFuture", func() {
sessiontxn.TsoUseConstantCountInc(p.sctx)
})
p.checkTSInWriteStmt = true
p.stmtTSFuture = sessiontxn.ConstantFuture(p.latestOracleTS)
}
return nil
}
// GetSnapshotWithStmtForUpdateTS gets snapshot with for update ts
func (p *PessimisticRCTxnContextProvider) GetSnapshotWithStmtForUpdateTS() (kv.Snapshot, error) {
snapshot, err := p.baseTxnContextProvider.GetSnapshotWithStmtForUpdateTS()
if err != nil {
return nil, err
}
if p.checkTSInWriteStmt {
snapshot.SetOption(kv.IsolationLevel, kv.RCCheckTS)
}
return snapshot, err
}
// GetSnapshotWithStmtReadTS gets snapshot with read ts
func (p *PessimisticRCTxnContextProvider) GetSnapshotWithStmtReadTS() (kv.Snapshot, error) {
snapshot, err := p.baseTxnContextProvider.GetSnapshotWithStmtForUpdateTS()
if err != nil {
return nil, err
}
if p.sctx.GetSessionVars().StmtCtx.RCCheckTS {
snapshot.SetOption(kv.IsolationLevel, kv.RCCheckTS)
}
return snapshot, nil
}
// IsCheckTSInWriteStmtMode is only used for tests
func (p *PessimisticRCTxnContextProvider) IsCheckTSInWriteStmtMode() bool {
return p.checkTSInWriteStmt
}
| sessiontxn/isolation/readcommitted.go | 1 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.003854715032503009,
0.000318573642289266,
0.000162428681505844,
0.00016870800754986703,
0.0006530898390337825
] |
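The provider above encodes a simple optimistic pattern: reuse the last oracle timestamp for in-transaction reads, and only pay for a fresh timestamp (and a retry) when the checked read hits a write conflict. A self-contained sketch of that control flow follows; it is independent of TiDB's actual types, and every name in it is invented for illustration.

package main

import (
	"errors"
	"fmt"
)

var errWriteConflict = errors.New("write conflict")

// readWithCheckTS first tries the cached timestamp; on a write conflict it
// refreshes the timestamp from the oracle and retries once, mirroring the
// RCCheckTS flow of "read with the old ts, fall back on conflict".
func readWithCheckTS(cachedTS uint64, fetchTS func() uint64, read func(ts uint64) error) error {
	if err := read(cachedTS); err == nil {
		return nil
	} else if !errors.Is(err, errWriteConflict) {
		return err
	}
	// The cached ts was too old: pay for one oracle round trip and retry.
	return read(fetchTS())
}

func main() {
	latest := uint64(100)
	err := readWithCheckTS(42,
		func() uint64 { return latest },
		func(ts uint64) error {
			if ts < latest {
				return errWriteConflict // data committed after ts: the checked read fails
			}
			return nil
		})
	fmt.Println(err) // <nil>: the retry with the fresh ts succeeds
}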
{
"id": 4,
"code_window": [
"\t\t\treturn nil\n",
"\t\t},\n",
"\t},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: TiDBRCReadCheckTS, Type: TypeBool, Value: BoolToOnOff(DefRCReadCheckTS), SetSession: func(s *SessionVars, val string) error {\n",
"\t\ts.RcReadCheckTS = TiDBOptOn(val)\n",
"\t\treturn nil\n",
"\t}},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: TiDBRCWriteCheckTs, Type: TypeBool, Value: BoolToOnOff(DefTiDBRcWriteCheckTs), SetSession: func(s *SessionVars, val string) error {\n",
"\t\ts.RcWriteCheckTS = TiDBOptOn(val)\n",
"\t\treturn nil\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/sysvar.go",
"type": "replace",
"edit_start_line_idx": 1723
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"github.com/pingcap/tidb/parser/charset"
"github.com/pingcap/tidb/parser/mysql"
)
const maxColumnNameSize = 256
// ColumnInfo contains information of a column
type ColumnInfo struct {
Schema string
Table string
OrgTable string
Name string
OrgName string
ColumnLength uint32
Charset uint16
Flag uint16
Decimal uint8
Type uint8
DefaultValueLength uint64
DefaultValue []byte
}
// Dump dumps ColumnInfo to bytes.
func (column *ColumnInfo) Dump(buffer []byte, d *resultEncoder) []byte {
if d == nil {
d = newResultEncoder(charset.CharsetUTF8MB4)
}
nameDump, orgnameDump := []byte(column.Name), []byte(column.OrgName)
if len(nameDump) > maxColumnNameSize {
nameDump = nameDump[0:maxColumnNameSize]
}
if len(orgnameDump) > maxColumnNameSize {
orgnameDump = orgnameDump[0:maxColumnNameSize]
}
buffer = dumpLengthEncodedString(buffer, []byte("def"))
buffer = dumpLengthEncodedString(buffer, d.encodeMeta([]byte(column.Schema)))
buffer = dumpLengthEncodedString(buffer, d.encodeMeta([]byte(column.Table)))
buffer = dumpLengthEncodedString(buffer, d.encodeMeta([]byte(column.OrgTable)))
buffer = dumpLengthEncodedString(buffer, d.encodeMeta(nameDump))
buffer = dumpLengthEncodedString(buffer, d.encodeMeta(orgnameDump))
buffer = append(buffer, 0x0c)
buffer = dumpUint16(buffer, d.columnTypeInfoCharsetID(column))
buffer = dumpUint32(buffer, column.ColumnLength)
buffer = append(buffer, dumpType(column.Type))
buffer = dumpUint16(buffer, dumpFlag(column.Type, column.Flag))
buffer = append(buffer, column.Decimal)
buffer = append(buffer, 0, 0)
if column.DefaultValue != nil {
buffer = dumpUint64(buffer, uint64(len(column.DefaultValue)))
buffer = append(buffer, column.DefaultValue...)
}
return buffer
}
func isStringColumnType(tp byte) bool {
switch tp {
case mysql.TypeString, mysql.TypeVarString, mysql.TypeVarchar, mysql.TypeBit,
mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob, mysql.TypeBlob,
mysql.TypeEnum, mysql.TypeSet, mysql.TypeJSON:
return true
}
return false
}
func dumpFlag(tp byte, flag uint16) uint16 {
switch tp {
case mysql.TypeSet:
return flag | uint16(mysql.SetFlag)
case mysql.TypeEnum:
return flag | uint16(mysql.EnumFlag)
default:
return flag
}
}
func dumpType(tp byte) byte {
switch tp {
case mysql.TypeSet, mysql.TypeEnum:
return mysql.TypeString
default:
return tp
}
}
| server/column.go | 0 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.0001779978338163346,
0.00016987182607408613,
0.00016172936011571437,
0.00016824847261887044,
0.000004777849426318426
] |
{
"id": 4,
"code_window": [
"\t\t\treturn nil\n",
"\t\t},\n",
"\t},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: TiDBRCReadCheckTS, Type: TypeBool, Value: BoolToOnOff(DefRCReadCheckTS), SetSession: func(s *SessionVars, val string) error {\n",
"\t\ts.RcReadCheckTS = TiDBOptOn(val)\n",
"\t\treturn nil\n",
"\t}},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: TiDBRCWriteCheckTs, Type: TypeBool, Value: BoolToOnOff(DefTiDBRcWriteCheckTs), SetSession: func(s *SessionVars, val string) error {\n",
"\t\ts.RcWriteCheckTS = TiDBOptOn(val)\n",
"\t\treturn nil\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/sysvar.go",
"type": "replace",
"edit_start_line_idx": 1723
} | // Copyright 2019-present PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"context"
"io"
"sync/atomic"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/coprocessor"
deadlockPb "github.com/pingcap/kvproto/pkg/deadlock"
"github.com/pingcap/kvproto/pkg/errorpb"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/kvproto/pkg/mpp"
"github.com/pingcap/kvproto/pkg/tikvpb"
"github.com/pingcap/log"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/mockstore/unistore/client"
"github.com/pingcap/tidb/store/mockstore/unistore/cophandler"
"github.com/pingcap/tidb/store/mockstore/unistore/tikv/dbreader"
"github.com/pingcap/tidb/store/mockstore/unistore/tikv/kverrors"
"github.com/pingcap/tidb/store/mockstore/unistore/tikv/pberror"
"github.com/pingcap/tidb/store/mockstore/unistore/util/lockwaiter"
"github.com/pingcap/tipb/go-tipb"
"go.uber.org/zap"
)
var _ tikvpb.TikvServer = new(Server)
// Server implements the tikvpb.TikvServer interface.
type Server struct {
// After updating the kvproto, some methods of TikvServer are not implemented.
	// Construct `Server` based on `UnimplementedTikvServer` so that it compiles successfully.
tikvpb.UnimplementedTikvServer
mvccStore *MVCCStore
regionManager RegionManager
innerServer InnerServer
RPCClient client.Client
refCount int32
stopped int32
}
// NewServer returns a new server.
func NewServer(rm RegionManager, store *MVCCStore, innerServer InnerServer) *Server {
return &Server{
mvccStore: store,
regionManager: rm,
innerServer: innerServer,
}
}
// Stop stops the server.
func (svr *Server) Stop() {
atomic.StoreInt32(&svr.stopped, 1)
for {
if atomic.LoadInt32(&svr.refCount) == 0 {
break
}
time.Sleep(time.Millisecond * 10)
}
if err := svr.mvccStore.Close(); err != nil {
log.Error("close mvcc store failed", zap.Error(err))
}
if err := svr.regionManager.Close(); err != nil {
log.Error("close region manager failed", zap.Error(err))
}
if err := svr.innerServer.Stop(); err != nil {
log.Error("close inner server failed", zap.Error(err))
}
}
// GetStoreIDByAddr gets a store id by the store address.
func (svr *Server) GetStoreIDByAddr(addr string) (uint64, error) {
return svr.regionManager.GetStoreIDByAddr(addr)
}
// GetStoreAddrByStoreID gets a store address by the store id.
func (svr *Server) GetStoreAddrByStoreID(storeID uint64) (string, error) {
return svr.regionManager.GetStoreAddrByStoreID(storeID)
}
type requestCtx struct {
svr *Server
regCtx RegionCtx
regErr *errorpb.Error
buf []byte
reader *dbreader.DBReader
method string
startTime time.Time
rpcCtx *kvrpcpb.Context
storeAddr string
storeID uint64
asyncMinCommitTS uint64
onePCCommitTS uint64
}
func newRequestCtx(svr *Server, ctx *kvrpcpb.Context, method string) (*requestCtx, error) {
atomic.AddInt32(&svr.refCount, 1)
if atomic.LoadInt32(&svr.stopped) > 0 {
atomic.AddInt32(&svr.refCount, -1)
return nil, kverrors.ErrRetryable("server is closed")
}
req := &requestCtx{
svr: svr,
method: method,
startTime: time.Now(),
rpcCtx: ctx,
}
req.regCtx, req.regErr = svr.regionManager.GetRegionFromCtx(ctx)
storeAddr, storeID, regErr := svr.regionManager.GetStoreInfoFromCtx(ctx)
req.storeAddr = storeAddr
req.storeID = storeID
if regErr != nil {
req.regErr = regErr
}
return req, nil
}
// For read-only requests that don't acquire latches, this function must be called after all locks have been checked.
func (req *requestCtx) getDBReader() *dbreader.DBReader {
if req.reader == nil {
mvccStore := req.svr.mvccStore
txn := mvccStore.db.NewTransaction(false)
req.reader = dbreader.NewDBReader(req.regCtx.RawStart(), req.regCtx.RawEnd(), txn)
req.reader.RcCheckTS = req.isRcCheckTSIsolationLevel()
}
return req.reader
}
func (req *requestCtx) isSnapshotIsolation() bool {
return req.rpcCtx.IsolationLevel == kvrpcpb.IsolationLevel_SI
}
func (req *requestCtx) isRcCheckTSIsolationLevel() bool {
return req.rpcCtx.IsolationLevel == kvrpcpb.IsolationLevel_RCCheckTS
}
func (req *requestCtx) finish() {
atomic.AddInt32(&req.svr.refCount, -1)
if req.reader != nil {
req.reader.Close()
}
}
// KvGet implements the tikvpb.TikvServer interface.
func (svr *Server) KvGet(ctx context.Context, req *kvrpcpb.GetRequest) (*kvrpcpb.GetResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvGet")
if err != nil {
return &kvrpcpb.GetResponse{Error: convertToKeyError(err)}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.GetResponse{RegionError: reqCtx.regErr}, nil
}
val, err := svr.mvccStore.Get(reqCtx, req.Key, req.Version)
return &kvrpcpb.GetResponse{
Value: val,
Error: convertToKeyError(err),
}, nil
}
// KvScan implements the tikvpb.TikvServer interface.
func (svr *Server) KvScan(ctx context.Context, req *kvrpcpb.ScanRequest) (*kvrpcpb.ScanResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvScan")
if err != nil {
return &kvrpcpb.ScanResponse{Pairs: []*kvrpcpb.KvPair{{Error: convertToKeyError(err)}}}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.ScanResponse{RegionError: reqCtx.regErr}, nil
}
pairs := svr.mvccStore.Scan(reqCtx, req)
return &kvrpcpb.ScanResponse{
Pairs: pairs,
}, nil
}
// KvPessimisticLock implements the tikvpb.TikvServer interface.
func (svr *Server) KvPessimisticLock(ctx context.Context, req *kvrpcpb.PessimisticLockRequest) (*kvrpcpb.PessimisticLockResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "PessimisticLock")
if err != nil {
return &kvrpcpb.PessimisticLockResponse{Errors: []*kvrpcpb.KeyError{convertToKeyError(err)}}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.PessimisticLockResponse{RegionError: reqCtx.regErr}, nil
}
resp := &kvrpcpb.PessimisticLockResponse{}
waiter, err := svr.mvccStore.PessimisticLock(reqCtx, req, resp)
resp.Errors, resp.RegionError = convertToPBErrors(err)
if waiter == nil {
return resp, nil
}
result := waiter.Wait()
svr.mvccStore.DeadlockDetectCli.CleanUpWaitFor(req.StartVersion, waiter.LockTS, waiter.KeyHash)
svr.mvccStore.lockWaiterManager.CleanUp(waiter)
if result.WakeupSleepTime == lockwaiter.WaitTimeout {
return resp, nil
}
if result.DeadlockResp != nil {
log.Error("deadlock found", zap.Stringer("entry", &result.DeadlockResp.Entry))
errLocked := err.(*kverrors.ErrLocked)
deadlockErr := &kverrors.ErrDeadlock{
LockKey: errLocked.Key,
LockTS: errLocked.Lock.StartTS,
DeadlockKeyHash: result.DeadlockResp.DeadlockKeyHash,
WaitChain: result.DeadlockResp.WaitChain,
}
resp.Errors, resp.RegionError = convertToPBErrors(deadlockErr)
return resp, nil
}
if result.WakeupSleepTime == lockwaiter.WakeUpThisWaiter {
if req.Force {
req.WaitTimeout = lockwaiter.LockNoWait
_, err := svr.mvccStore.PessimisticLock(reqCtx, req, resp)
resp.Errors, resp.RegionError = convertToPBErrors(err)
if err == nil {
return resp, nil
}
if _, ok := err.(*kverrors.ErrLocked); !ok {
resp.Errors, resp.RegionError = convertToPBErrors(err)
return resp, nil
}
log.Warn("wakeup force lock request, try lock still failed", zap.Error(err))
}
}
	// The key has been rolled back; we don't have the exact commitTS, but we can use the server's latest.
	// Always use the store's latest ts since the waiter result commitTs may not be the real conflict ts.
conflictCommitTS := svr.mvccStore.getLatestTS()
err = &kverrors.ErrConflict{
StartTS: req.GetForUpdateTs(),
ConflictTS: waiter.LockTS,
ConflictCommitTS: conflictCommitTS,
}
resp.Errors, _ = convertToPBErrors(err)
return resp, nil
}
// KVPessimisticRollback implements the tikvpb.TikvServer interface.
func (svr *Server) KVPessimisticRollback(ctx context.Context, req *kvrpcpb.PessimisticRollbackRequest) (*kvrpcpb.PessimisticRollbackResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "PessimisticRollback")
if err != nil {
return &kvrpcpb.PessimisticRollbackResponse{Errors: []*kvrpcpb.KeyError{convertToKeyError(err)}}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.PessimisticRollbackResponse{RegionError: reqCtx.regErr}, nil
}
err = svr.mvccStore.PessimisticRollback(reqCtx, req)
resp := &kvrpcpb.PessimisticRollbackResponse{}
resp.Errors, resp.RegionError = convertToPBErrors(err)
return resp, nil
}
// KvTxnHeartBeat implements the tikvpb.TikvServer interface.
func (svr *Server) KvTxnHeartBeat(ctx context.Context, req *kvrpcpb.TxnHeartBeatRequest) (*kvrpcpb.TxnHeartBeatResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "TxnHeartBeat")
if err != nil {
return &kvrpcpb.TxnHeartBeatResponse{Error: convertToKeyError(err)}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.TxnHeartBeatResponse{RegionError: reqCtx.regErr}, nil
}
lockTTL, err := svr.mvccStore.TxnHeartBeat(reqCtx, req)
resp := &kvrpcpb.TxnHeartBeatResponse{LockTtl: lockTTL}
resp.Error, resp.RegionError = convertToPBError(err)
return resp, nil
}
// KvCheckTxnStatus implements the tikvpb.TikvServer interface.
func (svr *Server) KvCheckTxnStatus(ctx context.Context, req *kvrpcpb.CheckTxnStatusRequest) (*kvrpcpb.CheckTxnStatusResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvCheckTxnStatus")
if err != nil {
return &kvrpcpb.CheckTxnStatusResponse{Error: convertToKeyError(err)}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.CheckTxnStatusResponse{RegionError: reqCtx.regErr}, nil
}
txnStatus, err := svr.mvccStore.CheckTxnStatus(reqCtx, req)
ttl := uint64(0)
if txnStatus.lockInfo != nil {
ttl = txnStatus.lockInfo.LockTtl
}
resp := &kvrpcpb.CheckTxnStatusResponse{
LockTtl: ttl,
CommitVersion: txnStatus.commitTS,
Action: txnStatus.action,
LockInfo: txnStatus.lockInfo,
}
resp.Error, resp.RegionError = convertToPBError(err)
return resp, nil
}
// KvCheckSecondaryLocks implements the tikvpb.TikvServer interface.
func (svr *Server) KvCheckSecondaryLocks(ctx context.Context, req *kvrpcpb.CheckSecondaryLocksRequest) (*kvrpcpb.CheckSecondaryLocksResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvCheckSecondaryLocks")
if err != nil {
return &kvrpcpb.CheckSecondaryLocksResponse{Error: convertToKeyError(err)}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.CheckSecondaryLocksResponse{RegionError: reqCtx.regErr}, nil
}
locksStatus, err := svr.mvccStore.CheckSecondaryLocks(reqCtx, req.Keys, req.StartVersion)
resp := &kvrpcpb.CheckSecondaryLocksResponse{}
if err == nil {
resp.Locks = locksStatus.locks
resp.CommitTs = locksStatus.commitTS
} else {
resp.Error, resp.RegionError = convertToPBError(err)
}
return resp, nil
}
// KvPrewrite implements the tikvpb.TikvServer interface.
func (svr *Server) KvPrewrite(ctx context.Context, req *kvrpcpb.PrewriteRequest) (*kvrpcpb.PrewriteResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvPrewrite")
if err != nil {
return &kvrpcpb.PrewriteResponse{Errors: []*kvrpcpb.KeyError{convertToKeyError(err)}}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.PrewriteResponse{RegionError: reqCtx.regErr}, nil
}
err = svr.mvccStore.Prewrite(reqCtx, req)
resp := &kvrpcpb.PrewriteResponse{}
if reqCtx.asyncMinCommitTS > 0 {
resp.MinCommitTs = reqCtx.asyncMinCommitTS
}
if reqCtx.onePCCommitTS > 0 {
resp.OnePcCommitTs = reqCtx.onePCCommitTS
}
resp.Errors, resp.RegionError = convertToPBErrors(err)
return resp, nil
}
// KvCommit implements the tikvpb.TikvServer interface.
func (svr *Server) KvCommit(ctx context.Context, req *kvrpcpb.CommitRequest) (*kvrpcpb.CommitResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvCommit")
if err != nil {
return &kvrpcpb.CommitResponse{Error: convertToKeyError(err)}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.CommitResponse{RegionError: reqCtx.regErr}, nil
}
resp := new(kvrpcpb.CommitResponse)
err = svr.mvccStore.Commit(reqCtx, req.Keys, req.GetStartVersion(), req.GetCommitVersion())
if err != nil {
resp.Error, resp.RegionError = convertToPBError(err)
}
return resp, nil
}
// RawGetKeyTTL implements the tikvpb.TikvServer interface.
func (svr *Server) RawGetKeyTTL(ctx context.Context, req *kvrpcpb.RawGetKeyTTLRequest) (*kvrpcpb.RawGetKeyTTLResponse, error) {
// TODO
return &kvrpcpb.RawGetKeyTTLResponse{}, nil
}
// KvImport implements the tikvpb.TikvServer interface.
func (svr *Server) KvImport(context.Context, *kvrpcpb.ImportRequest) (*kvrpcpb.ImportResponse, error) {
// TODO
return &kvrpcpb.ImportResponse{}, nil
}
// KvCleanup implements the tikvpb.TikvServer interface.
func (svr *Server) KvCleanup(ctx context.Context, req *kvrpcpb.CleanupRequest) (*kvrpcpb.CleanupResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvCleanup")
if err != nil {
return &kvrpcpb.CleanupResponse{Error: convertToKeyError(err)}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.CleanupResponse{RegionError: reqCtx.regErr}, nil
}
err = svr.mvccStore.Cleanup(reqCtx, req.Key, req.StartVersion, req.CurrentTs)
resp := new(kvrpcpb.CleanupResponse)
if committed, ok := err.(kverrors.ErrAlreadyCommitted); ok {
resp.CommitVersion = uint64(committed)
} else if err != nil {
log.Error("cleanup failed", zap.Error(err))
resp.Error, resp.RegionError = convertToPBError(err)
}
return resp, nil
}
// KvBatchGet implements the tikvpb.TikvServer interface.
func (svr *Server) KvBatchGet(ctx context.Context, req *kvrpcpb.BatchGetRequest) (*kvrpcpb.BatchGetResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvBatchGet")
if err != nil {
return &kvrpcpb.BatchGetResponse{Pairs: []*kvrpcpb.KvPair{{Error: convertToKeyError(err)}}}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.BatchGetResponse{RegionError: reqCtx.regErr}, nil
}
pairs := svr.mvccStore.BatchGet(reqCtx, req.Keys, req.GetVersion())
return &kvrpcpb.BatchGetResponse{
Pairs: pairs,
}, nil
}
// KvBatchRollback implements the tikvpb.TikvServer interface.
func (svr *Server) KvBatchRollback(ctx context.Context, req *kvrpcpb.BatchRollbackRequest) (*kvrpcpb.BatchRollbackResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvBatchRollback")
if err != nil {
return &kvrpcpb.BatchRollbackResponse{Error: convertToKeyError(err)}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.BatchRollbackResponse{RegionError: reqCtx.regErr}, nil
}
resp := new(kvrpcpb.BatchRollbackResponse)
err = svr.mvccStore.Rollback(reqCtx, req.Keys, req.StartVersion)
resp.Error, resp.RegionError = convertToPBError(err)
return resp, nil
}
// KvScanLock implements the tikvpb.TikvServer interface.
func (svr *Server) KvScanLock(ctx context.Context, req *kvrpcpb.ScanLockRequest) (*kvrpcpb.ScanLockResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvScanLock")
if err != nil {
return &kvrpcpb.ScanLockResponse{Error: convertToKeyError(err)}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.ScanLockResponse{RegionError: reqCtx.regErr}, nil
}
log.Debug("kv scan lock")
locks, err := svr.mvccStore.ScanLock(reqCtx, req.MaxVersion, int(req.Limit))
return &kvrpcpb.ScanLockResponse{Error: convertToKeyError(err), Locks: locks}, nil
}
// KvResolveLock implements the tikvpb.TikvServer interface.
func (svr *Server) KvResolveLock(ctx context.Context, req *kvrpcpb.ResolveLockRequest) (*kvrpcpb.ResolveLockResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvResolveLock")
if err != nil {
return &kvrpcpb.ResolveLockResponse{Error: convertToKeyError(err)}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.ResolveLockResponse{RegionError: reqCtx.regErr}, nil
}
resp := &kvrpcpb.ResolveLockResponse{}
if len(req.TxnInfos) > 0 {
for _, txnInfo := range req.TxnInfos {
log.S().Debugf("kv resolve lock region:%d txn:%v", reqCtx.regCtx.Meta().Id, txnInfo.Txn)
err := svr.mvccStore.ResolveLock(reqCtx, nil, txnInfo.Txn, txnInfo.Status)
if err != nil {
resp.Error, resp.RegionError = convertToPBError(err)
break
}
}
} else {
log.S().Debugf("kv resolve lock region:%d txn:%v", reqCtx.regCtx.Meta().Id, req.StartVersion)
err := svr.mvccStore.ResolveLock(reqCtx, req.Keys, req.StartVersion, req.CommitVersion)
resp.Error, resp.RegionError = convertToPBError(err)
}
return resp, nil
}
// KvGC implements the tikvpb.TikvServer interface.
func (svr *Server) KvGC(ctx context.Context, req *kvrpcpb.GCRequest) (*kvrpcpb.GCResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvGC")
if err != nil {
return &kvrpcpb.GCResponse{Error: convertToKeyError(err)}, nil
}
defer reqCtx.finish()
svr.mvccStore.UpdateSafePoint(req.SafePoint)
return &kvrpcpb.GCResponse{}, nil
}
// KvDeleteRange implements the tikvpb.TikvServer interface.
func (svr *Server) KvDeleteRange(ctx context.Context, req *kvrpcpb.DeleteRangeRequest) (*kvrpcpb.DeleteRangeResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvDeleteRange")
if err != nil {
return &kvrpcpb.DeleteRangeResponse{Error: convertToKeyError(err).String()}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.DeleteRangeResponse{RegionError: reqCtx.regErr}, nil
}
err = svr.mvccStore.dbWriter.DeleteRange(req.StartKey, req.EndKey, reqCtx.regCtx)
if err != nil {
log.Error("delete range failed", zap.Error(err))
}
return &kvrpcpb.DeleteRangeResponse{}, nil
}
// RawKV commands.
// RawGet implements the tikvpb.TikvServer interface.
func (svr *Server) RawGet(context.Context, *kvrpcpb.RawGetRequest) (*kvrpcpb.RawGetResponse, error) {
return &kvrpcpb.RawGetResponse{}, nil
}
// RawPut implements the tikvpb.TikvServer interface.
func (svr *Server) RawPut(context.Context, *kvrpcpb.RawPutRequest) (*kvrpcpb.RawPutResponse, error) {
return &kvrpcpb.RawPutResponse{}, nil
}
// RawDelete implements the tikvpb.TikvServer interface.
func (svr *Server) RawDelete(context.Context, *kvrpcpb.RawDeleteRequest) (*kvrpcpb.RawDeleteResponse, error) {
return &kvrpcpb.RawDeleteResponse{}, nil
}
// RawScan implements the tikvpb.TikvServer interface.
func (svr *Server) RawScan(context.Context, *kvrpcpb.RawScanRequest) (*kvrpcpb.RawScanResponse, error) {
return &kvrpcpb.RawScanResponse{}, nil
}
// RawBatchDelete implements the tikvpb.TikvServer interface.
func (svr *Server) RawBatchDelete(context.Context, *kvrpcpb.RawBatchDeleteRequest) (*kvrpcpb.RawBatchDeleteResponse, error) {
return &kvrpcpb.RawBatchDeleteResponse{}, nil
}
// RawBatchGet implements the tikvpb.TikvServer interface.
func (svr *Server) RawBatchGet(context.Context, *kvrpcpb.RawBatchGetRequest) (*kvrpcpb.RawBatchGetResponse, error) {
return &kvrpcpb.RawBatchGetResponse{}, nil
}
// RawBatchPut implements the tikvpb.TikvServer interface.
func (svr *Server) RawBatchPut(context.Context, *kvrpcpb.RawBatchPutRequest) (*kvrpcpb.RawBatchPutResponse, error) {
return &kvrpcpb.RawBatchPutResponse{}, nil
}
// RawBatchScan implements the tikvpb.TikvServer interface.
func (svr *Server) RawBatchScan(context.Context, *kvrpcpb.RawBatchScanRequest) (*kvrpcpb.RawBatchScanResponse, error) {
return &kvrpcpb.RawBatchScanResponse{}, nil
}
// RawDeleteRange implements the tikvpb.TikvServer interface.
func (svr *Server) RawDeleteRange(context.Context, *kvrpcpb.RawDeleteRangeRequest) (*kvrpcpb.RawDeleteRangeResponse, error) {
return &kvrpcpb.RawDeleteRangeResponse{}, nil
}
// SQL push down commands.
// Coprocessor implements the tikvpb.TikvServer interface.
func (svr *Server) Coprocessor(_ context.Context, req *coprocessor.Request) (*coprocessor.Response, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "Coprocessor")
if err != nil {
return &coprocessor.Response{OtherError: convertToKeyError(err).String()}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &coprocessor.Response{RegionError: reqCtx.regErr}, nil
}
return cophandler.HandleCopRequest(reqCtx.getDBReader(), svr.mvccStore.lockStore, req), nil
}
// CoprocessorStream implements the tikvpb.TikvServer interface.
func (svr *Server) CoprocessorStream(*coprocessor.Request, tikvpb.Tikv_CoprocessorStreamServer) error {
// TODO
return nil
}
// RegionError represents a region error
type RegionError struct {
err *errorpb.Error
}
// Error implements Error method.
func (regionError *RegionError) Error() string {
return regionError.err.Message
}
// BatchCoprocessor implements the tikvpb.TikvServer interface.
func (svr *Server) BatchCoprocessor(req *coprocessor.BatchRequest, batchCopServer tikvpb.Tikv_BatchCoprocessorServer) error {
reqCtxs := make([]*requestCtx, 0, len(req.Regions))
defer func() {
for _, ctx := range reqCtxs {
ctx.finish()
}
}()
if req.TableRegions != nil {
// Support PartitionTableScan for BatchCop
req.Regions = req.Regions[:]
for _, tr := range req.TableRegions {
req.Regions = append(req.Regions, tr.Regions...)
}
}
for _, ri := range req.Regions {
cop := coprocessor.Request{
Tp: kv.ReqTypeDAG,
Data: req.Data,
StartTs: req.StartTs,
Ranges: ri.Ranges,
}
regionCtx := *req.Context
regionCtx.RegionEpoch = ri.RegionEpoch
regionCtx.RegionId = ri.RegionId
cop.Context = ®ionCtx
reqCtx, err := newRequestCtx(svr, ®ionCtx, "Coprocessor")
if err != nil {
return err
}
reqCtxs = append(reqCtxs, reqCtx)
if reqCtx.regErr != nil {
return &RegionError{err: reqCtx.regErr}
}
copResponse := cophandler.HandleCopRequestWithMPPCtx(reqCtx.getDBReader(), svr.mvccStore.lockStore, &cop, nil)
err = batchCopServer.Send(&coprocessor.BatchResponse{Data: copResponse.Data})
if err != nil {
return err
}
}
return nil
}
// RawCoprocessor implements the tikvpb.TikvServer interface.
func (svr *Server) RawCoprocessor(context.Context, *kvrpcpb.RawCoprocessorRequest) (*kvrpcpb.RawCoprocessorResponse, error) {
panic("unimplemented")
}
func (mrm *MockRegionManager) removeMPPTaskHandler(taskID int64, storeID uint64) error {
set := mrm.getMPPTaskSet(storeID)
if set == nil {
return errors.New("cannot find mpp task set for store")
}
set.mu.Lock()
defer set.mu.Unlock()
if _, ok := set.taskHandlers[taskID]; ok {
delete(set.taskHandlers, taskID)
return nil
}
return errors.New("cannot find mpp task")
}
// IsAlive implements the tikvpb.TikvServer interface.
func (svr *Server) IsAlive(_ context.Context, _ *mpp.IsAliveRequest) (*mpp.IsAliveResponse, error) {
panic("todo")
}
// DispatchMPPTask implements the tikvpb.TikvServer interface.
func (svr *Server) DispatchMPPTask(_ context.Context, _ *mpp.DispatchTaskRequest) (*mpp.DispatchTaskResponse, error) {
panic("todo")
}
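// executeMPPDispatch builds a coprocessor request from the MPP dispatch request and runs it asynchronously; any error is recorded on the task handler.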
func (svr *Server) executeMPPDispatch(ctx context.Context, req *mpp.DispatchTaskRequest, storeAddr string, storeID uint64, handler *cophandler.MPPTaskHandler) error {
var reqCtx *requestCtx
if len(req.TableRegions) > 0 {
// Simple unistore logic for PartitionTableScan.
for _, tr := range req.TableRegions {
req.Regions = append(req.Regions, tr.Regions...)
}
}
if len(req.Regions) > 0 {
kvContext := &kvrpcpb.Context{
RegionId: req.Regions[0].RegionId,
RegionEpoch: req.Regions[0].RegionEpoch,
// This is a hack: reuse the task id in kvContext to pass the MPP task id.
TaskId: uint64(handler.Meta.TaskId),
Peer: &metapb.Peer{StoreId: storeID},
}
var err error
reqCtx, err = newRequestCtx(svr, kvContext, "Mpp")
if err != nil {
return errors.Trace(err)
}
}
copReq := &coprocessor.Request{
Tp: kv.ReqTypeDAG,
Data: req.EncodedPlan,
StartTs: req.Meta.StartTs,
}
for _, regionMeta := range req.Regions {
copReq.Ranges = append(copReq.Ranges, regionMeta.Ranges...)
}
var dbreader *dbreader.DBReader
if reqCtx != nil {
dbreader = reqCtx.getDBReader()
}
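// Run the coprocessor request in the background; errors are surfaced through handler.Err.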
go func() {
resp := cophandler.HandleCopRequestWithMPPCtx(dbreader, svr.mvccStore.lockStore, copReq, &cophandler.MPPCtx{
RPCClient: svr.RPCClient,
StoreAddr: storeAddr,
TaskHandler: handler,
Ctx: ctx,
})
handler.Err = svr.RemoveMPPTaskHandler(req.Meta.TaskId, storeID)
if len(resp.OtherError) > 0 {
handler.Err = errors.New(resp.OtherError)
}
if reqCtx != nil {
reqCtx.finish()
}
}()
return nil
}
// DispatchMPPTaskWithStoreID implements the tikvpb.TikvServer interface.
func (svr *Server) DispatchMPPTaskWithStoreID(ctx context.Context, req *mpp.DispatchTaskRequest, storeID uint64) (*mpp.DispatchTaskResponse, error) {
mppHandler, err := svr.CreateMPPTaskHandler(req.Meta, storeID)
if err != nil {
return nil, errors.Trace(err)
}
storeAddr, err := svr.GetStoreAddrByStoreID(storeID)
if err != nil {
return nil, err
}
err = svr.executeMPPDispatch(ctx, req, storeAddr, storeID, mppHandler)
resp := &mpp.DispatchTaskResponse{}
if err != nil {
resp.Error = &mpp.Error{Msg: err.Error()}
}
return resp, nil
}
// CancelMPPTask implements the tikvpb.TikvServer interface.
func (svr *Server) CancelMPPTask(_ context.Context, _ *mpp.CancelTaskRequest) (*mpp.CancelTaskResponse, error) {
panic("todo")
}
// GetMPPTaskHandler implements the tikvpb.TikvServer interface.
func (svr *Server) GetMPPTaskHandler(taskID int64, storeID uint64) (*cophandler.MPPTaskHandler, error) {
if mrm, ok := svr.regionManager.(*MockRegionManager); ok {
set := mrm.getMPPTaskSet(storeID)
if set == nil {
return nil, errors.New("cannot find mpp task set for store")
}
set.mu.Lock()
defer set.mu.Unlock()
if handler, ok := set.taskHandlers[taskID]; ok {
return handler, nil
}
return nil, nil
}
return nil, errors.New("Only mock region mgr supports get mpp task")
}
// RemoveMPPTaskHandler implements the tikvpb.TikvServer interface.
func (svr *Server) RemoveMPPTaskHandler(taskID int64, storeID uint64) error {
if mrm, ok := svr.regionManager.(*MockRegionManager); ok {
err := mrm.removeMPPTaskHandler(taskID, storeID)
return errors.Trace(err)
}
return errors.New("Only mock region mgr supports remove mpp task")
}
// CreateMPPTaskHandler implements the tikvpb.TikvServer interface.
func (svr *Server) CreateMPPTaskHandler(meta *mpp.TaskMeta, storeID uint64) (*cophandler.MPPTaskHandler, error) {
if mrm, ok := svr.regionManager.(*MockRegionManager); ok {
set := mrm.getMPPTaskSet(storeID)
if set == nil {
return nil, errors.New("cannot find mpp task set for store")
}
set.mu.Lock()
defer set.mu.Unlock()
if handler, ok := set.taskHandlers[meta.TaskId]; ok {
return handler, errors.Errorf("Task %d has been created", meta.TaskId)
}
handler := &cophandler.MPPTaskHandler{
TunnelSet: make(map[int64]*cophandler.ExchangerTunnel),
Meta: meta,
RPCClient: svr.RPCClient,
}
set.taskHandlers[meta.TaskId] = handler
return handler, nil
}
return nil, errors.New("Only mock region mgr supports get mpp task")
}
// EstablishMPPConnection implements the tikvpb.TikvServer interface.
func (svr *Server) EstablishMPPConnection(*mpp.EstablishMPPConnectionRequest, tikvpb.Tikv_EstablishMPPConnectionServer) error {
panic("todo")
}
// EstablishMPPConnectionWithStoreID implements the tikvpb.TikvServer interface.
func (svr *Server) EstablishMPPConnectionWithStoreID(req *mpp.EstablishMPPConnectionRequest, server tikvpb.Tikv_EstablishMPPConnectionServer, storeID uint64) error {
var (
mppHandler *cophandler.MPPTaskHandler
err error
)
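// The task handler may not have been registered yet, so retry a few times before giving up.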
maxRetryTime := 5
for i := 0; i < maxRetryTime; i++ {
mppHandler, err = svr.GetMPPTaskHandler(req.SenderMeta.TaskId, storeID)
if err != nil {
return errors.Trace(err)
}
if mppHandler == nil {
time.Sleep(time.Second)
} else {
break
}
}
if mppHandler == nil {
return errors.New("task not found")
}
ctx1, cancel := context.WithCancel(context.Background())
defer cancel()
tunnel, err := mppHandler.HandleEstablishConn(ctx1, req)
if err != nil {
return errors.Trace(err)
}
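// Stream chunks from the tunnel to the remote end until the tunnel is drained or a send fails.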
var sendError error
for sendError == nil {
chunk, err := tunnel.RecvChunk()
if err != nil {
sendError = server.Send(&mpp.MPPDataPacket{Error: &mpp.Error{Msg: err.Error()}})
break
}
if chunk == nil {
// TODO: return an io.EOF error?
break
}
res := tipb.SelectResponse{
Chunks: []tipb.Chunk{*chunk},
}
raw, err := res.Marshal()
if err != nil {
sendError = server.Send(&mpp.MPPDataPacket{Error: &mpp.Error{Msg: err.Error()}})
break
}
sendError = server.Send(&mpp.MPPDataPacket{Data: raw})
}
return sendError
}
// Raft commands (tikv <-> tikv).
// Raft implements the tikvpb.TikvServer interface.
func (svr *Server) Raft(stream tikvpb.Tikv_RaftServer) error {
return svr.innerServer.Raft(stream)
}
// Snapshot implements the tikvpb.TikvServer interface.
func (svr *Server) Snapshot(stream tikvpb.Tikv_SnapshotServer) error {
return svr.innerServer.Snapshot(stream)
}
// BatchRaft implements the tikvpb.TikvServer interface.
func (svr *Server) BatchRaft(stream tikvpb.Tikv_BatchRaftServer) error {
return svr.innerServer.BatchRaft(stream)
}
// Region commands.
// SplitRegion implements the tikvpb.TikvServer interface.
func (svr *Server) SplitRegion(ctx context.Context, req *kvrpcpb.SplitRegionRequest) (*kvrpcpb.SplitRegionResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "SplitRegion")
if err != nil {
return &kvrpcpb.SplitRegionResponse{RegionError: &errorpb.Error{Message: err.Error()}}, nil
}
defer reqCtx.finish()
return svr.regionManager.SplitRegion(req), nil
}
// Compact implements the tikvpb.TikvServer interface.
func (svr *Server) Compact(ctx context.Context, req *kvrpcpb.CompactRequest) (*kvrpcpb.CompactResponse, error) {
panic("unimplemented")
}
// ReadIndex implements the tikvpb.TikvServer interface.
func (svr *Server) ReadIndex(context.Context, *kvrpcpb.ReadIndexRequest) (*kvrpcpb.ReadIndexResponse, error) {
// TODO:
return &kvrpcpb.ReadIndexResponse{}, nil
}
// Transaction debugger commands.
// MvccGetByKey implements the tikvpb.TikvServer interface.
func (svr *Server) MvccGetByKey(ctx context.Context, req *kvrpcpb.MvccGetByKeyRequest) (*kvrpcpb.MvccGetByKeyResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "MvccGetByKey")
if err != nil {
return &kvrpcpb.MvccGetByKeyResponse{Error: err.Error()}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.MvccGetByKeyResponse{RegionError: reqCtx.regErr}, nil
}
resp := new(kvrpcpb.MvccGetByKeyResponse)
mvccInfo, err := svr.mvccStore.MvccGetByKey(reqCtx, req.GetKey())
if err != nil {
resp.Error = err.Error()
}
resp.Info = mvccInfo
return resp, nil
}
// MvccGetByStartTs implements the tikvpb.TikvServer interface.
func (svr *Server) MvccGetByStartTs(ctx context.Context, req *kvrpcpb.MvccGetByStartTsRequest) (*kvrpcpb.MvccGetByStartTsResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "MvccGetByStartTs")
if err != nil {
return &kvrpcpb.MvccGetByStartTsResponse{Error: err.Error()}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.MvccGetByStartTsResponse{RegionError: reqCtx.regErr}, nil
}
resp := new(kvrpcpb.MvccGetByStartTsResponse)
mvccInfo, key, err := svr.mvccStore.MvccGetByStartTs(reqCtx, req.StartTs)
if err != nil {
resp.Error = err.Error()
}
resp.Info = mvccInfo
resp.Key = key
return resp, nil
}
// UnsafeDestroyRange implements the tikvpb.TikvServer interface.
func (svr *Server) UnsafeDestroyRange(ctx context.Context, req *kvrpcpb.UnsafeDestroyRangeRequest) (*kvrpcpb.UnsafeDestroyRangeResponse, error) {
start, end := req.GetStartKey(), req.GetEndKey()
svr.mvccStore.DeleteFileInRange(start, end)
return &kvrpcpb.UnsafeDestroyRangeResponse{}, nil
}
// Deadlock detection related services.
// GetWaitForEntries tries to get the waitFor entries.
func (svr *Server) GetWaitForEntries(ctx context.Context,
req *deadlockPb.WaitForEntriesRequest) (*deadlockPb.WaitForEntriesResponse, error) {
// TODO
return &deadlockPb.WaitForEntriesResponse{}, nil
}
// Detect will handle detection rpc from other nodes
func (svr *Server) Detect(stream deadlockPb.Deadlock_DetectServer) error {
for {
req, err := stream.Recv()
if err != nil {
if err == io.EOF {
break
}
return err
}
if !svr.mvccStore.DeadlockDetectSvr.IsLeader() {
log.Warn("detection requests received on non leader node")
break
}
resp := svr.mvccStore.DeadlockDetectSvr.Detect(req)
if resp != nil {
if sendErr := stream.Send(resp); sendErr != nil {
log.Error("send deadlock response failed", zap.Error(sendErr))
break
}
}
}
return nil
}
// CheckLockObserver implements the tikvpb.TikvServer interface.
func (svr *Server) CheckLockObserver(context.Context, *kvrpcpb.CheckLockObserverRequest) (*kvrpcpb.CheckLockObserverResponse, error) {
// TODO: implement Observer
return &kvrpcpb.CheckLockObserverResponse{IsClean: true}, nil
}
// PhysicalScanLock implements the tikvpb.TikvServer interface.
func (svr *Server) PhysicalScanLock(ctx context.Context, req *kvrpcpb.PhysicalScanLockRequest) (*kvrpcpb.PhysicalScanLockResponse, error) {
resp := &kvrpcpb.PhysicalScanLockResponse{}
resp.Locks = svr.mvccStore.PhysicalScanLock(req.StartKey, req.MaxTs, int(req.Limit))
return resp, nil
}
// RegisterLockObserver implements the tikvpb.TikvServer interface.
func (svr *Server) RegisterLockObserver(context.Context, *kvrpcpb.RegisterLockObserverRequest) (*kvrpcpb.RegisterLockObserverResponse, error) {
// TODO: implement Observer
return &kvrpcpb.RegisterLockObserverResponse{}, nil
}
// RemoveLockObserver implements the tikvpb.TikvServer interface.
func (svr *Server) RemoveLockObserver(context.Context, *kvrpcpb.RemoveLockObserverRequest) (*kvrpcpb.RemoveLockObserverResponse, error) {
// TODO: implement Observer
return &kvrpcpb.RemoveLockObserverResponse{}, nil
}
// CheckLeader implements the tikvpb.TikvServer interface.
func (svr *Server) CheckLeader(context.Context, *kvrpcpb.CheckLeaderRequest) (*kvrpcpb.CheckLeaderResponse, error) {
panic("unimplemented")
}
// RawCompareAndSwap implements the tikvpb.TikvServer interface.
func (svr *Server) RawCompareAndSwap(context.Context, *kvrpcpb.RawCASRequest) (*kvrpcpb.RawCASResponse, error) {
panic("implement me")
}
// GetStoreSafeTS implements the tikvpb.TikvServer interface.
func (svr *Server) GetStoreSafeTS(context.Context, *kvrpcpb.StoreSafeTSRequest) (*kvrpcpb.StoreSafeTSResponse, error) {
return &kvrpcpb.StoreSafeTSResponse{}, nil
}
// GetLockWaitInfo implements the tikvpb.TikvServer interface.
func (svr *Server) GetLockWaitInfo(context.Context, *kvrpcpb.GetLockWaitInfoRequest) (*kvrpcpb.GetLockWaitInfoResponse, error) {
panic("unimplemented")
}
// RawChecksum implements the tikvpb.TikvServer interface.
func (svr *Server) RawChecksum(context.Context, *kvrpcpb.RawChecksumRequest) (*kvrpcpb.RawChecksumResponse, error) {
panic("unimplemented")
}
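// convertToKeyError converts a storage error into the corresponding kvrpcpb.KeyError; unrecognized errors are reported through the Abort field.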
func convertToKeyError(err error) *kvrpcpb.KeyError {
if err == nil {
return nil
}
causeErr := errors.Cause(err)
switch x := causeErr.(type) {
case *kverrors.ErrLocked:
return &kvrpcpb.KeyError{
Locked: x.Lock.ToLockInfo(x.Key),
}
case kverrors.ErrRetryable:
return &kvrpcpb.KeyError{
Retryable: x.Error(),
}
case *kverrors.ErrKeyAlreadyExists:
return &kvrpcpb.KeyError{
AlreadyExist: &kvrpcpb.AlreadyExist{
Key: x.Key,
},
}
case *kverrors.ErrConflict:
return &kvrpcpb.KeyError{
Conflict: &kvrpcpb.WriteConflict{
StartTs: x.StartTS,
ConflictTs: x.ConflictTS,
ConflictCommitTs: x.ConflictCommitTS,
Key: x.Key,
Reason: x.Reason,
},
}
case *kverrors.ErrDeadlock:
return &kvrpcpb.KeyError{
Deadlock: &kvrpcpb.Deadlock{
LockKey: x.LockKey,
LockTs: x.LockTS,
DeadlockKeyHash: x.DeadlockKeyHash,
WaitChain: x.WaitChain,
},
}
case *kverrors.ErrCommitExpire:
return &kvrpcpb.KeyError{
CommitTsExpired: &kvrpcpb.CommitTsExpired{
StartTs: x.StartTs,
AttemptedCommitTs: x.CommitTs,
Key: x.Key,
MinCommitTs: x.MinCommitTs,
},
}
case *kverrors.ErrTxnNotFound:
return &kvrpcpb.KeyError{
TxnNotFound: &kvrpcpb.TxnNotFound{
StartTs: x.StartTS,
PrimaryKey: x.PrimaryKey,
},
}
case *kverrors.ErrAssertionFailed:
return &kvrpcpb.KeyError{
AssertionFailed: &kvrpcpb.AssertionFailed{
StartTs: x.StartTS,
Key: x.Key,
Assertion: x.Assertion,
ExistingStartTs: x.ExistingStartTS,
ExistingCommitTs: x.ExistingCommitTS,
},
}
default:
return &kvrpcpb.KeyError{
Abort: err.Error(),
}
}
}
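// convertToPBError converts an error into either a region error or a key error for the RPC response.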
func convertToPBError(err error) (*kvrpcpb.KeyError, *errorpb.Error) {
if regErr := extractRegionError(err); regErr != nil {
return nil, regErr
}
return convertToKeyError(err), nil
}
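// convertToPBErrors is like convertToPBError but wraps the key error in a slice, as expected by batch responses.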
func convertToPBErrors(err error) ([]*kvrpcpb.KeyError, *errorpb.Error) {
if err != nil {
if regErr := extractRegionError(err); regErr != nil {
return nil, regErr
}
return []*kvrpcpb.KeyError{convertToKeyError(err)}, nil
}
return nil, nil
}
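// extractRegionError returns the region error carried by a PBError, or nil if err is not a PBError.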
func extractRegionError(err error) *errorpb.Error {
if pbError, ok := err.(*pberror.PBError); ok {
return pbError.RequestErr
}
return nil
}
| store/mockstore/unistore/tikv/server.go | 0 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.002607952570542693,
0.00019903085194528103,
0.00016020267503336072,
0.00017018058861140162,
0.00023540844267699867
] |
{
"id": 4,
"code_window": [
"\t\t\treturn nil\n",
"\t\t},\n",
"\t},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: TiDBRCReadCheckTS, Type: TypeBool, Value: BoolToOnOff(DefRCReadCheckTS), SetSession: func(s *SessionVars, val string) error {\n",
"\t\ts.RcReadCheckTS = TiDBOptOn(val)\n",
"\t\treturn nil\n",
"\t}},\n",
"\t{Scope: ScopeGlobal | ScopeSession, Name: TiDBRCWriteCheckTs, Type: TypeBool, Value: BoolToOnOff(DefTiDBRcWriteCheckTs), SetSession: func(s *SessionVars, val string) error {\n",
"\t\ts.RcWriteCheckTS = TiDBOptOn(val)\n",
"\t\treturn nil\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "sessionctx/variable/sysvar.go",
"type": "replace",
"edit_start_line_idx": 1723
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl_test
import (
"context"
"fmt"
"math"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/ddl/util"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/sessiontxn"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/testkit/external"
"github.com/pingcap/tidb/util/dbterror"
"github.com/pingcap/tidb/util/gcutil"
"github.com/stretchr/testify/require"
"github.com/tikv/client-go/v2/testutils"
)
func TestTruncateAllPartitions(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("create table partition_table (v int) partition by hash (v) partitions 10")
tk.MustExec("insert into partition_table values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9),(10)")
tk.MustExec("alter table partition_table truncate partition all")
tk.MustQuery("select count(*) from partition_table").Check(testkit.Rows("0"))
}
func TestIssue23872(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
for _, test := range []struct {
sql string
flag uint
}{
{
"create table t(id smallint,id1 int, primary key (id))",
mysql.NotNullFlag | mysql.PriKeyFlag | mysql.NoDefaultValueFlag,
},
{
"create table t(a int default 1, primary key(a))",
mysql.NotNullFlag | mysql.PriKeyFlag,
},
} {
tk.MustExec("drop table if exists t")
tk.MustExec(test.sql)
rs, err := tk.Exec("select * from t")
require.NoError(t, err)
cols := rs.Fields()
require.NoError(t, rs.Close())
require.Equal(t, test.flag, cols[0].Column.GetFlag())
}
}
func TestChangeMaxIndexLength(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
defer config.RestoreFunc()()
config.UpdateGlobal(func(conf *config.Config) {
conf.MaxIndexLength = config.DefMaxOfMaxIndexLength
})
tk.MustExec("use test")
tk.MustExec("create table t (c1 varchar(3073), index(c1)) charset = ascii")
tk.MustExec(fmt.Sprintf("create table t1 (c1 varchar(%d), index(c1)) charset = ascii;", config.DefMaxOfMaxIndexLength))
err := tk.ExecToErr(fmt.Sprintf("create table t2 (c1 varchar(%d), index(c1)) charset = ascii;", config.DefMaxOfMaxIndexLength+1))
require.EqualError(t, err, "[ddl:1071]Specified key was too long; max key length is 12288 bytes")
}
func TestCreateTableWithLike(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
// for the same database
tk.MustExec("create database ctwl_db")
tk.MustExec("use ctwl_db")
tk.MustExec("create table tt(id int primary key)")
tk.MustExec("create table t (c1 int not null auto_increment, c2 int, constraint cc foreign key (c2) references tt(id), primary key(c1)) auto_increment = 10")
tk.MustExec("insert into t set c2=1")
tk.MustExec("create table t1 like ctwl_db.t")
tk.MustExec("insert into t1 set c2=11")
tk.MustExec("create table t2 (like ctwl_db.t1)")
tk.MustExec("insert into t2 set c2=12")
tk.MustQuery("select * from t").Check(testkit.Rows("10 1"))
tk.MustQuery("select * from t1").Check(testkit.Rows("1 11"))
tk.MustQuery("select * from t2").Check(testkit.Rows("1 12"))
is := domain.GetDomain(tk.Session()).InfoSchema()
tbl1, err := is.TableByName(model.NewCIStr("ctwl_db"), model.NewCIStr("t1"))
require.NoError(t, err)
tbl1Info := tbl1.Meta()
require.Nil(t, tbl1Info.ForeignKeys)
require.True(t, tbl1Info.PKIsHandle)
col := tbl1Info.Columns[0]
hasNotNull := mysql.HasNotNullFlag(col.GetFlag())
require.True(t, hasNotNull)
tbl2, err := is.TableByName(model.NewCIStr("ctwl_db"), model.NewCIStr("t2"))
require.NoError(t, err)
tbl2Info := tbl2.Meta()
require.Nil(t, tbl2Info.ForeignKeys)
require.True(t, tbl2Info.PKIsHandle)
require.True(t, mysql.HasNotNullFlag(tbl2Info.Columns[0].GetFlag()))
// for different databases
tk.MustExec("create database ctwl_db1")
tk.MustExec("use ctwl_db1")
tk.MustExec("create table t1 like ctwl_db.t")
tk.MustExec("insert into t1 set c2=11")
tk.MustQuery("select * from t1").Check(testkit.Rows("1 11"))
is = domain.GetDomain(tk.Session()).InfoSchema()
tbl1, err = is.TableByName(model.NewCIStr("ctwl_db1"), model.NewCIStr("t1"))
require.NoError(t, err)
require.Nil(t, tbl1.Meta().ForeignKeys)
// for table partition
tk.MustExec("use ctwl_db")
tk.MustExec("create table pt1 (id int) partition by range columns (id) (partition p0 values less than (10))")
tk.MustExec("insert into pt1 values (1),(2),(3),(4)")
tk.MustExec("create table ctwl_db1.pt1 like ctwl_db.pt1")
tk.MustQuery("select * from ctwl_db1.pt1").Check(testkit.Rows())
// Test create table like for partition table.
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
tk.MustExec("use test")
tk.MustExec("set @@global.tidb_scatter_region=1")
tk.MustExec("drop table if exists partition_t")
tk.MustExec("create table partition_t (a int, b int,index(a)) partition by hash (a) partitions 3")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 like partition_t")
re := tk.MustQuery("show table t1 regions")
rows := re.Rows()
require.Len(t, rows, 3)
tbl := external.GetTableByName(t, tk, "test", "t1")
partitionDef := tbl.Meta().GetPartitionInfo().Definitions
require.Regexp(t, fmt.Sprintf("t_%d_.*", partitionDef[0].ID), rows[0][1])
require.Regexp(t, fmt.Sprintf("t_%d_.*", partitionDef[1].ID), rows[1][1])
require.Regexp(t, fmt.Sprintf("t_%d_.*", partitionDef[2].ID), rows[2][1])
// Test pre-split table region when create table like.
tk.MustExec("drop table if exists t_pre")
tk.MustExec("create table t_pre (a int, b int) shard_row_id_bits = 2 pre_split_regions=2")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2 like t_pre")
re = tk.MustQuery("show table t2 regions")
rows = re.Rows()
// Table t2 which create like t_pre should have 4 regions now.
require.Len(t, rows, 4)
tbl = external.GetTableByName(t, tk, "test", "t2")
require.Equal(t, fmt.Sprintf("t_%d_r_2305843009213693952", tbl.Meta().ID), rows[1][1])
require.Equal(t, fmt.Sprintf("t_%d_r_4611686018427387904", tbl.Meta().ID), rows[2][1])
require.Equal(t, fmt.Sprintf("t_%d_r_6917529027641081856", tbl.Meta().ID), rows[3][1])
// Test that after truncating the table the regions are also split.
tk.MustExec("truncate table t2")
re = tk.MustQuery("show table t2 regions")
rows = re.Rows()
require.Equal(t, 4, len(rows))
tbl = external.GetTableByName(t, tk, "test", "t2")
require.Equal(t, fmt.Sprintf("t_%d_r_2305843009213693952", tbl.Meta().ID), rows[1][1])
require.Equal(t, fmt.Sprintf("t_%d_r_4611686018427387904", tbl.Meta().ID), rows[2][1])
require.Equal(t, fmt.Sprintf("t_%d_r_6917529027641081856", tbl.Meta().ID), rows[3][1])
defer atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
// for failure table cases
tk.MustExec("use ctwl_db")
failSQL := "create table t1 like test_not_exist.t"
tk.MustGetErrCode(failSQL, mysql.ErrNoSuchTable)
failSQL = "create table t1 like test.t_not_exist"
tk.MustGetErrCode(failSQL, mysql.ErrNoSuchTable)
failSQL = "create table t1 (like test_not_exist.t)"
tk.MustGetErrCode(failSQL, mysql.ErrNoSuchTable)
failSQL = "create table test_not_exis.t1 like ctwl_db.t"
tk.MustGetErrCode(failSQL, mysql.ErrBadDB)
failSQL = "create table t1 like ctwl_db.t"
tk.MustGetErrCode(failSQL, mysql.ErrTableExists)
// test failure for wrong object cases
tk.MustExec("drop view if exists v")
tk.MustExec("create view v as select 1 from dual")
tk.MustGetErrCode("create table viewTable like v", mysql.ErrWrongObject)
tk.MustExec("drop sequence if exists seq")
tk.MustExec("create sequence seq")
tk.MustGetErrCode("create table sequenceTable like seq", mysql.ErrWrongObject)
tk.MustExec("drop database ctwl_db")
tk.MustExec("drop database ctwl_db1")
}
func TestCreateTableWithLikeAtTemporaryMode(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
// Test create table like at temporary mode.
tk.MustExec("use test")
tk.MustExec("drop table if exists temporary_table")
tk.MustExec("create global temporary table temporary_table (a int, b int,index(a)) on commit delete rows")
tk.MustExec("drop table if exists temporary_table_t1")
err := tk.ExecToErr("create table temporary_table_t1 like temporary_table")
require.Equal(t, core.ErrOptOnTemporaryTable.GenWithStackByArgs("create table like").Error(), err.Error())
tk.MustExec("drop table if exists temporary_table")
// Test create temporary table like.
// Test auto_random.
tk.MustExec("drop table if exists auto_random_table")
err = tk.ExecToErr("create table auto_random_table (a bigint primary key auto_random(3), b varchar(255))")
defer tk.MustExec("drop table if exists auto_random_table")
tk.MustExec("drop table if exists auto_random_temporary_global")
err = tk.ExecToErr("create global temporary table auto_random_temporary_global like auto_random_table on commit delete rows")
require.Equal(t, core.ErrOptOnTemporaryTable.GenWithStackByArgs("auto_random").Error(), err.Error())
// Test pre split regions.
tk.MustExec("drop table if exists table_pre_split")
err = tk.ExecToErr("create table table_pre_split(id int) shard_row_id_bits = 2 pre_split_regions=2")
defer tk.MustExec("drop table if exists table_pre_split")
tk.MustExec("drop table if exists temporary_table_pre_split")
err = tk.ExecToErr("create global temporary table temporary_table_pre_split like table_pre_split ON COMMIT DELETE ROWS")
require.Equal(t, core.ErrOptOnTemporaryTable.GenWithStackByArgs("pre split regions").Error(), err.Error())
// Test shard_row_id_bits.
tk.MustExec("drop table if exists shard_row_id_table, shard_row_id_temporary_table, shard_row_id_table_plus, shard_row_id_temporary_table_plus")
err = tk.ExecToErr("create table shard_row_id_table (a int) shard_row_id_bits = 5")
err = tk.ExecToErr("create global temporary table shard_row_id_temporary_table like shard_row_id_table on commit delete rows")
require.Equal(t, core.ErrOptOnTemporaryTable.GenWithStackByArgs("shard_row_id_bits").Error(), err.Error())
tk.MustExec("create table shard_row_id_table_plus (a int)")
tk.MustExec("create global temporary table shard_row_id_temporary_table_plus (a int) on commit delete rows")
defer tk.MustExec("drop table if exists shard_row_id_table, shard_row_id_temporary_table, shard_row_id_table_plus, shard_row_id_temporary_table_plus")
err = tk.ExecToErr("alter table shard_row_id_temporary_table_plus shard_row_id_bits = 4")
require.Equal(t, dbterror.ErrOptOnTemporaryTable.GenWithStackByArgs("shard_row_id_bits").Error(), err.Error())
// Test partition.
tk.MustExec("drop table if exists global_partition_table")
tk.MustExec("create table global_partition_table (a int, b int) partition by hash(a) partitions 3")
defer tk.MustExec("drop table if exists global_partition_table")
tk.MustGetErrCode("create global temporary table global_partition_temp_table like global_partition_table ON COMMIT DELETE ROWS;", errno.ErrPartitionNoTemporary)
// Test virtual columns.
tk.MustExec("drop table if exists test_gv_ddl, test_gv_ddl_temp")
tk.MustExec(`create table test_gv_ddl(a int, b int as (a+8) virtual, c int as (b + 2) stored)`)
tk.MustExec(`create global temporary table test_gv_ddl_temp like test_gv_ddl on commit delete rows;`)
defer tk.MustExec("drop table if exists test_gv_ddl_temp, test_gv_ddl")
is := sessiontxn.GetTxnManager(tk.Session()).GetTxnInfoSchema()
table, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("test_gv_ddl"))
require.NoError(t, err)
testCases := []struct {
generatedExprString string
generatedStored bool
}{
{"", false},
{"`a` + 8", false},
{"`b` + 2", true},
}
for i, column := range table.Meta().Columns {
require.Equal(t, testCases[i].generatedExprString, column.GeneratedExprString)
require.Equal(t, testCases[i].generatedStored, column.GeneratedStored)
}
result := tk.MustQuery(`DESC test_gv_ddl_temp`)
result.Check(testkit.Rows(`a int(11) YES <nil> `, `b int(11) YES <nil> VIRTUAL GENERATED`, `c int(11) YES <nil> STORED GENERATED`))
tk.MustExec("begin")
tk.MustExec("insert into test_gv_ddl_temp values (1, default, default)")
tk.MustQuery("select * from test_gv_ddl_temp").Check(testkit.Rows("1 9 11"))
err = tk.ExecToErr("commit")
require.NoError(t, err)
// Test foreign key.
tk.MustExec("drop table if exists test_foreign_key, t1")
tk.MustExec("create table t1 (a int, b int)")
tk.MustExec("create table test_foreign_key (c int,d int,foreign key (d) references t1 (b))")
defer tk.MustExec("drop table if exists test_foreign_key, t1")
tk.MustExec("create global temporary table test_foreign_key_temp like test_foreign_key on commit delete rows")
is = sessiontxn.GetTxnManager(tk.Session()).GetTxnInfoSchema()
table, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("test_foreign_key_temp"))
require.NoError(t, err)
tableInfo := table.Meta()
require.Equal(t, 0, len(tableInfo.ForeignKeys))
// Issue 25613.
// Test from->normal, to->normal.
tk.MustExec("drop table if exists tb1, tb2")
tk.MustExec("create table tb1(id int)")
tk.MustExec("create table tb2 like tb1")
defer tk.MustExec("drop table if exists tb1, tb2")
tk.MustQuery("show create table tb2").Check(testkit.Rows("tb2 CREATE TABLE `tb2` (\n" +
" `id` int(11) DEFAULT NULL\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
// Test from->normal, to->global temporary.
tk.MustExec("drop table if exists tb3, tb4")
tk.MustExec("create table tb3(id int)")
tk.MustExec("create global temporary table tb4 like tb3 on commit delete rows")
defer tk.MustExec("drop table if exists tb3, tb4")
tk.MustQuery("show create table tb4").Check(testkit.Rows("tb4 CREATE GLOBAL TEMPORARY TABLE `tb4` (\n" +
" `id` int(11) DEFAULT NULL\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ON COMMIT DELETE ROWS"))
// Test from->global temporary, to->normal.
tk.MustExec("drop table if exists tb5, tb6")
tk.MustExec("create global temporary table tb5(id int) on commit delete rows")
err = tk.ExecToErr("create table tb6 like tb5")
require.EqualError(t, err, core.ErrOptOnTemporaryTable.GenWithStackByArgs("create table like").Error())
defer tk.MustExec("drop table if exists tb5, tb6")
// Test from->global temporary, to->global temporary.
tk.MustExec("drop table if exists tb7, tb8")
tk.MustExec("create global temporary table tb7(id int) on commit delete rows")
err = tk.ExecToErr("create global temporary table tb8 like tb7 on commit delete rows")
require.EqualError(t, err, core.ErrOptOnTemporaryTable.GenWithStackByArgs("create table like").Error())
defer tk.MustExec("drop table if exists tb7, tb8")
// Test from->normal, to->local temporary
tk.MustExec("drop table if exists tb11, tb12")
tk.MustExec("create table tb11 (i int primary key, j int)")
tk.MustExec("create temporary table tb12 like tb11")
tk.MustQuery("show create table tb12").Check(testkit.Rows("tb12 CREATE TEMPORARY TABLE `tb12` (\n" +
" `i` int(11) NOT NULL,\n `j` int(11) DEFAULT NULL,\n PRIMARY KEY (`i`) /*T![clustered_index] CLUSTERED */\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("create temporary table if not exists tb12 like tb11")
err = infoschema.ErrTableExists.GenWithStackByArgs("test.tb12")
require.EqualError(t, err, tk.Session().GetSessionVars().StmtCtx.GetWarnings()[0].Err.Error())
defer tk.MustExec("drop table if exists tb11, tb12")
// Test from->local temporary, to->local temporary
tk.MustExec("drop table if exists tb13, tb14")
tk.MustExec("create temporary table tb13 (i int primary key, j int)")
err = tk.ExecToErr("create temporary table tb14 like tb13")
require.Equal(t, core.ErrOptOnTemporaryTable.GenWithStackByArgs("create table like").Error(), err.Error())
defer tk.MustExec("drop table if exists tb13, tb14")
// Test from->local temporary, to->normal
tk.MustExec("drop table if exists tb15, tb16")
tk.MustExec("create temporary table tb15 (i int primary key, j int)")
err = tk.ExecToErr("create table tb16 like tb15")
require.Equal(t, core.ErrOptOnTemporaryTable.GenWithStackByArgs("create table like").Error(), err.Error())
defer tk.MustExec("drop table if exists tb15, tb16")
tk.MustExec("drop table if exists table_pre_split, tmp_pre_split")
tk.MustExec("create table table_pre_split(id int) shard_row_id_bits=2 pre_split_regions=2")
err = tk.ExecToErr("create temporary table tmp_pre_split like table_pre_split")
require.Equal(t, core.ErrOptOnTemporaryTable.GenWithStackByArgs("pre split regions").Error(), err.Error())
defer tk.MustExec("drop table if exists table_pre_split, tmp_pre_split")
tk.MustExec("drop table if exists table_shard_row_id, tmp_shard_row_id")
tk.MustExec("create table table_shard_row_id(id int) shard_row_id_bits=2")
err = tk.ExecToErr("create temporary table tmp_shard_row_id like table_shard_row_id")
require.Equal(t, core.ErrOptOnTemporaryTable.GenWithStackByArgs("shard_row_id_bits").Error(), err.Error())
defer tk.MustExec("drop table if exists table_shard_row_id, tmp_shard_row_id")
tk.MustExec("drop table if exists partition_table, tmp_partition_table")
tk.MustExec("create table partition_table (a int, b int) partition by hash(a) partitions 3")
tk.MustGetErrCode("create temporary table tmp_partition_table like partition_table", errno.ErrPartitionNoTemporary)
defer tk.MustExec("drop table if exists partition_table, tmp_partition_table")
tk.MustExec("drop table if exists foreign_key_table1, foreign_key_table2, foreign_key_tmp")
tk.MustExec("create table foreign_key_table1 (a int, b int)")
tk.MustExec("create table foreign_key_table2 (c int,d int,foreign key (d) references foreign_key_table1 (b))")
tk.MustExec("create temporary table foreign_key_tmp like foreign_key_table2")
is = sessiontxn.GetTxnManager(tk.Session()).GetTxnInfoSchema()
table, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("foreign_key_tmp"))
require.NoError(t, err)
tableInfo = table.Meta()
require.Equal(t, 0, len(tableInfo.ForeignKeys))
defer tk.MustExec("drop table if exists foreign_key_table1, foreign_key_table2, foreign_key_tmp")
// Test for placement
tk.MustExec("drop placement policy if exists p1")
tk.MustExec("create placement policy p1 primary_region='r1' regions='r1,r2'")
defer tk.MustExec("drop placement policy p1")
tk.MustExec("drop table if exists placement_table1")
tk.MustExec("create table placement_table1(id int) placement policy p1")
defer tk.MustExec("drop table if exists placement_table1")
err = tk.ExecToErr("create global temporary table g_tmp_placement1 like placement_table1 on commit delete rows")
require.Equal(t, core.ErrOptOnTemporaryTable.GenWithStackByArgs("placement").Error(), err.Error())
err = tk.ExecToErr("create temporary table l_tmp_placement1 like placement_table1")
require.Equal(t, core.ErrOptOnTemporaryTable.GenWithStackByArgs("placement").Error(), err.Error())
}
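// createMockStoreAndDomain bootstraps a mock store and domain for the DDL tests and registers cleanup on the testing.T.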
func createMockStoreAndDomain(t *testing.T) (store kv.Storage, dom *domain.Domain) {
session.SetSchemaLease(200 * time.Millisecond)
session.DisableStats4Test()
ddl.SetWaitTimeWhenErrorOccurred(1 * time.Microsecond)
var err error
store, err = mockstore.NewMockStore()
require.NoError(t, err)
dom, err = session.BootstrapSession(store)
require.NoError(t, err)
t.Cleanup(func() {
dom.Close()
require.NoError(t, store.Close())
})
return
}
// TestCancelAddIndexPanic tests canceling the DDL job when the add index worker is not started.
func TestCancelAddIndexPanic(t *testing.T) {
store, dom := createMockStoreAndDomain(t)
tk := testkit.NewTestKit(t, store)
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/errorMockPanic", `return(true)`))
defer func() {
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/errorMockPanic"))
}()
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(c1 int, c2 int)")
tkCancel := testkit.NewTestKit(t, store)
defer tk.MustExec("drop table t")
for i := 0; i < 5; i++ {
tk.MustExec("insert into t values (?, ?)", i, i)
}
var checkErr error
oldReorgWaitTimeout := ddl.ReorgWaitTimeout
ddl.ReorgWaitTimeout = 50 * time.Millisecond
defer func() { ddl.ReorgWaitTimeout = oldReorgWaitTimeout }()
hook := &ddl.TestDDLCallback{Do: dom}
hook.OnJobRunBeforeExported = func(job *model.Job) {
if job.Type == model.ActionAddIndex && job.State == model.JobStateRunning && job.SchemaState == model.StateWriteReorganization && job.SnapshotVer != 0 {
tkCancel.MustQuery(fmt.Sprintf("admin cancel ddl jobs %d", job.ID))
}
}
dom.DDL().SetHook(hook)
rs, err := tk.Exec("alter table t add index idx_c2(c2)")
if rs != nil {
require.NoError(t, rs.Close())
}
require.NoError(t, checkErr)
require.Error(t, err)
errMsg := err.Error()
require.Truef(t, strings.HasPrefix(errMsg, "[ddl:8214]Cancelled DDL job"), "%v", errMsg)
}
func TestRecoverTableByJobID(t *testing.T) {
store, _ := createMockStoreAndDomain(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("create database if not exists test_recover")
tk.MustExec("use test_recover")
tk.MustExec("drop table if exists t_recover")
tk.MustExec("create table t_recover (a int)")
defer func(originGC bool) {
if originGC {
util.EmulatorGCEnable()
} else {
util.EmulatorGCDisable()
}
}(util.IsEmulatorGCEnable())
// Disable emulator GC.
// Otherwise, emulator GC will delete the table record as soon as possible after the drop table DDL is executed.
util.EmulatorGCDisable()
gcTimeFormat := "20060102-15:04:05 -0700 MST"
timeBeforeDrop := time.Now().Add(0 - 48*60*60*time.Second).Format(gcTimeFormat)
timeAfterDrop := time.Now().Add(48 * 60 * 60 * time.Second).Format(gcTimeFormat)
safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '')
ON DUPLICATE KEY
UPDATE variable_value = '%[1]s'`
// clear GC variables first.
tk.MustExec("delete from mysql.tidb where variable_name in ( 'tikv_gc_safe_point','tikv_gc_enable' )")
tk.MustExec("insert into t_recover values (1),(2),(3)")
tk.MustExec("drop table t_recover")
getDDLJobID := func(table, tp string) int64 {
rs, err := tk.Exec("admin show ddl jobs")
require.NoError(t, err)
rows, err := session.GetRows4Test(context.Background(), tk.Session(), rs)
require.NoError(t, err)
for _, row := range rows {
if row.GetString(1) == table && row.GetString(3) == tp {
return row.GetInt64(0)
}
}
require.FailNowf(t, "can't find %s table of %s", tp, table)
return -1
}
jobID := getDDLJobID("test_recover", "drop table")
// If the GC safe point does not exist in mysql.tidb
err := tk.ExecToErr(fmt.Sprintf("recover table by job %d", jobID))
require.EqualError(t, err, "can not get 'tikv_gc_safe_point'")
// set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// If the GC enable flag does not exist in mysql.tidb
tk.MustExec(fmt.Sprintf("recover table by job %d", jobID))
tk.MustExec("DROP TABLE t_recover")
err = gcutil.EnableGC(tk.Session())
require.NoError(t, err)
// recover job is before GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeAfterDrop))
err = tk.ExecToErr(fmt.Sprintf("recover table by job %d", jobID))
require.Error(t, err)
require.Contains(t, err.Error(), "snapshot is older than GC safe point")
// set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// If there is a new table with the same name, the recovery should fail.
tk.MustExec("create table t_recover (a int)")
err = tk.ExecToErr(fmt.Sprintf("recover table by job %d", jobID))
require.EqualError(t, err, infoschema.ErrTableExists.GenWithStackByArgs("t_recover").Error())
// drop the new table with the same name, then recover table.
tk.MustExec("drop table t_recover")
// do recover table.
tk.MustExec(fmt.Sprintf("recover table by job %d", jobID))
// check recover table meta and data record.
tk.MustQuery("select * from t_recover").Check(testkit.Rows("1", "2", "3"))
// check recover table autoID.
tk.MustExec("insert into t_recover values (4),(5),(6)")
tk.MustQuery("select * from t_recover").Check(testkit.Rows("1", "2", "3", "4", "5", "6"))
// Recover table by a non-existent job.
err = tk.ExecToErr(fmt.Sprintf("recover table by job %d", 10000000))
require.Error(t, err)
// Disable GC manually first; after recovering the table, the GC enable status should remain disabled.
err = gcutil.DisableGC(tk.Session())
require.NoError(t, err)
tk.MustExec("delete from t_recover where a > 1")
tk.MustExec("drop table t_recover")
jobID = getDDLJobID("test_recover", "drop table")
tk.MustExec(fmt.Sprintf("recover table by job %d", jobID))
// check recover table meta and data record.
tk.MustQuery("select * from t_recover").Check(testkit.Rows("1"))
// check recover table autoID.
tk.MustExec("insert into t_recover values (7),(8),(9)")
tk.MustQuery("select * from t_recover").Check(testkit.Rows("1", "7", "8", "9"))
// Test for recover truncate table.
tk.MustExec("truncate table t_recover")
tk.MustExec("rename table t_recover to t_recover_new")
jobID = getDDLJobID("test_recover", "truncate table")
tk.MustExec(fmt.Sprintf("recover table by job %d", jobID))
tk.MustExec("insert into t_recover values (10)")
tk.MustQuery("select * from t_recover").Check(testkit.Rows("1", "7", "8", "9", "10"))
gcEnable, err := gcutil.CheckGCEnable(tk.Session())
require.NoError(t, err)
require.Equal(t, false, gcEnable)
}
func TestRecoverTableByJobIDFail(t *testing.T) {
store, dom := createMockStoreAndDomain(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("create database if not exists test_recover")
tk.MustExec("use test_recover")
tk.MustExec("drop table if exists t_recover")
tk.MustExec("create table t_recover (a int)")
defer func(originGC bool) {
if originGC {
util.EmulatorGCEnable()
} else {
util.EmulatorGCDisable()
}
}(util.IsEmulatorGCEnable())
// Disable emulator GC.
// Otherwise, emulator GC will delete the table record as soon as possible after the drop table DDL is executed.
util.EmulatorGCDisable()
gcTimeFormat := "20060102-15:04:05 -0700 MST"
timeBeforeDrop := time.Now().Add(0 - 48*60*60*time.Second).Format(gcTimeFormat)
safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '')
ON DUPLICATE KEY
UPDATE variable_value = '%[1]s'`
tk.MustExec("insert into t_recover values (1),(2),(3)")
tk.MustExec("drop table t_recover")
rs, err := tk.Exec("admin show ddl jobs")
require.NoError(t, err)
rows, err := session.GetRows4Test(context.Background(), tk.Session(), rs)
require.NoError(t, err)
row := rows[0]
require.Equal(t, "test_recover", row.GetString(1))
require.Equal(t, "drop table", row.GetString(3))
jobID := row.GetInt64(0)
// enableGC first
err = gcutil.EnableGC(tk.Session())
require.NoError(t, err)
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// set hook
hook := &ddl.TestDDLCallback{}
hook.OnJobRunBeforeExported = func(job *model.Job) {
if job.Type == model.ActionRecoverTable {
require.NoError(t, failpoint.Enable("tikvclient/mockCommitError", `return(true)`))
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/mockRecoverTableCommitErr", `return(true)`))
}
}
dom.DDL().SetHook(hook)
// do recover table.
tk.MustExec(fmt.Sprintf("recover table by job %d", jobID))
require.NoError(t, failpoint.Disable("tikvclient/mockCommitError"))
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/mockRecoverTableCommitErr"))
// make sure enable GC after recover table.
enable, err := gcutil.CheckGCEnable(tk.Session())
require.NoError(t, err)
require.Equal(t, true, enable)
// check recover table meta and data record.
tk.MustQuery("select * from t_recover").Check(testkit.Rows("1", "2", "3"))
// check recover table autoID.
tk.MustExec("insert into t_recover values (4),(5),(6)")
tk.MustQuery("select * from t_recover").Check(testkit.Rows("1", "2", "3", "4", "5", "6"))
}
func TestRecoverTableByTableNameFail(t *testing.T) {
store, dom := createMockStoreAndDomain(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("create database if not exists test_recover")
tk.MustExec("use test_recover")
tk.MustExec("drop table if exists t_recover")
tk.MustExec("create table t_recover (a int)")
defer func(originGC bool) {
if originGC {
util.EmulatorGCEnable()
} else {
util.EmulatorGCDisable()
}
}(util.IsEmulatorGCEnable())
// Disable emulator GC.
// Otherwise, emulator GC will delete the table record as soon as possible after the drop table DDL is executed.
util.EmulatorGCDisable()
gcTimeFormat := "20060102-15:04:05 -0700 MST"
timeBeforeDrop := time.Now().Add(0 - 48*60*60*time.Second).Format(gcTimeFormat)
safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '')
ON DUPLICATE KEY
UPDATE variable_value = '%[1]s'`
tk.MustExec("insert into t_recover values (1),(2),(3)")
tk.MustExec("drop table t_recover")
// enableGC first
err := gcutil.EnableGC(tk.Session())
require.NoError(t, err)
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// set hook
hook := &ddl.TestDDLCallback{}
hook.OnJobRunBeforeExported = func(job *model.Job) {
if job.Type == model.ActionRecoverTable {
require.NoError(t, failpoint.Enable("tikvclient/mockCommitError", `return(true)`))
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/mockRecoverTableCommitErr", `return(true)`))
}
}
dom.DDL().SetHook(hook)
// do recover table.
tk.MustExec("recover table t_recover")
require.NoError(t, failpoint.Disable("tikvclient/mockCommitError"))
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/mockRecoverTableCommitErr"))
// make sure enable GC after recover table.
enable, err := gcutil.CheckGCEnable(tk.Session())
require.NoError(t, err)
require.True(t, enable)
// check recover table meta and data record.
tk.MustQuery("select * from t_recover").Check(testkit.Rows("1", "2", "3"))
// check recover table autoID.
tk.MustExec("insert into t_recover values (4),(5),(6)")
tk.MustQuery("select * from t_recover").Check(testkit.Rows("1", "2", "3", "4", "5", "6"))
}
func TestCancelJobByErrorCountLimit(t *testing.T) {
store, _ := createMockStoreAndDomain(t)
tk := testkit.NewTestKit(t, store)
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/mockExceedErrorLimit", `return(true)`))
defer func() {
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/mockExceedErrorLimit"))
}()
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
limit := variable.GetDDLErrorCountLimit()
tk.MustExec("set @@global.tidb_ddl_error_count_limit = 16")
err := util.LoadDDLVars(tk.Session())
require.NoError(t, err)
defer tk.MustExec(fmt.Sprintf("set @@global.tidb_ddl_error_count_limit = %d", limit))
err = tk.ExecToErr("create table t (a int)")
require.EqualError(t, err, "[ddl:-1]DDL job rollback, error msg: mock do job error")
}
func TestTruncateTableUpdateSchemaVersionErr(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/mockTruncateTableUpdateVersionError", `return(true)`))
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
limit := variable.GetDDLErrorCountLimit()
tk.MustExec("set @@global.tidb_ddl_error_count_limit = 5")
err := util.LoadDDLVars(tk.Session())
require.NoError(t, err)
defer tk.MustExec(fmt.Sprintf("set @@global.tidb_ddl_error_count_limit = %d", limit))
tk.MustExec("create table t (a int)")
err = tk.ExecToErr("truncate table t")
require.EqualError(t, err, "[ddl:-1]DDL job rollback, error msg: mock update version error")
// Disable fail point.
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/mockTruncateTableUpdateVersionError"))
tk.MustExec("truncate table t")
}
func TestCanceledJobTakeTime(t *testing.T) {
store, dom := testkit.CreateMockStoreAndDomain(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("create table t_cjtt(a int)")
hook := &ddl.TestDDLCallback{}
once := sync.Once{}
hook.OnJobRunBeforeExported = func(job *model.Job) {
once.Do(func() {
ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL)
err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error {
m := meta.NewMeta(txn)
err := m.GetAutoIDAccessors(job.SchemaID, job.TableID).Del()
if err != nil {
return err
}
return m.DropTableOrView(job.SchemaID, job.TableID)
})
require.NoError(t, err)
})
}
dom.DDL().SetHook(hook)
originalWT := ddl.GetWaitTimeWhenErrorOccurred()
ddl.SetWaitTimeWhenErrorOccurred(1 * time.Second)
defer func() { ddl.SetWaitTimeWhenErrorOccurred(originalWT) }()
startTime := time.Now()
tk.MustGetErrCode("alter table t_cjtt add column b int", mysql.ErrNoSuchTable)
sub := time.Since(startTime)
require.Less(t, sub, ddl.GetWaitTimeWhenErrorOccurred())
}
func TestTableLocksEnable(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("create table t1 (a int)")
// Test for enable table lock config.
defer config.RestoreFunc()()
config.UpdateGlobal(func(conf *config.Config) {
conf.EnableTableLock = false
})
tk.MustExec("lock tables t1 write")
tk.MustQuery("SHOW WARNINGS").Check(testkit.Rows("Warning 1235 LOCK TABLES is not supported. To enable this experimental feature, set 'enable-table-lock' in the configuration file."))
tbl := external.GetTableByName(t, tk, "test", "t1")
dom := domain.GetDomain(tk.Session())
require.NoError(t, dom.Reload())
require.Nil(t, tbl.Meta().Lock)
tk.MustExec("unlock tables")
tk.MustQuery("SHOW WARNINGS").Check(testkit.Rows("Warning 1235 UNLOCK TABLES is not supported. To enable this experimental feature, set 'enable-table-lock' in the configuration file."))
}
func TestAutoRandomOnTemporaryTable(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("drop table if exists auto_random_temporary")
err := tk.ExecToErr("create global temporary table auto_random_temporary (a bigint primary key auto_random(3), b varchar(255)) on commit delete rows")
require.Equal(t, core.ErrOptOnTemporaryTable.GenWithStackByArgs("auto_random").Error(), err.Error())
err = tk.ExecToErr("create temporary table t(a bigint key auto_random)")
require.Equal(t, core.ErrOptOnTemporaryTable.GenWithStackByArgs("auto_random").Error(), err.Error())
}
func TestAutoRandom(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("create database if not exists auto_random_db")
tk.MustExec("use auto_random_db")
databaseName, tableName := "auto_random_db", "t"
tk.MustExec("set @@allow_auto_random_explicit_insert = true")
assertInvalidAutoRandomErr := func(sql string, errMsg string, args ...interface{}) {
err := tk.ExecToErr(sql)
require.EqualError(t, err, dbterror.ErrInvalidAutoRandom.GenWithStackByArgs(fmt.Sprintf(errMsg, args...)).Error())
}
assertPKIsNotHandle := func(sql, errCol string) {
assertInvalidAutoRandomErr(sql, autoid.AutoRandomPKisNotHandleErrMsg, errCol)
}
assertAlterValue := func(sql string) {
assertInvalidAutoRandomErr(sql, autoid.AutoRandomAlterErrMsg)
}
assertOnlyChangeFromAutoIncPK := func(sql string) {
assertInvalidAutoRandomErr(sql, autoid.AutoRandomAlterChangeFromAutoInc)
}
assertDecreaseBitErr := func(sql string) {
assertInvalidAutoRandomErr(sql, autoid.AutoRandomDecreaseBitErrMsg)
}
assertWithAutoInc := func(sql string) {
assertInvalidAutoRandomErr(sql, autoid.AutoRandomIncompatibleWithAutoIncErrMsg)
}
assertOverflow := func(sql, colName string, maxAutoRandBits, actualAutoRandBits uint64) {
assertInvalidAutoRandomErr(sql, autoid.AutoRandomOverflowErrMsg, maxAutoRandBits, actualAutoRandBits, colName)
}
assertMaxOverflow := func(sql, colName string, autoRandBits uint64) {
assertInvalidAutoRandomErr(sql, autoid.AutoRandomOverflowErrMsg, autoid.AutoRandomShardBitsMax, autoRandBits, colName)
}
assertModifyColType := func(sql string) {
tk.MustGetErrCode(sql, errno.ErrUnsupportedDDLOperation)
}
assertDefault := func(sql string) {
assertInvalidAutoRandomErr(sql, autoid.AutoRandomIncompatibleWithDefaultValueErrMsg)
}
assertNonPositive := func(sql string) {
assertInvalidAutoRandomErr(sql, autoid.AutoRandomNonPositive)
}
assertBigIntOnly := func(sql, colType string) {
assertInvalidAutoRandomErr(sql, autoid.AutoRandomOnNonBigIntColumn, colType)
}
assertAddColumn := func(sql, colName string) {
assertInvalidAutoRandomErr(sql, autoid.AutoRandomAlterAddColumn, colName, databaseName, tableName)
}
mustExecAndDrop := func(sql string, fns ...func()) {
tk.MustExec(sql)
for _, f := range fns {
f()
}
tk.MustExec("drop table t")
}
// Only a bigint column can be set as auto_random.
assertBigIntOnly("create table t (a char primary key auto_random(3), b int)", "char")
assertBigIntOnly("create table t (a varchar(255) primary key auto_random(3), b int)", "varchar")
assertBigIntOnly("create table t (a timestamp primary key auto_random(3), b int)", "timestamp")
// PKIsHandle, but auto_random is defined on non-primary key.
assertPKIsNotHandle("create table t (a bigint auto_random (3) primary key, b bigint auto_random (3))", "b")
assertPKIsNotHandle("create table t (a bigint auto_random (3), b bigint auto_random(3), primary key(a))", "b")
assertPKIsNotHandle("create table t (a bigint auto_random (3), b bigint auto_random(3) primary key)", "a")
// PKIsNotHandle: no primary key.
assertPKIsNotHandle("create table t (a bigint auto_random(3), b int)", "a")
// PKIsNotHandle: primary key is not a single column.
assertPKIsNotHandle("create table t (a bigint auto_random(3), b bigint, primary key (a, b))", "a")
assertPKIsNotHandle("create table t (a bigint auto_random(3), b int, c char, primary key (a, c))", "a")
// PKIsNotHandle: nonclustered integer primary key.
assertPKIsNotHandle("create table t (a bigint auto_random(3) primary key nonclustered, b int)", "a")
assertPKIsNotHandle("create table t (a bigint auto_random(3) primary key nonclustered, b int)", "a")
assertPKIsNotHandle("create table t (a int, b bigint auto_random(3) primary key nonclustered)", "b")
// Can not set auto_random along with auto_increment.
assertWithAutoInc("create table t (a bigint auto_random(3) primary key auto_increment)")
assertWithAutoInc("create table t (a bigint primary key auto_increment auto_random(3))")
assertWithAutoInc("create table t (a bigint auto_increment primary key auto_random(3))")
assertWithAutoInc("create table t (a bigint auto_random(3) auto_increment, primary key (a))")
// Can not set auto_random along with default.
assertDefault("create table t (a bigint auto_random primary key default 3)")
assertDefault("create table t (a bigint auto_random(2) primary key default 5)")
mustExecAndDrop("create table t (a bigint auto_random primary key)", func() {
assertDefault("alter table t modify column a bigint auto_random default 3")
assertDefault("alter table t alter column a set default 3")
})
// Overflow data type max length.
assertMaxOverflow("create table t (a bigint auto_random(64) primary key)", "a", 64)
assertMaxOverflow("create table t (a bigint auto_random(16) primary key)", "a", 16)
mustExecAndDrop("create table t (a bigint auto_random(5) primary key)", func() {
assertMaxOverflow("alter table t modify a bigint auto_random(64)", "a", 64)
assertMaxOverflow("alter table t modify a bigint auto_random(16)", "a", 16)
})
assertNonPositive("create table t (a bigint auto_random(0) primary key)")
tk.MustGetErrMsg("create table t (a bigint auto_random(-1) primary key)",
`[parser:1064]You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 38 near "-1) primary key)" `)
// Basic usage.
mustExecAndDrop("create table t (a bigint auto_random(1) primary key)")
mustExecAndDrop("create table t (a bigint auto_random(4) primary key)")
mustExecAndDrop("create table t (a bigint auto_random(15) primary key)")
mustExecAndDrop("create table t (a bigint primary key auto_random(4))")
mustExecAndDrop("create table t (a bigint auto_random(4), primary key (a))")
// Increase auto_random bits.
mustExecAndDrop("create table t (a bigint auto_random(5) primary key)", func() {
tk.MustExec("alter table t modify a bigint auto_random(8)")
tk.MustExec("alter table t modify a bigint auto_random(10)")
tk.MustExec("alter table t modify a bigint auto_random(12)")
})
// Auto_random can occur multiple times like other column attributes.
mustExecAndDrop("create table t (a bigint auto_random(3) auto_random(2) primary key)")
mustExecAndDrop("create table t (a bigint, b bigint auto_random(3) primary key auto_random(2))")
mustExecAndDrop("create table t (a bigint auto_random(1) auto_random(2) auto_random(3), primary key (a))")
// Adding/dropping the auto_random attribute is not allowed.
mustExecAndDrop("create table t (a bigint auto_random(3) primary key)", func() {
assertAlterValue("alter table t modify column a bigint")
assertAlterValue("alter table t change column a b bigint")
})
mustExecAndDrop("create table t (a bigint, b char, c bigint auto_random(3), primary key(c))", func() {
assertAlterValue("alter table t modify column c bigint")
assertAlterValue("alter table t change column c d bigint")
})
mustExecAndDrop("create table t (a bigint primary key)", func() {
assertOnlyChangeFromAutoIncPK("alter table t modify column a bigint auto_random(3)")
})
mustExecAndDrop("create table t (a bigint, b bigint, primary key(a, b))", func() {
assertOnlyChangeFromAutoIncPK("alter table t modify column a bigint auto_random(3)")
assertOnlyChangeFromAutoIncPK("alter table t modify column b bigint auto_random(3)")
})
// Adding an auto_random column is not allowed.
mustExecAndDrop("create table t (a bigint)", func() {
assertAddColumn("alter table t add column b int auto_random", "b")
assertAddColumn("alter table t add column b bigint auto_random", "b")
assertAddColumn("alter table t add column b bigint auto_random primary key", "b")
})
mustExecAndDrop("create table t (a bigint, b bigint primary key)", func() {
assertAddColumn("alter table t add column c int auto_random", "c")
assertAddColumn("alter table t add column c bigint auto_random", "c")
assertAddColumn("alter table t add column c bigint auto_random primary key", "c")
})
// Decreasing the auto_random bits is not allowed.
mustExecAndDrop("create table t (a bigint auto_random(10) primary key)", func() {
assertDecreaseBitErr("alter table t modify column a bigint auto_random(6)")
})
mustExecAndDrop("create table t (a bigint auto_random(10) primary key)", func() {
assertDecreaseBitErr("alter table t modify column a bigint auto_random(1)")
})
originStep := autoid.GetStep()
autoid.SetStep(1)
// Increasing the auto_random bits fails once the shard bits would overlap with the incremental bits already allocated.
mustExecAndDrop("create table t (a bigint unsigned auto_random(5) primary key)", func() {
const alterTryCnt, rebaseOffset = 3, 1
insertSQL := fmt.Sprintf("insert into t values (%d)", ((1<<(64-10))-1)-rebaseOffset-alterTryCnt)
tk.MustExec(insertSQL)
// Try to rebase to 0..0011..1111 (54 `1`s).
tk.MustExec("alter table t modify a bigint unsigned auto_random(6)")
tk.MustExec("alter table t modify a bigint unsigned auto_random(10)")
assertOverflow("alter table t modify a bigint unsigned auto_random(11)", "a", 10, 11)
})
autoid.SetStep(originStep)
// Modifying the field type of an auto_random column is not allowed.
// Here the thrown error is `ERROR 8200 (HY000): Unsupported modify column: length 11 is less than origin 20`,
// instead of `ERROR 8216 (HY000): Invalid auto random: modifying the auto_random column type is not supported`,
// because the origin column is `bigint`, which cannot be changed to any other column type under TiDB's limitations.
mustExecAndDrop("create table t (a bigint primary key auto_random(3), b int)", func() {
assertModifyColType("alter table t modify column a int auto_random(3)")
assertModifyColType("alter table t modify column a mediumint auto_random(3)")
assertModifyColType("alter table t modify column a smallint auto_random(3)")
tk.MustExec("alter table t modify column b int")
tk.MustExec("alter table t modify column b bigint")
tk.MustExec("alter table t modify column a bigint auto_random(3)")
})
// Test that show warnings is correct when creating an auto_random table.
assertShowWarningCorrect := func(sql string, times int) {
mustExecAndDrop(sql, func() {
note := fmt.Sprintf(autoid.AutoRandomAvailableAllocTimesNote, times)
result := fmt.Sprintf("Note|1105|%s", note)
tk.MustQuery("show warnings").Check(testkit.RowsWithSep("|", result))
require.Equal(t, uint16(0), tk.Session().GetSessionVars().StmtCtx.WarningCount())
})
}
assertShowWarningCorrect("create table t (a bigint auto_random(15) primary key)", 281474976710655)
assertShowWarningCorrect("create table t (a bigint unsigned auto_random(15) primary key)", 562949953421311)
assertShowWarningCorrect("create table t (a bigint auto_random(1) primary key)", 4611686018427387903)
// Test that explicitly inserting into an auto_random column is not allowed by default.
assertExplicitInsertDisallowed := func(sql string) {
assertInvalidAutoRandomErr(sql, autoid.AutoRandomExplicitInsertDisabledErrMsg)
}
tk.MustExec("set @@allow_auto_random_explicit_insert = false")
mustExecAndDrop("create table t (a bigint auto_random primary key)", func() {
assertExplicitInsertDisallowed("insert into t values (1)")
assertExplicitInsertDisallowed("insert into t values (3)")
tk.MustExec("insert into t values()")
})
tk.MustExec("set @@allow_auto_random_explicit_insert = true")
mustExecAndDrop("create table t (a bigint auto_random primary key)", func() {
tk.MustExec("insert into t values(1)")
tk.MustExec("insert into t values(3)")
tk.MustExec("insert into t values()")
})
}
func TestAutoRandomWithRangeBits(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test;")
// Test normal usages.
tk.MustExec("create table t (a bigint auto_random(5, 64) primary key, b int);")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (a bigint unsigned auto_random(5, 32) primary key, b int);")
// Test creating an auto_random table with invalid range bits.
expectErr := dbterror.ErrInvalidAutoRandom
tk.MustExec("drop table if exists t;")
err := tk.ExecToErr("create table t (a bigint auto_random(5, 31) primary key, b int);")
require.EqualError(t, err, expectErr.FastGenByArgs(fmt.Sprintf(autoid.AutoRandomInvalidRangeBits, 32, 64, 31)).Error())
err = tk.ExecToErr("create table t (a bigint auto_random(5, 65) primary key, b int);")
require.EqualError(t, err, expectErr.FastGenByArgs(fmt.Sprintf(autoid.AutoRandomInvalidRangeBits, 32, 64, 65)).Error())
err = tk.ExecToErr("create table t (a bigint auto_random(15, 32) primary key, b int);")
require.EqualError(t, err, expectErr.FastGenByArgs(autoid.AutoRandomIncrementalBitsTooSmall).Error())
// Altering the range bits of an auto_random column is not supported.
tk.MustExec("create table t (a bigint auto_random(5, 64) primary key, b int);")
err = tk.ExecToErr("alter table t modify column a bigint auto_random(5, 32);")
require.EqualError(t, err, expectErr.FastGenByArgs(autoid.AutoRandomUnsupportedAlterRangeBits).Error())
tk.MustExec("alter table t modify column a bigint auto_random(15, 64);")
}
func TestAutoRandomWithPreSplitRegion(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("create database if not exists auto_random_db")
tk.MustExec("use auto_random_db")
origin := atomic.LoadUint32(&ddl.EnableSplitTableRegion)
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
defer atomic.StoreUint32(&ddl.EnableSplitTableRegion, origin)
tk.MustExec("set @@global.tidb_scatter_region=1")
// Test pre-split table region for auto_random table.
tk.MustExec("create table t (a bigint auto_random(2) primary key clustered, b int) pre_split_regions=2")
re := tk.MustQuery("show table t regions")
rows := re.Rows()
require.Len(t, rows, 4)
tbl := external.GetTableByName(t, tk, "auto_random_db", "t") //nolint:typecheck
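// pre_split_regions=2 gives 4 regions; for a signed clustered handle the split keys are 1<<61, 2<<61 and 3<<61.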
require.Equal(t, fmt.Sprintf("t_%d_r_2305843009213693952", tbl.Meta().ID), rows[1][1])
require.Equal(t, fmt.Sprintf("t_%d_r_4611686018427387904", tbl.Meta().ID), rows[2][1])
require.Equal(t, fmt.Sprintf("t_%d_r_6917529027641081856", tbl.Meta().ID), rows[3][1])
tk.MustExec("drop table t;")
tk.MustExec("create table t (a bigint auto_random(2, 32) primary key clustered, b int) pre_split_regions=2;")
rows = tk.MustQuery("show table t regions;").Rows()
tbl = external.GetTableByName(t, tk, "auto_random_db", "t") //nolint:typecheck
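// With range bits 32 on a signed column, the split keys shrink to 1<<29, 2<<29 and 3<<29.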
require.Equal(t, fmt.Sprintf("t_%d_r_536870912", tbl.Meta().ID), rows[1][1])
require.Equal(t, fmt.Sprintf("t_%d_r_1073741824", tbl.Meta().ID), rows[2][1])
require.Equal(t, fmt.Sprintf("t_%d_r_1610612736", tbl.Meta().ID), rows[3][1])
tk.MustExec("drop table t;")
tk.MustExec("create table t (a bigint unsigned auto_random(2, 32) primary key clustered, b int) pre_split_regions=2;")
rows = tk.MustQuery("show table t regions;").Rows()
tbl = external.GetTableByName(t, tk, "auto_random_db", "t") //nolint:typecheck
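// An unsigned column uses the full 32-bit range, so the split keys are 1<<30, 2<<30 and 3<<30.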
require.Equal(t, fmt.Sprintf("t_%d_r_1073741824", tbl.Meta().ID), rows[1][1])
require.Equal(t, fmt.Sprintf("t_%d_r_2147483648", tbl.Meta().ID), rows[2][1])
require.Equal(t, fmt.Sprintf("t_%d_r_3221225472", tbl.Meta().ID), rows[3][1])
}
func TestModifyingColumn4NewCollations(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("create database dct")
tk.MustExec("use dct")
tk.MustExec("create table t(b varchar(10) collate utf8_bin, c varchar(10) collate utf8_general_ci) collate utf8_bin")
// Column collation can be changed as long as there is no index defined.
tk.MustExec("alter table t modify b varchar(10) collate utf8_general_ci")
tk.MustExec("alter table t modify c varchar(10) collate utf8_bin")
tk.MustExec("alter table t modify c varchar(10) collate utf8_unicode_ci")
tk.MustExec("alter table t charset utf8 collate utf8_general_ci")
tk.MustExec("alter table t convert to charset utf8 collate utf8_bin")
tk.MustExec("alter table t convert to charset utf8 collate utf8_unicode_ci")
tk.MustExec("alter table t convert to charset utf8 collate utf8_general_ci")
tk.MustExec("alter table t modify b varchar(10) collate utf8_unicode_ci")
tk.MustExec("alter table t modify b varchar(10) collate utf8_bin")
tk.MustExec("alter table t add index b_idx(b)")
tk.MustExec("alter table t add index c_idx(c)")
tk.MustGetErrMsg("alter table t modify b varchar(10) collate utf8_general_ci", "[ddl:8200]Unsupported modifying collation of column 'b' from 'utf8_bin' to 'utf8_general_ci' when index is defined on it.")
tk.MustGetErrMsg("alter table t modify c varchar(10) collate utf8_bin", "[ddl:8200]Unsupported modifying collation of column 'c' from 'utf8_general_ci' to 'utf8_bin' when index is defined on it.")
tk.MustGetErrMsg("alter table t modify c varchar(10) collate utf8_unicode_ci", "[ddl:8200]Unsupported modifying collation of column 'c' from 'utf8_general_ci' to 'utf8_unicode_ci' when index is defined on it.")
tk.MustGetErrMsg("alter table t convert to charset utf8 collate utf8_general_ci", "[ddl:8200]Unsupported converting collation of column 'b' from 'utf8_bin' to 'utf8_general_ci' when index is defined on it.")
// Changing to a compatible collation is allowed.
tk.MustExec("alter table t modify c varchar(10) collate utf8mb4_general_ci")
// Changing the default collation of a table is allowed.
tk.MustExec("alter table t collate utf8mb4_general_ci")
tk.MustExec("alter table t charset utf8mb4 collate utf8mb4_bin")
tk.MustExec("alter table t charset utf8mb4 collate utf8mb4_unicode_ci")
tk.MustExec("alter table t charset utf8mb4 collate utf8mb4_zh_pinyin_tidb_as_cs")
// Changing the default collation of a database is allowed.
tk.MustExec("alter database dct charset utf8mb4 collate utf8mb4_general_ci")
}
func TestForbidUnsupportedCollations(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
mustGetUnsupportedCollation := func(sql string, coll string) {
tk.MustGetErrMsg(sql, fmt.Sprintf("[ddl:1273]Unsupported collation when new collation is enabled: '%s'", coll))
}
// Test default collation of database.
mustGetUnsupportedCollation("create database ucd charset utf8mb4 collate utf8mb4_roman_ci", "utf8mb4_roman_ci")
mustGetUnsupportedCollation("create database ucd charset utf8 collate utf8_roman_ci", "utf8_roman_ci")
tk.MustExec("create database ucd")
mustGetUnsupportedCollation("alter database ucd charset utf8mb4 collate utf8mb4_roman_ci", "utf8mb4_roman_ci")
mustGetUnsupportedCollation("alter database ucd collate utf8mb4_roman_ci", "utf8mb4_roman_ci")
// Test default collation of table.
tk.MustExec("use ucd")
mustGetUnsupportedCollation("create table t(a varchar(20)) charset utf8mb4 collate utf8mb4_roman_ci", "utf8mb4_roman_ci")
mustGetUnsupportedCollation("create table t(a varchar(20)) collate utf8_roman_ci", "utf8_roman_ci")
tk.MustExec("create table t(a varchar(20)) collate utf8mb4_general_ci")
mustGetUnsupportedCollation("alter table t default collate utf8mb4_roman_ci", "utf8mb4_roman_ci")
mustGetUnsupportedCollation("alter table t convert to charset utf8mb4 collate utf8mb4_roman_ci", "utf8mb4_roman_ci")
// Test collation of columns.
mustGetUnsupportedCollation("create table t1(a varchar(20)) collate utf8mb4_roman_ci", "utf8mb4_roman_ci")
mustGetUnsupportedCollation("create table t1(a varchar(20)) charset utf8 collate utf8_roman_ci", "utf8_roman_ci")
tk.MustExec("create table t1(a varchar(20))")
mustGetUnsupportedCollation("alter table t1 modify a varchar(20) collate utf8mb4_roman_ci", "utf8mb4_roman_ci")
mustGetUnsupportedCollation("alter table t1 modify a varchar(20) charset utf8 collate utf8_roman_ci", "utf8_roman_ci")
//nolint:revive,all_revive
mustGetUnsupportedCollation("alter table t1 modify a varchar(20) charset utf8 collate utf8_roman_ci", "utf8_roman_ci")
// TODO(bb7133): fix the following cases by setting charset from collate firstly.
// mustGetUnsupportedCollation("create database ucd collate utf8mb4_unicode_ci", errMsgUnsupportedUnicodeCI)
// mustGetUnsupportedCollation("alter table t convert to collate utf8mb4_unicode_ci", "utf8mb4_unicode_ci")
}
func TestCreateTableNoBlock(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/checkOwnerCheckAllVersionsWaitTime", `return(true)`))
defer func() {
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/checkOwnerCheckAllVersionsWaitTime"))
}()
save := variable.GetDDLErrorCountLimit()
tk.MustExec("set @@global.tidb_ddl_error_count_limit = 1")
defer func() {
tk.MustExec(fmt.Sprintf("set @@global.tidb_ddl_error_count_limit = %v", save))
}()
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
require.Error(t, tk.ExecToErr("create table t(a int)"))
}
func TestCheckEnumLength(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustGetErrCode("create table t1 (a enum('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))", errno.ErrTooLongValueForType)
tk.MustGetErrCode("create table t1 (a set('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))", errno.ErrTooLongValueForType)
tk.MustExec("create table t2 (id int primary key)")
tk.MustGetErrCode("alter table t2 add a enum('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')", errno.ErrTooLongValueForType)
tk.MustGetErrCode("alter table t2 add a set('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')", errno.ErrTooLongValueForType)
config.UpdateGlobal(func(conf *config.Config) {
conf.EnableEnumLengthLimit = false
})
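// With the enum/set length limit disabled, over-long elements are accepted.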
tk.MustExec("create table t3 (a enum('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))")
tk.MustExec("insert into t3 values(1)")
tk.MustQuery("select a from t3").Check(testkit.Rows("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))
tk.MustExec("create table t4 (a set('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))")
config.UpdateGlobal(func(conf *config.Config) {
conf.EnableEnumLengthLimit = true
})
tk.MustGetErrCode("create table t5 (a enum('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))", errno.ErrTooLongValueForType)
tk.MustGetErrCode("create table t5 (a set('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))", errno.ErrTooLongValueForType)
tk.MustExec("drop table if exists t1,t2,t3,t4,t5")
}
func TestGetReverseKey(t *testing.T) {
var cluster testutils.Cluster
store, dom := testkit.CreateMockStoreAndDomain(t,
mockstore.WithClusterInspector(func(c testutils.Cluster) {
mockstore.BootstrapWithSingleStore(c)
cluster = c
}))
tk := testkit.NewTestKit(t, store)
tk.MustExec("create database db_get")
tk.MustExec("use db_get")
tk.MustExec("create table test_get(a bigint not null primary key, b bigint)")
insertVal := func(val int) {
sql := fmt.Sprintf("insert into test_get value(%d, %d)", val, val)
tk.MustExec(sql)
}
insertVal(math.MinInt64)
insertVal(math.MinInt64 + 1)
insertVal(1 << 61)
insertVal(3 << 61)
insertVal(math.MaxInt64)
insertVal(math.MaxInt64 - 1)
// Get table ID for split.
is := dom.InfoSchema()
tbl, err := is.TableByName(model.NewCIStr("db_get"), model.NewCIStr("test_get"))
require.NoError(t, err)
// Split the table.
tableStart := tablecodec.GenTableRecordPrefix(tbl.Meta().ID)
cluster.SplitKeys(tableStart, tableStart.PrefixNext(), 4)
tk.MustQuery("select * from test_get order by a").Check(testkit.Rows("-9223372036854775808 -9223372036854775808",
"-9223372036854775807 -9223372036854775807",
"2305843009213693952 2305843009213693952",
"6917529027641081856 6917529027641081856",
"9223372036854775806 9223372036854775806",
"9223372036854775807 9223372036854775807",
))
minKey := tablecodec.EncodeRecordKey(tbl.RecordPrefix(), kv.IntHandle(math.MinInt64))
maxKey := tablecodec.EncodeRecordKey(tbl.RecordPrefix(), kv.IntHandle(math.MaxInt64))
checkRet := func(startKey, endKey, retKey kv.Key) {
h, err := ddl.GetMaxRowID(store, 0, tbl, startKey, endKey)
require.NoError(t, err)
require.Equal(t, 0, h.Cmp(retKey))
}
// [minInt64, minInt64]
checkRet(minKey, minKey, minKey)
// [minInt64, 1<<61-1]
endKey := tablecodec.EncodeRecordKey(tbl.RecordPrefix(), kv.IntHandle(1<<61-1))
retKey := tablecodec.EncodeRecordKey(tbl.RecordPrefix(), kv.IntHandle(math.MinInt64+1))
checkRet(minKey, endKey, retKey)
// [1<<61, 2<<61]
startKey := tablecodec.EncodeRecordKey(tbl.RecordPrefix(), kv.IntHandle(1<<61))
endKey = tablecodec.EncodeRecordKey(tbl.RecordPrefix(), kv.IntHandle(2<<61))
checkRet(startKey, endKey, startKey)
// [3<<61, maxInt64]
startKey = tablecodec.EncodeRecordKey(tbl.RecordPrefix(), kv.IntHandle(3<<61))
endKey = maxKey
checkRet(startKey, endKey, endKey)
}
func TestLocalTemporaryTableBlockedDDL(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("create table t1 (id int)")
tk.MustExec("create temporary table tmp1 (id int primary key, a int unique, b int)")
require.ErrorIs(t, tk.ExecToErr("rename table tmp1 to tmp2"), dbterror.ErrUnsupportedLocalTempTableDDL)
require.ErrorIs(t, tk.ExecToErr("alter table tmp1 add column c int"), dbterror.ErrUnsupportedLocalTempTableDDL)
require.ErrorIs(t, tk.ExecToErr("alter table tmp1 add index b(b)"), dbterror.ErrUnsupportedLocalTempTableDDL)
require.ErrorIs(t, tk.ExecToErr("create index a on tmp1(b)"), dbterror.ErrUnsupportedLocalTempTableDDL)
require.ErrorIs(t, tk.ExecToErr("drop index a on tmp1"), dbterror.ErrUnsupportedLocalTempTableDDL)
require.ErrorIs(t, tk.ExecToErr("lock tables tmp1 read"), dbterror.ErrUnsupportedLocalTempTableDDL)
require.ErrorIs(t, tk.ExecToErr("lock tables tmp1 write"), dbterror.ErrUnsupportedLocalTempTableDDL)
require.ErrorIs(t, tk.ExecToErr("lock tables t1 read, tmp1 read"), dbterror.ErrUnsupportedLocalTempTableDDL)
require.ErrorIs(t, tk.ExecToErr("admin cleanup table lock tmp1"), dbterror.ErrUnsupportedLocalTempTableDDL)
}
| ddl/serial_test.go | 0 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.00018279225332662463,
0.000171625884831883,
0.00016229150060098618,
0.00017197200213558972,
0.0000034161980693170335
] |
{
"id": 7,
"code_window": [
"// NeedSetRCCheckTSFlag checks whether it's needed to set `RCCheckTS` flag in current stmtctx.\n",
"func NeedSetRCCheckTSFlag(ctx sessionctx.Context, node ast.Node) bool {\n",
"\tsessionVars := ctx.GetSessionVars()\n",
"\tif sessionVars.ConnectionID > 0 && sessionVars.RcReadCheckTS && sessionVars.InTxn() &&\n",
"\t\t!sessionVars.RetryInfo.Retrying && plannercore.IsReadOnly(node, sessionVars) {\n",
"\t\treturn true\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif sessionVars.ConnectionID > 0 && variable.EnableRCReadCheckTS.Load() && sessionVars.InTxn() &&\n"
],
"file_path": "sessiontxn/isolation/readcommitted.go",
"type": "replace",
"edit_start_line_idx": 103
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"flag"
"fmt"
"io/fs"
"os"
"runtime"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/coreos/go-semver/semver"
"github.com/opentracing/opentracing-go"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/log"
"github.com/pingcap/tidb/bindinfo"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/domain/infosync"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/terror"
parsertypes "github.com/pingcap/tidb/parser/types"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/plugin"
"github.com/pingcap/tidb/privilege/privileges"
"github.com/pingcap/tidb/server"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/session/txninfo"
"github.com/pingcap/tidb/sessionctx/binloginfo"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
kvstore "github.com/pingcap/tidb/store"
"github.com/pingcap/tidb/store/driver"
"github.com/pingcap/tidb/store/mockstore"
uni_metrics "github.com/pingcap/tidb/store/mockstore/unistore/metrics"
pumpcli "github.com/pingcap/tidb/tidb-binlog/pump_client"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/cpuprofile"
"github.com/pingcap/tidb/util/deadlockhistory"
"github.com/pingcap/tidb/util/disk"
"github.com/pingcap/tidb/util/domainutil"
"github.com/pingcap/tidb/util/kvcache"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/printer"
"github.com/pingcap/tidb/util/sem"
"github.com/pingcap/tidb/util/signal"
"github.com/pingcap/tidb/util/sys/linux"
storageSys "github.com/pingcap/tidb/util/sys/storage"
"github.com/pingcap/tidb/util/systimemon"
"github.com/pingcap/tidb/util/topsql"
"github.com/pingcap/tidb/util/versioninfo"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/push"
"github.com/tikv/client-go/v2/tikv"
"github.com/tikv/client-go/v2/txnkv/transaction"
pd "github.com/tikv/pd/client"
"go.uber.org/automaxprocs/maxprocs"
"go.uber.org/zap"
)
// Flag Names
const (
nmVersion = "V"
nmConfig = "config"
nmConfigCheck = "config-check"
nmConfigStrict = "config-strict"
nmStore = "store"
nmStorePath = "path"
nmHost = "host"
nmAdvertiseAddress = "advertise-address"
nmPort = "P"
nmCors = "cors"
nmSocket = "socket"
nmEnableBinlog = "enable-binlog"
nmRunDDL = "run-ddl"
nmLogLevel = "L"
nmLogFile = "log-file"
nmLogSlowQuery = "log-slow-query"
nmReportStatus = "report-status"
nmStatusHost = "status-host"
nmStatusPort = "status"
nmMetricsAddr = "metrics-addr"
nmMetricsInterval = "metrics-interval"
nmDdlLease = "lease"
nmTokenLimit = "token-limit"
nmPluginDir = "plugin-dir"
nmPluginLoad = "plugin-load"
nmRepairMode = "repair-mode"
nmRepairList = "repair-list"
nmTempDir = "temp-dir"
nmProxyProtocolNetworks = "proxy-protocol-networks"
nmProxyProtocolHeaderTimeout = "proxy-protocol-header-timeout"
nmAffinityCPU = "affinity-cpus"
nmInitializeSecure = "initialize-secure"
nmInitializeInsecure = "initialize-insecure"
)
var (
version = flagBoolean(nmVersion, false, "print version information and exit")
configPath = flag.String(nmConfig, "", "config file path")
configCheck = flagBoolean(nmConfigCheck, false, "check config file validity and exit")
configStrict = flagBoolean(nmConfigStrict, false, "enforce config file validity")
// Base
store = flag.String(nmStore, "unistore", "registered store name, [tikv, mocktikv, unistore]")
storePath = flag.String(nmStorePath, "/tmp/tidb", "tidb storage path")
host = flag.String(nmHost, "0.0.0.0", "tidb server host")
advertiseAddress = flag.String(nmAdvertiseAddress, "", "tidb server advertise IP")
port = flag.String(nmPort, "4000", "tidb server port")
cors = flag.String(nmCors, "", "tidb server allow cors origin")
socket = flag.String(nmSocket, "/tmp/tidb-{Port}.sock", "The socket file to use for connection.")
enableBinlog = flagBoolean(nmEnableBinlog, false, "enable generate binlog")
runDDL = flagBoolean(nmRunDDL, true, "run ddl worker on this tidb-server")
ddlLease = flag.String(nmDdlLease, "45s", "schema lease duration, very dangerous to change only if you know what you do")
tokenLimit = flag.Int(nmTokenLimit, 1000, "the limit of concurrent executed sessions")
pluginDir = flag.String(nmPluginDir, "/data/deploy/plugin", "the folder that hold plugin")
pluginLoad = flag.String(nmPluginLoad, "", "wait load plugin name(separated by comma)")
affinityCPU = flag.String(nmAffinityCPU, "", "affinity cpu (cpu-no. separated by comma, e.g. 1,2,3)")
repairMode = flagBoolean(nmRepairMode, false, "enable admin repair mode")
repairList = flag.String(nmRepairList, "", "admin repair table list")
tempDir = flag.String(nmTempDir, config.DefTempDir, "tidb temporary directory")
// Log
logLevel = flag.String(nmLogLevel, "info", "log level: info, debug, warn, error, fatal")
logFile = flag.String(nmLogFile, "", "log file path")
logSlowQuery = flag.String(nmLogSlowQuery, "", "slow query file path")
// Status
reportStatus = flagBoolean(nmReportStatus, true, "If enable status report HTTP service.")
statusHost = flag.String(nmStatusHost, "0.0.0.0", "tidb server status host")
statusPort = flag.String(nmStatusPort, "10080", "tidb server status port")
metricsAddr = flag.String(nmMetricsAddr, "", "prometheus pushgateway address, leaves it empty will disable prometheus push.")
metricsInterval = flag.Uint(nmMetricsInterval, 15, "prometheus client push interval in second, set \"0\" to disable prometheus push.")
// PROXY Protocol
proxyProtocolNetworks = flag.String(nmProxyProtocolNetworks, "", "proxy protocol networks allowed IP or *, empty mean disable proxy protocol support")
proxyProtocolHeaderTimeout = flag.Uint(nmProxyProtocolHeaderTimeout, 5, "proxy protocol header read timeout, unit is second. (Deprecated: as proxy protocol using lazy mode, header read timeout no longer used)")
// Security
initializeSecure = flagBoolean(nmInitializeSecure, false, "bootstrap tidb-server in secure mode")
initializeInsecure = flagBoolean(nmInitializeInsecure, true, "bootstrap tidb-server in insecure mode")
)
func main() {
help := flag.Bool("help", false, "show the usage")
flag.Parse()
if *help {
flag.Usage()
os.Exit(0)
}
config.InitializeConfig(*configPath, *configCheck, *configStrict, overrideConfig)
if *version {
setVersions()
fmt.Println(printer.GetTiDBInfo())
os.Exit(0)
}
registerStores()
registerMetrics()
if variable.EnableTmpStorageOnOOM.Load() {
config.GetGlobalConfig().UpdateTempStoragePath()
err := disk.InitializeTempDir()
terror.MustNil(err)
checkTempStorageQuota()
}
setupLog()
err := cpuprofile.StartCPUProfiler()
terror.MustNil(err)
// Enable failpoints in tikv/client-go if the test API is enabled.
// It is set in the main function, before any use of client-go, to prevent data races.
if _, err := failpoint.Status("github.com/pingcap/tidb/server/enableTestAPI"); err == nil {
warnMsg := "tikv/client-go failpoint is enabled, this should NOT happen in the production environment"
logutil.BgLogger().Warn(warnMsg)
tikv.EnableFailpoints()
}
setGlobalVars()
setCPUAffinity()
setupTracing() // Should before createServer and after setup config.
printInfo()
setupBinlogClient()
setupMetrics()
storage, dom := createStoreAndDomain()
svr := createServer(storage, dom)
// The error-registering API is not thread-safe; the caller MUST NOT register errors after initialization.
// To prevent misuse, set a flag so that registering a new error will panic immediately.
// This guards against regressions of issues like https://github.com/pingcap/tidb/issues/28190.
terror.RegisterFinish()
exited := make(chan struct{})
signal.SetupSignalHandler(func(graceful bool) {
svr.Close()
cleanup(svr, storage, dom, graceful)
cpuprofile.StopCPUProfiler()
close(exited)
})
topsql.SetupTopSQL()
terror.MustNil(svr.Run())
<-exited
syncLog()
}
func syncLog() {
if err := log.Sync(); err != nil {
// Don't complain about /dev/stdout as Fsync will return EINVAL.
if pathErr, ok := err.(*fs.PathError); ok {
if pathErr.Path == "/dev/stdout" {
os.Exit(0)
}
}
fmt.Fprintln(os.Stderr, "sync log err:", err)
os.Exit(1)
}
}
func checkTempStorageQuota() {
// check capacity and the quota when EnableTmpStorageOnOOM is enabled
c := config.GetGlobalConfig()
if c.TempStorageQuota < 0 {
// means unlimited, do nothing
} else {
capacityByte, err := storageSys.GetTargetDirectoryCapacity(c.TempStoragePath)
if err != nil {
log.Fatal(err.Error())
} else if capacityByte < uint64(c.TempStorageQuota) {
log.Fatal(fmt.Sprintf("value of [tmp-storage-quota](%d byte) exceeds the capacity(%d byte) of the [%s] directory", c.TempStorageQuota, capacityByte, c.TempStoragePath))
}
}
}
func setCPUAffinity() {
if affinityCPU == nil || len(*affinityCPU) == 0 {
return
}
var cpu []int
for _, af := range strings.Split(*affinityCPU, ",") {
af = strings.TrimSpace(af)
if len(af) > 0 {
c, err := strconv.Atoi(af)
if err != nil {
fmt.Fprintf(os.Stderr, "wrong affinity cpu config: %s", *affinityCPU)
os.Exit(1)
}
cpu = append(cpu, c)
}
}
err := linux.SetAffinity(cpu)
if err != nil {
fmt.Fprintf(os.Stderr, "set cpu affinity failure: %v", err)
os.Exit(1)
}
runtime.GOMAXPROCS(len(cpu))
metrics.MaxProcs.Set(float64(runtime.GOMAXPROCS(0)))
}
func registerStores() {
err := kvstore.Register("tikv", driver.TiKVDriver{})
terror.MustNil(err)
err = kvstore.Register("mocktikv", mockstore.MockTiKVDriver{})
terror.MustNil(err)
err = kvstore.Register("unistore", mockstore.EmbedUnistoreDriver{})
terror.MustNil(err)
}
func registerMetrics() {
metrics.RegisterMetrics()
if config.GetGlobalConfig().Store == "unistore" {
uni_metrics.RegisterMetrics()
}
}
func createStoreAndDomain() (kv.Storage, *domain.Domain) {
cfg := config.GetGlobalConfig()
fullPath := fmt.Sprintf("%s://%s", cfg.Store, cfg.Path)
var err error
storage, err := kvstore.New(fullPath)
terror.MustNil(err)
err = infosync.CheckTiKVVersion(storage, *semver.New(versioninfo.TiKVMinVersion))
terror.MustNil(err)
// Bootstrap a session to load information schema.
dom, err := session.BootstrapSession(storage)
terror.MustNil(err)
return storage, dom
}
func setupBinlogClient() {
cfg := config.GetGlobalConfig()
if !cfg.Binlog.Enable {
return
}
if cfg.Binlog.IgnoreError {
binloginfo.SetIgnoreError(true)
}
var (
client *pumpcli.PumpsClient
err error
)
securityOption := pd.SecurityOption{
CAPath: cfg.Security.ClusterSSLCA,
CertPath: cfg.Security.ClusterSSLCert,
KeyPath: cfg.Security.ClusterSSLKey,
}
if len(cfg.Binlog.BinlogSocket) == 0 {
client, err = pumpcli.NewPumpsClient(cfg.Path, cfg.Binlog.Strategy, parseDuration(cfg.Binlog.WriteTimeout), securityOption)
} else {
client, err = pumpcli.NewLocalPumpsClient(cfg.Path, cfg.Binlog.BinlogSocket, parseDuration(cfg.Binlog.WriteTimeout), securityOption)
}
terror.MustNil(err)
err = logutil.InitLogger(cfg.Log.ToLogConfig())
terror.MustNil(err)
binloginfo.SetPumpsClient(client)
log.Info("tidb-server", zap.Bool("create pumps client success, ignore binlog error", cfg.Binlog.IgnoreError))
}
// Prometheus push.
const zeroDuration = time.Duration(0)
// pushMetric pushes metrics in background.
func pushMetric(addr string, interval time.Duration) {
if interval == zeroDuration || len(addr) == 0 {
log.Info("disable Prometheus push client")
return
}
log.Info("start prometheus push client", zap.String("server addr", addr), zap.String("interval", interval.String()))
go prometheusPushClient(addr, interval)
}
// prometheusPushClient pushes metrics to Prometheus Pushgateway.
func prometheusPushClient(addr string, interval time.Duration) {
// TODO: TiDB does not have a unique name, so we compose one from host+port.
job := "tidb"
pusher := push.New(addr, job)
pusher = pusher.Gatherer(prometheus.DefaultGatherer)
pusher = pusher.Grouping("instance", instanceName())
for {
err := pusher.Push()
if err != nil {
log.Error("could not push metrics to prometheus pushgateway", zap.String("err", err.Error()))
}
time.Sleep(interval)
}
}
func instanceName() string {
cfg := config.GetGlobalConfig()
hostname, err := os.Hostname()
if err != nil {
return "unknown"
}
return fmt.Sprintf("%s_%d", hostname, cfg.Port)
}
// parseDuration parses lease argument string.
func parseDuration(lease string) time.Duration {
dur, err := time.ParseDuration(lease)
if err != nil {
dur, err = time.ParseDuration(lease + "s")
}
if err != nil || dur < 0 {
log.Fatal("invalid lease duration", zap.String("lease", lease))
}
return dur
}
func flagBoolean(name string, defaultVal bool, usage string) *bool {
if !defaultVal {
// Fix #4125: golang does not print the default false value in usage, so we append it.
usage = fmt.Sprintf("%s (default false)", usage)
return flag.Bool(name, defaultVal, usage)
}
return flag.Bool(name, defaultVal, usage)
}
// overrideConfig considers command arguments and overrides some config items in the Config.
func overrideConfig(cfg *config.Config) {
actualFlags := make(map[string]bool)
flag.Visit(func(f *flag.Flag) {
actualFlags[f.Name] = true
})
// Base
if actualFlags[nmHost] {
cfg.Host = *host
}
if actualFlags[nmAdvertiseAddress] {
var err error
if len(strings.Split(*advertiseAddress, " ")) > 1 {
err = errors.Errorf("Only support one advertise-address")
}
terror.MustNil(err)
cfg.AdvertiseAddress = *advertiseAddress
}
if len(cfg.AdvertiseAddress) == 0 && cfg.Host == "0.0.0.0" {
cfg.AdvertiseAddress = util.GetLocalIP()
}
if len(cfg.AdvertiseAddress) == 0 {
cfg.AdvertiseAddress = cfg.Host
}
var err error
if actualFlags[nmPort] {
var p int
p, err = strconv.Atoi(*port)
terror.MustNil(err)
cfg.Port = uint(p)
}
if actualFlags[nmCors] {
fmt.Println(cors)
cfg.Cors = *cors
}
if actualFlags[nmStore] {
cfg.Store = *store
}
if actualFlags[nmStorePath] {
cfg.Path = *storePath
}
if actualFlags[nmSocket] {
cfg.Socket = *socket
}
if actualFlags[nmEnableBinlog] {
cfg.Binlog.Enable = *enableBinlog
}
if actualFlags[nmRunDDL] {
cfg.Instance.TiDBEnableDDL.Store(*runDDL)
}
if actualFlags[nmDdlLease] {
cfg.Lease = *ddlLease
}
if actualFlags[nmTokenLimit] {
cfg.TokenLimit = uint(*tokenLimit)
}
if actualFlags[nmPluginLoad] {
cfg.Instance.PluginLoad = *pluginLoad
}
if actualFlags[nmPluginDir] {
cfg.Instance.PluginDir = *pluginDir
}
if actualFlags[nmRepairMode] {
cfg.RepairMode = *repairMode
}
if actualFlags[nmRepairList] {
if cfg.RepairMode {
cfg.RepairTableList = stringToList(*repairList)
}
}
if actualFlags[nmTempDir] {
cfg.TempDir = *tempDir
}
// Log
if actualFlags[nmLogLevel] {
cfg.Log.Level = *logLevel
}
if actualFlags[nmLogFile] {
cfg.Log.File.Filename = *logFile
}
if actualFlags[nmLogSlowQuery] {
cfg.Log.SlowQueryFile = *logSlowQuery
}
// Status
if actualFlags[nmReportStatus] {
cfg.Status.ReportStatus = *reportStatus
}
if actualFlags[nmStatusHost] {
cfg.Status.StatusHost = *statusHost
}
if actualFlags[nmStatusPort] {
var p int
p, err = strconv.Atoi(*statusPort)
terror.MustNil(err)
cfg.Status.StatusPort = uint(p)
}
if actualFlags[nmMetricsAddr] {
cfg.Status.MetricsAddr = *metricsAddr
}
if actualFlags[nmMetricsInterval] {
cfg.Status.MetricsInterval = *metricsInterval
}
// PROXY Protocol
if actualFlags[nmProxyProtocolNetworks] {
cfg.ProxyProtocol.Networks = *proxyProtocolNetworks
}
if actualFlags[nmProxyProtocolHeaderTimeout] {
cfg.ProxyProtocol.HeaderTimeout = *proxyProtocolHeaderTimeout
}
// Sanity check: can't specify both options
if actualFlags[nmInitializeSecure] && actualFlags[nmInitializeInsecure] {
err = fmt.Errorf("the options --initialize-insecure and --initialize-secure are mutually exclusive")
terror.MustNil(err)
}
// The option --initialize-secure=true ensures that a secure bootstrap is used.
if actualFlags[nmInitializeSecure] {
cfg.Security.SecureBootstrap = *initializeSecure
}
// The option --initialize-insecure=true/false was used.
// Store the inverted value of this to the secure bootstrap cfg item
if actualFlags[nmInitializeInsecure] {
cfg.Security.SecureBootstrap = !*initializeInsecure
}
// Secure bootstrap initializes with Socket authentication
// which is not supported on windows. Only the insecure bootstrap
// method is supported.
if runtime.GOOS == "windows" && cfg.Security.SecureBootstrap {
err = fmt.Errorf("the option --initialize-secure is not supported on Windows")
terror.MustNil(err)
}
}
func setVersions() {
cfg := config.GetGlobalConfig()
if len(cfg.ServerVersion) > 0 {
mysql.ServerVersion = cfg.ServerVersion
}
if len(cfg.TiDBEdition) > 0 {
versioninfo.TiDBEdition = cfg.TiDBEdition
}
if len(cfg.TiDBReleaseVersion) > 0 {
mysql.TiDBReleaseVersion = cfg.TiDBReleaseVersion
}
}
func setGlobalVars() {
cfg := config.GetGlobalConfig()
// config.DeprecatedOptions records the config options that should be moved to [instance] section.
for _, deprecatedOption := range config.DeprecatedOptions {
for oldName := range deprecatedOption.NameMappings {
switch deprecatedOption.SectionName {
case "":
switch oldName {
case "check-mb4-value-in-utf8":
cfg.Instance.CheckMb4ValueInUTF8.Store(cfg.CheckMb4ValueInUTF8.Load())
case "enable-collect-execution-info":
cfg.Instance.EnableCollectExecutionInfo = cfg.EnableCollectExecutionInfo
case "max-server-connections":
cfg.Instance.MaxConnections = cfg.MaxServerConnections
case "run-ddl":
cfg.Instance.TiDBEnableDDL.Store(cfg.RunDDL)
}
case "log":
switch oldName {
case "enable-slow-log":
cfg.Instance.EnableSlowLog.Store(cfg.Log.EnableSlowLog.Load())
case "slow-threshold":
cfg.Instance.SlowThreshold = cfg.Log.SlowThreshold
case "record-plan-in-slow-log":
cfg.Instance.RecordPlanInSlowLog = cfg.Log.RecordPlanInSlowLog
}
case "performance":
switch oldName {
case "force-priority":
cfg.Instance.ForcePriority = cfg.Performance.ForcePriority
case "memory-usage-alarm-ratio":
cfg.Instance.MemoryUsageAlarmRatio = cfg.Performance.MemoryUsageAlarmRatio
}
case "plugin":
switch oldName {
case "load":
cfg.Instance.PluginLoad = cfg.Plugin.Load
case "dir":
cfg.Instance.PluginDir = cfg.Plugin.Dir
}
default:
}
}
}
// Disable automaxprocs log
nopLog := func(string, ...interface{}) {}
_, err := maxprocs.Set(maxprocs.Logger(nopLog))
terror.MustNil(err)
// We should respect the user's settings in the config file.
// The default value of MaxProcs is 0; runtime.GOMAXPROCS(0) is a no-op.
runtime.GOMAXPROCS(int(cfg.Performance.MaxProcs))
metrics.MaxProcs.Set(float64(runtime.GOMAXPROCS(0)))
util.SetGOGC(cfg.Performance.GOGC)
ddlLeaseDuration := parseDuration(cfg.Lease)
session.SetSchemaLease(ddlLeaseDuration)
statsLeaseDuration := parseDuration(cfg.Performance.StatsLease)
session.SetStatsLease(statsLeaseDuration)
indexUsageSyncLeaseDuration := parseDuration(cfg.Performance.IndexUsageSyncLease)
session.SetIndexUsageSyncLease(indexUsageSyncLeaseDuration)
planReplayerGCLease := parseDuration(cfg.Performance.PlanReplayerGCLease)
session.SetPlanReplayerGCLease(planReplayerGCLease)
bindinfo.Lease = parseDuration(cfg.Performance.BindInfoLease)
statistics.RatioOfPseudoEstimate.Store(cfg.Performance.PseudoEstimateRatio)
if cfg.SplitTable {
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
}
plannercore.AllowCartesianProduct.Store(cfg.Performance.CrossJoin)
privileges.SkipWithGrant = cfg.Security.SkipGrantTable
kv.TxnTotalSizeLimit = cfg.Performance.TxnTotalSizeLimit
if cfg.Performance.TxnEntrySizeLimit > 120*1024*1024 {
log.Fatal("cannot set txn entry size limit larger than 120M")
}
kv.TxnEntrySizeLimit = cfg.Performance.TxnEntrySizeLimit
priority := mysql.Str2Priority(cfg.Instance.ForcePriority)
variable.ForcePriority = int32(priority)
variable.ProcessGeneralLog.Store(cfg.Instance.TiDBGeneralLog)
variable.EnablePProfSQLCPU.Store(cfg.Instance.EnablePProfSQLCPU)
atomic.StoreUint32(&variable.DDLSlowOprThreshold, cfg.Instance.DDLSlowOprThreshold)
atomic.StoreUint64(&variable.ExpensiveQueryTimeThreshold, cfg.Instance.ExpensiveQueryTimeThreshold)
if len(cfg.ServerVersion) > 0 {
mysql.ServerVersion = cfg.ServerVersion
variable.SetSysVar(variable.Version, cfg.ServerVersion)
}
if len(cfg.TiDBEdition) > 0 {
versioninfo.TiDBEdition = cfg.TiDBEdition
variable.SetSysVar(variable.VersionComment, "TiDB Server (Apache License 2.0) "+versioninfo.TiDBEdition+" Edition, MySQL 5.7 compatible")
}
if len(cfg.VersionComment) > 0 {
variable.SetSysVar(variable.VersionComment, cfg.VersionComment)
}
if len(cfg.TiDBReleaseVersion) > 0 {
mysql.TiDBReleaseVersion = cfg.TiDBReleaseVersion
}
variable.SetSysVar(variable.TiDBForcePriority, mysql.Priority2Str[priority])
variable.SetSysVar(variable.TiDBOptDistinctAggPushDown, variable.BoolToOnOff(cfg.Performance.DistinctAggPushDown))
variable.SetSysVar(variable.TiDBOptProjectionPushDown, variable.BoolToOnOff(cfg.Performance.ProjectionPushDown))
variable.SetSysVar(variable.LogBin, variable.BoolToOnOff(cfg.Binlog.Enable))
variable.SetSysVar(variable.Port, fmt.Sprintf("%d", cfg.Port))
cfg.Socket = strings.Replace(cfg.Socket, "{Port}", fmt.Sprintf("%d", cfg.Port), 1)
variable.SetSysVar(variable.Socket, cfg.Socket)
variable.SetSysVar(variable.DataDir, cfg.Path)
variable.SetSysVar(variable.TiDBSlowQueryFile, cfg.Log.SlowQueryFile)
variable.SetSysVar(variable.TiDBIsolationReadEngines, strings.Join(cfg.IsolationRead.Engines, ","))
variable.SetSysVar(variable.TiDBEnforceMPPExecution, variable.BoolToOnOff(config.GetGlobalConfig().Performance.EnforceMPP))
variable.MemoryUsageAlarmRatio.Store(cfg.Instance.MemoryUsageAlarmRatio)
if hostname, err := os.Hostname(); err == nil {
variable.SetSysVar(variable.Hostname, hostname)
}
variable.GlobalLogMaxDays.Store(int32(config.GetGlobalConfig().Log.File.MaxDays))
if cfg.Security.EnableSEM {
sem.Enable()
}
// For the CI environment we enable prepare-plan-cache by default.
if config.CheckTableBeforeDrop { // only for test
variable.SetSysVar(variable.TiDBEnablePrepPlanCache, variable.BoolToOnOff(true))
}
// use server-memory-quota as max-plan-cache-memory
plannercore.PreparedPlanCacheMaxMemory.Store(cfg.Performance.ServerMemoryQuota)
total, err := memory.MemTotal()
terror.MustNil(err)
// if server-memory-quota is larger than max-system-memory or not set, use max-system-memory as max-plan-cache-memory
if plannercore.PreparedPlanCacheMaxMemory.Load() > total || plannercore.PreparedPlanCacheMaxMemory.Load() <= 0 {
plannercore.PreparedPlanCacheMaxMemory.Store(total)
}
atomic.StoreUint64(&transaction.CommitMaxBackoff, uint64(parseDuration(cfg.TiKVClient.CommitTimeout).Seconds()*1000))
tikv.SetRegionCacheTTLSec(int64(cfg.TiKVClient.RegionCacheTTL))
domainutil.RepairInfo.SetRepairMode(cfg.RepairMode)
domainutil.RepairInfo.SetRepairTableList(cfg.RepairTableList)
executor.GlobalDiskUsageTracker.SetBytesLimit(cfg.TempStorageQuota)
if cfg.Performance.ServerMemoryQuota < 1 {
// If MaxMemory equals 0, it means unlimited
executor.GlobalMemoryUsageTracker.SetBytesLimit(-1)
} else {
executor.GlobalMemoryUsageTracker.SetBytesLimit(int64(cfg.Performance.ServerMemoryQuota))
}
kvcache.GlobalLRUMemUsageTracker.AttachToGlobalTracker(executor.GlobalMemoryUsageTracker)
t, err := time.ParseDuration(cfg.TiKVClient.StoreLivenessTimeout)
if err != nil || t < 0 {
logutil.BgLogger().Fatal("invalid duration value for store-liveness-timeout",
zap.String("currentValue", cfg.TiKVClient.StoreLivenessTimeout))
}
tikv.SetStoreLivenessTimeout(t)
parsertypes.TiDBStrictIntegerDisplayWidth = cfg.DeprecateIntegerDisplayWidth
deadlockhistory.GlobalDeadlockHistory.Resize(cfg.PessimisticTxn.DeadlockHistoryCapacity)
txninfo.Recorder.ResizeSummaries(cfg.TrxSummary.TransactionSummaryCapacity)
txninfo.Recorder.SetMinDuration(time.Duration(cfg.TrxSummary.TransactionIDDigestMinDuration) * time.Millisecond)
}
func setupLog() {
cfg := config.GetGlobalConfig()
err := logutil.InitLogger(cfg.Log.ToLogConfig())
terror.MustNil(err)
// trigger internal http(s) client init.
util.InternalHTTPClient()
}
func printInfo() {
// Make sure the TiDB info is always printed.
level := log.GetLevel()
log.SetLevel(zap.InfoLevel)
printer.PrintTiDBInfo()
log.SetLevel(level)
}
func createServer(storage kv.Storage, dom *domain.Domain) *server.Server {
cfg := config.GetGlobalConfig()
driver := server.NewTiDBDriver(storage)
svr, err := server.NewServer(cfg, driver)
// Both domain and storage have started, so we have to clean them before exiting.
if err != nil {
closeDomainAndStorage(storage, dom)
log.Fatal("failed to create the server", zap.Error(err), zap.Stack("stack"))
}
svr.SetDomain(dom)
svr.InitGlobalConnID(dom.ServerID)
go dom.ExpensiveQueryHandle().SetSessionManager(svr).Run()
dom.InfoSyncer().SetSessionManager(svr)
return svr
}
func setupMetrics() {
cfg := config.GetGlobalConfig()
// Enable the mutex profile, 1/10 of mutex blocking event sampling.
runtime.SetMutexProfileFraction(10)
systimeErrHandler := func() {
metrics.TimeJumpBackCounter.Inc()
}
callBackCount := 0
successCallBack := func() {
callBackCount++
// It is called back by the monitor once per second; we increase metrics.KeepAliveCounter every 5s.
if callBackCount >= 5 {
callBackCount = 0
metrics.KeepAliveCounter.Inc()
}
}
go systimemon.StartMonitor(time.Now, systimeErrHandler, successCallBack)
pushMetric(cfg.Status.MetricsAddr, time.Duration(cfg.Status.MetricsInterval)*time.Second)
}
func setupTracing() {
cfg := config.GetGlobalConfig()
tracingCfg := cfg.OpenTracing.ToTracingConfig()
tracingCfg.ServiceName = "TiDB"
tracer, _, err := tracingCfg.NewTracer()
if err != nil {
log.Fatal("setup jaeger tracer failed", zap.String("error message", err.Error()))
}
opentracing.SetGlobalTracer(tracer)
}
func closeDomainAndStorage(storage kv.Storage, dom *domain.Domain) {
tikv.StoreShuttingDown(1)
dom.Close()
err := storage.Close()
terror.Log(errors.Trace(err))
}
func cleanup(svr *server.Server, storage kv.Storage, dom *domain.Domain, graceful bool) {
if graceful {
done := make(chan struct{})
svr.GracefulDown(context.Background(), done)
} else {
svr.TryGracefulDown()
}
plugin.Shutdown(context.Background())
closeDomainAndStorage(storage, dom)
disk.CleanUp()
topsql.Close()
}
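// stringToList splits a repair-table-list value such as ["db.t1","db.t2"] into its elements.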
func stringToList(repairString string) []string {
if len(repairString) <= 0 {
return []string{}
}
if repairString[0] == '[' && repairString[len(repairString)-1] == ']' {
repairString = repairString[1 : len(repairString)-1]
}
return strings.FieldsFunc(repairString, func(r rune) bool {
return r == ',' || r == ' ' || r == '"'
})
}
| tidb-server/main.go | 1 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.002162875374779105,
0.00021523176110349596,
0.00016377064457628876,
0.00016821976169012487,
0.00025309837656095624
] |
{
"id": 7,
"code_window": [
"// NeedSetRCCheckTSFlag checks whether it's needed to set `RCCheckTS` flag in current stmtctx.\n",
"func NeedSetRCCheckTSFlag(ctx sessionctx.Context, node ast.Node) bool {\n",
"\tsessionVars := ctx.GetSessionVars()\n",
"\tif sessionVars.ConnectionID > 0 && sessionVars.RcReadCheckTS && sessionVars.InTxn() &&\n",
"\t\t!sessionVars.RetryInfo.Retrying && plannercore.IsReadOnly(node, sessionVars) {\n",
"\t\treturn true\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif sessionVars.ConnectionID > 0 && variable.EnableRCReadCheckTS.Load() && sessionVars.InTxn() &&\n"
],
"file_path": "sessiontxn/isolation/readcommitted.go",
"type": "replace",
"edit_start_line_idx": 103
} | #!/bin/sh
#
# Copyright 2019 PingCAP, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eu
# reset the substitution if the last run failed halfway
# on BSD/macOS, sed -i must be followed by a string to use as the backup filename extension
sed -i.bak 's/new/old/g' "tests/lightning_tidb_duplicate_data/data/dup.dup.sql" && rm tests/lightning_tidb_duplicate_data/data/dup.dup.sql.bak
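# exercise each on-duplicate strategy of the TiDB backend in turn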
for type in replace ignore error; do
run_sql 'DROP DATABASE IF EXISTS dup;'
export GO_FAILPOINTS="github.com/pingcap/tidb/br/pkg/lightning/backend/tidb/FailIfImportedSomeRows=return"
set +e
run_lightning --config "tests/$TEST_NAME/$type.toml" 2> /dev/null
ERRORCODE=$?
set -e
[ "$ERRORCODE" -ne 0 ]
# backup original sql to dup.dup.sql.bak
sed -i.bak 's/old/new/g' "tests/lightning_tidb_duplicate_data/data/dup.dup.sql"
unset GO_FAILPOINTS
if [ $type = 'error' ]; then
set +e
run_lightning --config "tests/$TEST_NAME/$type.toml" --log-file "$TEST_DIR/lightning-error-on-dup.log"
ERRORCODE=$?
set -e
[ "$ERRORCODE" -ne 0 ]
tail -20 "$TEST_DIR/lightning-error-on-dup.log" > "$TEST_DIR/lightning-error-on-dup.tail"
grep -Fq 'Duplicate entry' "$TEST_DIR/lightning-error-on-dup.tail"
elif [ $type = 'replace' ]; then
run_lightning --config "tests/$TEST_NAME/$type.toml"
run_sql 'SELECT count(*) FROM dup.dup'
check_contains 'count(*): 2'
run_sql 'SELECT d FROM dup.dup WHERE pk = 1'
check_contains 'd: new'
run_sql 'SELECT d FROM dup.dup WHERE pk = 2'
check_contains 'd: new'
elif [ $type = 'ignore' ]; then
run_lightning --config "tests/$TEST_NAME/$type.toml"
run_sql 'SELECT count(*) FROM dup.dup'
check_contains 'count(*): 2'
run_sql 'SELECT d FROM dup.dup WHERE pk = 1'
check_contains 'd: old'
run_sql 'SELECT d FROM dup.dup WHERE pk = 2'
check_contains 'd: new'
fi
mv tests/lightning_tidb_duplicate_data/data/dup.dup.sql.bak tests/lightning_tidb_duplicate_data/data/dup.dup.sql
done
| br/tests/lightning_tidb_duplicate_data/run.sh | 0 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.0001754738186718896,
0.0001719370629871264,
0.00016752745432313532,
0.000172195810591802,
0.00000290307798422873
] |
{
"id": 7,
"code_window": [
"// NeedSetRCCheckTSFlag checks whether it's needed to set `RCCheckTS` flag in current stmtctx.\n",
"func NeedSetRCCheckTSFlag(ctx sessionctx.Context, node ast.Node) bool {\n",
"\tsessionVars := ctx.GetSessionVars()\n",
"\tif sessionVars.ConnectionID > 0 && sessionVars.RcReadCheckTS && sessionVars.InTxn() &&\n",
"\t\t!sessionVars.RetryInfo.Retrying && plannercore.IsReadOnly(node, sessionVars) {\n",
"\t\treturn true\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif sessionVars.ConnectionID > 0 && variable.EnableRCReadCheckTS.Load() && sessionVars.InTxn() &&\n"
],
"file_path": "sessiontxn/isolation/readcommitted.go",
"type": "replace",
"edit_start_line_idx": 103
} | // Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"bytes"
"fmt"
"strings"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/mysql"
driver "github.com/pingcap/tidb/types/parser_driver"
"github.com/pingcap/tidb/util/collate"
"github.com/pingcap/tidb/util/stringutil"
)
const (
fieldKey = "field"
tableKey = "table"
databaseKey = "database"
collationKey = "collation"
)
var (
_ ShowPredicateExtractor = &ShowBaseExtractor{}
)
// ShowPredicateExtractor is used to extract some predicates from the `PatternLikeExpr` clause
// and push them down to the data-retrieving stage when reading from a memory table for a ShowStmt.
//
// e.g:
// SHOW COLUMNS FROM t LIKE '%abc%'
// We must request all components from the memory table, and filter the result by the PatternLikeExpr predicate.
//
// It is a way to fix https://github.com/pingcap/tidb/issues/29910.
type ShowPredicateExtractor interface {
// Extract predicates which can be pushed down and returns whether the extractor can extract predicates.
Extract() bool
explainInfo() string
Field() string
FieldPatternLike() collate.WildcardPattern
}
// ShowBaseExtractor is the definition of base extractor for derived predicates.
type ShowBaseExtractor struct {
ast.ShowStmt
field string
fieldPattern string
}
func newShowBaseExtractor(showStatement ast.ShowStmt) ShowPredicateExtractor {
return &ShowBaseExtractor{ShowStmt: showStatement}
}
// Extract implements the ShowPredicateExtractor interface.
func (e *ShowBaseExtractor) Extract() bool {
show := e.ShowStmt
if show.Pattern != nil && show.Pattern.Pattern != nil {
pattern := show.Pattern
switch pattern.Pattern.(type) {
case *driver.ValueExpr:
// It is used in `SHOW XXXX in t LIKE `abc``.
ptn := pattern.Pattern.(*driver.ValueExpr).GetString()
patValue, patTypes := stringutil.CompilePattern(ptn, pattern.Escape)
if stringutil.IsExactMatch(patTypes) {
e.field = strings.ToLower(string(patValue))
return true
}
e.fieldPattern = strings.ToLower(string(patValue))
return true
case *ast.ColumnNameExpr:
// It is used in `SHOW COLUMNS FROM t LIKE abc`.
// MySQL does not support this syntax and returns an error.
return false
}
} else if show.Column != nil && show.Column.Name.L != "" {
// It is used in `DESCRIBE t COLUMN`.
e.field = show.Column.Name.L
return true
}
return false
}
// explainInfo implements the ShowPredicateExtractor interface.
func (e *ShowBaseExtractor) explainInfo() string {
key := ""
switch e.ShowStmt.Tp {
case ast.ShowVariables, ast.ShowColumns:
key = fieldKey
case ast.ShowTables, ast.ShowTableStatus:
key = tableKey
case ast.ShowDatabases:
key = databaseKey
case ast.ShowCollation:
key = collationKey
}
r := new(bytes.Buffer)
if len(e.field) > 0 {
r.WriteString(fmt.Sprintf("%s:[%s], ", key, e.field))
}
if len(e.fieldPattern) > 0 {
r.WriteString(fmt.Sprintf("%s_pattern:[%s], ", key, e.fieldPattern))
}
// remove the last ", " in the message info
s := r.String()
if len(s) > 2 {
return s[:len(s)-2]
}
return s
}
// Field will return the variable `field` in ShowBaseExtractor
func (e *ShowBaseExtractor) Field() string {
return e.field
}
// FieldPatternLike will return compiled collate.WildcardPattern
func (e *ShowBaseExtractor) FieldPatternLike() collate.WildcardPattern {
if e.fieldPattern == "" {
return nil
}
fieldPatternsLike := collate.GetCollatorByID(collate.CollationName2ID(mysql.UTF8MB4DefaultCollation)).Pattern()
fieldPatternsLike.Compile(e.fieldPattern, byte('\\'))
return fieldPatternsLike
}
| planner/core/show_predicate_extractor.go | 0 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.0001760958111844957,
0.00016897713067010045,
0.00016508983389940113,
0.00016860001778695732,
0.000003011859462276334
] |
{
"id": 7,
"code_window": [
"// NeedSetRCCheckTSFlag checks whether it's needed to set `RCCheckTS` flag in current stmtctx.\n",
"func NeedSetRCCheckTSFlag(ctx sessionctx.Context, node ast.Node) bool {\n",
"\tsessionVars := ctx.GetSessionVars()\n",
"\tif sessionVars.ConnectionID > 0 && sessionVars.RcReadCheckTS && sessionVars.InTxn() &&\n",
"\t\t!sessionVars.RetryInfo.Retrying && plannercore.IsReadOnly(node, sessionVars) {\n",
"\t\treturn true\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif sessionVars.ConnectionID > 0 && variable.EnableRCReadCheckTS.Load() && sessionVars.InTxn() &&\n"
],
"file_path": "sessiontxn/isolation/readcommitted.go",
"type": "replace",
"edit_start_line_idx": 103
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package graceshutdown
import (
"context"
"database/sql"
"flag"
"fmt"
"os"
"os/exec"
"testing"
"time"
_ "github.com/go-sql-driver/mysql"
"github.com/pingcap/errors"
"github.com/pingcap/log"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
var (
tidbBinaryPath = flag.String("s", "bin/tidb-server", "tidb server binary path")
tmpPath = flag.String("tmp", "/tmp/tidb_gracefulshutdown", "temporary files path")
tidbStartPort = flag.Int("tidb_start_port", 5500, "first tidb server listening port")
tidbStatusPort = flag.Int("tidb_status_port", 8500, "first tidb server status port")
)
func startTiDBWithoutPD(port int, statusPort int) (cmd *exec.Cmd, err error) {
cmd = exec.Command(*tidbBinaryPath,
"--store=mocktikv",
fmt.Sprintf("--path=%s/mocktikv", *tmpPath),
fmt.Sprintf("-P=%d", port),
fmt.Sprintf("--status=%d", statusPort),
fmt.Sprintf("--log-file=%s/tidb%d.log", *tmpPath, port))
log.Info("starting tidb", zap.Any("cmd", cmd))
err = cmd.Start()
if err != nil {
return nil, errors.Trace(err)
}
time.Sleep(500 * time.Millisecond)
return cmd, nil
}
func stopService(name string, cmd *exec.Cmd) (err error) {
if err = cmd.Process.Signal(os.Interrupt); err != nil {
return errors.Trace(err)
}
log.Info("service Interrupt", zap.String("name", name))
if err = cmd.Wait(); err != nil {
return errors.Trace(err)
}
log.Info("service stopped gracefully", zap.String("name", name))
return nil
}
func connectTiDB(port int) (db *sql.DB, err error) {
addr := fmt.Sprintf("127.0.0.1:%d", port)
dsn := fmt.Sprintf("root@(%s)/test", addr)
sleepTime := 250 * time.Millisecond
startTime := time.Now()
maxRetry := 10
for i := 0; i < maxRetry; i++ {
db, err = sql.Open("mysql", dsn)
if err != nil {
log.Warn("open addr failed",
zap.String("addr", addr),
zap.Int("retry count", i),
zap.Error(err),
)
continue
}
err = db.Ping()
if err == nil {
break
}
log.Warn("ping addr failed",
zap.String("addr", addr),
zap.Int("retry count", i),
zap.Error(err),
)
err1 := db.Close()
if err1 != nil {
log.Warn("close db failed", zap.Int("retry count", i), zap.Error(err1))
break
}
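		// Back off before the next attempt; the sleep interval doubles on every retry.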
time.Sleep(sleepTime)
sleepTime += sleepTime
}
if err != nil {
log.Error("connect to server addr failed",
zap.String("addr", addr),
zap.Duration("take time", time.Since(startTime)),
zap.Error(err),
)
return nil, errors.Trace(err)
}
db.SetMaxOpenConns(10)
log.Info("connect to server ok", zap.String("addr", addr))
return db, nil
}
func TestGracefulShutdown(t *testing.T) {
port := *tidbStartPort + 1
tidb, err := startTiDBWithoutPD(port, *tidbStatusPort)
require.NoError(t, err)
db, err := connectTiDB(port)
require.NoError(t, err)
defer func() {
err := db.Close()
require.NoError(t, err)
}()
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second))
defer cancel()
conn1, err := db.Conn(ctx)
require.NoError(t, err)
defer func() {
require.NoError(t, conn1.Close())
}()
_, err = conn1.ExecContext(ctx, "drop table if exists t;")
require.NoError(t, err)
_, err = conn1.ExecContext(ctx, "create table t(a int);")
require.NoError(t, err)
_, err = conn1.ExecContext(ctx, "insert into t values(1);")
require.NoError(t, err)
done := make(chan struct{})
go func() {
time.Sleep(time.Second)
err = stopService("tidb", tidb)
require.NoError(t, err)
close(done)
}()
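	// The query below sleeps for about 3 seconds while the server is interrupted after 1 second,
	// so it only succeeds if the server drains in-flight statements before shutting down.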
sql := `select 1 from t where not (select sleep(3)) ;`
var a int64
err = conn1.QueryRowContext(ctx, sql).Scan(&a)
require.NoError(t, err)
require.Equal(t, a, int64(1))
<-done
}
| tests/graceshutdown/graceshutdown_test.go | 0 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.00021485352772288024,
0.00017132391803897917,
0.0001639306137803942,
0.0001668139302637428,
0.000011821635780506767
] |
{
"id": 8,
"code_window": [
"\n",
"\tvariable.ProcessGeneralLog.Store(cfg.Instance.TiDBGeneralLog)\n",
"\tvariable.EnablePProfSQLCPU.Store(cfg.Instance.EnablePProfSQLCPU)\n",
"\tatomic.StoreUint32(&variable.DDLSlowOprThreshold, cfg.Instance.DDLSlowOprThreshold)\n",
"\tatomic.StoreUint64(&variable.ExpensiveQueryTimeThreshold, cfg.Instance.ExpensiveQueryTimeThreshold)\n",
"\n",
"\tif len(cfg.ServerVersion) > 0 {\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tvariable.EnableRCReadCheckTS.Store(cfg.Instance.TiDBRCReadCheckTS)\n"
],
"file_path": "tidb-server/main.go",
"type": "add",
"edit_start_line_idx": 640
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"math"
"os"
"os/user"
"path/filepath"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/BurntSushi/toml"
"github.com/pingcap/errors"
zaplog "github.com/pingcap/log"
logbackupconf "github.com/pingcap/tidb/br/pkg/streamhelper/config"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/tikvutil"
"github.com/pingcap/tidb/util/versioninfo"
tikvcfg "github.com/tikv/client-go/v2/config"
tracing "github.com/uber/jaeger-client-go/config"
atomicutil "go.uber.org/atomic"
"go.uber.org/zap"
)
// Config number limitations
const (
MaxLogFileSize = 4096 // MB
// DefTxnEntrySizeLimit is the default value of TxnEntrySizeLimit.
DefTxnEntrySizeLimit = 6 * 1024 * 1024
	// DefTxnTotalSizeLimit is the default value of TxnTotalSizeLimit.
DefTxnTotalSizeLimit = 100 * 1024 * 1024
// DefMaxIndexLength is the maximum index length(in bytes). This value is consistent with MySQL.
DefMaxIndexLength = 3072
// DefMaxOfMaxIndexLength is the maximum index length(in bytes) for TiDB v3.0.7 and previous version.
DefMaxOfMaxIndexLength = 3072 * 4
// DefIndexLimit is the limitation of index on a single table. This value is consistent with MySQL.
DefIndexLimit = 64
// DefMaxOfIndexLimit is the maximum limitation of index on a single table for TiDB.
DefMaxOfIndexLimit = 64 * 8
// DefPort is the default port of TiDB
DefPort = 4000
// DefStatusPort is the default status port of TiDB
DefStatusPort = 10080
// DefHost is the default host of TiDB
DefHost = "0.0.0.0"
// DefStatusHost is the default status host of TiDB
DefStatusHost = "0.0.0.0"
// DefTableColumnCountLimit is limit of the number of columns in a table
DefTableColumnCountLimit = 1017
// DefMaxOfTableColumnCountLimit is maximum limitation of the number of columns in a table
DefMaxOfTableColumnCountLimit = 4096
// DefStatsLoadConcurrencyLimit is limit of the concurrency of stats-load
DefStatsLoadConcurrencyLimit = 1
// DefMaxOfStatsLoadConcurrencyLimit is maximum limitation of the concurrency of stats-load
DefMaxOfStatsLoadConcurrencyLimit = 128
// DefStatsLoadQueueSizeLimit is limit of the size of stats-load request queue
DefStatsLoadQueueSizeLimit = 1
// DefMaxOfStatsLoadQueueSizeLimit is maximum limitation of the size of stats-load request queue
DefMaxOfStatsLoadQueueSizeLimit = 100000
	// DefDDLSlowOprThreshold is the default threshold for logging DDL operations whose execution time exceeds it.
DefDDLSlowOprThreshold = 300
// DefExpensiveQueryTimeThreshold indicates the time threshold of expensive query.
DefExpensiveQueryTimeThreshold = 60
	// DefMemoryUsageAlarmRatio is the memory usage threshold of a tidb-server instance that triggers an alarm when exceeded.
DefMemoryUsageAlarmRatio = 0.8
// DefTempDir is the default temporary directory path for TiDB.
DefTempDir = "/tmp/tidb"
)
// Valid config maps
var (
ValidStorage = map[string]bool{
"mocktikv": true,
"tikv": true,
"unistore": true,
}
// CheckTableBeforeDrop enable to execute `admin check table` before `drop table`.
CheckTableBeforeDrop = false
// checkBeforeDropLDFlag is a go build flag.
checkBeforeDropLDFlag = "None"
// tempStorageDirName is the default temporary storage dir name by base64 encoding a string `port/statusPort`
tempStorageDirName = encodeDefTempStorageDir(os.TempDir(), DefHost, DefStatusHost, DefPort, DefStatusPort)
)
// InstanceConfigSection indicates a config section that has options moved to the [instance] section.
type InstanceConfigSection struct {
// SectionName indicates the origin section name.
SectionName string
// NameMappings maps the origin name to the name in [instance].
NameMappings map[string]string
}
var (
// sectionMovedToInstance records all config section and options that should be moved to [instance].
sectionMovedToInstance = []InstanceConfigSection{
{
"",
map[string]string{
"check-mb4-value-in-utf8": "tidb_check_mb4_value_in_utf8",
"enable-collect-execution-info": "tidb_enable_collect_execution_info",
"max-server-connections": "max_connections",
"run-ddl": "tidb_enable_ddl",
},
},
{
"log",
map[string]string{
"enable-slow-log": "tidb_enable_slow_log",
"slow-threshold": "tidb_slow_log_threshold",
"record-plan-in-slow-log": "tidb_record_plan_in_slow_log",
},
},
{
"performance",
map[string]string{
"force-priority": "tidb_force_priority",
"memory-usage-alarm-ratio": "tidb_memory_usage_alarm_ratio",
},
},
{
"plugin",
map[string]string{
"load": "plugin_load",
"dir": "plugin_dir",
},
},
}
	// ConflictOptions indicates the conflicting config options existing in both [instance] and other sections in the config file.
ConflictOptions []InstanceConfigSection
// DeprecatedOptions indicates the config options existing in some other sections in config file.
// They should be moved to [instance] section.
DeprecatedOptions []InstanceConfigSection
// TikvConfigLock protects against concurrent tikv config refresh
TikvConfigLock sync.Mutex
)
// Config contains configuration options.
type Config struct {
Host string `toml:"host" json:"host"`
AdvertiseAddress string `toml:"advertise-address" json:"advertise-address"`
Port uint `toml:"port" json:"port"`
Cors string `toml:"cors" json:"cors"`
Store string `toml:"store" json:"store"`
Path string `toml:"path" json:"path"`
Socket string `toml:"socket" json:"socket"`
Lease string `toml:"lease" json:"lease"`
SplitTable bool `toml:"split-table" json:"split-table"`
TokenLimit uint `toml:"token-limit" json:"token-limit"`
TempDir string `toml:"temp-dir" json:"temp-dir"`
TempStoragePath string `toml:"tmp-storage-path" json:"tmp-storage-path"`
	// TempStorageQuota describes the temporary storage quota during query execution when TiDBEnableTmpStorageOnOOM is enabled.
	// If the quota exceeds the capacity of the TempStoragePath, the tidb-server would exit with a fatal error.
TempStorageQuota int64 `toml:"tmp-storage-quota" json:"tmp-storage-quota"` // Bytes
TxnLocalLatches tikvcfg.TxnLocalLatches `toml:"-" json:"-"`
ServerVersion string `toml:"server-version" json:"server-version"`
VersionComment string `toml:"version-comment" json:"version-comment"`
TiDBEdition string `toml:"tidb-edition" json:"tidb-edition"`
TiDBReleaseVersion string `toml:"tidb-release-version" json:"tidb-release-version"`
Log Log `toml:"log" json:"log"`
Instance Instance `toml:"instance" json:"instance"`
Security Security `toml:"security" json:"security"`
Status Status `toml:"status" json:"status"`
Performance Performance `toml:"performance" json:"performance"`
PreparedPlanCache PreparedPlanCache `toml:"prepared-plan-cache" json:"prepared-plan-cache"`
OpenTracing OpenTracing `toml:"opentracing" json:"opentracing"`
ProxyProtocol ProxyProtocol `toml:"proxy-protocol" json:"proxy-protocol"`
PDClient tikvcfg.PDClient `toml:"pd-client" json:"pd-client"`
TiKVClient tikvcfg.TiKVClient `toml:"tikv-client" json:"tikv-client"`
Binlog Binlog `toml:"binlog" json:"binlog"`
CompatibleKillQuery bool `toml:"compatible-kill-query" json:"compatible-kill-query"`
PessimisticTxn PessimisticTxn `toml:"pessimistic-txn" json:"pessimistic-txn"`
MaxIndexLength int `toml:"max-index-length" json:"max-index-length"`
IndexLimit int `toml:"index-limit" json:"index-limit"`
TableColumnCountLimit uint32 `toml:"table-column-count-limit" json:"table-column-count-limit"`
GracefulWaitBeforeShutdown int `toml:"graceful-wait-before-shutdown" json:"graceful-wait-before-shutdown"`
// AlterPrimaryKey is used to control alter primary key feature.
AlterPrimaryKey bool `toml:"alter-primary-key" json:"alter-primary-key"`
	// TreatOldVersionUTF8AsUTF8MB4 is used to treat old version table/column UTF8 charset as UTF8MB4. This is for compatibility.
	// Dynamic modification is currently not supported, because it would need to reload all old-version schemas.
TreatOldVersionUTF8AsUTF8MB4 bool `toml:"treat-old-version-utf8-as-utf8mb4" json:"treat-old-version-utf8-as-utf8mb4"`
// EnableTableLock indicate whether enable table lock.
// TODO: remove this after table lock features stable.
EnableTableLock bool `toml:"enable-table-lock" json:"enable-table-lock"`
DelayCleanTableLock uint64 `toml:"delay-clean-table-lock" json:"delay-clean-table-lock"`
SplitRegionMaxNum uint64 `toml:"split-region-max-num" json:"split-region-max-num"`
TopSQL TopSQL `toml:"top-sql" json:"top-sql"`
// RepairMode indicates that the TiDB is in the repair mode for table meta.
RepairMode bool `toml:"repair-mode" json:"repair-mode"`
RepairTableList []string `toml:"repair-table-list" json:"repair-table-list"`
	// IsolationRead indicates which isolation level (engine and label) TiDB reads data from.
IsolationRead IsolationRead `toml:"isolation-read" json:"isolation-read"`
	// NewCollationsEnabledOnFirstBootstrap indicates if the new collations are enabled; it takes effect only when a TiDB cluster is bootstrapped for the first time.
NewCollationsEnabledOnFirstBootstrap bool `toml:"new_collations_enabled_on_first_bootstrap" json:"new_collations_enabled_on_first_bootstrap"`
// Experimental contains parameters for experimental features.
Experimental Experimental `toml:"experimental" json:"experimental"`
// SkipRegisterToDashboard tells TiDB don't register itself to the dashboard.
SkipRegisterToDashboard bool `toml:"skip-register-to-dashboard" json:"skip-register-to-dashboard"`
// EnableTelemetry enables the usage data report to PingCAP.
EnableTelemetry bool `toml:"enable-telemetry" json:"enable-telemetry"`
// Labels indicates the labels set for the tidb server. The labels describe some specific properties for the tidb
// server like `zone`/`rack`/`host`. Currently, labels won't affect the tidb server except for some special
// label keys. Now we have following special keys:
// 1. 'group' is a special label key which should be automatically set by tidb-operator. We don't suggest
// users to set 'group' in labels.
// 2. 'zone' is a special key that indicates the DC location of this tidb-server. If it is set, the value for this
// key will be the default value of the session variable `txn_scope` for this tidb-server.
Labels map[string]string `toml:"labels" json:"labels"`
// EnableGlobalIndex enables creating global index.
EnableGlobalIndex bool `toml:"enable-global-index" json:"enable-global-index"`
	// DeprecateIntegerDisplayWidth indicates whether to deprecate the max display width for integer types.
DeprecateIntegerDisplayWidth bool `toml:"deprecate-integer-display-length" json:"deprecate-integer-display-length"`
// EnableEnumLengthLimit indicates whether the enum/set element length is limited.
// According to MySQL 8.0 Refman:
// The maximum supported length of an individual SET element is M <= 255 and (M x w) <= 1020,
// where M is the element literal length and w is the number of bytes required for the maximum-length character in the character set.
// See https://dev.mysql.com/doc/refman/8.0/en/string-type-syntax.html for more details.
EnableEnumLengthLimit bool `toml:"enable-enum-length-limit" json:"enable-enum-length-limit"`
// StoresRefreshInterval indicates the interval of refreshing stores info, the unit is second.
StoresRefreshInterval uint64 `toml:"stores-refresh-interval" json:"stores-refresh-interval"`
// EnableTCP4Only enables net.Listen("tcp4",...)
// Note that: it can make lvs with toa work and thus tidb can get real client ip.
EnableTCP4Only bool `toml:"enable-tcp4-only" json:"enable-tcp4-only"`
// The client will forward the requests through the follower
// if one of the following conditions happens:
// 1. there is a network partition problem between TiDB and PD leader.
// 2. there is a network partition problem between TiDB and TiKV leader.
EnableForwarding bool `toml:"enable-forwarding" json:"enable-forwarding"`
// MaxBallastObjectSize set the max size of the ballast object, the unit is byte.
// The default value is the smallest of the following two values: 2GB or
// one quarter of the total physical memory in the current system.
MaxBallastObjectSize int `toml:"max-ballast-object-size" json:"max-ballast-object-size"`
// BallastObjectSize set the initial size of the ballast object, the unit is byte.
BallastObjectSize int `toml:"ballast-object-size" json:"ballast-object-size"`
// EnableGlobalKill indicates whether to enable global kill.
TrxSummary TrxSummary `toml:"transaction-summary" json:"transaction-summary"`
EnableGlobalKill bool `toml:"enable-global-kill" json:"enable-global-kill"`
// The following items are deprecated. We need to keep them here temporarily
// to support the upgrade process. They can be removed in future.
// EnableBatchDML, MemQuotaQuery, OOMAction unused since bootstrap v90
EnableBatchDML bool `toml:"enable-batch-dml" json:"enable-batch-dml"`
MemQuotaQuery int64 `toml:"mem-quota-query" json:"mem-quota-query"`
OOMAction string `toml:"oom-action" json:"oom-action"`
// OOMUseTmpStorage unused since bootstrap v93
OOMUseTmpStorage bool `toml:"oom-use-tmp-storage" json:"oom-use-tmp-storage"`
// These items are deprecated because they are turned into instance system variables.
CheckMb4ValueInUTF8 AtomicBool `toml:"check-mb4-value-in-utf8" json:"check-mb4-value-in-utf8"`
EnableCollectExecutionInfo bool `toml:"enable-collect-execution-info" json:"enable-collect-execution-info"`
Plugin Plugin `toml:"plugin" json:"plugin"`
MaxServerConnections uint32 `toml:"max-server-connections" json:"max-server-connections"`
RunDDL bool `toml:"run-ddl" json:"run-ddl"`
}
// UpdateTempStoragePath is to update the `TempStoragePath` if port/statusPort was changed
// and the `tmp-storage-path` was not specified in the conf.toml or was specified the same as the default value.
func (c *Config) UpdateTempStoragePath() {
if c.TempStoragePath == tempStorageDirName {
c.TempStoragePath = encodeDefTempStorageDir(os.TempDir(), c.Host, c.Status.StatusHost, c.Port, c.Status.StatusPort)
} else {
c.TempStoragePath = encodeDefTempStorageDir(c.TempStoragePath, c.Host, c.Status.StatusHost, c.Port, c.Status.StatusPort)
}
}
// GetTiKVConfig returns configuration options from tikvcfg
func (c *Config) GetTiKVConfig() *tikvcfg.Config {
return &tikvcfg.Config{
CommitterConcurrency: int(tikvutil.CommitterConcurrency.Load()),
MaxTxnTTL: c.Performance.MaxTxnTTL,
TiKVClient: c.TiKVClient,
Security: c.Security.ClusterSecurity(),
PDClient: c.PDClient,
PessimisticTxn: tikvcfg.PessimisticTxn{MaxRetryCount: c.PessimisticTxn.MaxRetryCount},
TxnLocalLatches: c.TxnLocalLatches,
StoresRefreshInterval: c.StoresRefreshInterval,
OpenTracingEnable: c.OpenTracing.Enable,
Path: c.Path,
EnableForwarding: c.EnableForwarding,
TxnScope: c.Labels["zone"],
}
}
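// encodeDefTempStorageDir derives a per-instance temp-storage directory by base64-encoding the
// host/port combination and prefixing the OS user ID, so multiple tidb-server instances on the
// same machine do not share a directory.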
func encodeDefTempStorageDir(tempDir string, host, statusHost string, port, statusPort uint) string {
dirName := base64.URLEncoding.EncodeToString([]byte(fmt.Sprintf("%v:%v/%v:%v", host, port, statusHost, statusPort)))
osUID := ""
currentUser, err := user.Current()
if err == nil {
osUID = currentUser.Uid
}
return filepath.Join(tempDir, osUID+"_tidb", dirName, "tmp-storage")
}
// nullableBool defaults unset bool options to unset instead of false, which enables us to know if the user has set two
// conflicting options at the same time.
type nullableBool struct {
IsValid bool
IsTrue bool
}
var (
nbUnset = nullableBool{false, false}
nbFalse = nullableBool{true, false}
nbTrue = nullableBool{true, true}
)
func (b *nullableBool) toBool() bool {
return b.IsValid && b.IsTrue
}
func (b nullableBool) MarshalJSON() ([]byte, error) {
switch b {
case nbTrue:
return json.Marshal(true)
case nbFalse:
return json.Marshal(false)
default:
return json.Marshal(nil)
}
}
func (b *nullableBool) UnmarshalText(text []byte) error {
str := string(text)
switch str {
case "", "null":
*b = nbUnset
return nil
case "true":
*b = nbTrue
case "false":
*b = nbFalse
default:
*b = nbUnset
return errors.New("Invalid value for bool type: " + str)
}
return nil
}
func (b nullableBool) MarshalText() ([]byte, error) {
if !b.IsValid {
return []byte(""), nil
}
if b.IsTrue {
return []byte("true"), nil
}
return []byte("false"), nil
}
func (b *nullableBool) UnmarshalJSON(data []byte) error {
var err error
var v interface{}
if err = json.Unmarshal(data, &v); err != nil {
return err
}
switch raw := v.(type) {
case bool:
*b = nullableBool{true, raw}
default:
*b = nbUnset
}
return err
}
// AtomicBool is a helper type for atomic operations on a boolean value.
type AtomicBool struct {
atomicutil.Bool
}
// NewAtomicBool creates an AtomicBool.
func NewAtomicBool(v bool) *AtomicBool {
return &AtomicBool{*atomicutil.NewBool(v)}
}
// MarshalText implements the encoding.TextMarshaler interface.
func (b AtomicBool) MarshalText() ([]byte, error) {
if b.Load() {
return []byte("true"), nil
}
return []byte("false"), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (b *AtomicBool) UnmarshalText(text []byte) error {
str := string(text)
switch str {
case "", "null":
*b = AtomicBool{*atomicutil.NewBool(false)}
case "true":
*b = AtomicBool{*atomicutil.NewBool(true)}
case "false":
*b = AtomicBool{*atomicutil.NewBool(false)}
default:
*b = AtomicBool{*atomicutil.NewBool(false)}
return errors.New("Invalid value for bool type: " + str)
}
return nil
}
// LogBackup is the config for log backup service.
// For now, it includes the embed advancer.
type LogBackup struct {
Advancer logbackupconf.Config `toml:"advancer" json:"advancer"`
Enabled bool `toml:"enabled" json:"enabled"`
}
// Log is the log section of config.
type Log struct {
// Log level.
Level string `toml:"level" json:"level"`
// Log format, one of json or text.
Format string `toml:"format" json:"format"`
// Disable automatic timestamps in output. Deprecated: use EnableTimestamp instead.
DisableTimestamp nullableBool `toml:"disable-timestamp" json:"disable-timestamp"`
// EnableTimestamp enables automatic timestamps in log output.
EnableTimestamp nullableBool `toml:"enable-timestamp" json:"enable-timestamp"`
// DisableErrorStack stops annotating logs with the full stack error
// message. Deprecated: use EnableErrorStack instead.
DisableErrorStack nullableBool `toml:"disable-error-stack" json:"disable-error-stack"`
// EnableErrorStack enables annotating logs with the full stack error
// message.
EnableErrorStack nullableBool `toml:"enable-error-stack" json:"enable-error-stack"`
// File log config.
File logutil.FileLogConfig `toml:"file" json:"file"`
SlowQueryFile string `toml:"slow-query-file" json:"slow-query-file"`
ExpensiveThreshold uint `toml:"expensive-threshold" json:"expensive-threshold"`
// The following items are deprecated. We need to keep them here temporarily
// to support the upgrade process. They can be removed in future.
// QueryLogMaxLen unused since bootstrap v90
QueryLogMaxLen uint64 `toml:"query-log-max-len" json:"query-log-max-len"`
// EnableSlowLog, SlowThreshold, RecordPlanInSlowLog are deprecated.
EnableSlowLog AtomicBool `toml:"enable-slow-log" json:"enable-slow-log"`
SlowThreshold uint64 `toml:"slow-threshold" json:"slow-threshold"`
RecordPlanInSlowLog uint32 `toml:"record-plan-in-slow-log" json:"record-plan-in-slow-log"`
}
// Instance is the section of instance scope system variables.
type Instance struct {
// These variables only exist in [instance] section.
// TiDBGeneralLog is used to log every query in the server in info level.
TiDBGeneralLog bool `toml:"tidb_general_log" json:"tidb_general_log"`
	// EnablePProfSQLCPU is used to add the sql label to pprof results.
EnablePProfSQLCPU bool `toml:"tidb_pprof_sql_cpu" json:"tidb_pprof_sql_cpu"`
	// DDLSlowOprThreshold is the threshold for logging DDL operations whose execution time exceeds it.
DDLSlowOprThreshold uint32 `toml:"ddl_slow_threshold" json:"ddl_slow_threshold"`
// ExpensiveQueryTimeThreshold indicates the time threshold of expensive query.
ExpensiveQueryTimeThreshold uint64 `toml:"tidb_expensive_query_time_threshold" json:"tidb_expensive_query_time_threshold"`
// These variables exist in both 'instance' section and another place.
// The configuration in 'instance' section takes precedence.
EnableSlowLog AtomicBool `toml:"tidb_enable_slow_log" json:"tidb_enable_slow_log"`
SlowThreshold uint64 `toml:"tidb_slow_log_threshold" json:"tidb_slow_log_threshold"`
RecordPlanInSlowLog uint32 `toml:"tidb_record_plan_in_slow_log" json:"tidb_record_plan_in_slow_log"`
CheckMb4ValueInUTF8 AtomicBool `toml:"tidb_check_mb4_value_in_utf8" json:"tidb_check_mb4_value_in_utf8"`
ForcePriority string `toml:"tidb_force_priority" json:"tidb_force_priority"`
MemoryUsageAlarmRatio float64 `toml:"tidb_memory_usage_alarm_ratio" json:"tidb_memory_usage_alarm_ratio"`
// EnableCollectExecutionInfo enables the TiDB to collect execution info.
EnableCollectExecutionInfo bool `toml:"tidb_enable_collect_execution_info" json:"tidb_enable_collect_execution_info"`
PluginDir string `toml:"plugin_dir" json:"plugin_dir"`
PluginLoad string `toml:"plugin_load" json:"plugin_load"`
// MaxConnections is the maximum permitted number of simultaneous client connections.
MaxConnections uint32 `toml:"max_connections" json:"max_connections"`
TiDBEnableDDL AtomicBool `toml:"tidb_enable_ddl" json:"tidb_enable_ddl"`
}
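// getDisableTimestamp reconciles the deprecated disable-timestamp option with enable-timestamp;
// enable-timestamp wins when set, and timestamps stay enabled when neither option is set.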
func (l *Log) getDisableTimestamp() bool {
if l.EnableTimestamp == nbUnset && l.DisableTimestamp == nbUnset {
return false
}
if l.EnableTimestamp == nbUnset {
return l.DisableTimestamp.toBool()
}
return !l.EnableTimestamp.toBool()
}
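// getDisableErrorStack reconciles the deprecated disable-error-stack option with enable-error-stack;
// enable-error-stack wins when set, and error stacks stay disabled when neither option is set.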
func (l *Log) getDisableErrorStack() bool {
if l.EnableErrorStack == nbUnset && l.DisableErrorStack == nbUnset {
return true
}
if l.EnableErrorStack == nbUnset {
return l.DisableErrorStack.toBool()
}
return !l.EnableErrorStack.toBool()
}
// The following constants represent the valid action configurations for Security.SpilledFileEncryptionMethod.
// "plaintext" means encryption is disabled.
// NOTE: Although the value is case-insensitive, we should use lower-case
// strings because the configuration value will be transformed to a lower-case
// string and compared with these constants in later usage.
const (
SpilledFileEncryptionMethodPlaintext = "plaintext"
SpilledFileEncryptionMethodAES128CTR = "aes128-ctr"
)
// Security is the security section of the config.
type Security struct {
SkipGrantTable bool `toml:"skip-grant-table" json:"skip-grant-table"`
SSLCA string `toml:"ssl-ca" json:"ssl-ca"`
SSLCert string `toml:"ssl-cert" json:"ssl-cert"`
SSLKey string `toml:"ssl-key" json:"ssl-key"`
ClusterSSLCA string `toml:"cluster-ssl-ca" json:"cluster-ssl-ca"`
ClusterSSLCert string `toml:"cluster-ssl-cert" json:"cluster-ssl-cert"`
ClusterSSLKey string `toml:"cluster-ssl-key" json:"cluster-ssl-key"`
ClusterVerifyCN []string `toml:"cluster-verify-cn" json:"cluster-verify-cn"`
// If set to "plaintext", the spilled files will not be encrypted.
SpilledFileEncryptionMethod string `toml:"spilled-file-encryption-method" json:"spilled-file-encryption-method"`
// EnableSEM prevents SUPER users from having full access.
EnableSEM bool `toml:"enable-sem" json:"enable-sem"`
// Allow automatic TLS certificate generation
AutoTLS bool `toml:"auto-tls" json:"auto-tls"`
MinTLSVersion string `toml:"tls-version" json:"tls-version"`
RSAKeySize int `toml:"rsa-key-size" json:"rsa-key-size"`
SecureBootstrap bool `toml:"secure-bootstrap" json:"secure-bootstrap"`
}
// The ErrConfigValidationFailed error is used so that external callers can do a type assertion
// to defer handling of this specific error when someone does not want strict type checking.
// This is needed only because logging hasn't been set up at the time we parse the config file.
// This should all be ripped out once strict config checking is made the default behavior.
type ErrConfigValidationFailed struct {
confFile string
UndecodedItems []string
}
func (e *ErrConfigValidationFailed) Error() string {
return fmt.Sprintf("config file %s contained invalid configuration options: %s; check "+
"TiDB manual to make sure this option has not been deprecated and removed from your TiDB "+
"version if the option does not appear to be a typo", e.confFile, strings.Join(
e.UndecodedItems, ", "))
}
// ErrConfigInstanceSection error is used to warn the user
// which config options should be moved to the 'instance' section.
type ErrConfigInstanceSection struct {
confFile string
configSections *[]InstanceConfigSection
deprecatedSections *[]InstanceConfigSection
}
func (e *ErrConfigInstanceSection) Error() string {
var builder strings.Builder
if len(*e.configSections) > 0 {
builder.WriteString("Conflict configuration options exists on both [instance] section and some other sections. ")
}
if len(*e.deprecatedSections) > 0 {
builder.WriteString("Some configuration options should be moved to [instance] section. ")
}
builder.WriteString("Please use the latter config options in [instance] instead: ")
for _, configSection := range *e.configSections {
for oldName, newName := range configSection.NameMappings {
builder.WriteString(fmt.Sprintf(" (%s, %s)", oldName, newName))
}
}
for _, configSection := range *e.deprecatedSections {
for oldName, newName := range configSection.NameMappings {
builder.WriteString(fmt.Sprintf(" (%s, %s)", oldName, newName))
}
}
builder.WriteString(".")
return builder.String()
}
// ClusterSecurity returns Security info for cluster
func (s *Security) ClusterSecurity() tikvcfg.Security {
return tikvcfg.NewSecurity(s.ClusterSSLCA, s.ClusterSSLCert, s.ClusterSSLKey, s.ClusterVerifyCN)
}
// Status is the status section of the config.
type Status struct {
StatusHost string `toml:"status-host" json:"status-host"`
MetricsAddr string `toml:"metrics-addr" json:"metrics-addr"`
StatusPort uint `toml:"status-port" json:"status-port"`
MetricsInterval uint `toml:"metrics-interval" json:"metrics-interval"`
ReportStatus bool `toml:"report-status" json:"report-status"`
RecordQPSbyDB bool `toml:"record-db-qps" json:"record-db-qps"`
// After a duration of this time in seconds if the server doesn't see any activity it pings
// the client to see if the transport is still alive.
GRPCKeepAliveTime uint `toml:"grpc-keepalive-time" json:"grpc-keepalive-time"`
// After having pinged for keepalive check, the server waits for a duration of timeout in seconds
// and if no activity is seen even after that the connection is closed.
GRPCKeepAliveTimeout uint `toml:"grpc-keepalive-timeout" json:"grpc-keepalive-timeout"`
// The number of max concurrent streams/requests on a client connection.
GRPCConcurrentStreams uint `toml:"grpc-concurrent-streams" json:"grpc-concurrent-streams"`
// Sets window size for stream. The default value is 2MB.
GRPCInitialWindowSize int `toml:"grpc-initial-window-size" json:"grpc-initial-window-size"`
// Set maximum message length in bytes that gRPC can send. `-1` means unlimited. The default value is 10MB.
GRPCMaxSendMsgSize int `toml:"grpc-max-send-msg-size" json:"grpc-max-send-msg-size"`
}
// Performance is the performance section of the config.
type Performance struct {
MaxProcs uint `toml:"max-procs" json:"max-procs"`
// Deprecated: use ServerMemoryQuota instead
MaxMemory uint64 `toml:"max-memory" json:"max-memory"`
ServerMemoryQuota uint64 `toml:"server-memory-quota" json:"server-memory-quota"`
StatsLease string `toml:"stats-lease" json:"stats-lease"`
StmtCountLimit uint `toml:"stmt-count-limit" json:"stmt-count-limit"`
PseudoEstimateRatio float64 `toml:"pseudo-estimate-ratio" json:"pseudo-estimate-ratio"`
BindInfoLease string `toml:"bind-info-lease" json:"bind-info-lease"`
TxnEntrySizeLimit uint64 `toml:"txn-entry-size-limit" json:"txn-entry-size-limit"`
TxnTotalSizeLimit uint64 `toml:"txn-total-size-limit" json:"txn-total-size-limit"`
TCPKeepAlive bool `toml:"tcp-keep-alive" json:"tcp-keep-alive"`
TCPNoDelay bool `toml:"tcp-no-delay" json:"tcp-no-delay"`
CrossJoin bool `toml:"cross-join" json:"cross-join"`
DistinctAggPushDown bool `toml:"distinct-agg-push-down" json:"distinct-agg-push-down"`
	// Whether to enable projection pushdown for coprocessors (both tikv & tiflash), default false.
ProjectionPushDown bool `toml:"projection-push-down" json:"projection-push-down"`
MaxTxnTTL uint64 `toml:"max-txn-ttl" json:"max-txn-ttl"`
// Deprecated
MemProfileInterval string `toml:"-" json:"-"`
IndexUsageSyncLease string `toml:"index-usage-sync-lease" json:"index-usage-sync-lease"`
PlanReplayerGCLease string `toml:"plan-replayer-gc-lease" json:"plan-replayer-gc-lease"`
GOGC int `toml:"gogc" json:"gogc"`
EnforceMPP bool `toml:"enforce-mpp" json:"enforce-mpp"`
StatsLoadConcurrency uint `toml:"stats-load-concurrency" json:"stats-load-concurrency"`
StatsLoadQueueSize uint `toml:"stats-load-queue-size" json:"stats-load-queue-size"`
EnableStatsCacheMemQuota bool `toml:"enable-stats-cache-mem-quota" json:"enable-stats-cache-mem-quota"`
// The following items are deprecated. We need to keep them here temporarily
// to support the upgrade process. They can be removed in future.
// CommitterConcurrency, RunAutoAnalyze unused since bootstrap v90
CommitterConcurrency int `toml:"committer-concurrency" json:"committer-concurrency"`
RunAutoAnalyze bool `toml:"run-auto-analyze" json:"run-auto-analyze"`
// ForcePriority, MemoryUsageAlarmRatio are deprecated.
ForcePriority string `toml:"force-priority" json:"force-priority"`
MemoryUsageAlarmRatio float64 `toml:"memory-usage-alarm-ratio" json:"memory-usage-alarm-ratio"`
EnableLoadFMSketch bool `toml:"enable-load-fmsketch" json:"enable-load-fmsketch"`
}
// PlanCache is the PlanCache section of the config.
type PlanCache struct {
Enabled bool `toml:"enabled" json:"enabled"`
Capacity uint `toml:"capacity" json:"capacity"`
Shards uint `toml:"shards" json:"shards"`
}
// PreparedPlanCache is the PreparedPlanCache section of the config.
type PreparedPlanCache struct {
Enabled bool `toml:"enabled" json:"enabled"`
Capacity uint `toml:"capacity" json:"capacity"`
MemoryGuardRatio float64 `toml:"memory-guard-ratio" json:"memory-guard-ratio"`
}
// OpenTracing is the opentracing section of the config.
type OpenTracing struct {
Enable bool `toml:"enable" json:"enable"`
RPCMetrics bool `toml:"rpc-metrics" json:"rpc-metrics"`
Sampler OpenTracingSampler `toml:"sampler" json:"sampler"`
Reporter OpenTracingReporter `toml:"reporter" json:"reporter"`
}
// OpenTracingSampler is the config for opentracing sampler.
// See https://godoc.org/github.com/uber/jaeger-client-go/config#SamplerConfig
type OpenTracingSampler struct {
Type string `toml:"type" json:"type"`
Param float64 `toml:"param" json:"param"`
SamplingServerURL string `toml:"sampling-server-url" json:"sampling-server-url"`
MaxOperations int `toml:"max-operations" json:"max-operations"`
SamplingRefreshInterval time.Duration `toml:"sampling-refresh-interval" json:"sampling-refresh-interval"`
}
// OpenTracingReporter is the config for opentracing reporter.
// See https://godoc.org/github.com/uber/jaeger-client-go/config#ReporterConfig
type OpenTracingReporter struct {
QueueSize int `toml:"queue-size" json:"queue-size"`
BufferFlushInterval time.Duration `toml:"buffer-flush-interval" json:"buffer-flush-interval"`
LogSpans bool `toml:"log-spans" json:"log-spans"`
LocalAgentHostPort string `toml:"local-agent-host-port" json:"local-agent-host-port"`
}
// ProxyProtocol is the PROXY protocol section of the config.
type ProxyProtocol struct {
// PROXY protocol acceptable client networks.
// Empty string means disable PROXY protocol,
// * means all networks.
Networks string `toml:"networks" json:"networks"`
// PROXY protocol header read timeout, Unit is second.
HeaderTimeout uint `toml:"header-timeout" json:"header-timeout"`
}
// Binlog is the config for binlog.
type Binlog struct {
Enable bool `toml:"enable" json:"enable"`
// If IgnoreError is true, when writing binlog meets error, TiDB would
// ignore the error.
IgnoreError bool `toml:"ignore-error" json:"ignore-error"`
WriteTimeout string `toml:"write-timeout" json:"write-timeout"`
	// Use a socket file to write binlog, for compatibility with the kafka version of tidb-binlog.
BinlogSocket string `toml:"binlog-socket" json:"binlog-socket"`
// The strategy for sending binlog to pump, value can be "range" or "hash" now.
Strategy string `toml:"strategy" json:"strategy"`
}
// PessimisticTxn is the config for pessimistic transaction.
type PessimisticTxn struct {
// The max count of retry for a single statement in a pessimistic transaction.
MaxRetryCount uint `toml:"max-retry-count" json:"max-retry-count"`
// The max count of deadlock events that will be recorded in the information_schema.deadlocks table.
DeadlockHistoryCapacity uint `toml:"deadlock-history-capacity" json:"deadlock-history-capacity"`
// Whether retryable deadlocks (in-statement deadlocks) are collected to the information_schema.deadlocks table.
DeadlockHistoryCollectRetryable bool `toml:"deadlock-history-collect-retryable" json:"deadlock-history-collect-retryable"`
	// PessimisticAutoCommit, when true, means that auto-commit transactions will be run in pessimistic mode.
PessimisticAutoCommit AtomicBool `toml:"pessimistic-auto-commit" json:"pessimistic-auto-commit"`
}
// TrxSummary is the config for transaction summary collecting.
type TrxSummary struct {
	// how many transaction summaries each TiDB node should keep in `transaction_summary`.
TransactionSummaryCapacity uint `toml:"transaction-summary-capacity" json:"transaction-summary-capacity"`
	// how long a transaction must run before it is recorded in `transaction_id_digest`.
TransactionIDDigestMinDuration uint `toml:"transaction-id-digest-min-duration" json:"transaction-id-digest-min-duration"`
}
// Valid validates TrxSummary configs
func (config *TrxSummary) Valid() error {
if config.TransactionSummaryCapacity > 5000 {
return errors.New("transaction-summary.transaction-summary-capacity should not be larger than 5000")
}
return nil
}
// DefaultPessimisticTxn returns the default configuration for PessimisticTxn
func DefaultPessimisticTxn() PessimisticTxn {
return PessimisticTxn{
MaxRetryCount: 256,
DeadlockHistoryCapacity: 10,
DeadlockHistoryCollectRetryable: false,
PessimisticAutoCommit: *NewAtomicBool(false),
}
}
// DefaultTrxSummary returns the default configuration for TrxSummary collector
func DefaultTrxSummary() TrxSummary {
// TrxSummary is not enabled by default before GA
return TrxSummary{
TransactionSummaryCapacity: 500,
TransactionIDDigestMinDuration: 2147483647,
}
}
// Plugin is the config for plugin
type Plugin struct {
Dir string `toml:"dir" json:"dir"`
Load string `toml:"load" json:"load"`
}
// TopSQL is the config for TopSQL.
type TopSQL struct {
// The TopSQL's data receiver address.
ReceiverAddress string `toml:"receiver-address" json:"receiver-address"`
}
// IsolationRead is the config for isolation read.
type IsolationRead struct {
// Engines filters tidb-server access paths by engine type.
Engines []string `toml:"engines" json:"engines"`
}
// Experimental controls the features that are still experimental: their semantics, interfaces are subject to change.
// Using these features in the production environment is not recommended.
type Experimental struct {
	// Whether to enable creating expression indexes.
AllowsExpressionIndex bool `toml:"allow-expression-index" json:"allow-expression-index"`
	// Whether to enable the new charset feature.
EnableNewCharset bool `toml:"enable-new-charset" json:"-"`
}
var defTiKVCfg = tikvcfg.DefaultConfig()
var defaultConf = Config{
Host: DefHost,
AdvertiseAddress: "",
Port: DefPort,
Socket: "/tmp/tidb-{Port}.sock",
Cors: "",
Store: "unistore",
Path: "/tmp/tidb",
RunDDL: true,
SplitTable: true,
Lease: "45s",
TokenLimit: 1000,
OOMUseTmpStorage: true,
TempDir: DefTempDir,
TempStorageQuota: -1,
TempStoragePath: tempStorageDirName,
MemQuotaQuery: 1 << 30,
OOMAction: "cancel",
EnableBatchDML: false,
CheckMb4ValueInUTF8: *NewAtomicBool(true),
MaxIndexLength: 3072,
IndexLimit: 64,
TableColumnCountLimit: 1017,
AlterPrimaryKey: false,
TreatOldVersionUTF8AsUTF8MB4: true,
EnableTableLock: false,
DelayCleanTableLock: 0,
SplitRegionMaxNum: 1000,
RepairMode: false,
RepairTableList: []string{},
MaxServerConnections: 0,
TxnLocalLatches: defTiKVCfg.TxnLocalLatches,
GracefulWaitBeforeShutdown: 0,
ServerVersion: "",
TiDBEdition: "",
VersionComment: "",
TiDBReleaseVersion: "",
Log: Log{
Level: "info",
Format: "text",
File: logutil.NewFileLogConfig(logutil.DefaultLogMaxSize),
SlowQueryFile: "tidb-slow.log",
SlowThreshold: logutil.DefaultSlowThreshold,
ExpensiveThreshold: 10000,
DisableErrorStack: nbUnset,
EnableErrorStack: nbUnset, // If both options are nbUnset, getDisableErrorStack() returns true
EnableTimestamp: nbUnset,
DisableTimestamp: nbUnset, // If both options are nbUnset, getDisableTimestamp() returns false
QueryLogMaxLen: logutil.DefaultQueryLogMaxLen,
RecordPlanInSlowLog: logutil.DefaultRecordPlanInSlowLog,
EnableSlowLog: *NewAtomicBool(logutil.DefaultTiDBEnableSlowLog),
},
Instance: Instance{
TiDBGeneralLog: false,
EnablePProfSQLCPU: false,
DDLSlowOprThreshold: DefDDLSlowOprThreshold,
ExpensiveQueryTimeThreshold: DefExpensiveQueryTimeThreshold,
EnableSlowLog: *NewAtomicBool(logutil.DefaultTiDBEnableSlowLog),
SlowThreshold: logutil.DefaultSlowThreshold,
RecordPlanInSlowLog: logutil.DefaultRecordPlanInSlowLog,
CheckMb4ValueInUTF8: *NewAtomicBool(true),
ForcePriority: "NO_PRIORITY",
MemoryUsageAlarmRatio: DefMemoryUsageAlarmRatio,
EnableCollectExecutionInfo: true,
PluginDir: "/data/deploy/plugin",
PluginLoad: "",
MaxConnections: 0,
TiDBEnableDDL: *NewAtomicBool(true),
},
Status: Status{
ReportStatus: true,
StatusHost: DefStatusHost,
StatusPort: DefStatusPort,
MetricsInterval: 15,
RecordQPSbyDB: false,
GRPCKeepAliveTime: 10,
GRPCKeepAliveTimeout: 3,
GRPCConcurrentStreams: 1024,
GRPCInitialWindowSize: 2 * 1024 * 1024,
GRPCMaxSendMsgSize: math.MaxInt32,
},
Performance: Performance{
MaxMemory: 0,
ServerMemoryQuota: 0,
MemoryUsageAlarmRatio: DefMemoryUsageAlarmRatio,
TCPKeepAlive: true,
TCPNoDelay: true,
CrossJoin: true,
StatsLease: "3s",
StmtCountLimit: 5000,
PseudoEstimateRatio: 0.8,
ForcePriority: "NO_PRIORITY",
BindInfoLease: "3s",
TxnEntrySizeLimit: DefTxnEntrySizeLimit,
TxnTotalSizeLimit: DefTxnTotalSizeLimit,
DistinctAggPushDown: false,
ProjectionPushDown: false,
CommitterConcurrency: defTiKVCfg.CommitterConcurrency,
MaxTxnTTL: defTiKVCfg.MaxTxnTTL, // 1hour
// TODO: set indexUsageSyncLease to 60s.
IndexUsageSyncLease: "0s",
GOGC: 100,
EnforceMPP: false,
PlanReplayerGCLease: "10m",
StatsLoadConcurrency: 5,
StatsLoadQueueSize: 1000,
EnableStatsCacheMemQuota: false,
RunAutoAnalyze: true,
EnableLoadFMSketch: false,
},
ProxyProtocol: ProxyProtocol{
Networks: "",
HeaderTimeout: 5,
},
PreparedPlanCache: PreparedPlanCache{
Enabled: true,
Capacity: 100,
MemoryGuardRatio: 0.1,
},
OpenTracing: OpenTracing{
Enable: false,
Sampler: OpenTracingSampler{
Type: "const",
Param: 1.0,
},
Reporter: OpenTracingReporter{},
},
PDClient: defTiKVCfg.PDClient,
TiKVClient: defTiKVCfg.TiKVClient,
Binlog: Binlog{
WriteTimeout: "15s",
Strategy: "range",
},
Plugin: Plugin{
Dir: "/data/deploy/plugin",
Load: "",
},
PessimisticTxn: DefaultPessimisticTxn(),
IsolationRead: IsolationRead{
Engines: []string{"tikv", "tiflash", "tidb"},
},
Experimental: Experimental{},
EnableCollectExecutionInfo: true,
EnableTelemetry: true,
Labels: make(map[string]string),
EnableGlobalIndex: false,
Security: Security{
SpilledFileEncryptionMethod: SpilledFileEncryptionMethodPlaintext,
EnableSEM: false,
AutoTLS: false,
RSAKeySize: 4096,
},
DeprecateIntegerDisplayWidth: false,
EnableEnumLengthLimit: true,
StoresRefreshInterval: defTiKVCfg.StoresRefreshInterval,
EnableForwarding: defTiKVCfg.EnableForwarding,
NewCollationsEnabledOnFirstBootstrap: true,
EnableGlobalKill: true,
TrxSummary: DefaultTrxSummary(),
}
var (
globalConf atomic.Value
)
// NewConfig creates a new config instance with default value.
func NewConfig() *Config {
conf := defaultConf
return &conf
}
// GetGlobalConfig returns the global configuration for this server.
// It should store configuration from command line and configuration file.
// Other parts of the system can read the global configuration use this function.
func GetGlobalConfig() *Config {
return globalConf.Load().(*Config)
}
// StoreGlobalConfig stores a new config to the globalConf. It is mostly used in tests to avoid data races.
func StoreGlobalConfig(config *Config) {
globalConf.Store(config)
TikvConfigLock.Lock()
defer TikvConfigLock.Unlock()
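	// Derive and store the TiKV client config so it stays consistent with the new TiDB config.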
cfg := *config.GetTiKVConfig()
tikvcfg.StoreGlobalConfig(&cfg)
}
// removedConfig contains items that are no longer supported.
// they might still be in the config struct to support import,
// but are not actively used.
var removedConfig = map[string]struct{}{
"pessimistic-txn.ttl": {},
"pessimistic-txn.enable": {},
"log.file.log-rotate": {},
"log.log-slow-query": {},
"txn-local-latches": {},
"txn-local-latches.enabled": {},
"txn-local-latches.capacity": {},
"performance.max-memory": {},
"max-txn-time-use": {},
"experimental.allow-auto-random": {},
"enable-redact-log": {}, // use variable tidb_redact_log instead
"enable-streaming": {},
"performance.mem-profile-interval": {},
"security.require-secure-transport": {},
"lower-case-table-names": {},
"stmt-summary": {},
"stmt-summary.enable": {},
"stmt-summary.enable-internal-query": {},
"stmt-summary.max-stmt-count": {},
"stmt-summary.max-sql-length": {},
"stmt-summary.refresh-interval": {},
"stmt-summary.history-size": {},
"enable-batch-dml": {}, // use tidb_enable_batch_dml
"mem-quota-query": {},
"log.query-log-max-len": {},
"performance.committer-concurrency": {},
"experimental.enable-global-kill": {},
"performance.run-auto-analyze": {}, //use tidb_enable_auto_analyze
// use tidb_enable_prepared_plan_cache, tidb_prepared_plan_cache_size and tidb_prepared_plan_cache_memory_guard_ratio
"prepared-plan-cache.enabled": {},
"prepared-plan-cache.capacity": {},
"prepared-plan-cache.memory-guard-ratio": {},
"oom-action": {},
"check-mb4-value-in-utf8": {}, // use tidb_check_mb4_value_in_utf8
"enable-collect-execution-info": {}, // use tidb_enable_collect_execution_info
"log.enable-slow-log": {}, // use tidb_enable_slow_log
"log.slow-threshold": {}, // use tidb_slow_log_threshold
"log.record-plan-in-slow-log": {}, // use tidb_record_plan_in_slow_log
"performance.force-priority": {}, // use tidb_force_priority
"performance.memory-usage-alarm-ratio": {}, // use tidb_memory_usage_alarm_ratio
"plugin.load": {}, // use plugin_load
"plugin.dir": {}, // use plugin_dir
"performance.feedback-probability": {}, // This feature is deprecated
"performance.query-feedback-limit": {},
"oom-use-tmp-storage": {}, // use tidb_enable_tmp_storage_on_oom
"max-server-connections": {}, // use sysvar max_connections
"run-ddl": {}, // use sysvar tidb_enable_ddl
}
// isAllRemovedConfigItems returns true if all the items that couldn't be validated
// belong to the list of removedConfig items.
func isAllRemovedConfigItems(items []string) bool {
for _, item := range items {
if _, ok := removedConfig[item]; !ok {
return false
}
}
return true
}
// InitializeConfig initializes the global config handler.
// The function enforceCmdArgs is used to merge the config file with command-line arguments:
// for example, if you start TiDB with the command "./tidb-server --port=3000", the port number is
// overwritten to 3000 and the port number in the config file is ignored.
func InitializeConfig(confPath string, configCheck, configStrict bool, enforceCmdArgs func(*Config)) {
cfg := GetGlobalConfig()
var err error
if confPath != "" {
if err = cfg.Load(confPath); err != nil {
// Unused config item error turns to warnings.
if tmp, ok := err.(*ErrConfigValidationFailed); ok {
// This block is to accommodate an interim situation where strict config checking
// is not the default behavior of TiDB. The warning message must be deferred until
// logging has been set up. After strict config checking is the default behavior,
// This should all be removed.
if (!configCheck && !configStrict) || isAllRemovedConfigItems(tmp.UndecodedItems) {
fmt.Fprintln(os.Stderr, err.Error())
err = nil
}
} else if tmp, ok := err.(*ErrConfigInstanceSection); ok {
logutil.BgLogger().Warn(tmp.Error())
err = nil
}
}
// In configCheck we always print out which options in the config file
// have been removed. This helps users upgrade better.
if configCheck {
err = cfg.RemovedVariableCheck(confPath)
if err != nil {
logutil.BgLogger().Warn(err.Error())
err = nil // treat as warning
}
}
terror.MustNil(err)
} else {
// configCheck should have the config file specified.
if configCheck {
fmt.Fprintln(os.Stderr, "config check failed", errors.New("no config file specified for config-check"))
os.Exit(1)
}
}
enforceCmdArgs(cfg)
if err := cfg.Valid(); err != nil {
if !filepath.IsAbs(confPath) {
if tmp, err := filepath.Abs(confPath); err == nil {
confPath = tmp
}
}
fmt.Fprintln(os.Stderr, "load config file:", confPath)
fmt.Fprintln(os.Stderr, "invalid config", err)
os.Exit(1)
}
if configCheck {
fmt.Println("config check successful")
os.Exit(0)
}
StoreGlobalConfig(cfg)
}
// RemovedVariableCheck checks if the config file contains any items
// which have been removed. These will not take effect any more.
func (c *Config) RemovedVariableCheck(confFile string) error {
metaData, err := toml.DecodeFile(confFile, c)
if err != nil {
return err
}
var removed []string
for item := range removedConfig {
// We need to split the string to account for the top level
// and the section hierarchy of config.
tmp := strings.Split(item, ".")
if len(tmp) == 2 && metaData.IsDefined(tmp[0], tmp[1]) {
removed = append(removed, item)
} else if len(tmp) == 1 && metaData.IsDefined(tmp[0]) {
removed = append(removed, item)
}
}
if len(removed) > 0 {
sort.Strings(removed) // deterministic for tests
return fmt.Errorf("The following configuration options are no longer supported in this version of TiDB. Check the release notes for more information: %s", strings.Join(removed, ", "))
}
return nil
}
// Load loads config options from a toml file.
func (c *Config) Load(confFile string) error {
metaData, err := toml.DecodeFile(confFile, c)
if c.TokenLimit == 0 {
c.TokenLimit = 1000
}
	// If any items in the config file are not mapped into the Config struct, issue
// an error and stop the server from starting.
undecoded := metaData.Undecoded()
if len(undecoded) > 0 && err == nil {
var undecodedItems []string
for _, item := range undecoded {
undecodedItems = append(undecodedItems, item.String())
}
err = &ErrConfigValidationFailed{confFile, undecodedItems}
}
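	// Detect options that have moved to the [instance] section: if an option is set both in its old
	// location and under [instance] it is recorded as a conflict, otherwise it is reported as deprecated.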
for _, section := range sectionMovedToInstance {
newConflictSection := InstanceConfigSection{SectionName: section.SectionName, NameMappings: map[string]string{}}
newDeprecatedSection := InstanceConfigSection{SectionName: section.SectionName, NameMappings: map[string]string{}}
for oldName, newName := range section.NameMappings {
if section.SectionName == "" && metaData.IsDefined(oldName) ||
section.SectionName != "" && metaData.IsDefined(section.SectionName, oldName) {
if metaData.IsDefined("instance", newName) {
newConflictSection.NameMappings[oldName] = newName
} else {
newDeprecatedSection.NameMappings[oldName] = newName
}
}
}
if len(newConflictSection.NameMappings) > 0 {
ConflictOptions = append(ConflictOptions, newConflictSection)
}
if len(newDeprecatedSection.NameMappings) > 0 {
DeprecatedOptions = append(DeprecatedOptions, newDeprecatedSection)
}
}
if len(ConflictOptions) > 0 || len(DeprecatedOptions) > 0 {
// Give a warning that the 'instance' section should be used.
err = &ErrConfigInstanceSection{confFile, &ConflictOptions, &DeprecatedOptions}
}
return err
}
// Valid checks if this config is valid.
func (c *Config) Valid() error {
if c.Log.EnableErrorStack == c.Log.DisableErrorStack && c.Log.EnableErrorStack != nbUnset {
logutil.BgLogger().Warn(fmt.Sprintf("\"enable-error-stack\" (%v) conflicts \"disable-error-stack\" (%v). \"disable-error-stack\" is deprecated, please use \"enable-error-stack\" instead. disable-error-stack is ignored.", c.Log.EnableErrorStack, c.Log.DisableErrorStack))
// if two options conflict, we will use the value of EnableErrorStack
c.Log.DisableErrorStack = nbUnset
}
if c.Log.EnableTimestamp == c.Log.DisableTimestamp && c.Log.EnableTimestamp != nbUnset {
logutil.BgLogger().Warn(fmt.Sprintf("\"enable-timestamp\" (%v) conflicts \"disable-timestamp\" (%v). \"disable-timestamp\" is deprecated, please use \"enable-timestamp\" instead", c.Log.EnableTimestamp, c.Log.DisableTimestamp))
// if two options conflict, we will use the value of EnableTimestamp
c.Log.DisableTimestamp = nbUnset
}
if c.Security.SkipGrantTable && !hasRootPrivilege() {
return fmt.Errorf("TiDB run with skip-grant-table need root privilege")
}
if !ValidStorage[c.Store] {
nameList := make([]string, 0, len(ValidStorage))
for k, v := range ValidStorage {
if v {
nameList = append(nameList, k)
}
}
return fmt.Errorf("invalid store=%s, valid storages=%v", c.Store, nameList)
}
if c.Store == "mocktikv" && !c.Instance.TiDBEnableDDL.Load() {
return fmt.Errorf("can't disable DDL on mocktikv")
}
if c.MaxIndexLength < DefMaxIndexLength || c.MaxIndexLength > DefMaxOfMaxIndexLength {
return fmt.Errorf("max-index-length should be [%d, %d]", DefMaxIndexLength, DefMaxOfMaxIndexLength)
}
if c.IndexLimit < DefIndexLimit || c.IndexLimit > DefMaxOfIndexLimit {
return fmt.Errorf("index-limit should be [%d, %d]", DefIndexLimit, DefMaxOfIndexLimit)
}
if c.Log.File.MaxSize > MaxLogFileSize {
return fmt.Errorf("invalid max log file size=%v which is larger than max=%v", c.Log.File.MaxSize, MaxLogFileSize)
}
if c.TableColumnCountLimit < DefTableColumnCountLimit || c.TableColumnCountLimit > DefMaxOfTableColumnCountLimit {
return fmt.Errorf("table-column-limit should be [%d, %d]", DefIndexLimit, DefMaxOfTableColumnCountLimit)
}
// txn-local-latches
if err := c.TxnLocalLatches.Valid(); err != nil {
return err
}
// For tikvclient.
if err := c.TiKVClient.Valid(); err != nil {
return err
}
if err := c.TrxSummary.Valid(); err != nil {
return err
}
if c.Performance.TxnTotalSizeLimit > 1<<40 {
return fmt.Errorf("txn-total-size-limit should be less than %d", 1<<40)
}
if c.Instance.MemoryUsageAlarmRatio > 1 || c.Instance.MemoryUsageAlarmRatio < 0 {
return fmt.Errorf("tidb_memory_usage_alarm_ratio in [Instance] must be greater than or equal to 0 and less than or equal to 1")
}
if len(c.IsolationRead.Engines) < 1 {
return fmt.Errorf("the number of [isolation-read]engines for isolation read should be at least 1")
}
for _, engine := range c.IsolationRead.Engines {
if engine != "tidb" && engine != "tikv" && engine != "tiflash" {
return fmt.Errorf("type of [isolation-read]engines can't be %v should be one of tidb or tikv or tiflash", engine)
}
}
// test security
c.Security.SpilledFileEncryptionMethod = strings.ToLower(c.Security.SpilledFileEncryptionMethod)
switch c.Security.SpilledFileEncryptionMethod {
case SpilledFileEncryptionMethodPlaintext, SpilledFileEncryptionMethodAES128CTR:
default:
return fmt.Errorf("unsupported [security]spilled-file-encryption-method %v, TiDB only supports [%v, %v]",
c.Security.SpilledFileEncryptionMethod, SpilledFileEncryptionMethodPlaintext, SpilledFileEncryptionMethodAES128CTR)
}
// check stats load config
if c.Performance.StatsLoadConcurrency < DefStatsLoadConcurrencyLimit || c.Performance.StatsLoadConcurrency > DefMaxOfStatsLoadConcurrencyLimit {
return fmt.Errorf("stats-load-concurrency should be [%d, %d]", DefStatsLoadConcurrencyLimit, DefMaxOfStatsLoadConcurrencyLimit)
}
if c.Performance.StatsLoadQueueSize < DefStatsLoadQueueSizeLimit || c.Performance.StatsLoadQueueSize > DefMaxOfStatsLoadQueueSizeLimit {
return fmt.Errorf("stats-load-queue-size should be [%d, %d]", DefStatsLoadQueueSizeLimit, DefMaxOfStatsLoadQueueSizeLimit)
}
// test log level
l := zap.NewAtomicLevel()
return l.UnmarshalText([]byte(c.Log.Level))
}
// UpdateGlobal updates the global config; use RestoreFunc to obtain a function
// that restores the original value.
func UpdateGlobal(f func(conf *Config)) {
g := GetGlobalConfig()
newConf := *g
f(&newConf)
StoreGlobalConfig(&newConf)
}
// RestoreFunc returns a function that restores the config to its current value.
func RestoreFunc() (restore func()) {
g := GetGlobalConfig()
return func() {
StoreGlobalConfig(g)
}
}
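// Illustrative usage of the two helpers above (caller code, not part of this
// file; the field set below is only an example):
//
//	restore := RestoreFunc()
//	defer restore()
//	UpdateGlobal(func(conf *Config) {
//		conf.Log.Level = "debug"
//	})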
func hasRootPrivilege() bool {
return os.Geteuid() == 0
}
// TableLockEnabled checks whether the table lock feature is enabled.
func TableLockEnabled() bool {
return GetGlobalConfig().EnableTableLock
}
// TableLockDelayClean returns the delay before cleaning up table locks.
var TableLockDelayClean = func() uint64 {
return GetGlobalConfig().DelayCleanTableLock
}
// ToLogConfig converts *Log to *logutil.LogConfig.
func (l *Log) ToLogConfig() *logutil.LogConfig {
return logutil.NewLogConfig(l.Level, l.Format, l.SlowQueryFile, l.File, l.getDisableTimestamp(), func(config *zaplog.Config) { config.DisableErrorVerbose = l.getDisableErrorStack() })
}
// ToTracingConfig converts *OpenTracing to *tracing.Configuration.
func (t *OpenTracing) ToTracingConfig() *tracing.Configuration {
ret := &tracing.Configuration{
Disabled: !t.Enable,
RPCMetrics: t.RPCMetrics,
Reporter: &tracing.ReporterConfig{},
Sampler: &tracing.SamplerConfig{},
}
ret.Reporter.QueueSize = t.Reporter.QueueSize
ret.Reporter.BufferFlushInterval = t.Reporter.BufferFlushInterval
ret.Reporter.LogSpans = t.Reporter.LogSpans
ret.Reporter.LocalAgentHostPort = t.Reporter.LocalAgentHostPort
ret.Sampler.Type = t.Sampler.Type
ret.Sampler.Param = t.Sampler.Param
ret.Sampler.SamplingServerURL = t.Sampler.SamplingServerURL
ret.Sampler.MaxOperations = t.Sampler.MaxOperations
ret.Sampler.SamplingRefreshInterval = t.Sampler.SamplingRefreshInterval
return ret
}
func init() {
initByLDFlags(versioninfo.TiDBEdition, checkBeforeDropLDFlag)
}
func initByLDFlags(edition, checkBeforeDropLDFlag string) {
if edition != versioninfo.CommunityEdition {
defaultConf.EnableTelemetry = false
}
conf := defaultConf
StoreGlobalConfig(&conf)
if checkBeforeDropLDFlag == "1" {
CheckTableBeforeDrop = true
}
}
// hideConfig is used to filter a single line of config for hiding.
var hideConfig = []string{
"performance.index-usage-sync-lease",
}
// GetJSONConfig returns the config as JSON with hidden items removed.
// It replaces the earlier HideConfig(), which used strings.Split() in
// a way that didn't work for similarly named items (like enable).
func GetJSONConfig() (string, error) {
j, err := json.Marshal(GetGlobalConfig())
if err != nil {
return "", err
}
jsonValue := make(map[string]interface{})
err = json.Unmarshal(j, &jsonValue)
if err != nil {
return "", err
}
removedPaths := make([]string, 0, len(removedConfig)+len(hideConfig))
for removedItem := range removedConfig {
removedPaths = append(removedPaths, removedItem)
}
removedPaths = append(removedPaths, hideConfig...)
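	// For example, the hidden path "performance.index-usage-sync-lease" splits
	// into ["performance", "index-usage-sync-lease"]; the loop below walks the
	// JSON map down to the "performance" sub-map and deletes the final key.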
for _, path := range removedPaths {
s := strings.Split(path, ".")
curValue := jsonValue
for i, key := range s {
if i == len(s)-1 {
delete(curValue, key)
}
if curValue[key] != nil {
mapValue, ok := curValue[key].(map[string]interface{})
if !ok {
break
}
curValue = mapValue
} else {
break
}
}
}
buf, err := json.Marshal(jsonValue)
if err != nil {
return "", err
}
var resBuf bytes.Buffer
if err = json.Indent(&resBuf, buf, "", "\t"); err != nil {
return "", err
}
return resBuf.String(), nil
}
// ContainHiddenConfig checks whether the given string contains configuration items that need to be hidden.
func ContainHiddenConfig(s string) bool {
s = strings.ToLower(s)
for _, hc := range hideConfig {
if strings.Contains(s, hc) {
return true
}
}
for dc := range removedConfig {
if strings.Contains(s, dc) {
return true
}
}
return false
}
| config/config.go | 1 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.015781516209244728,
0.000493385421577841,
0.00016161066014319658,
0.00017027587455231696,
0.0015256775077432394
] |
{
"id": 8,
"code_window": [
"\n",
"\tvariable.ProcessGeneralLog.Store(cfg.Instance.TiDBGeneralLog)\n",
"\tvariable.EnablePProfSQLCPU.Store(cfg.Instance.EnablePProfSQLCPU)\n",
"\tatomic.StoreUint32(&variable.DDLSlowOprThreshold, cfg.Instance.DDLSlowOprThreshold)\n",
"\tatomic.StoreUint64(&variable.ExpensiveQueryTimeThreshold, cfg.Instance.ExpensiveQueryTimeThreshold)\n",
"\n",
"\tif len(cfg.ServerVersion) > 0 {\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tvariable.EnableRCReadCheckTS.Store(cfg.Instance.TiDBRCReadCheckTS)\n"
],
"file_path": "tidb-server/main.go",
"type": "add",
"edit_start_line_idx": 640
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package execdetails
import (
"testing"
"github.com/pingcap/tidb/testkit/testsetup"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
testsetup.SetupForCommonTest()
opts := []goleak.Option{
goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"),
goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"),
goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
}
goleak.VerifyTestMain(m, opts...)
}
| util/execdetails/main_test.go | 0 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.0001764635817380622,
0.00017257139552384615,
0.0001701653382042423,
0.00017182834562845528,
0.0000024620599106128793
] |
{
"id": 8,
"code_window": [
"\n",
"\tvariable.ProcessGeneralLog.Store(cfg.Instance.TiDBGeneralLog)\n",
"\tvariable.EnablePProfSQLCPU.Store(cfg.Instance.EnablePProfSQLCPU)\n",
"\tatomic.StoreUint32(&variable.DDLSlowOprThreshold, cfg.Instance.DDLSlowOprThreshold)\n",
"\tatomic.StoreUint64(&variable.ExpensiveQueryTimeThreshold, cfg.Instance.ExpensiveQueryTimeThreshold)\n",
"\n",
"\tif len(cfg.ServerVersion) > 0 {\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tvariable.EnableRCReadCheckTS.Store(cfg.Instance.TiDBRCReadCheckTS)\n"
],
"file_path": "tidb-server/main.go",
"type": "add",
"edit_start_line_idx": 640
} | load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "ranger",
srcs = [
"checker.go",
"detacher.go",
"points.go",
"ranger.go",
"types.go",
],
importpath = "github.com/pingcap/tidb/util/ranger",
visibility = ["//visibility:public"],
deps = [
"//errno",
"//expression",
"//kv",
"//parser/ast",
"//parser/charset",
"//parser/format",
"//parser/model",
"//parser/mysql",
"//parser/terror",
"//sessionctx",
"//sessionctx/stmtctx",
"//types",
"//types/parser_driver",
"//util/chunk",
"//util/codec",
"//util/collate",
"//util/dbterror",
"@com_github_pingcap_errors//:errors",
"@org_golang_x_exp//slices",
],
)
go_test(
name = "ranger_test",
timeout = "short",
srcs = [
"bench_test.go",
"main_test.go",
"ranger_test.go",
"types_test.go",
],
data = glob(["testdata/**"]),
flaky = True,
deps = [
":ranger",
"//config",
"//expression",
"//parser/ast",
"//parser/model",
"//parser/mysql",
"//planner/core",
"//session",
"//sessionctx",
"//sessionctx/variable",
"//testkit",
"//testkit/testdata",
"//testkit/testsetup",
"//types",
"//util/collate",
"@com_github_stretchr_testify//require",
"@org_uber_go_goleak//:goleak",
],
)
| util/ranger/BUILD.bazel | 0 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.00017528333410155028,
0.00017297746671829373,
0.00016964643145911396,
0.00017258801381103694,
0.0000017337005147055606
] |
{
"id": 8,
"code_window": [
"\n",
"\tvariable.ProcessGeneralLog.Store(cfg.Instance.TiDBGeneralLog)\n",
"\tvariable.EnablePProfSQLCPU.Store(cfg.Instance.EnablePProfSQLCPU)\n",
"\tatomic.StoreUint32(&variable.DDLSlowOprThreshold, cfg.Instance.DDLSlowOprThreshold)\n",
"\tatomic.StoreUint64(&variable.ExpensiveQueryTimeThreshold, cfg.Instance.ExpensiveQueryTimeThreshold)\n",
"\n",
"\tif len(cfg.ServerVersion) > 0 {\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tvariable.EnableRCReadCheckTS.Store(cfg.Instance.TiDBRCReadCheckTS)\n"
],
"file_path": "tidb-server/main.go",
"type": "add",
"edit_start_line_idx": 640
} | CREATE DATABASE `alter_random` /*!40100 DEFAULT CHARACTER SET utf8mb4 */;
| br/tests/lightning_alter_random/data/alter_random-schema-create.sql | 0 | https://github.com/pingcap/tidb/commit/f61024b2dc6f9e3a6f7fd4d941e19fb81f002b1e | [
0.00016156566562131047,
0.00016156566562131047,
0.00016156566562131047,
0.00016156566562131047,
0
] |
{
"id": 0,
"code_window": [
"\t\"net/http\"\n",
"\t\"net/url\"\n",
"\t\"regexp\"\n",
"\t\"strings\"\n",
"\t\"time\"\n",
"\n",
"\txhttp \"github.com/minio/minio/cmd/http\"\n",
"\t\"github.com/minio/minio/cmd/logger\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/handler-utils.go",
"type": "replace",
"edit_start_line_idx": 31
} | /*
* MinIO Cloud Storage, (C) 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"net/http"
"strings"
"sync"
"sync/atomic"
"github.com/minio/minio/cmd/logger"
"github.com/prometheus/client_golang/prometheus"
)
// ConnStats - Network statistics
// Count total input/output transferred bytes during
// the server's life.
type ConnStats struct {
totalInputBytes uint64
totalOutputBytes uint64
s3InputBytes uint64
s3OutputBytes uint64
}
// Increase total input bytes
func (s *ConnStats) incInputBytes(n int) {
atomic.AddUint64(&s.totalInputBytes, uint64(n))
}
// Increase total output bytes
func (s *ConnStats) incOutputBytes(n int) {
atomic.AddUint64(&s.totalOutputBytes, uint64(n))
}
// Return total input bytes
func (s *ConnStats) getTotalInputBytes() uint64 {
return atomic.LoadUint64(&s.totalInputBytes)
}
// Return total output bytes
func (s *ConnStats) getTotalOutputBytes() uint64 {
return atomic.LoadUint64(&s.totalOutputBytes)
}
// Increase outbound input bytes
func (s *ConnStats) incS3InputBytes(n int) {
atomic.AddUint64(&s.s3InputBytes, uint64(n))
}
// Increase outbound output bytes
func (s *ConnStats) incS3OutputBytes(n int) {
atomic.AddUint64(&s.s3OutputBytes, uint64(n))
}
// Return outbound input bytes
func (s *ConnStats) getS3InputBytes() uint64 {
return atomic.LoadUint64(&s.s3InputBytes)
}
// Return outbound output bytes
func (s *ConnStats) getS3OutputBytes() uint64 {
return atomic.LoadUint64(&s.s3OutputBytes)
}
// Return connection stats (total input/output bytes and total s3 input/output bytes)
func (s *ConnStats) toServerConnStats() ServerConnStats {
return ServerConnStats{
TotalInputBytes: s.getTotalInputBytes(),
TotalOutputBytes: s.getTotalOutputBytes(),
S3InputBytes: s.getS3InputBytes(),
S3OutputBytes: s.getS3OutputBytes(),
}
}
// Prepare new ConnStats structure
func newConnStats() *ConnStats {
return &ConnStats{}
}
// HTTPAPIStats holds statistics information about
// a given API in the requests.
type HTTPAPIStats struct {
apiStats map[string]int
sync.RWMutex
}
// Inc increments the api stats counter.
func (stats *HTTPAPIStats) Inc(api string) {
if stats == nil {
return
}
stats.Lock()
defer stats.Unlock()
if stats.apiStats == nil {
stats.apiStats = make(map[string]int)
}
stats.apiStats[api]++
}
// Dec increments the api stats counter.
func (stats *HTTPAPIStats) Dec(api string) {
if stats == nil {
return
}
stats.Lock()
defer stats.Unlock()
if val, ok := stats.apiStats[api]; ok && val > 0 {
stats.apiStats[api]--
}
}
// Load returns the recorded stats.
func (stats *HTTPAPIStats) Load() map[string]int {
stats.Lock()
defer stats.Unlock()
var apiStats = make(map[string]int, len(stats.apiStats))
for k, v := range stats.apiStats {
apiStats[k] = v
}
return apiStats
}
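// Illustrative usage (caller code, not part of this file):
//
//	var stats HTTPAPIStats
//	stats.Inc("GetObject")       // request started
//	defer stats.Dec("GetObject") // request finished
//	current := stats.Load()      // snapshot of the per-API counters
//	_ = current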
// HTTPStats holds statistics information about
// HTTP requests made by all clients
type HTTPStats struct {
currentS3Requests HTTPAPIStats
totalS3Requests HTTPAPIStats
totalS3Errors HTTPAPIStats
}
// Converts http stats into struct to be sent back to the client.
func (st *HTTPStats) toServerHTTPStats() ServerHTTPStats {
serverStats := ServerHTTPStats{}
serverStats.CurrentS3Requests = ServerHTTPAPIStats{
APIStats: st.currentS3Requests.Load(),
}
serverStats.TotalS3Requests = ServerHTTPAPIStats{
APIStats: st.totalS3Requests.Load(),
}
serverStats.TotalS3Errors = ServerHTTPAPIStats{
APIStats: st.totalS3Errors.Load(),
}
return serverStats
}
// Update statistics from http request and response data
func (st *HTTPStats) updateStats(api string, r *http.Request, w *logger.ResponseWriter, durationSecs float64) {
// A successful request has a 2xx response code
successReq := (w.StatusCode >= 200 && w.StatusCode < 300)
if !strings.HasSuffix(r.URL.Path, prometheusMetricsPath) {
st.totalS3Requests.Inc(api)
if !successReq && w.StatusCode != 0 {
st.totalS3Errors.Inc(api)
}
}
if r.Method == http.MethodGet {
// Increment the prometheus http request response histogram with appropriate label
httpRequestsDuration.With(prometheus.Labels{"api": api}).Observe(durationSecs)
}
}
// Prepare new HTTPStats structure
func newHTTPStats() *HTTPStats {
return &HTTPStats{}
}
| cmd/http-stats.go | 1 | https://github.com/minio/minio/commit/3a0082f0f18abc158e4ee6d4ce02d012a36a0901 | [
0.001785436412319541,
0.00035041876253671944,
0.00016424212662968785,
0.0001692528312560171,
0.0004619856190402061
] |
{
"id": 0,
"code_window": [
"\t\"net/http\"\n",
"\t\"net/url\"\n",
"\t\"regexp\"\n",
"\t\"strings\"\n",
"\t\"time\"\n",
"\n",
"\txhttp \"github.com/minio/minio/cmd/http\"\n",
"\t\"github.com/minio/minio/cmd/logger\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/handler-utils.go",
"type": "replace",
"edit_start_line_idx": 31
} | /*
* MinIO Cloud Storage, (C) 2019,2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/config/storageclass"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/dsync"
"github.com/minio/minio/pkg/madmin"
"github.com/minio/minio/pkg/sync/errgroup"
)
type erasureServerPools struct {
GatewayUnsupported
serverPools []*erasureSets
// Shut down async operations
shutdown context.CancelFunc
}
func (z *erasureServerPools) SingleZone() bool {
return len(z.serverPools) == 1
}
// Initialize new zone of erasure sets.
func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServerPools) (ObjectLayer, error) {
var (
deploymentID string
err error
formats = make([]*formatErasureV3, len(endpointServerPools))
storageDisks = make([][]StorageAPI, len(endpointServerPools))
z = &erasureServerPools{serverPools: make([]*erasureSets, len(endpointServerPools))}
)
var localDrives []string
local := endpointServerPools.FirstLocal()
for i, ep := range endpointServerPools {
for _, endpoint := range ep.Endpoints {
if endpoint.IsLocal {
localDrives = append(localDrives, endpoint.Path)
}
}
storageDisks[i], formats[i], err = waitForFormatErasure(local, ep.Endpoints, i+1,
ep.SetCount, ep.DrivesPerSet, deploymentID)
if err != nil {
return nil, err
}
if deploymentID == "" {
deploymentID = formats[i].ID
}
z.serverPools[i], err = newErasureSets(ctx, ep.Endpoints, storageDisks[i], formats[i])
if err != nil {
return nil, err
}
}
ctx, z.shutdown = context.WithCancel(ctx)
go intDataUpdateTracker.start(ctx, localDrives...)
return z, nil
}
func (z *erasureServerPools) NewNSLock(bucket string, objects ...string) RWLocker {
return z.serverPools[0].NewNSLock(bucket, objects...)
}
// GetDisksID will return disks by their ID.
func (z *erasureServerPools) GetDisksID(ids ...string) []StorageAPI {
idMap := make(map[string]struct{})
for _, id := range ids {
idMap[id] = struct{}{}
}
res := make([]StorageAPI, 0, len(idMap))
for _, ss := range z.serverPools {
for _, disks := range ss.erasureDisks {
for _, disk := range disks {
id, _ := disk.GetDiskID()
if _, ok := idMap[id]; ok {
res = append(res, disk)
}
}
}
}
return res
}
func (z *erasureServerPools) GetAllLockers() []dsync.NetLocker {
return z.serverPools[0].GetAllLockers()
}
func (z *erasureServerPools) SetDriveCount() int {
return z.serverPools[0].SetDriveCount()
}
type serverPoolsAvailableSpace []zoneAvailableSpace
type zoneAvailableSpace struct {
Index int
Available uint64
}
// TotalAvailable - total available space
func (p serverPoolsAvailableSpace) TotalAvailable() uint64 {
total := uint64(0)
for _, z := range p {
total += z.Available
}
return total
}
// getAvailableZoneIdx will return an index that can hold size bytes.
// -1 is returned if no serverPools have available space for the size given.
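// A zone is picked with probability proportional to its available space:
// e.g. (illustrative numbers) with two serverPools offering 100 and 300 bytes
// free, a uniform draw in [0, 400) selects zone 0 with probability 1/4 and
// zone 1 with probability 3/4.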
func (z *erasureServerPools) getAvailableZoneIdx(ctx context.Context, size int64) int {
serverPools := z.getServerPoolsAvailableSpace(ctx, size)
total := serverPools.TotalAvailable()
if total == 0 {
return -1
}
	// choose a zone once the cumulative available space reaches this value
choose := rand.Uint64() % total
atTotal := uint64(0)
for _, zone := range serverPools {
atTotal += zone.Available
if atTotal > choose && zone.Available > 0 {
return zone.Index
}
}
// Should not happen, but print values just in case.
logger.LogIf(ctx, fmt.Errorf("reached end of serverPools (total: %v, atTotal: %v, choose: %v)", total, atTotal, choose))
return -1
}
// getServerPoolsAvailableSpace will return the available space of each zone after storing the content.
// If there is not enough space the zone will return 0 bytes available.
// Negative sizes are seen as 0 bytes.
func (z *erasureServerPools) getServerPoolsAvailableSpace(ctx context.Context, size int64) serverPoolsAvailableSpace {
if size < 0 {
size = 0
}
var serverPools = make(serverPoolsAvailableSpace, len(z.serverPools))
storageInfos := make([]StorageInfo, len(z.serverPools))
g := errgroup.WithNErrs(len(z.serverPools))
for index := range z.serverPools {
index := index
g.Go(func() error {
storageInfos[index] = z.serverPools[index].StorageUsageInfo(ctx)
return nil
}, index)
}
// Wait for the go routines.
g.Wait()
for i, zinfo := range storageInfos {
var available uint64
var total uint64
for _, disk := range zinfo.Disks {
total += disk.TotalSpace
available += disk.TotalSpace - disk.UsedSpace
}
// Make sure we can fit "size" on to the disk without getting above the diskFillFraction
if available < uint64(size) {
available = 0
}
if available > 0 {
// How much will be left after adding the file.
			available -= uint64(size)
// wantLeft is how much space there at least must be left.
wantLeft := uint64(float64(total) * (1.0 - diskFillFraction))
if available <= wantLeft {
available = 0
}
}
serverPools[i] = zoneAvailableSpace{
Index: i,
Available: available,
}
}
return serverPools
}
// getZoneIdx returns the found previous object and its corresponding zone idx,
// if none are found falls back to most available space zone.
func (z *erasureServerPools) getZoneIdx(ctx context.Context, bucket, object string, opts ObjectOptions, size int64) (idx int, err error) {
if z.SingleZone() {
return 0, nil
}
for i, zone := range z.serverPools {
objInfo, err := zone.GetObjectInfo(ctx, bucket, object, opts)
switch err.(type) {
case ObjectNotFound:
// VersionId was not specified but found delete marker or no versions exist.
case MethodNotAllowed:
// VersionId was specified but found delete marker
default:
if err != nil {
// any other un-handled errors return right here.
return -1, err
}
}
		// No delete marker found means no versions
		// exist; continue to the next zone.
if !objInfo.DeleteMarker && err != nil {
continue
}
// Success case and when DeleteMarker is true return.
return i, nil
}
// We multiply the size by 2 to account for erasure coding.
idx = z.getAvailableZoneIdx(ctx, size*2)
if idx < 0 {
return -1, toObjectErr(errDiskFull)
}
return idx, nil
}
func (z *erasureServerPools) Shutdown(ctx context.Context) error {
defer z.shutdown()
g := errgroup.WithNErrs(len(z.serverPools))
for index := range z.serverPools {
index := index
g.Go(func() error {
return z.serverPools[index].Shutdown(ctx)
}, index)
}
for _, err := range g.Wait() {
if err != nil {
logger.LogIf(ctx, err)
}
		// let the rest shut down
}
return nil
}
func (z *erasureServerPools) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) {
var storageInfo StorageInfo
storageInfo.Backend.Type = BackendErasure
storageInfos := make([]StorageInfo, len(z.serverPools))
storageInfosErrs := make([][]error, len(z.serverPools))
g := errgroup.WithNErrs(len(z.serverPools))
for index := range z.serverPools {
index := index
g.Go(func() error {
storageInfos[index], storageInfosErrs[index] = z.serverPools[index].StorageInfo(ctx, local)
return nil
}, index)
}
// Wait for the go routines.
g.Wait()
for _, lstorageInfo := range storageInfos {
storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...)
storageInfo.Backend.OnlineDisks = storageInfo.Backend.OnlineDisks.Merge(lstorageInfo.Backend.OnlineDisks)
storageInfo.Backend.OfflineDisks = storageInfo.Backend.OfflineDisks.Merge(lstorageInfo.Backend.OfflineDisks)
}
scParity := globalStorageClass.GetParityForSC(storageclass.STANDARD)
if scParity == 0 {
scParity = z.SetDriveCount() / 2
}
storageInfo.Backend.StandardSCData = z.SetDriveCount() - scParity
storageInfo.Backend.StandardSCParity = scParity
rrSCParity := globalStorageClass.GetParityForSC(storageclass.RRS)
storageInfo.Backend.RRSCData = z.SetDriveCount() - rrSCParity
storageInfo.Backend.RRSCParity = rrSCParity
var errs []error
for i := range z.serverPools {
errs = append(errs, storageInfosErrs[i]...)
}
return storageInfo, errs
}
func (z *erasureServerPools) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
var wg sync.WaitGroup
var mu sync.Mutex
var results []dataUsageCache
var firstErr error
var knownBuckets = make(map[string]struct{}) // used to deduplicate buckets.
var allBuckets []BucketInfo
// Collect for each set in serverPools.
for _, z := range z.serverPools {
buckets, err := z.ListBuckets(ctx)
if err != nil {
return err
}
// Add new buckets.
for _, b := range buckets {
if _, ok := knownBuckets[b.Name]; ok {
continue
}
allBuckets = append(allBuckets, b)
knownBuckets[b.Name] = struct{}{}
}
for _, erObj := range z.sets {
wg.Add(1)
results = append(results, dataUsageCache{})
go func(i int, erObj *erasureObjects) {
updates := make(chan dataUsageCache, 1)
defer close(updates)
// Start update collector.
go func() {
defer wg.Done()
for info := range updates {
mu.Lock()
results[i] = info
mu.Unlock()
}
}()
// Start crawler. Blocks until done.
err := erObj.crawlAndGetDataUsage(ctx, buckets, bf, updates)
if err != nil {
logger.LogIf(ctx, err)
mu.Lock()
if firstErr == nil {
firstErr = err
}
// Cancel remaining...
cancel()
mu.Unlock()
return
}
}(len(results)-1, erObj)
}
}
updateCloser := make(chan chan struct{})
go func() {
updateTicker := time.NewTicker(30 * time.Second)
defer updateTicker.Stop()
var lastUpdate time.Time
// We need to merge since we will get the same buckets from each zone.
// Therefore to get the exact bucket sizes we must merge before we can convert.
var allMerged dataUsageCache
update := func() {
mu.Lock()
defer mu.Unlock()
allMerged = dataUsageCache{Info: dataUsageCacheInfo{Name: dataUsageRoot}}
for _, info := range results {
if info.Info.LastUpdate.IsZero() {
// Not filled yet.
return
}
allMerged.merge(info)
}
if allMerged.root() != nil && allMerged.Info.LastUpdate.After(lastUpdate) {
updates <- allMerged.dui(allMerged.Info.Name, allBuckets)
lastUpdate = allMerged.Info.LastUpdate
}
}
for {
select {
case <-ctx.Done():
return
case v := <-updateCloser:
update()
// Enforce quotas when all is done.
if firstErr == nil {
for _, b := range allBuckets {
enforceFIFOQuotaBucket(ctx, z, b.Name, allMerged.bucketUsageInfo(b.Name))
}
}
close(v)
return
case <-updateTicker.C:
update()
}
}
}()
wg.Wait()
ch := make(chan struct{})
select {
case updateCloser <- ch:
<-ch
case <-ctx.Done():
if firstErr == nil {
firstErr = ctx.Err()
}
}
return firstErr
}
func (z *erasureServerPools) MakeMultipleBuckets(ctx context.Context, buckets ...string) error {
g := errgroup.WithNErrs(len(z.serverPools))
// Create buckets in parallel across all sets.
for index := range z.serverPools {
index := index
g.Go(func() error {
return z.serverPools[index].MakeMultipleBuckets(ctx, buckets...)
}, index)
}
errs := g.Wait()
// Return the first encountered error
for _, err := range errs {
if err != nil {
return err
}
}
return nil
}
// MakeBucketWithLocation - creates a new bucket across all serverPools simultaneously
// even if one of the sets fail to create buckets, we proceed all the successful
// operations.
func (z *erasureServerPools) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error {
g := errgroup.WithNErrs(len(z.serverPools))
// Create buckets in parallel across all sets.
for index := range z.serverPools {
index := index
g.Go(func() error {
return z.serverPools[index].MakeBucketWithLocation(ctx, bucket, opts)
}, index)
}
errs := g.Wait()
// Return the first encountered error
for _, err := range errs {
if err != nil {
return err
}
}
// If it doesn't exist we get a new, so ignore errors
meta := newBucketMetadata(bucket)
if opts.LockEnabled {
meta.VersioningConfigXML = enabledBucketVersioningConfig
meta.ObjectLockConfigXML = enabledBucketObjectLockConfig
}
if err := meta.Save(ctx, z); err != nil {
return toObjectErr(err, bucket)
}
globalBucketMetadataSys.Set(bucket, meta)
// Success.
return nil
}
func (z *erasureServerPools) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
if err = checkGetObjArgs(ctx, bucket, object); err != nil {
return nil, err
}
object = encodeDirObject(object)
for _, zone := range z.serverPools {
gr, err = zone.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
if err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
continue
}
return gr, err
}
return gr, nil
}
if opts.VersionID != "" {
return gr, VersionNotFound{Bucket: bucket, Object: object, VersionID: opts.VersionID}
}
return gr, ObjectNotFound{Bucket: bucket, Object: object}
}
func (z *erasureServerPools) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
if err := checkGetObjArgs(ctx, bucket, object); err != nil {
return err
}
object = encodeDirObject(object)
for _, zone := range z.serverPools {
if err := zone.GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts); err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
continue
}
return err
}
return nil
}
if opts.VersionID != "" {
return VersionNotFound{Bucket: bucket, Object: object, VersionID: opts.VersionID}
}
return ObjectNotFound{Bucket: bucket, Object: object}
}
func (z *erasureServerPools) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
if err = checkGetObjArgs(ctx, bucket, object); err != nil {
return objInfo, err
}
object = encodeDirObject(object)
for _, zone := range z.serverPools {
objInfo, err = zone.GetObjectInfo(ctx, bucket, object, opts)
if err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
continue
}
return objInfo, err
}
return objInfo, nil
}
object = decodeDirObject(object)
if opts.VersionID != "" {
return objInfo, VersionNotFound{Bucket: bucket, Object: object, VersionID: opts.VersionID}
}
return objInfo, ObjectNotFound{Bucket: bucket, Object: object}
}
// PutObject - writes an object to least used erasure zone.
func (z *erasureServerPools) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (ObjectInfo, error) {
// Validate put object input args.
if err := checkPutObjectArgs(ctx, bucket, object, z); err != nil {
return ObjectInfo{}, err
}
object = encodeDirObject(object)
if z.SingleZone() {
return z.serverPools[0].PutObject(ctx, bucket, object, data, opts)
}
idx, err := z.getZoneIdx(ctx, bucket, object, opts, data.Size())
if err != nil {
return ObjectInfo{}, err
}
// Overwrite the object at the right zone
return z.serverPools[idx].PutObject(ctx, bucket, object, data, opts)
}
func (z *erasureServerPools) DeleteObject(ctx context.Context, bucket string, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
if err = checkDelObjArgs(ctx, bucket, object); err != nil {
return objInfo, err
}
object = encodeDirObject(object)
if z.SingleZone() {
return z.serverPools[0].DeleteObject(ctx, bucket, object, opts)
}
for _, zone := range z.serverPools {
objInfo, err = zone.DeleteObject(ctx, bucket, object, opts)
if err == nil {
return objInfo, nil
}
if err != nil && !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
break
}
}
return objInfo, err
}
func (z *erasureServerPools) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {
derrs := make([]error, len(objects))
dobjects := make([]DeletedObject, len(objects))
objSets := set.NewStringSet()
for i := range derrs {
objects[i].ObjectName = encodeDirObject(objects[i].ObjectName)
derrs[i] = checkDelObjArgs(ctx, bucket, objects[i].ObjectName)
objSets.Add(objects[i].ObjectName)
}
// Acquire a bulk write lock across 'objects'
multiDeleteLock := z.NewNSLock(bucket, objSets.ToSlice()...)
if err := multiDeleteLock.GetLock(ctx, globalOperationTimeout); err != nil {
for i := range derrs {
derrs[i] = err
}
return dobjects, derrs
}
defer multiDeleteLock.Unlock()
if z.SingleZone() {
return z.serverPools[0].DeleteObjects(ctx, bucket, objects, opts)
}
for _, zone := range z.serverPools {
deletedObjects, errs := zone.DeleteObjects(ctx, bucket, objects, opts)
for i, derr := range errs {
if derr != nil {
derrs[i] = derr
}
dobjects[i] = deletedObjects[i]
}
}
return dobjects, derrs
}
func (z *erasureServerPools) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
srcObject = encodeDirObject(srcObject)
dstObject = encodeDirObject(dstObject)
cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
zoneIdx, err := z.getZoneIdx(ctx, dstBucket, dstObject, dstOpts, srcInfo.Size)
if err != nil {
return objInfo, err
}
if cpSrcDstSame && srcInfo.metadataOnly {
// Version ID is set for the destination and source == destination version ID.
if dstOpts.VersionID != "" && srcOpts.VersionID == dstOpts.VersionID {
return z.serverPools[zoneIdx].CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
}
// Destination is not versioned and source version ID is empty
// perform an in-place update.
if !dstOpts.Versioned && srcOpts.VersionID == "" {
return z.serverPools[zoneIdx].CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
}
		// Destination is versioned and the source is not the destination version;
		// as a special case, check whether the source object is not a legacy
		// object from the older format. Older-format objects are rewritten as
		// newer ones using PutObject() - this is an optimization to save space.
if dstOpts.Versioned && srcOpts.VersionID != dstOpts.VersionID && !srcInfo.Legacy {
// CopyObject optimization where we don't create an entire copy
// of the content, instead we add a reference.
srcInfo.versionOnly = true
return z.serverPools[zoneIdx].CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
}
}
putOpts := ObjectOptions{
ServerSideEncryption: dstOpts.ServerSideEncryption,
UserDefined: srcInfo.UserDefined,
Versioned: dstOpts.Versioned,
VersionID: dstOpts.VersionID,
MTime: dstOpts.MTime,
}
return z.serverPools[zoneIdx].PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts)
}
func (z *erasureServerPools) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (ListObjectsV2Info, error) {
marker := continuationToken
if marker == "" {
marker = startAfter
}
loi, err := z.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
return ListObjectsV2Info{}, err
}
listObjectsV2Info := ListObjectsV2Info{
IsTruncated: loi.IsTruncated,
ContinuationToken: continuationToken,
NextContinuationToken: loi.NextMarker,
Objects: loi.Objects,
Prefixes: loi.Prefixes,
}
return listObjectsV2Info, err
}
func (z *erasureServerPools) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (ListObjectVersionsInfo, error) {
loi := ListObjectVersionsInfo{}
if marker == "" && versionMarker != "" {
return loi, NotImplemented{}
}
opts := listPathOptions{
Bucket: bucket,
Prefix: prefix,
Separator: delimiter,
Limit: maxKeys,
Marker: marker,
InclDeleted: true,
AskDisks: globalAPIConfig.getListQuorum(),
}
// Shortcut for APN/1.0 Veeam/1.0 Backup/10.0
// It requests unique blocks with a specific prefix.
// We skip scanning the parent directory for
// more objects matching the prefix.
ri := logger.GetReqInfo(ctx)
if ri != nil && strings.Contains(ri.UserAgent, `1.0 Veeam/1.0 Backup`) && strings.HasSuffix(prefix, ".blk") {
opts.singleObject = true
opts.Transient = true
}
merged, err := z.listPath(ctx, opts)
if err != nil && err != io.EOF {
return loi, err
}
loi.Objects, loi.Prefixes = merged.fileInfoVersions(bucket, prefix, delimiter, versionMarker)
loi.IsTruncated = err == nil && len(loi.Objects) > 0
if maxKeys > 0 && len(loi.Objects) > maxKeys {
loi.Objects = loi.Objects[:maxKeys]
loi.IsTruncated = true
}
if loi.IsTruncated {
last := loi.Objects[len(loi.Objects)-1]
loi.NextMarker = encodeMarker(last.Name, merged.listID)
loi.NextVersionIDMarker = last.VersionID
}
return loi, nil
}
func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
var loi ListObjectsInfo
merged, err := z.listPath(ctx, listPathOptions{
Bucket: bucket,
Prefix: prefix,
Separator: delimiter,
Limit: maxKeys,
Marker: marker,
InclDeleted: false,
AskDisks: globalAPIConfig.getListQuorum(),
})
if err != nil && err != io.EOF {
logger.LogIf(ctx, err)
return loi, err
}
// Default is recursive, if delimiter is set then list non recursive.
loi.Objects, loi.Prefixes = merged.fileInfos(bucket, prefix, delimiter)
loi.IsTruncated = err == nil && len(loi.Objects) > 0
if loi.IsTruncated {
loi.NextMarker = encodeMarker(loi.Objects[len(loi.Objects)-1].Name, merged.listID)
}
return loi, nil
}
func (z *erasureServerPools) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
if err := checkListMultipartArgs(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, z); err != nil {
return ListMultipartsInfo{}, err
}
if z.SingleZone() {
return z.serverPools[0].ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
}
var zoneResult = ListMultipartsInfo{}
zoneResult.MaxUploads = maxUploads
zoneResult.KeyMarker = keyMarker
zoneResult.Prefix = prefix
zoneResult.Delimiter = delimiter
for _, zone := range z.serverPools {
result, err := zone.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker,
delimiter, maxUploads)
if err != nil {
return result, err
}
zoneResult.Uploads = append(zoneResult.Uploads, result.Uploads...)
}
return zoneResult, nil
}
// Initiate a new multipart upload on a hashedSet based on object name.
func (z *erasureServerPools) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) {
if err := checkNewMultipartArgs(ctx, bucket, object, z); err != nil {
return "", err
}
if z.SingleZone() {
return z.serverPools[0].NewMultipartUpload(ctx, bucket, object, opts)
}
// We don't know the exact size, so we ask for at least 1GiB file.
idx, err := z.getZoneIdx(ctx, bucket, object, opts, 1<<30)
if err != nil {
return "", err
}
return z.serverPools[idx].NewMultipartUpload(ctx, bucket, object, opts)
}
// Copies a part of an object from source hashedSet to destination hashedSet.
func (z *erasureServerPools) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (PartInfo, error) {
if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, z); err != nil {
return PartInfo{}, err
}
return z.PutObjectPart(ctx, destBucket, destObject, uploadID, partID,
NewPutObjReader(srcInfo.Reader, nil, nil), dstOpts)
}
// PutObjectPart - writes part of an object to hashedSet based on the object name.
func (z *erasureServerPools) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (PartInfo, error) {
if err := checkPutObjectPartArgs(ctx, bucket, object, z); err != nil {
return PartInfo{}, err
}
if z.SingleZone() {
return z.serverPools[0].PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
}
for _, zone := range z.serverPools {
_, err := zone.GetMultipartInfo(ctx, bucket, object, uploadID, opts)
if err == nil {
return zone.PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
}
switch err.(type) {
case InvalidUploadID:
// Look for information on the next zone
continue
}
// Any other unhandled errors such as quorum return.
return PartInfo{}, err
}
return PartInfo{}, InvalidUploadID{
Bucket: bucket,
Object: object,
UploadID: uploadID,
}
}
func (z *erasureServerPools) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) {
if err := checkListPartsArgs(ctx, bucket, object, z); err != nil {
return MultipartInfo{}, err
}
if z.SingleZone() {
return z.serverPools[0].GetMultipartInfo(ctx, bucket, object, uploadID, opts)
}
for _, zone := range z.serverPools {
mi, err := zone.GetMultipartInfo(ctx, bucket, object, uploadID, opts)
if err == nil {
return mi, nil
}
switch err.(type) {
case InvalidUploadID:
// upload id not found, continue to the next zone.
continue
}
// any other unhandled error return right here.
return MultipartInfo{}, err
}
return MultipartInfo{}, InvalidUploadID{
Bucket: bucket,
Object: object,
UploadID: uploadID,
}
}
// ListObjectParts - lists all uploaded parts to an object in hashedSet.
func (z *erasureServerPools) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (ListPartsInfo, error) {
if err := checkListPartsArgs(ctx, bucket, object, z); err != nil {
return ListPartsInfo{}, err
}
if z.SingleZone() {
return z.serverPools[0].ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
}
for _, zone := range z.serverPools {
_, err := zone.GetMultipartInfo(ctx, bucket, object, uploadID, opts)
if err == nil {
return zone.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
}
switch err.(type) {
case InvalidUploadID:
continue
}
return ListPartsInfo{}, err
}
return ListPartsInfo{}, InvalidUploadID{
Bucket: bucket,
Object: object,
UploadID: uploadID,
}
}
// Aborts an in-progress multipart operation on hashedSet based on the object name.
func (z *erasureServerPools) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error {
if err := checkAbortMultipartArgs(ctx, bucket, object, z); err != nil {
return err
}
if z.SingleZone() {
return z.serverPools[0].AbortMultipartUpload(ctx, bucket, object, uploadID, opts)
}
for _, zone := range z.serverPools {
_, err := zone.GetMultipartInfo(ctx, bucket, object, uploadID, opts)
if err == nil {
return zone.AbortMultipartUpload(ctx, bucket, object, uploadID, opts)
}
switch err.(type) {
case InvalidUploadID:
// upload id not found move to next zone
continue
}
return err
}
return InvalidUploadID{
Bucket: bucket,
Object: object,
UploadID: uploadID,
}
}
// CompleteMultipartUpload - completes a pending multipart transaction, on hashedSet based on object name.
func (z *erasureServerPools) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
if err = checkCompleteMultipartArgs(ctx, bucket, object, z); err != nil {
return objInfo, err
}
if z.SingleZone() {
return z.serverPools[0].CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
}
// Purge any existing object.
for _, zone := range z.serverPools {
zone.DeleteObject(ctx, bucket, object, opts)
}
for _, zone := range z.serverPools {
result, err := zone.ListMultipartUploads(ctx, bucket, object, "", "", "", maxUploadsList)
if err != nil {
return objInfo, err
}
if result.Lookup(uploadID) {
return zone.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
}
}
return objInfo, InvalidUploadID{
Bucket: bucket,
Object: object,
UploadID: uploadID,
}
}
// GetBucketInfo - returns bucket info from one of the erasure coded serverPools.
func (z *erasureServerPools) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
if z.SingleZone() {
bucketInfo, err = z.serverPools[0].GetBucketInfo(ctx, bucket)
if err != nil {
return bucketInfo, err
}
meta, err := globalBucketMetadataSys.Get(bucket)
if err == nil {
bucketInfo.Created = meta.Created
}
return bucketInfo, nil
}
for _, zone := range z.serverPools {
bucketInfo, err = zone.GetBucketInfo(ctx, bucket)
if err != nil {
if isErrBucketNotFound(err) {
continue
}
return bucketInfo, err
}
meta, err := globalBucketMetadataSys.Get(bucket)
if err == nil {
bucketInfo.Created = meta.Created
}
return bucketInfo, nil
}
return bucketInfo, BucketNotFound{
Bucket: bucket,
}
}
// IsNotificationSupported returns whether bucket notification is applicable for this layer.
func (z *erasureServerPools) IsNotificationSupported() bool {
return true
}
// IsListenSupported returns whether listen bucket notification is applicable for this layer.
func (z *erasureServerPools) IsListenSupported() bool {
return true
}
// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
func (z *erasureServerPools) IsEncryptionSupported() bool {
return true
}
// IsCompressionSupported returns whether compression is applicable for this layer.
func (z *erasureServerPools) IsCompressionSupported() bool {
return true
}
func (z *erasureServerPools) IsTaggingSupported() bool {
return true
}
// DeleteBucket - deletes a bucket on all serverPools simultaneously;
// if one of the serverPools fails to delete the bucket, we undo the
// successful delete operations.
func (z *erasureServerPools) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error {
if z.SingleZone() {
return z.serverPools[0].DeleteBucket(ctx, bucket, forceDelete)
}
g := errgroup.WithNErrs(len(z.serverPools))
// Delete buckets in parallel across all serverPools.
for index := range z.serverPools {
index := index
g.Go(func() error {
return z.serverPools[index].DeleteBucket(ctx, bucket, forceDelete)
}, index)
}
errs := g.Wait()
// For any write quorum failure, we undo all the delete
// buckets operation by creating all the buckets again.
for _, err := range errs {
if err != nil {
if _, ok := err.(InsufficientWriteQuorum); ok {
undoDeleteBucketServerPools(ctx, bucket, z.serverPools, errs)
}
return err
}
}
// Success.
return nil
}
// deleteAll will delete a bucket+prefix unconditionally across all disks.
// Note that set distribution is ignored so it should only be used in cases where
// data is not distributed across sets.
// Errors are logged but individual disk failures are not returned.
func (z *erasureServerPools) deleteAll(ctx context.Context, bucket, prefix string) {
var wg sync.WaitGroup
for _, servers := range z.serverPools {
for _, set := range servers.sets {
for _, disk := range set.getDisks() {
if disk == nil {
continue
}
wg.Add(1)
go func(disk StorageAPI) {
defer wg.Done()
disk.Delete(ctx, bucket, prefix, true)
}(disk)
}
}
}
wg.Wait()
}
// This function is used to undo a successful DeleteBucket operation.
func undoDeleteBucketServerPools(ctx context.Context, bucket string, serverPools []*erasureSets, errs []error) {
g := errgroup.WithNErrs(len(serverPools))
// Undo previous delete bucket on all underlying serverPools.
for index := range serverPools {
index := index
g.Go(func() error {
if errs[index] == nil {
return serverPools[index].MakeBucketWithLocation(ctx, bucket, BucketOptions{})
}
return nil
}, index)
}
g.Wait()
}
// List all buckets from one of the serverPools; we are not doing a merge
// sort here just for simplification. As per the design, it is assumed
// that all buckets are present on all serverPools.
func (z *erasureServerPools) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) {
if z.SingleZone() {
buckets, err = z.serverPools[0].ListBuckets(ctx)
} else {
for _, zone := range z.serverPools {
buckets, err = zone.ListBuckets(ctx)
if err != nil {
logger.LogIf(ctx, err)
continue
}
break
}
}
if err != nil {
return nil, err
}
for i := range buckets {
meta, err := globalBucketMetadataSys.Get(buckets[i].Name)
if err == nil {
buckets[i].Created = meta.Created
}
}
return buckets, nil
}
func (z *erasureServerPools) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
// Acquire lock on format.json
formatLock := z.NewNSLock(minioMetaBucket, formatConfigFile)
if err := formatLock.GetLock(ctx, globalOperationTimeout); err != nil {
return madmin.HealResultItem{}, err
}
defer formatLock.Unlock()
var r = madmin.HealResultItem{
Type: madmin.HealItemMetadata,
Detail: "disk-format",
}
var countNoHeal int
for _, zone := range z.serverPools {
result, err := zone.HealFormat(ctx, dryRun)
if err != nil && !errors.Is(err, errNoHealRequired) {
logger.LogIf(ctx, err)
continue
}
// Count errNoHealRequired across all serverPools,
// to return appropriate error to the caller
if errors.Is(err, errNoHealRequired) {
countNoHeal++
}
r.DiskCount += result.DiskCount
r.SetCount += result.SetCount
r.Before.Drives = append(r.Before.Drives, result.Before.Drives...)
r.After.Drives = append(r.After.Drives, result.After.Drives...)
}
// No heal returned by all serverPools, return errNoHealRequired
if countNoHeal == len(z.serverPools) {
return r, errNoHealRequired
}
return r, nil
}
func (z *erasureServerPools) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (madmin.HealResultItem, error) {
var r = madmin.HealResultItem{
Type: madmin.HealItemBucket,
Bucket: bucket,
}
for _, zone := range z.serverPools {
result, err := zone.HealBucket(ctx, bucket, dryRun, remove)
if err != nil {
switch err.(type) {
case BucketNotFound:
continue
}
return result, err
}
r.DiskCount += result.DiskCount
r.SetCount += result.SetCount
r.Before.Drives = append(r.Before.Drives, result.Before.Drives...)
r.After.Drives = append(r.After.Drives, result.After.Drives...)
}
return r, nil
}
// Walk a bucket, optionally a prefix, recursively, until all of the content
// has been returned on the objectInfo channel. It is the caller's
// responsibility to allocate a receive channel for ObjectInfo; upon any
// unhandled error the walker returns an error. Optionally, if context.Done()
// is received, then Walk() stops the walker.
func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts ObjectOptions) error {
if err := checkListObjsArgs(ctx, bucket, prefix, "", z); err != nil {
// Upon error close the channel.
close(results)
return err
}
if opts.WalkVersions {
go func() {
defer close(results)
var marker, versionIDMarker string
for {
loi, err := z.ListObjectVersions(ctx, bucket, prefix, marker, versionIDMarker, "", 1000)
if err != nil {
break
}
for _, obj := range loi.Objects {
results <- obj
}
if !loi.IsTruncated {
break
}
marker = loi.NextMarker
versionIDMarker = loi.NextVersionIDMarker
}
}()
return nil
}
go func() {
defer close(results)
var marker string
for {
loi, err := z.ListObjects(ctx, bucket, prefix, marker, "", 1000)
if err != nil {
break
}
for _, obj := range loi.Objects {
results <- obj
}
if !loi.IsTruncated {
break
}
marker = loi.NextMarker
}
}()
return nil
}
// HealObjectFn closure function heals the object.
type HealObjectFn func(bucket, object, versionID string) error
func (z *erasureServerPools) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, healObject HealObjectFn) error {
	// If listing did not return any entries upon the first attempt, we
	// return `ObjectNotFound` to indicate to the caller any actions
	// they may want to take, as if `prefix` were missing.
err := toObjectErr(errFileNotFound, bucket, prefix)
for _, erasureSet := range z.serverPools {
for _, set := range erasureSet.sets {
var entryChs []FileInfoVersionsCh
var mu sync.Mutex
var wg sync.WaitGroup
for _, disk := range set.getOnlineDisks() {
disk := disk
wg.Add(1)
go func() {
defer wg.Done()
entryCh, err := disk.WalkVersions(ctx, bucket, prefix, "", true, ctx.Done())
if err != nil {
// Disk walk returned error, ignore it.
return
}
mu.Lock()
entryChs = append(entryChs, FileInfoVersionsCh{
Ch: entryCh,
})
mu.Unlock()
}()
}
wg.Wait()
entriesValid := make([]bool, len(entryChs))
entries := make([]FileInfoVersions, len(entryChs))
for {
entry, quorumCount, ok := lexicallySortedEntryVersions(entryChs, entries, entriesValid)
if !ok {
break
}
				// Indicate that the first attempt was a success, so the subsequent loop
				// knows that it's not our first attempt at 'prefix'.
err = nil
if quorumCount == z.SetDriveCount() && opts.ScanMode == madmin.HealNormalScan {
continue
}
for _, version := range entry.Versions {
if err := healObject(bucket, version.Name, version.VersionID); err != nil {
return toObjectErr(err, bucket, version.Name)
}
}
}
}
}
return err
}
func (z *erasureServerPools) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
object = encodeDirObject(object)
lk := z.NewNSLock(bucket, object)
if bucket == minioMetaBucket {
// For .minio.sys bucket heals we should hold write locks.
if err := lk.GetLock(ctx, globalOperationTimeout); err != nil {
return madmin.HealResultItem{}, err
}
defer lk.Unlock()
} else {
// Lock the object before healing. Use read lock since healing
// will only regenerate parts & xl.meta of outdated disks.
if err := lk.GetRLock(ctx, globalOperationTimeout); err != nil {
return madmin.HealResultItem{}, err
}
defer lk.RUnlock()
}
for _, zone := range z.serverPools {
result, err := zone.HealObject(ctx, bucket, object, versionID, opts)
if err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
continue
}
return result, err
}
return result, nil
}
if versionID != "" {
return madmin.HealResultItem{}, VersionNotFound{
Bucket: bucket,
Object: object,
VersionID: versionID,
}
}
return madmin.HealResultItem{}, ObjectNotFound{
Bucket: bucket,
Object: object,
}
}
func (z *erasureServerPools) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) {
var healBuckets []BucketInfo
for _, zone := range z.serverPools {
bucketsInfo, err := zone.ListBucketsHeal(ctx)
if err != nil {
continue
}
healBuckets = append(healBuckets, bucketsInfo...)
}
for i := range healBuckets {
meta, err := globalBucketMetadataSys.Get(healBuckets[i].Name)
if err == nil {
healBuckets[i].Created = meta.Created
}
}
return healBuckets, nil
}
// GetMetrics - no op
func (z *erasureServerPools) GetMetrics(ctx context.Context) (*Metrics, error) {
logger.LogIf(ctx, NotImplemented{})
return &Metrics{}, NotImplemented{}
}
func (z *erasureServerPools) getZoneAndSet(id string) (int, int, error) {
for zoneIdx := range z.serverPools {
format := z.serverPools[zoneIdx].format
for setIdx, set := range format.Erasure.Sets {
for _, diskID := range set {
if diskID == id {
return zoneIdx, setIdx, nil
}
}
}
}
return 0, 0, fmt.Errorf("DiskID(%s) %w", id, errDiskNotFound)
}
// HealthOptions takes input options to return specific information.
type HealthOptions struct {
Maintenance bool
}
// HealthResult returns the current state of the system, along
// with any specific heuristic information that was queried.
type HealthResult struct {
Healthy bool
HealingDrives int
ZoneID, SetID int
WriteQuorum int
}
// Health - returns current status of the object layer health,
// provides if write access exists across sets, additionally
// can be used to query scenarios if health may be lost
// if this node is taken down by an external orchestrator.
func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) HealthResult {
erasureSetUpCount := make([][]int, len(z.serverPools))
for i := range z.serverPools {
erasureSetUpCount[i] = make([]int, len(z.serverPools[i].sets))
}
diskIDs := globalNotificationSys.GetLocalDiskIDs(ctx)
if !opts.Maintenance {
diskIDs = append(diskIDs, getLocalDiskIDs(z))
}
for _, localDiskIDs := range diskIDs {
for _, id := range localDiskIDs {
zoneIdx, setIdx, err := z.getZoneAndSet(id)
if err != nil {
logger.LogIf(ctx, err)
continue
}
erasureSetUpCount[zoneIdx][setIdx]++
}
}
reqInfo := (&logger.ReqInfo{}).AppendTags("maintenance", strconv.FormatBool(opts.Maintenance))
parityDrives := globalStorageClass.GetParityForSC(storageclass.STANDARD)
diskCount := z.SetDriveCount()
if parityDrives == 0 {
parityDrives = getDefaultParityBlocks(diskCount)
}
dataDrives := diskCount - parityDrives
writeQuorum := dataDrives
if dataDrives == parityDrives {
writeQuorum++
}
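	// Illustrative numbers: with a 16-drive set and standard parity 4,
	// dataDrives = 12 and writeQuorum = 12; with parity 8 (dataDrives ==
	// parityDrives), writeQuorum becomes 9.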
var aggHealStateResult madmin.BgHealState
if opts.Maintenance {
		// Check if local disks are being healed; if they are, we need to
		// report the healthy status as 'false' so that this server
		// is not taken down for maintenance.
var err error
aggHealStateResult, err = getAggregatedBackgroundHealState(ctx)
if err != nil {
logger.LogIf(logger.SetReqInfo(ctx, reqInfo), fmt.Errorf("Unable to verify global heal status: %w", err))
return HealthResult{
Healthy: false,
}
}
if len(aggHealStateResult.HealDisks) > 0 {
logger.LogIf(logger.SetReqInfo(ctx, reqInfo), fmt.Errorf("Total drives to be healed %d", len(aggHealStateResult.HealDisks)))
}
}
for zoneIdx := range erasureSetUpCount {
for setIdx := range erasureSetUpCount[zoneIdx] {
if erasureSetUpCount[zoneIdx][setIdx] < writeQuorum {
logger.LogIf(logger.SetReqInfo(ctx, reqInfo),
fmt.Errorf("Write quorum may be lost on zone: %d, set: %d, expected write quorum: %d",
zoneIdx, setIdx, writeQuorum))
return HealthResult{
Healthy: false,
HealingDrives: len(aggHealStateResult.HealDisks),
ZoneID: zoneIdx,
SetID: setIdx,
WriteQuorum: writeQuorum,
}
}
}
}
// when maintenance is not specified we don't have
// to look at the healing side of the code.
if !opts.Maintenance {
return HealthResult{
Healthy: true,
WriteQuorum: writeQuorum,
}
}
return HealthResult{
Healthy: len(aggHealStateResult.HealDisks) == 0,
HealingDrives: len(aggHealStateResult.HealDisks),
WriteQuorum: writeQuorum,
}
}
// PutObjectTags - replace or add tags to an existing object
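// When more than one zone is configured, the object is looked up zone by
// zone; not-found errors simply move the search on to the next zone.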
func (z *erasureServerPools) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) error {
object = encodeDirObject(object)
if z.SingleZone() {
return z.serverPools[0].PutObjectTags(ctx, bucket, object, tags, opts)
}
for _, zone := range z.serverPools {
err := zone.PutObjectTags(ctx, bucket, object, tags, opts)
if err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
continue
}
return err
}
return nil
}
if opts.VersionID != "" {
return VersionNotFound{
Bucket: bucket,
Object: object,
VersionID: opts.VersionID,
}
}
return ObjectNotFound{
Bucket: bucket,
Object: object,
}
}
// DeleteObjectTags - delete object tags from an existing object
func (z *erasureServerPools) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) error {
object = encodeDirObject(object)
if z.SingleZone() {
return z.serverPools[0].DeleteObjectTags(ctx, bucket, object, opts)
}
for _, zone := range z.serverPools {
err := zone.DeleteObjectTags(ctx, bucket, object, opts)
if err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
continue
}
return err
}
return nil
}
if opts.VersionID != "" {
return VersionNotFound{
Bucket: bucket,
Object: object,
VersionID: opts.VersionID,
}
}
return ObjectNotFound{
Bucket: bucket,
Object: object,
}
}
// GetObjectTags - get object tags from an existing object
func (z *erasureServerPools) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) {
object = encodeDirObject(object)
if z.SingleZone() {
return z.serverPools[0].GetObjectTags(ctx, bucket, object, opts)
}
for _, zone := range z.serverPools {
tags, err := zone.GetObjectTags(ctx, bucket, object, opts)
if err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
continue
}
return tags, err
}
return tags, nil
}
if opts.VersionID != "" {
return nil, VersionNotFound{
Bucket: bucket,
Object: object,
VersionID: opts.VersionID,
}
}
return nil, ObjectNotFound{
Bucket: bucket,
Object: object,
}
}
| cmd/erasure-server-sets.go | 0 | https://github.com/minio/minio/commit/3a0082f0f18abc158e4ee6d4ce02d012a36a0901 | [
0.003532425034791231,
0.00020445480186026543,
0.00016221198893617839,
0.00017125747399404645,
0.00027324637630954385
] |
{
"id": 0,
"code_window": [
"\t\"net/http\"\n",
"\t\"net/url\"\n",
"\t\"regexp\"\n",
"\t\"strings\"\n",
"\t\"time\"\n",
"\n",
"\txhttp \"github.com/minio/minio/cmd/http\"\n",
"\t\"github.com/minio/minio/cmd/logger\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/handler-utils.go",
"type": "replace",
"edit_start_line_idx": 31
} | /*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package openid
import (
"crypto"
"github.com/dgrijalva/jwt-go"
// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
_ "golang.org/x/crypto/sha3"
)
// Specific signing method instances for ES3256 and company (ECDSA over SHA-3)
var (
SigningMethodES3256 *jwt.SigningMethodECDSA
SigningMethodES3384 *jwt.SigningMethodECDSA
SigningMethodES3512 *jwt.SigningMethodECDSA
)
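// init registers the SHA-3 based ECDSA signing methods with the jwt library
// so that tokens whose "alg" header is "ES3256", "ES3384" or "ES3512" can be
// signed and verified.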
func init() {
// ES256
SigningMethodES3256 = &jwt.SigningMethodECDSA{Name: "ES3256", Hash: crypto.SHA3_256, KeySize: 32, CurveBits: 256}
jwt.RegisterSigningMethod(SigningMethodES3256.Alg(), func() jwt.SigningMethod {
return SigningMethodES3256
})
// ES384
SigningMethodES3384 = &jwt.SigningMethodECDSA{Name: "ES3384", Hash: crypto.SHA3_384, KeySize: 48, CurveBits: 384}
jwt.RegisterSigningMethod(SigningMethodES3384.Alg(), func() jwt.SigningMethod {
return SigningMethodES3384
})
// ES512
SigningMethodES3512 = &jwt.SigningMethodECDSA{Name: "ES3512", Hash: crypto.SHA3_512, KeySize: 66, CurveBits: 521}
jwt.RegisterSigningMethod(SigningMethodES3512.Alg(), func() jwt.SigningMethod {
return SigningMethodES3512
})
}
| cmd/config/identity/openid/ecdsa-sha3.go | 0 | https://github.com/minio/minio/commit/3a0082f0f18abc158e4ee6d4ce02d012a36a0901 | [
0.00017369858687743545,
0.00016983783280011266,
0.0001643425493966788,
0.00017070025205612183,
0.0000033204501050931867
] |
{
"id": 0,
"code_window": [
"\t\"net/http\"\n",
"\t\"net/url\"\n",
"\t\"regexp\"\n",
"\t\"strings\"\n",
"\t\"time\"\n",
"\n",
"\txhttp \"github.com/minio/minio/cmd/http\"\n",
"\t\"github.com/minio/minio/cmd/logger\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/handler-utils.go",
"type": "replace",
"edit_start_line_idx": 31
} | /*
* MinIO Cloud Storage (C) 2016 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var webpack = require('webpack')
var path = require('path')
var glob = require('glob-all')
var CopyWebpackPlugin = require('copy-webpack-plugin')
var PurgecssPlugin = require('purgecss-webpack-plugin')
var exports = {
context: __dirname,
mode: 'development',
entry: [
path.resolve(__dirname, 'app/index.js')
],
output: {
path: path.resolve(__dirname, 'dev'),
filename: 'index_bundle.js',
publicPath: '/minio/'
},
module: {
rules: [{
test: /\.js$/,
exclude: /(node_modules|bower_components)/,
use: [{
loader: 'babel-loader',
options: {
presets: ['react', 'es2015']
}
}]
}, {
test: /\.less$/,
use: [{
loader: 'style-loader'
}, {
loader: 'css-loader'
}, {
loader: 'less-loader'
}]
}, {
test: /\.css$/,
use: [{
loader: 'style-loader'
}, {
loader: 'css-loader'
}]
}, {
test: /\.(eot|woff|woff2|ttf|svg|png)/,
use: [{
loader: 'url-loader'
}]
}]
},
node:{
fs:'empty'
},
devServer: {
historyApiFallback: {
index: '/minio/'
},
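    // Forward the browser's webrpc, upload, download and zip requests to a
    // MinIO server expected to be running at localhost:9000.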
proxy: {
'/minio/webrpc': {
target: 'http://localhost:9000',
secure: false,
headers: {'Host': "localhost:9000"}
},
'/minio/upload/*': {
target: 'http://localhost:9000',
secure: false
},
'/minio/download/*': {
target: 'http://localhost:9000',
secure: false
},
'/minio/zip': {
target: 'http://localhost:9000',
secure: false
}
}
},
plugins: [
new CopyWebpackPlugin({patterns: [
{from: 'app/css/loader.css'},
{from: 'app/img/browsers/chrome.png'},
{from: 'app/img/browsers/firefox.png'},
{from: 'app/img/browsers/safari.png'},
{from: 'app/img/logo.svg'},
{from: 'app/img/favicon/favicon-16x16.png'},
{from: 'app/img/favicon/favicon-32x32.png'},
{from: 'app/img/favicon/favicon-96x96.png'},
{from: 'app/index.html'}
]}),
new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
new PurgecssPlugin({
paths: glob.sync([
path.join(__dirname, 'app/index.html'),
path.join(__dirname, 'app/js/*.js')
])
})
]
}
if (process.env.NODE_ENV === 'dev') {
exports.entry = [
'webpack-dev-server/client?http://localhost:8080',
path.resolve(__dirname, 'app/index.js')
]
}
module.exports = exports
| browser/webpack.config.js | 0 | https://github.com/minio/minio/commit/3a0082f0f18abc158e4ee6d4ce02d012a36a0901 | [
0.00021610841213259846,
0.00017472340550739318,
0.0001660360867390409,
0.00017149564519058913,
0.000012502065146691166
] |
{
"id": 1,
"code_window": [
"\n",
"\t\tf.ServeHTTP(statsWriter, r)\n",
"\n",
"\t\t// Time duration in secs since the call started.\n",
"\t\t// We don't need to do nanosecond precision in this\n",
"\t\t// simply for the fact that it is not human readable.\n",
"\t\tdurationSecs := time.Since(statsWriter.StartTime).Seconds()\n",
"\n",
"\t\tglobalHTTPStats.updateStats(api, r, statsWriter, durationSecs)\n",
"\t}\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tglobalHTTPStats.updateStats(api, r, statsWriter)\n"
],
"file_path": "cmd/handler-utils.go",
"type": "replace",
"edit_start_line_idx": 381
} | /*
* MinIO Cloud Storage, (C) 2015-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net"
"net/http"
"net/url"
"regexp"
"strings"
"time"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/handlers"
"github.com/minio/minio/pkg/madmin"
)
const (
copyDirective = "COPY"
replaceDirective = "REPLACE"
)
// Parses location constraint from the incoming reader.
func parseLocationConstraint(r *http.Request) (location string, s3Error APIErrorCode) {
// If the request has no body with content-length set to 0,
// we do not have to validate location constraint. Bucket will
// be created at default region.
locationConstraint := createBucketLocationConfiguration{}
err := xmlDecoder(r.Body, &locationConstraint, r.ContentLength)
if err != nil && r.ContentLength != 0 {
logger.LogIf(GlobalContext, err)
// Treat all other failures as XML parsing errors.
return "", ErrMalformedXML
} // else for both err as nil or io.EOF
location = locationConstraint.Location
if location == "" {
location = globalServerRegion
}
return location, ErrNone
}
// Validates input location is same as configured region
// of MinIO server.
func isValidLocation(location string) bool {
return globalServerRegion == "" || globalServerRegion == location
}
// Supported headers that needs to be extracted.
var supportedHeaders = []string{
"content-type",
"cache-control",
"content-language",
"content-encoding",
"content-disposition",
xhttp.AmzStorageClass,
xhttp.AmzObjectTagging,
"expires",
xhttp.AmzBucketReplicationStatus,
// Add more supported headers here.
}
// isDirectiveValid - check if tagging-directive is valid.
func isDirectiveValid(v string) bool {
// Check if set metadata-directive is valid.
return isDirectiveCopy(v) || isDirectiveReplace(v)
}
// Check if the directive COPY is requested.
func isDirectiveCopy(value string) bool {
// By default if directive is not set we
// treat it as 'COPY' this function returns true.
return value == copyDirective || value == ""
}
// Check if the directive REPLACE is requested.
func isDirectiveReplace(value string) bool {
return value == replaceDirective
}
// userMetadataKeyPrefixes contains the prefixes of user-defined metadata keys.
// All values stored with a key starting with one of the following prefixes
// must be extracted from the header.
var userMetadataKeyPrefixes = []string{
"X-Amz-Meta-",
"X-Minio-Meta-",
"x-amz-meta-",
"x-minio-meta-",
}
// extractMetadata extracts metadata from HTTP header and HTTP queryString.
func extractMetadata(ctx context.Context, r *http.Request) (metadata map[string]string, err error) {
query := r.URL.Query()
header := r.Header
metadata = make(map[string]string)
// Extract all query values.
err = extractMetadataFromMap(ctx, query, metadata)
if err != nil {
return nil, err
}
// Extract all header values.
err = extractMetadataFromMap(ctx, header, metadata)
if err != nil {
return nil, err
}
// Set content-type to default value if it is not set.
if _, ok := metadata[strings.ToLower(xhttp.ContentType)]; !ok {
metadata[strings.ToLower(xhttp.ContentType)] = "application/octet-stream"
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
for k := range metadata {
if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) {
delete(metadata, k)
}
}
if contentEncoding, ok := metadata[strings.ToLower(xhttp.ContentEncoding)]; ok {
contentEncoding = trimAwsChunkedContentEncoding(contentEncoding)
if contentEncoding != "" {
// Make sure to trim and save the content-encoding
// parameter for a streaming signature which is set
// to a custom value for example: "aws-chunked,gzip".
metadata[strings.ToLower(xhttp.ContentEncoding)] = contentEncoding
} else {
// Trimmed content encoding is empty when the header
// value is set to "aws-chunked" only.
// Make sure to delete the content-encoding parameter
// for a streaming signature which is set to value
// for example: "aws-chunked"
delete(metadata, strings.ToLower(xhttp.ContentEncoding))
}
}
// Success.
return metadata, nil
}
// extractMetadata extracts metadata from map values.
func extractMetadataFromMap(ctx context.Context, v map[string][]string, m map[string]string) error {
if v == nil {
logger.LogIf(ctx, errInvalidArgument)
return errInvalidArgument
}
// Save all supported headers.
for _, supportedHeader := range supportedHeaders {
if value, ok := v[http.CanonicalHeaderKey(supportedHeader)]; ok {
m[supportedHeader] = value[0]
} else if value, ok := v[supportedHeader]; ok {
m[supportedHeader] = value[0]
}
}
for key := range v {
for _, prefix := range userMetadataKeyPrefixes {
if !strings.HasPrefix(strings.ToLower(key), strings.ToLower(prefix)) {
continue
}
value, ok := v[key]
if ok {
m[key] = strings.Join(value, ",")
break
}
}
}
return nil
}
// The query string for the redirect URL the client is
// redirected to on successful upload.
func getRedirectPostRawQuery(objInfo ObjectInfo) string {
redirectValues := make(url.Values)
redirectValues.Set("bucket", objInfo.Bucket)
redirectValues.Set("key", objInfo.Name)
redirectValues.Set("etag", "\""+objInfo.ETag+"\"")
return redirectValues.Encode()
}
// Returns access credentials in the request Authorization header.
func getReqAccessCred(r *http.Request, region string) (cred auth.Credentials) {
cred, _, _ = getReqAccessKeyV4(r, region, serviceS3)
if cred.AccessKey == "" {
cred, _, _ = getReqAccessKeyV2(r)
}
if cred.AccessKey == "" {
claims, owner, _ := webRequestAuthenticate(r)
if owner {
return globalActiveCred
}
if claims != nil {
cred, _ = globalIAMSys.GetUser(claims.AccessKey)
}
}
return cred
}
// Extract request params to be sent with event notification.
func extractReqParams(r *http.Request) map[string]string {
if r == nil {
return nil
}
region := globalServerRegion
cred := getReqAccessCred(r, region)
// Success.
return map[string]string{
"region": region,
"accessKey": cred.AccessKey,
"sourceIPAddress": handlers.GetSourceIP(r),
// Add more fields here.
}
}
// Extract response elements to be sent with event notification.
func extractRespElements(w http.ResponseWriter) map[string]string {
if w == nil {
return map[string]string{}
}
return map[string]string{
"requestId": w.Header().Get(xhttp.AmzRequestID),
"content-length": w.Header().Get(xhttp.ContentLength),
// Add more fields here.
}
}
// Trims away `aws-chunked` from the content-encoding header if present.
// Streaming signature clients can have custom content-encoding such as
// `aws-chunked,gzip` here we need to only save `gzip`.
// For more refer http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
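// For example, "aws-chunked,gzip" is trimmed to just "gzip", while a bare
// "aws-chunked" is trimmed to the empty string.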
func trimAwsChunkedContentEncoding(contentEnc string) (trimmedContentEnc string) {
if contentEnc == "" {
return contentEnc
}
var newEncs []string
for _, enc := range strings.Split(contentEnc, ",") {
if enc != streamingContentEncoding {
newEncs = append(newEncs, enc)
}
}
return strings.Join(newEncs, ",")
}
// Validate form field size for s3 specification requirement.
func validateFormFieldSize(ctx context.Context, formValues http.Header) error {
// Iterate over form values
for k := range formValues {
// Check if value's field exceeds S3 limit
if int64(len(formValues.Get(k))) > maxFormFieldSize {
logger.LogIf(ctx, errSizeUnexpected)
return errSizeUnexpected
}
}
// Success.
return nil
}
// Extract form fields and file data from an HTTP POST Policy
func extractPostPolicyFormValues(ctx context.Context, form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) {
/// HTML Form values
fileName = ""
// Canonicalize the form values into http.Header.
formValues = make(http.Header)
for k, v := range form.Value {
formValues[http.CanonicalHeaderKey(k)] = v
}
// Validate form values.
if err = validateFormFieldSize(ctx, formValues); err != nil {
return nil, "", 0, nil, err
}
// this means that filename="" was not specified for file key and Go has
// an ugly way of handling this situation. Refer here
// https://golang.org/src/mime/multipart/formdata.go#L61
if len(form.File) == 0 {
var b = &bytes.Buffer{}
for _, v := range formValues["File"] {
b.WriteString(v)
}
fileSize = int64(b.Len())
filePart = ioutil.NopCloser(b)
return filePart, fileName, fileSize, formValues, nil
}
	// Iterate until we find a valid File field and break
for k, v := range form.File {
canonicalFormName := http.CanonicalHeaderKey(k)
if canonicalFormName == "File" {
if len(v) == 0 {
logger.LogIf(ctx, errInvalidArgument)
return nil, "", 0, nil, errInvalidArgument
}
// Fetch fileHeader which has the uploaded file information
fileHeader := v[0]
// Set filename
fileName = fileHeader.Filename
// Open the uploaded part
filePart, err = fileHeader.Open()
if err != nil {
logger.LogIf(ctx, err)
return nil, "", 0, nil, err
}
// Compute file size
fileSize, err = filePart.(io.Seeker).Seek(0, 2)
if err != nil {
logger.LogIf(ctx, err)
return nil, "", 0, nil, err
}
// Reset Seek to the beginning
_, err = filePart.(io.Seeker).Seek(0, 0)
if err != nil {
logger.LogIf(ctx, err)
return nil, "", 0, nil, err
}
// File found and ready for reading
break
}
}
return filePart, fileName, fileSize, formValues, nil
}
// Log headers and body.
func httpTraceAll(f http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if !globalHTTPTrace.HasSubscribers() {
f.ServeHTTP(w, r)
return
}
trace := Trace(f, true, w, r)
globalHTTPTrace.Publish(trace)
}
}
// Log only the headers.
func httpTraceHdrs(f http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if !globalHTTPTrace.HasSubscribers() {
f.ServeHTTP(w, r)
return
}
trace := Trace(f, false, w, r)
globalHTTPTrace.Publish(trace)
}
}
func collectAPIStats(api string, f http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
globalHTTPStats.currentS3Requests.Inc(api)
defer globalHTTPStats.currentS3Requests.Dec(api)
statsWriter := logger.NewResponseWriter(w)
f.ServeHTTP(statsWriter, r)
// Time duration in secs since the call started.
// We don't need to do nanosecond precision in this
// simply for the fact that it is not human readable.
durationSecs := time.Since(statsWriter.StartTime).Seconds()
globalHTTPStats.updateStats(api, r, statsWriter, durationSecs)
}
}
// Returns "/bucketName/objectName" for path-style or virtual-host-style requests.
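// For example, with domain "mydomain.com", a request to host
// "bucket.mydomain.com" for path "/object" resolves to "/bucket/object".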
func getResource(path string, host string, domains []string) (string, error) {
if len(domains) == 0 {
return path, nil
}
// If virtual-host-style is enabled construct the "resource" properly.
if strings.Contains(host, ":") {
// In bucket.mydomain.com:9000, strip out :9000
var err error
if host, _, err = net.SplitHostPort(host); err != nil {
reqInfo := (&logger.ReqInfo{}).AppendTags("host", host)
reqInfo.AppendTags("path", path)
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
logger.LogIf(ctx, err)
return "", err
}
}
for _, domain := range domains {
if host == minioReservedBucket+"."+domain {
continue
}
if !strings.HasSuffix(host, "."+domain) {
continue
}
bucket := strings.TrimSuffix(host, "."+domain)
return SlashSeparator + pathJoin(bucket, path), nil
}
return path, nil
}
var regexVersion = regexp.MustCompile(`(\w\d+)`)
func extractAPIVersion(r *http.Request) string {
return regexVersion.FindString(r.URL.Path)
}
func methodNotAllowedHandler(api string) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
code := "XMinio" + api + "VersionMismatch"
writeErrorResponseString(r.Context(), w, APIError{
Code: code,
Description: "Not allowed (" + r.Method + " " + r.URL.String() + " on " + api + " API)",
HTTPStatusCode: http.StatusMethodNotAllowed,
}, r.URL)
}
}
// If none of the http routes match respond with appropriate errors
func errorResponseHandler(w http.ResponseWriter, r *http.Request) {
if r.Method == http.MethodOptions {
return
}
version := extractAPIVersion(r)
switch {
case strings.HasPrefix(r.URL.Path, peerRESTPrefix):
desc := fmt.Sprintf("Expected 'peer' API version '%s', instead found '%s', please upgrade the servers",
peerRESTVersion, version)
writeErrorResponseString(r.Context(), w, APIError{
Code: "XMinioPeerVersionMismatch",
Description: desc,
HTTPStatusCode: http.StatusUpgradeRequired,
}, r.URL)
case strings.HasPrefix(r.URL.Path, storageRESTPrefix):
desc := fmt.Sprintf("Expected 'storage' API version '%s', instead found '%s', please upgrade the servers",
storageRESTVersion, version)
writeErrorResponseString(r.Context(), w, APIError{
Code: "XMinioStorageVersionMismatch",
Description: desc,
HTTPStatusCode: http.StatusUpgradeRequired,
}, r.URL)
case strings.HasPrefix(r.URL.Path, lockRESTPrefix):
desc := fmt.Sprintf("Expected 'lock' API version '%s', instead found '%s', please upgrade the servers",
lockRESTVersion, version)
writeErrorResponseString(r.Context(), w, APIError{
Code: "XMinioLockVersionMismatch",
Description: desc,
HTTPStatusCode: http.StatusUpgradeRequired,
}, r.URL)
case strings.HasPrefix(r.URL.Path, adminPathPrefix):
var desc string
if version == "v1" {
desc = fmt.Sprintf("Server expects client requests with 'admin' API version '%s', found '%s', please upgrade the client to latest releases", madmin.AdminAPIVersion, version)
} else if version == madmin.AdminAPIVersion {
desc = fmt.Sprintf("This 'admin' API is not supported by server in '%s'", getMinioMode())
} else {
desc = fmt.Sprintf("Unexpected client 'admin' API version found '%s', expected '%s', please downgrade the client to older releases", version, madmin.AdminAPIVersion)
}
writeErrorResponseJSON(r.Context(), w, APIError{
Code: "XMinioAdminVersionMismatch",
Description: desc,
HTTPStatusCode: http.StatusUpgradeRequired,
}, r.URL)
default:
desc := fmt.Sprintf("Unknown API request at %s", r.URL.Path)
writeErrorResponse(r.Context(), w, APIError{
Code: "XMinioUnknownAPIRequest",
Description: desc,
HTTPStatusCode: http.StatusBadRequest,
}, r.URL, guessIsBrowserReq(r))
}
}
// gets host name for current node
func getHostName(r *http.Request) (hostName string) {
if globalIsDistErasure {
hostName = GetLocalPeer(globalEndpoints)
} else {
hostName = r.Host
}
return
}
// Proxy any request to an endpoint.
func proxyRequest(ctx context.Context, w http.ResponseWriter, r *http.Request, ep ProxyEndpoint) (success bool) {
success = true
// Make sure we remove any existing headers before
// proxying the request to another node.
for k := range w.Header() {
w.Header().Del(k)
}
f := handlers.NewForwarder(&handlers.Forwarder{
PassHost: true,
RoundTripper: ep.Transport,
ErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) {
success = false
if err != nil && !errors.Is(err, context.Canceled) {
logger.LogIf(GlobalContext, err)
}
},
})
r.URL.Scheme = "http"
if globalIsSSL {
r.URL.Scheme = "https"
}
r.URL.Host = ep.Host
f.ServeHTTP(w, r)
return
}
| cmd/handler-utils.go | 1 | https://github.com/minio/minio/commit/3a0082f0f18abc158e4ee6d4ce02d012a36a0901 | [
0.998200535774231,
0.01899227872490883,
0.00016322298324666917,
0.00017521320842206478,
0.1345159262418747
] |
{
"id": 1,
"code_window": [
"\n",
"\t\tf.ServeHTTP(statsWriter, r)\n",
"\n",
"\t\t// Time duration in secs since the call started.\n",
"\t\t// We don't need to do nanosecond precision in this\n",
"\t\t// simply for the fact that it is not human readable.\n",
"\t\tdurationSecs := time.Since(statsWriter.StartTime).Seconds()\n",
"\n",
"\t\tglobalHTTPStats.updateStats(api, r, statsWriter, durationSecs)\n",
"\t}\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tglobalHTTPStats.updateStats(api, r, statsWriter)\n"
],
"file_path": "cmd/handler-utils.go",
"type": "replace",
"edit_start_line_idx": 381
} | /*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package data
import (
"reflect"
"testing"
"github.com/minio/minio/pkg/s3select/internal/parquet-go/gen-go/parquet"
"github.com/minio/minio/pkg/s3select/internal/parquet-go/schema"
)
func TestPopulateMap(t *testing.T) {
t.Skip("Broken")
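	// requiredMap1 models a REQUIRED map<string, int32> whose key and value
	// fields are themselves both REQUIRED.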
requiredMap1 := schema.NewTree()
{
mapElement, err := schema.NewElement("map", parquet.FieldRepetitionType_REQUIRED,
nil, parquet.ConvertedTypePtr(parquet.ConvertedType_MAP),
nil, nil, nil)
if err != nil {
t.Fatal(err)
}
keyValue, err := schema.NewElement("key_value", parquet.FieldRepetitionType_REPEATED,
nil, nil,
nil, nil, nil)
if err != nil {
t.Fatal(err)
}
requiredKey, err := schema.NewElement("key", parquet.FieldRepetitionType_REQUIRED,
parquet.TypePtr(parquet.Type_BYTE_ARRAY), parquet.ConvertedTypePtr(parquet.ConvertedType_UTF8),
nil, nil, nil)
if err != nil {
t.Fatal(err)
}
requiredValue, err := schema.NewElement("value", parquet.FieldRepetitionType_REQUIRED,
parquet.TypePtr(parquet.Type_INT32), nil,
nil, nil, nil)
if err != nil {
t.Fatal(err)
}
if err = requiredMap1.Set("map", mapElement); err != nil {
t.Fatal(err)
}
if err = requiredMap1.Set("map.key_value", keyValue); err != nil {
t.Fatal(err)
}
if err = requiredMap1.Set("map.key_value.key", requiredKey); err != nil {
t.Fatal(err)
}
if err = requiredMap1.Set("map.key_value.value", requiredValue); err != nil {
t.Fatal(err)
}
if _, _, err = requiredMap1.ToParquetSchema(); err != nil {
t.Fatal(err)
}
}
requiredMap2 := schema.NewTree()
{
mapElement, err := schema.NewElement("map", parquet.FieldRepetitionType_REQUIRED,
nil, parquet.ConvertedTypePtr(parquet.ConvertedType_MAP),
nil, nil, nil)
if err != nil {
t.Fatal(err)
}
keyValue, err := schema.NewElement("key_value", parquet.FieldRepetitionType_REPEATED,
nil, nil,
nil, nil, nil)
if err != nil {
t.Fatal(err)
}
requiredKey, err := schema.NewElement("key", parquet.FieldRepetitionType_REQUIRED,
parquet.TypePtr(parquet.Type_BYTE_ARRAY), parquet.ConvertedTypePtr(parquet.ConvertedType_UTF8),
nil, nil, nil)
if err != nil {
t.Fatal(err)
}
optionalValue, err := schema.NewElement("value", parquet.FieldRepetitionType_OPTIONAL,
parquet.TypePtr(parquet.Type_INT32), nil,
nil, nil, nil)
if err != nil {
t.Fatal(err)
}
if err = requiredMap2.Set("map", mapElement); err != nil {
t.Fatal(err)
}
if err = requiredMap2.Set("map.key_value", keyValue); err != nil {
t.Fatal(err)
}
if err = requiredMap2.Set("map.key_value.key", requiredKey); err != nil {
t.Fatal(err)
}
if err = requiredMap2.Set("map.key_value.value", optionalValue); err != nil {
t.Fatal(err)
}
if _, _, err = requiredMap2.ToParquetSchema(); err != nil {
t.Fatal(err)
}
}
optionalMap1 := schema.NewTree()
{
mapElement, err := schema.NewElement("map", parquet.FieldRepetitionType_OPTIONAL,
nil, parquet.ConvertedTypePtr(parquet.ConvertedType_MAP),
nil, nil, nil)
if err != nil {
t.Fatal(err)
}
keyValue, err := schema.NewElement("key_value", parquet.FieldRepetitionType_REPEATED,
nil, nil,
nil, nil, nil)
if err != nil {
t.Fatal(err)
}
requiredKey, err := schema.NewElement("key", parquet.FieldRepetitionType_REQUIRED,
parquet.TypePtr(parquet.Type_BYTE_ARRAY), parquet.ConvertedTypePtr(parquet.ConvertedType_UTF8),
nil, nil, nil)
if err != nil {
t.Fatal(err)
}
requiredValue, err := schema.NewElement("value", parquet.FieldRepetitionType_REQUIRED,
parquet.TypePtr(parquet.Type_INT32), nil,
nil, nil, nil)
if err != nil {
t.Fatal(err)
}
if err = optionalMap1.Set("map", mapElement); err != nil {
t.Fatal(err)
}
if err = optionalMap1.Set("map.key_value", keyValue); err != nil {
t.Fatal(err)
}
if err = optionalMap1.Set("map.key_value.key", requiredKey); err != nil {
t.Fatal(err)
}
if err = optionalMap1.Set("map.key_value.value", requiredValue); err != nil {
t.Fatal(err)
}
if _, _, err = optionalMap1.ToParquetSchema(); err != nil {
t.Fatal(err)
}
}
optionalMap2 := schema.NewTree()
{
mapElement, err := schema.NewElement("map", parquet.FieldRepetitionType_OPTIONAL,
nil, parquet.ConvertedTypePtr(parquet.ConvertedType_MAP),
nil, nil, nil)
if err != nil {
t.Fatal(err)
}
keyValue, err := schema.NewElement("key_value", parquet.FieldRepetitionType_REPEATED,
nil, nil,
nil, nil, nil)
if err != nil {
t.Fatal(err)
}
requiredKey, err := schema.NewElement("key", parquet.FieldRepetitionType_REQUIRED,
parquet.TypePtr(parquet.Type_BYTE_ARRAY), parquet.ConvertedTypePtr(parquet.ConvertedType_UTF8),
nil, nil, nil)
if err != nil {
t.Fatal(err)
}
optionalValue, err := schema.NewElement("value", parquet.FieldRepetitionType_OPTIONAL,
parquet.TypePtr(parquet.Type_INT32), nil,
nil, nil, nil)
if err != nil {
t.Fatal(err)
}
if err = optionalMap2.Set("map", mapElement); err != nil {
t.Fatal(err)
}
if err = optionalMap2.Set("map.key_value", keyValue); err != nil {
t.Fatal(err)
}
if err = optionalMap2.Set("map.key_value.key", requiredKey); err != nil {
t.Fatal(err)
}
if err = optionalMap2.Set("map.key_value.value", optionalValue); err != nil {
t.Fatal(err)
}
if _, _, err = optionalMap2.ToParquetSchema(); err != nil {
t.Fatal(err)
}
}
result1 := map[string]*Column{
"map.key_value.key": {
parquetType: parquet.Type_BYTE_ARRAY,
values: []interface{}{ten},
definitionLevels: []int64{1},
repetitionLevels: []int64{0},
},
"map.key_value.value": {
parquetType: parquet.Type_INT32,
values: []interface{}{v10},
definitionLevels: []int64{1},
repetitionLevels: []int64{1},
},
}
result2 := map[string]*Column{
"map.key_value.key": {
parquetType: parquet.Type_BYTE_ARRAY,
values: []interface{}{ten},
definitionLevels: []int64{1},
repetitionLevels: []int64{0},
},
"map.key_value.value": {
parquetType: parquet.Type_INT32,
values: []interface{}{nil},
definitionLevels: []int64{1},
repetitionLevels: []int64{1},
},
}
result3 := map[string]*Column{
"map.key_value.key": {
parquetType: parquet.Type_BYTE_ARRAY,
values: []interface{}{ten},
definitionLevels: []int64{1},
repetitionLevels: []int64{0},
},
"map.key_value.value": {
parquetType: parquet.Type_INT32,
values: []interface{}{v10},
definitionLevels: []int64{2},
repetitionLevels: []int64{1},
},
}
result4 := map[string]*Column{
"map.key_value.key": {
parquetType: parquet.Type_BYTE_ARRAY,
values: []interface{}{nil},
definitionLevels: []int64{0},
repetitionLevels: []int64{0},
},
}
result5 := map[string]*Column{
"map.key_value.key": {
parquetType: parquet.Type_BYTE_ARRAY,
values: []interface{}{ten},
definitionLevels: []int64{2},
repetitionLevels: []int64{0},
},
"map.key_value.value": {
parquetType: parquet.Type_INT32,
values: []interface{}{v10},
definitionLevels: []int64{2},
repetitionLevels: []int64{1},
},
}
result6 := map[string]*Column{
"map.key_value.key": {
parquetType: parquet.Type_BYTE_ARRAY,
values: []interface{}{ten},
definitionLevels: []int64{2},
repetitionLevels: []int64{0},
},
"map.key_value.value": {
parquetType: parquet.Type_INT32,
values: []interface{}{nil},
definitionLevels: []int64{2},
repetitionLevels: []int64{1},
},
}
result7 := map[string]*Column{
"map.key_value.key": {
parquetType: parquet.Type_BYTE_ARRAY,
values: []interface{}{ten},
definitionLevels: []int64{2},
repetitionLevels: []int64{0},
},
"map.key_value.value": {
parquetType: parquet.Type_INT32,
values: []interface{}{v10},
definitionLevels: []int64{3},
repetitionLevels: []int64{1},
},
}
testCases := []struct {
schemaTree *schema.Tree
data string
expectedResult map[string]*Column
expectErr bool
}{
{requiredMap1, `{}`, nil, true}, // err: map: nil value for required field
{requiredMap1, `{"map": null}`, nil, true}, // err: map: nil value for required field
{requiredMap1, `{"map": {"ten": null}}`, nil, true}, // err: map.key_value.value: nil value for required field
{requiredMap1, `{"map": {"ten": 10}}`, result1, false},
{requiredMap2, `{}`, nil, true}, // err: map: nil value for required field
{requiredMap2, `{"map": null}`, nil, true}, // err: map: nil value for required field
{requiredMap2, `{"map": {"ten": null}}`, result2, false},
{requiredMap2, `{"map": {"ten": 10}}`, result3, false},
{optionalMap1, `{}`, result4, false},
{optionalMap1, `{"map": null}`, result4, false},
{optionalMap1, `{"map": {"ten": null}}`, nil, true}, // err: map.key_value.value: nil value for required field
{optionalMap1, `{"map": {"ten": 10}}`, result5, false},
{optionalMap2, `{}`, result4, false},
{optionalMap2, `{"map": null}`, result4, false},
{optionalMap2, `{"map": {"ten": null}}`, result6, false},
{optionalMap2, `{"map": {"ten": 10}}`, result7, false},
}
for i, testCase := range testCases {
result, err := UnmarshalJSON([]byte(testCase.data), testCase.schemaTree)
expectErr := (err != nil)
if testCase.expectErr != expectErr {
t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
}
if !testCase.expectErr {
if !reflect.DeepEqual(result, testCase.expectedResult) {
t.Errorf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
}
}
}
}
| pkg/s3select/internal/parquet-go/data/column-map_test.go | 0 | https://github.com/minio/minio/commit/3a0082f0f18abc158e4ee6d4ce02d012a36a0901 | [
0.00018030527280643582,
0.0001773242693161592,
0.00016910056001506746,
0.00017756246961653233,
0.0000016312874322466087
] |
{
"id": 1,
"code_window": [
"\n",
"\t\tf.ServeHTTP(statsWriter, r)\n",
"\n",
"\t\t// Time duration in secs since the call started.\n",
"\t\t// We don't need to do nanosecond precision in this\n",
"\t\t// simply for the fact that it is not human readable.\n",
"\t\tdurationSecs := time.Since(statsWriter.StartTime).Seconds()\n",
"\n",
"\t\tglobalHTTPStats.updateStats(api, r, statsWriter, durationSecs)\n",
"\t}\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tglobalHTTPStats.updateStats(api, r, statsWriter)\n"
],
"file_path": "cmd/handler-utils.go",
"type": "replace",
"edit_start_line_idx": 381
} | /*
* MinIO Cloud Storage (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as actionsCommon from "./actions"
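// Holds browser-wide UI state: sidebar visibility plus the storage and
// server info fetched from the backend.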
export default (
state = {
sidebarOpen: false,
storageInfo: {used: 0},
serverInfo: {}
},
action
) => {
switch (action.type) {
case actionsCommon.TOGGLE_SIDEBAR:
return Object.assign({}, state, {
sidebarOpen: !state.sidebarOpen
})
case actionsCommon.CLOSE_SIDEBAR:
return Object.assign({}, state, {
sidebarOpen: false
})
case actionsCommon.SET_STORAGE_INFO:
return Object.assign({}, state, {
storageInfo: action.storageInfo
})
case actionsCommon.SET_SERVER_INFO:
return { ...state, serverInfo: action.serverInfo }
default:
return state
}
}
| browser/app/js/browser/reducer.js | 0 | https://github.com/minio/minio/commit/3a0082f0f18abc158e4ee6d4ce02d012a36a0901 | [
0.00017941364785656333,
0.0001774169213604182,
0.0001737612037686631,
0.00017793929146137089,
0.0000019768770016526105
] |
{
"id": 1,
"code_window": [
"\n",
"\t\tf.ServeHTTP(statsWriter, r)\n",
"\n",
"\t\t// Time duration in secs since the call started.\n",
"\t\t// We don't need to do nanosecond precision in this\n",
"\t\t// simply for the fact that it is not human readable.\n",
"\t\tdurationSecs := time.Since(statsWriter.StartTime).Seconds()\n",
"\n",
"\t\tglobalHTTPStats.updateStats(api, r, statsWriter, durationSecs)\n",
"\t}\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tglobalHTTPStats.updateStats(api, r, statsWriter)\n"
],
"file_path": "cmd/handler-utils.go",
"type": "replace",
"edit_start_line_idx": 381
} | // +build ignore
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package main
import (
"context"
"log"
"github.com/minio/minio/pkg/madmin"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTPS) otherwise.
// New returns an MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
dataUsageInfo, err := madmClnt.DataUsageInfo(context.Background())
if err != nil {
log.Fatalln(err)
}
log.Println(dataUsageInfo)
}
| pkg/madmin/examples/data-usage-info.go | 0 | https://github.com/minio/minio/commit/3a0082f0f18abc158e4ee6d4ce02d012a36a0901 | [
0.00017719990864861757,
0.00017157019465230405,
0.00016471091657876968,
0.00017350145208183676,
0.000004717571300716372
] |