hunk dict | file stringlengths 0-11.8M | file_path stringlengths 2-234 | label int64 0-1 | commit_url stringlengths 74-103 | dependency_score sequencelengths 5-5 |
---|---|---|---|---|---|
{
"id": 4,
"code_window": [
"\t\thandleError(ExitBadArgs, errors.New(\"key required\"))\n",
"\t}\n",
"\tkey := c.Args()[0]\n",
"\tttl := c.Int(\"ttl\")\n",
"\tctx, cancel := contextWithTotalTimeout(c)\n",
"\t_, err := ki.Set(ctx, key, \"\", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: client.PrevExist})\n",
"\tcancel()\n",
"\tif err != nil {\n",
"\t\thandleError(ExitServerError, err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tresp, err := ki.Set(ctx, key, \"\", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: client.PrevExist})\n"
],
"file_path": "etcdctl/ctlv2/command/update_dir_command.go",
"type": "replace",
"edit_start_line_idx": 48
} | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package command
import (
"errors"
"github.com/coreos/etcd/client"
"github.com/urfave/cli"
)
// NewRemoveDirCommand returns the CLI command for "rmdir".
func NewRemoveDirCommand() cli.Command {
return cli.Command{
Name: "rmdir",
Usage: "removes the key if it is an empty directory or a key-value pair",
ArgsUsage: "<key>",
Action: func(c *cli.Context) error {
rmdirCommandFunc(c, mustNewKeyAPI(c))
return nil
},
}
}
// rmdirCommandFunc executes the "rmdir" command.
func rmdirCommandFunc(c *cli.Context, ki client.KeysAPI) {
if len(c.Args()) == 0 {
handleError(ExitBadArgs, errors.New("key required"))
}
key := c.Args()[0]
ctx, cancel := contextWithTotalTimeout(c)
resp, err := ki.Delete(ctx, key, &client.DeleteOptions{Dir: true})
cancel()
if err != nil {
handleError(ExitServerError, err)
}
if !resp.Node.Dir {
printResponseKey(resp, c.GlobalString("output"))
}
}
| etcdctl/ctlv2/command/rmdir_command.go | 1 | https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2 | [
0.19911399483680725,
0.034513700753450394,
0.00016571122978348285,
0.00017708353698253632,
0.07365718483924866
] |
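Each row in this dump pairs a JSON hunk with the full text of a source file, its path, a relevance label, the source commit URL, and five dependency scores. The hunk's `code_window` lines carry one label each (`keep`, `replace`, or `add`), and `after_edit` holds the lines that substitute for the `replace`-labeled lines (or, for `"type": "add"` records, the lines inserted at the marked position). A minimal sketch of applying a replace-type hunk, assuming this reading of the schema (the helper name is ours, not part of the dataset):

```go
// applyReplaceHunk rebuilds the edited window: "keep" lines pass through
// unchanged, and the run of "replace" lines is substituted (once) with the
// after_edit lines.
func applyReplaceHunk(codeWindow, labels, afterEdit []string) []string {
	out := make([]string, 0, len(codeWindow)+len(afterEdit))
	replaced := false
	for i, line := range codeWindow {
		switch labels[i] {
		case "keep":
			out = append(out, line)
		case "replace":
			if !replaced {
				out = append(out, afterEdit...)
				replaced = true
			}
		}
	}
	return out
}
```

Applied to the record above, the `_, err := ki.Set(...)` line is replaced by `resp, err := ki.Set(...)`, which matches the linked etcd commit.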
{
"id": 4,
"code_window": [
"\t\thandleError(ExitBadArgs, errors.New(\"key required\"))\n",
"\t}\n",
"\tkey := c.Args()[0]\n",
"\tttl := c.Int(\"ttl\")\n",
"\tctx, cancel := contextWithTotalTimeout(c)\n",
"\t_, err := ki.Set(ctx, key, \"\", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: client.PrevExist})\n",
"\tcancel()\n",
"\tif err != nil {\n",
"\t\thandleError(ExitServerError, err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tresp, err := ki.Set(ctx, key, \"\", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: client.PrevExist})\n"
],
"file_path": "etcdctl/ctlv2/command/update_dir_command.go",
"type": "replace",
"edit_start_line_idx": 48
} | // +build !notfastpath
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// ************************************************************
// DO NOT EDIT.
// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl
// ************************************************************
package codec
// Fast path functions try to create a fast path encode or decode implementation
// for common maps and slices.
//
// We define the functions and register them in this single file
// so as not to pollute encode.go and decode.go, or create a dependency there.
// This file can be omitted without causing a build failure.
//
// The advantage of fast paths is:
// - Many calls bypass reflection altogether
//
// Currently supported:
// - slices of all builtin types,
// - maps of all builtin types to string or interface values,
// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8)
// This should provide adequate "typical" implementations.
//
// Note that fast track decode functions must handle values for which an address cannot be obtained.
// For example:
// m2 := map[string]int{}
// p2 := []interface{}{m2}
// // decoding into p2 will bomb if fast track functions do not treat it as unaddressable.
//
import (
"reflect"
"sort"
)
const fastpathCheckNilFalse = false // for reflect
const fastpathCheckNilTrue = true // for type switch
type fastpathT struct {}
var fastpathTV fastpathT
type fastpathE struct {
rtid uintptr
rt reflect.Type
encfn func(*encFnInfo, reflect.Value)
decfn func(*decFnInfo, reflect.Value)
}
type fastpathA [{{ .FastpathLen }}]fastpathE
func (x *fastpathA) index(rtid uintptr) int {
// use binary search to grab the index (adapted from sort/search.go)
h, i, j := 0, 0, {{ .FastpathLen }} // len(x)
for i < j {
h = i + (j-i)/2
if x[h].rtid < rtid {
i = h + 1
} else {
j = h
}
}
if i < {{ .FastpathLen }} && x[i].rtid == rtid {
return i
}
return -1
}
type fastpathAslice []fastpathE
func (x fastpathAslice) Len() int { return len(x) }
func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid }
func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
var fastpathAV fastpathA
// to avoid a possible initialization loop error, populate fastpath in an init()
func init() {
if !fastpathEnabled {
return
}
i := 0
fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) {
xrt := reflect.TypeOf(v)
xptr := reflect.ValueOf(xrt).Pointer()
fastpathAV[i] = fastpathE{xptr, xrt, fe, fd}
i++
return
}
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
fn([]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
sort.Sort(fastpathAslice(fastpathAV[:]))
}
// -- encode
// -- -- fast path type switch
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
case []{{ .Elem }}:{{else}}
case map[{{ .MapKey }}]{{ .Elem }}:{{end}}
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e){{if not .MapKey }}
case *[]{{ .Elem }}:{{else}}
case *map[{{ .MapKey }}]{{ .Elem }}:{{end}}
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
{{end}}{{end}}
default:
_ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
return false
}
return true
}
func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
case []{{ .Elem }}:
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e)
case *[]{{ .Elem }}:
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
{{end}}{{end}}{{end}}
default:
_ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
return false
}
return true
}
func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
case map[{{ .MapKey }}]{{ .Elem }}:
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e)
case *map[{{ .MapKey }}]{{ .Elem }}:
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
{{end}}{{end}}{{end}}
default:
_ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
return false
}
return true
}
// -- -- fast path functions
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e)
}
func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) {
ee := e.e
cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeArrayStart(len(v))
for _, v2 := range v {
if cr != nil { cr.sendContainerState(containerArrayElem) }
{{ encmd .Elem "v2"}}
}
if cr != nil { cr.sendContainerState(containerArrayEnd) }{{/* ee.EncodeEnd() */}}
}
{{end}}{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().(map[{{ .MapKey }}]{{ .Elem }}), fastpathCheckNilFalse, f.e)
}
func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, e *Encoder) {
ee := e.e
cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeMapStart(len(v))
{{if eq .MapKey "string"}}asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
{{end}}if e.h.Canonical {
{{if eq .MapKey "interface{}"}}{{/* out of band
*/}}var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
e2 := NewEncoderBytes(&mksv, e.hh)
v2 := make([]bytesI, len(v))
var i, l int
var vp *bytesI {{/* put loop variables outside. seems currently needed for better perf */}}
for k2, _ := range v {
l = len(mksv)
e2.MustEncode(k2)
vp = &v2[i]
vp.v = mksv[l:]
vp.i = k2
i++
}
sort.Sort(bytesISlice(v2))
for j := range v2 {
if cr != nil { cr.sendContainerState(containerMapKey) }
e.asis(v2[j].v)
if cr != nil { cr.sendContainerState(containerMapValue) }
e.encode(v[v2[j].i])
} {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v))
var i int
for k, _ := range v {
v2[i] = {{ $x }}(k)
i++
}
sort.Sort({{ sorttype .MapKey false}}(v2))
for _, k2 := range v2 {
if cr != nil { cr.sendContainerState(containerMapKey) }
{{if eq .MapKey "string"}}if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}{{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}}
if cr != nil { cr.sendContainerState(containerMapValue) }
{{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }}
} {{end}}
} else {
for k2, v2 := range v {
if cr != nil { cr.sendContainerState(containerMapKey) }
{{if eq .MapKey "string"}}if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}{{else}}{{ encmd .MapKey "k2"}}{{end}}
if cr != nil { cr.sendContainerState(containerMapValue) }
{{ encmd .Elem "v2"}}
}
}
if cr != nil { cr.sendContainerState(containerMapEnd) }{{/* ee.EncodeEnd() */}}
}
{{end}}{{end}}{{end}}
// -- decode
// -- -- fast path type switch
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
case []{{ .Elem }}:{{else}}
case map[{{ .MapKey }}]{{ .Elem }}:{{end}}
fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, d){{if not .MapKey }}
case *[]{{ .Elem }}:{{else}}
case *map[{{ .MapKey }}]{{ .Elem }}:{{end}}
v2, changed2 := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, fastpathCheckNilFalse, true, d)
if changed2 {
*v = v2
}
{{end}}{{end}}
default:
_ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
return false
}
return true
}
// -- -- fast path functions
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
{{/*
Slices can change if they
- did not come from an array
- are addressable (from a ptr)
- are settable (e.g. contained in an interface{})
*/}}
func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) {
array := f.seq == seqTypeArray
if !array && rv.CanAddr() { {{/* // CanSet => CanAddr + Exported */}}
vp := rv.Addr().Interface().(*[]{{ .Elem }})
v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, !array, f.d)
if changed {
*vp = v
}
} else {
v := rv.Interface().([]{{ .Elem }})
fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d)
}
}
func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, checkNil bool, d *Decoder) {
v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d)
if changed {
*vp = v
}
}
func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil bool, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) {
dd := d.d
{{/* // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil() */}}
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
changed = true
}
return nil, changed
}
slh, containerLenS := d.decSliceHelperStart()
if containerLenS == 0 {
if canChange {
if v == nil {
v = []{{ .Elem }}{}
} else if len(v) != 0 {
v = v[:0]
}
changed = true
}
slh.End()
return
}
if containerLenS > 0 {
x2read := containerLenS
var xtrunc bool
if containerLenS > cap(v) {
if canChange { {{/*
// fast-path is for "basic" immutable types, so no need to copy them over
// s := make([]{{ .Elem }}, decInferLen(containerLenS, d.h.MaxInitLen))
// copy(s, v[:cap(v)])
// v = s */}}
var xlen int
xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }})
if xtrunc {
if xlen <= cap(v) {
v = v[:xlen]
} else {
v = make([]{{ .Elem }}, xlen)
}
} else {
v = make([]{{ .Elem }}, xlen)
}
changed = true
} else {
d.arrayCannotExpand(len(v), containerLenS)
}
x2read = len(v)
} else if containerLenS != len(v) {
if canChange {
v = v[:containerLenS]
changed = true
}
} {{/* // all checks done. cannot go past len. */}}
j := 0
for ; j < x2read; j++ {
slh.ElemContainerState(j)
{{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }}
}
if xtrunc { {{/* // means canChange=true, changed=true already. */}}
for ; j < containerLenS; j++ {
v = append(v, {{ zerocmd .Elem }})
slh.ElemContainerState(j)
{{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }}
}
} else if !canChange {
for ; j < containerLenS; j++ {
slh.ElemContainerState(j)
d.swallow()
}
}
} else {
breakFound := dd.CheckBreak() {{/* check break first, so we can initialize v with a capacity of 4 if necessary */}}
if breakFound {
if canChange {
if v == nil {
v = []{{ .Elem }}{}
} else if len(v) != 0 {
v = v[:0]
}
changed = true
}
slh.End()
return
}
if cap(v) == 0 {
v = make([]{{ .Elem }}, 1, 4)
changed = true
}
j := 0
for ; !breakFound; j++ {
if j >= len(v) {
if canChange {
v = append(v, {{ zerocmd .Elem }})
changed = true
} else {
d.arrayCannotExpand(len(v), j+1)
}
}
slh.ElemContainerState(j)
if j < len(v) { {{/* // all checks done. cannot go past len. */}}
{{ if eq .Elem "interface{}" }}d.decode(&v[j])
{{ else }}v[j] = {{ decmd .Elem }}{{ end }}
} else {
d.swallow()
}
breakFound = dd.CheckBreak()
}
if canChange && j < len(v) {
v = v[:j]
changed = true
}
}
slh.End()
return v, changed
}
{{end}}{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
{{/*
Maps can change if they are
- addressable (from a ptr)
- settable (e.g. contained in an interface{})
*/}}
func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) {
if rv.CanAddr() {
vp := rv.Addr().Interface().(*map[{{ .MapKey }}]{{ .Elem }})
v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, true, f.d)
if changed {
*vp = v
}
} else {
v := rv.Interface().(map[{{ .MapKey }}]{{ .Elem }})
fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d)
}
}
func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, d *Decoder) {
v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d)
if changed {
*vp = v
}
}
func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, canChange bool,
d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) {
dd := d.d
cr := d.cr
{{/* // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil() */}}
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
changed = true
}
return nil, changed
}
containerLen := dd.ReadMapStart()
if canChange && v == nil {
xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }})
v = make(map[{{ .MapKey }}]{{ .Elem }}, xlen)
changed = true
}
{{ if eq .Elem "interface{}" }}mapGet := !d.h.MapValueReset && !d.h.InterfaceReset{{end}}
var mk {{ .MapKey }}
var mv {{ .Elem }}
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
if cr != nil { cr.sendContainerState(containerMapKey) }
{{ if eq .MapKey "interface{}" }}mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}}
}{{ else }}mk = {{ decmd .MapKey }}{{ end }}
if cr != nil { cr.sendContainerState(containerMapValue) }
{{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil }
d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }}
if v != nil {
v[mk] = mv
}
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
if cr != nil { cr.sendContainerState(containerMapKey) }
{{ if eq .MapKey "interface{}" }}mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}}
}{{ else }}mk = {{ decmd .MapKey }}{{ end }}
if cr != nil { cr.sendContainerState(containerMapValue) }
{{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil }
d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }}
if v != nil {
v[mk] = mv
}
}
}
if cr != nil { cr.sendContainerState(containerMapEnd) }
return v, changed
}
{{end}}{{end}}{{end}}
| cmd/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl | 0 | https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2 | [
0.0013786278432235122,
0.00021891795040573925,
0.0001618040114408359,
0.0001758399448590353,
0.00020124638103879988
] |
{
"id": 4,
"code_window": [
"\t\thandleError(ExitBadArgs, errors.New(\"key required\"))\n",
"\t}\n",
"\tkey := c.Args()[0]\n",
"\tttl := c.Int(\"ttl\")\n",
"\tctx, cancel := contextWithTotalTimeout(c)\n",
"\t_, err := ki.Set(ctx, key, \"\", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: client.PrevExist})\n",
"\tcancel()\n",
"\tif err != nil {\n",
"\t\thandleError(ExitServerError, err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tresp, err := ki.Set(ctx, key, \"\", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: client.PrevExist})\n"
],
"file_path": "etcdctl/ctlv2/command/update_dir_command.go",
"type": "replace",
"edit_start_line_idx": 48
} | package pb
import (
"io"
)
// Reader is a proxy reader that implements io.Reader.
type Reader struct {
io.Reader
bar *ProgressBar
}
func (r *Reader) Read(p []byte) (n int, err error) {
n, err = r.Reader.Read(p)
r.bar.Add(n)
return
}
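// Hypothetical usage sketch (not part of this file): a Reader is normally
// obtained from a ProgressBar via its NewProxyReader method, and the bar
// advances as bytes flow through Read. Names below follow the pb.v1 API as
// we understand it; treat this as an illustration, not vendored code.
//
//	bar := pb.StartNew(totalBytes)
//	proxy := bar.NewProxyReader(src)
//	io.Copy(dst, proxy) // each Read advances the bar
//	bar.Finish()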
| cmd/vendor/gopkg.in/cheggaaa/pb.v1/reader.go | 0 | https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2 | [
0.00017480649694334716,
0.00017341777856927365,
0.00017202906019520015,
0.00017341777856927365,
0.0000013887183740735054
] |
{
"id": 4,
"code_window": [
"\t\thandleError(ExitBadArgs, errors.New(\"key required\"))\n",
"\t}\n",
"\tkey := c.Args()[0]\n",
"\tttl := c.Int(\"ttl\")\n",
"\tctx, cancel := contextWithTotalTimeout(c)\n",
"\t_, err := ki.Set(ctx, key, \"\", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: client.PrevExist})\n",
"\tcancel()\n",
"\tif err != nil {\n",
"\t\thandleError(ExitServerError, err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tresp, err := ki.Set(ctx, key, \"\", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: client.PrevExist})\n"
],
"file_path": "etcdctl/ctlv2/command/update_dir_command.go",
"type": "replace",
"edit_start_line_idx": 48
} | [](https://travis-ci.org/spf13/pflag)
## Description
pflag is a drop-in replacement for Go's flag package, implementing
POSIX/GNU-style --flags.
pflag is compatible with the [GNU extensions to the POSIX recommendations
for command-line options][1]. For a more precise description, see the
"Command-line flag syntax" section below.
[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
pflag is available under the same style of BSD license as the Go language,
which can be found in the LICENSE file.
## Installation
pflag is available using the standard `go get` command.
Install by running:
go get github.com/spf13/pflag
Run tests by running:
go test github.com/spf13/pflag
## Usage
pflag is a drop-in replacement of Go's native flag package. If you import
pflag under the name "flag" then all code should continue to function
with no changes.
``` go
import flag "github.com/spf13/pflag"
```
There is one exception to this: if you directly instantiate the Flag struct
there is one more field "Shorthand" that you will need to set.
Most code never instantiates this struct directly, and instead uses
functions such as String(), BoolVar(), and Var(), and is therefore
unaffected.
Define flags using flag.String(), Bool(), Int(), etc.
This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
``` go
var ip *int = flag.Int("flagname", 1234, "help message for flagname")
```
If you like, you can bind the flag to a variable using the Var() functions.
``` go
var flagvar int
func init() {
flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
}
```
Or you can create custom flags that satisfy the Value interface (with
pointer receivers) and couple them to flag parsing by
``` go
flag.Var(&flagVal, "name", "help message for flagname")
```
For such flags, the default value is just the initial value of the variable.
After all flags are defined, call
``` go
flag.Parse()
```
to parse the command line into the defined flags.
Flags may then be used directly. If you're using the flags themselves,
they are all pointers; if you bind to variables, they're values.
``` go
fmt.Println("ip has value ", *ip)
fmt.Println("flagvar has value ", flagvar)
```
There are helper functions available to get the value stored in a Flag if you have a FlagSet but find it difficult to keep up with all of the flag pointers in your code.
If you have a pflag.FlagSet with a flag called 'flagname' of type int you
can use GetInt() to get the int value. But notice that 'flagname' must exist
and it must be an int. GetString("flagname") will fail.
``` go
i, err := flagset.GetInt("flagname")
```
After parsing, the arguments after the flag are available as the
slice flag.Args() or individually as flag.Arg(i).
The arguments are indexed from 0 through flag.NArg()-1.
The pflag package also defines some new functions that are not in flag,
that give one-letter shorthands for flags. You can use these by appending
'P' to the name of any function that defines a flag.
``` go
var ip = flag.IntP("flagname", "f", 1234, "help message")
var flagvar bool
func init() {
flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
}
flag.VarP(&flagVal, "varname", "v", "help message")
```
Shorthand letters can be used with single dashes on the command line.
Boolean shorthand flags can be combined with other shorthand flags.
The default set of command-line flags is controlled by
top-level functions. The FlagSet type allows one to define
independent sets of flags, such as to implement subcommands
in a command-line interface. The methods of FlagSet are
analogous to the top-level functions for the command-line
flag set.
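For example, a subcommand could own its options in a dedicated FlagSet (a minimal sketch; the subcommand name and argument slicing are illustrative):

``` go
fs := flag.NewFlagSet("serve", flag.ExitOnError)
port := fs.IntP("port", "p", 8080, "port to listen on")
fs.Parse(os.Args[2:]) // parse only the subcommand's arguments
fmt.Println("serving on port", *port)
```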
## Setting no option default values for flags
After you create a flag it is possible to set the pflag.NoOptDefVal for
the given flag. Doing this changes the meaning of the flag slightly. If
a flag has a NoOptDefVal and the flag is set on the command line without
an option the flag will be set to the NoOptDefVal. For example given:
``` go
var ip = flag.IntP("flagname", "f", 1234, "help message")
flag.Lookup("flagname").NoOptDefVal = "4321"
```
Would result in something like
| Parsed Arguments | Resulting Value |
| ------------- | ------------- |
| --flagname=1357 | ip=1357 |
| --flagname | ip=4321 |
| [nothing] | ip=1234 |
## Command line flag syntax
```
--flag // boolean flags, or flags with no option default values
--flag x // only on flags without a default value
--flag=x
```
Unlike the flag package, a single dash before an option means something
different than a double dash. Single dashes signify a series of shorthand
letters for flags. All but the last shorthand letter must be boolean flags
or a flag with a default value
```
// boolean or flags where the 'no option default value' is set
-f
-f=true
-abc
but
-b true is INVALID
// non-boolean and flags without a 'no option default value'
-n 1234
-n=1234
-n1234
// mixed
-abcs "hello"
-absd="hello"
-abcs1234
```
Flag parsing stops after the terminator "--". Unlike the flag package,
flags can be interspersed with arguments anywhere on the command line
before this terminator.
Integer flags accept 1234, 0664, 0x1234 and may be negative.
Boolean flags (in their long form) accept 1, 0, t, f, true, false,
TRUE, FALSE, True, False.
Duration flags accept any input valid for time.ParseDuration.
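For example (a minimal sketch):

``` go
timeout := flag.Duration("timeout", 30*time.Second, "request timeout")
flag.Parse()
// --timeout=90s, --timeout=1m30s, and --timeout=500ms all parse via time.ParseDuration
fmt.Println(*timeout)
```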
## Mutating or "Normalizing" Flag names
It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow.
**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
``` go
func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
from := []string{"-", "_"}
to := "."
for _, sep := range from {
name = strings.Replace(name, sep, to, -1)
}
return pflag.NormalizedName(name)
}
myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
```
**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name
``` go
func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
switch name {
case "old-flag-name":
name = "new-flag-name"
break
}
return pflag.NormalizedName(name)
}
myFlagSet.SetNormalizeFunc(aliasNormalizeFunc)
```
## Deprecating a flag or its shorthand
It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used.
**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead.
```go
// deprecate a flag by specifying its name and a usage message
flags.MarkDeprecated("badflag", "please use --good-flag instead")
```
This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used.
**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n".
```go
// deprecate a flag shorthand by specifying its flag name and a usage message
flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only")
```
This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used.
Note that usage message is essential here, and it should not be empty.
## Hidden flags
It is possible to mark a flag as hidden, meaning it will still function as normal, however will not show up in usage/help text.
**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available.
```go
// hide a flag by specifying its name
flags.MarkHidden("secretFlag")
```
## More info
You can see the full reference documentation of the pflag package
[at godoc.org][3], or through go's standard documentation system by
running `godoc -http=:6060` and browsing to
[http://localhost:6060/pkg/github.com/ogier/pflag][2] after
installation.
[2]: http://localhost:6060/pkg/github.com/ogier/pflag
[3]: http://godoc.org/github.com/ogier/pflag
| cmd/vendor/github.com/spf13/pflag/README.md | 0 | https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2 | [
0.0005291139823384583,
0.00018222947255708277,
0.00016044374206103384,
0.00016941590001806617,
0.00006955697608646005
] |
{
"id": 5,
"code_window": [
"\tif err != nil {\n",
"\t\thandleError(ExitServerError, err)\n",
"\t}\n",
"}"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\tif c.GlobalString(\"output\") != \"simple\" {\n",
"\t\tprintResponseKey(resp, c.GlobalString(\"output\"))\n",
"\t}\n"
],
"file_path": "etcdctl/ctlv2/command/update_dir_command.go",
"type": "add",
"edit_start_line_idx": 53
} | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package command
import (
"errors"
"github.com/coreos/etcd/client"
"github.com/urfave/cli"
)
// NewRemoveCommand returns the CLI command for "rm".
func NewRemoveCommand() cli.Command {
return cli.Command{
Name: "rm",
Usage: "remove a key or a directory",
ArgsUsage: "<key>",
Flags: []cli.Flag{
cli.BoolFlag{Name: "dir", Usage: "removes the key if it is an empty directory or a key-value pair"},
cli.BoolFlag{Name: "recursive, r", Usage: "removes the key and all child keys(if it is a directory)"},
cli.StringFlag{Name: "with-value", Value: "", Usage: "previous value"},
cli.IntFlag{Name: "with-index", Value: 0, Usage: "previous index"},
},
Action: func(c *cli.Context) error {
rmCommandFunc(c, mustNewKeyAPI(c))
return nil
},
}
}
// rmCommandFunc executes the "rm" command.
func rmCommandFunc(c *cli.Context, ki client.KeysAPI) {
if len(c.Args()) == 0 {
handleError(ExitBadArgs, errors.New("key required"))
}
key := c.Args()[0]
recursive := c.Bool("recursive")
dir := c.Bool("dir")
prevValue := c.String("with-value")
prevIndex := c.Int("with-index")
ctx, cancel := contextWithTotalTimeout(c)
resp, err := ki.Delete(ctx, key, &client.DeleteOptions{PrevIndex: uint64(prevIndex), PrevValue: prevValue, Dir: dir, Recursive: recursive})
cancel()
if err != nil {
handleError(ExitServerError, err)
}
if !resp.Node.Dir {
printResponseKey(resp, c.GlobalString("output"))
}
}
| etcdctl/ctlv2/command/rm_command.go | 1 | https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2 | [
0.9920990467071533,
0.14190581440925598,
0.00016611401224508882,
0.0001755404082359746,
0.347089946269989
] |
{
"id": 5,
"code_window": [
"\tif err != nil {\n",
"\t\thandleError(ExitServerError, err)\n",
"\t}\n",
"}"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\tif c.GlobalString(\"output\") != \"simple\" {\n",
"\t\tprintResponseKey(resp, c.GlobalString(\"output\"))\n",
"\t}\n"
],
"file_path": "etcdctl/ctlv2/command/update_dir_command.go",
"type": "add",
"edit_start_line_idx": 53
} | # Maintenance
## Overview
An etcd cluster needs periodic maintenance to remain reliable. Depending on an etcd application's needs, this maintenance can usually be automated and performed without downtime or significantly degraded performance.
All etcd maintenance manages storage resources consumed by the etcd keyspace. Failure to adequately control the keyspace size is guarded by storage space quotas; if an etcd member runs low on space, a quota will trigger cluster-wide alarms which will put the system into a limited-operation maintenance mode. To avoid running out of space for writes to the keyspace, the etcd keyspace history must be compacted. Storage space itself may be reclaimed by defragmenting etcd members. Finally, periodic snapshot backups of etcd member state makes it possible to recover any unintended logical data loss or corruption caused by operational error.
## History compaction
Since etcd keeps an exact history of its keyspace, this history should be periodically compacted to avoid performance degradation and eventual storage space exhaustion. Compacting the keyspace history drops all information about keys superseded prior to a given keyspace revision. The space used by these keys then becomes available for additional writes to the keyspace.
The keyspace can be compacted automatically with `etcd`'s time windowed history retention policy, or manually with `etcdctl`. The `etcdctl` method provides fine-grained control over the compacting process whereas automatic compacting fits applications that only need key history for some length of time.
`etcd` can be set to automatically compact the keyspace with the `--auto-compaction-retention` flag, which takes a period in hours:
```sh
# keep one hour of history
$ etcd --auto-compaction-retention=1
```
An `etcdctl` initiated compaction works as follows:
```sh
# compact up to revision 3
$ etcdctl compact 3
```
Revisions prior to the compaction revision become inaccessible:
```sh
$ etcdctl get --rev=2 somekey
Error: rpc error: code = 11 desc = etcdserver: mvcc: required revision has been compacted
```
## Defragmentation
After compacting the keyspace, the backend database may exhibit internal fragmentation. Any internal fragmentation is space that is free to use by the backend but still consumes storage space. The process of defragmentation releases this storage space back to the file system. Defragmentation is issued per member so that cluster-wide latency spikes may be avoided.
Compacting old revisions internally fragments `etcd` by leaving gaps in the backend database. Fragmented space is available for use by `etcd` but unavailable to the host filesystem.
To defragment an etcd member, use the `etcdctl defrag` command:
```sh
$ etcdctl defrag
Finished defragmenting etcd member[127.0.0.1:2379]
```
## Space quota
The space quota in `etcd` ensures the cluster operates in a reliable fashion. Without a space quota, `etcd` may suffer from poor performance if the keyspace grows excessively large, or it may simply run out of storage space, leading to unpredictable cluster behavior. If the keyspace's backend database for any member exceeds the space quota, `etcd` raises a cluster-wide alarm that puts the cluster into a maintenance mode which only accepts key reads and deletes. After freeing enough space in the keyspace, the alarm can be disarmed and the cluster will resume normal operation.
By default, `etcd` sets a conservative space quota suitable for most applications, but it may be configured on the command line, in bytes:
```sh
# set a very small 16MB quota
$ etcd --quota-backend-bytes=16777216
```
The space quota can be triggered with a loop:
```sh
# fill keyspace
$ while [ 1 ]; do dd if=/dev/urandom bs=1024 count=1024 | etcdctl put key || break; done
...
Error: rpc error: code = 8 desc = etcdserver: mvcc: database space exceeded
# confirm quota space is exceeded
$ etcdctl --write-out=table endpoint status
+----------------+------------------+-----------+---------+-----------+-----------+------------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX |
+----------------+------------------+-----------+---------+-----------+-----------+------------+
| 127.0.0.1:2379 | bf9071f4639c75cc | 2.3.0+git | 18 MB | true | 2 | 3332 |
+----------------+------------------+-----------+---------+-----------+-----------+------------+
# confirm alarm is raised
$ etcdctl alarm list
memberID:13803658152347727308 alarm:NOSPACE
```
Removing excessive keyspace data will put the cluster back within the quota limits so the alarm can be disarmed:
```sh
# get current revision
$ etcdctl --endpoints=:2379 endpoint status
[{"Endpoint":"127.0.0.1:2379","Status":{"header":{"cluster_id":8925027824743593106,"member_id":13803658152347727308,"revision":1516,"raft_term":2},"version":"2.3.0+git","dbSize":17973248,"leader":13803658152347727308,"raftIndex":6359,"raftTerm":2}}]
# compact away all old revisions
$ etcdctl compact 1516
compacted revision 1516
# defragment away excessive space
$ etcdctl defrag
Finished defragmenting etcd member[127.0.0.1:2379]
# disarm alarm
$ etcdctl alarm disarm
memberID:13803658152347727308 alarm:NOSPACE
# test puts are allowed again
$ etcdctl put newkey 123
OK
```
## Snapshot backup
Snapshotting the `etcd` cluster on a regular basis serves as a durable backup for an etcd keyspace. By taking periodic snapshots of an etcd member's backend database, an `etcd` cluster can be recovered to a point in time with a known good state.
A snapshot is taken with `etcdctl`:
```sh
$ etcdctl snapshot save backup.db
$ etcdctl --write-out=table snapshot status backup.db
+----------+----------+------------+------------+
| HASH | REVISION | TOTAL KEYS | TOTAL SIZE |
+----------+----------+------------+------------+
| fe01cf57 | 10 | 7 | 2.1 MB |
+----------+----------+------------+------------+
```
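A saved snapshot can later seed a restored member with `etcdctl snapshot restore` (a sketch; the data directory path is illustrative):

```sh
$ etcdctl snapshot restore backup.db --data-dir /var/lib/etcd-restored
```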
| Documentation/op-guide/maintenance.md | 0 | https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2 | [
0.00041479093488305807,
0.00018875031673815101,
0.00016534568567294627,
0.00016773820971138775,
0.00006820975249866024
] |
{
"id": 5,
"code_window": [
"\tif err != nil {\n",
"\t\thandleError(ExitServerError, err)\n",
"\t}\n",
"}"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\tif c.GlobalString(\"output\") != \"simple\" {\n",
"\t\tprintResponseKey(resp, c.GlobalString(\"output\"))\n",
"\t}\n"
],
"file_path": "etcdctl/ctlv2/command/update_dir_command.go",
"type": "add",
"edit_start_line_idx": 53
} | // Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// +build !windows
package capnslog
import (
"errors"
"fmt"
"os"
"path/filepath"
"github.com/coreos/go-systemd/journal"
)
func NewJournaldFormatter() (Formatter, error) {
if !journal.Enabled() {
return nil, errors.New("No systemd detected")
}
return &journaldFormatter{}, nil
}
type journaldFormatter struct{}
func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
var pri journal.Priority
switch l {
case CRITICAL:
pri = journal.PriCrit
case ERROR:
pri = journal.PriErr
case WARNING:
pri = journal.PriWarning
case NOTICE:
pri = journal.PriNotice
case INFO:
pri = journal.PriInfo
case DEBUG:
pri = journal.PriDebug
case TRACE:
pri = journal.PriDebug
default:
panic("Unhandled loglevel")
}
msg := fmt.Sprint(entries...)
tags := map[string]string{
"PACKAGE": pkg,
"SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
}
err := journal.Send(msg, pri, tags)
if err != nil {
fmt.Fprintln(os.Stderr, err)
}
}
func (j *journaldFormatter) Flush() {}
| cmd/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go | 0 | https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2 | [
0.0010341607267037034,
0.00031210208544507623,
0.00017084230785258114,
0.00017657702846918255,
0.000297023740131408
] |
{
"id": 5,
"code_window": [
"\tif err != nil {\n",
"\t\thandleError(ExitServerError, err)\n",
"\t}\n",
"}"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\tif c.GlobalString(\"output\") != \"simple\" {\n",
"\t\tprintResponseKey(resp, c.GlobalString(\"output\"))\n",
"\t}\n"
],
"file_path": "etcdctl/ctlv2/command/update_dir_command.go",
"type": "add",
"edit_start_line_idx": 53
} | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
#include "textflag.h"
//
// System call support for 386, FreeBSD
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-32
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-44
JMP syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-56
JMP syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-32
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-44
JMP syscall·RawSyscall6(SB)
| cmd/vendor/golang.org/x/sys/unix/asm_dragonfly_386.s | 0 | https://github.com/etcd-io/etcd/commit/205f10aeb6d7a2869d4da16131cccb77ba5289e2 | [
0.00017960843979381025,
0.00017275696154683828,
0.00016739348939154297,
0.0001712689845589921,
0.000005096524091641186
] |
{
"id": 0,
"code_window": [
"\t\t// are unlikely to happen.\n",
"\t\treturn\n",
"\t}\n",
"\tsr := &tracingpb.StructuredRecord{\n"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\n",
"\tvar now time.Time\n",
"\tif clock := s.testing.Clock; clock != nil {\n",
"\t\tnow = clock.Now()\n",
"\t} else {\n",
"\t\tnow = time.Now()\n",
"\t}\n"
],
"file_path": "pkg/util/tracing/crdbspan.go",
"type": "add",
"edit_start_line_idx": 312
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tracing
import (
"context"
"fmt"
"reflect"
"regexp"
"strings"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/logtags"
"github.com/gogo/protobuf/types"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/attribute"
"golang.org/x/net/trace"
"google.golang.org/grpc/metadata"
)
func TestRecordingString(t *testing.T) {
tr := NewTracer()
tr2 := NewTracer()
root := tr.StartSpan("root", WithForceRealSpan())
root.SetVerbose(true)
root.Record("root 1")
{
// Hackily fix the timing on the first log message, so that we can check it later.
r := root.i.crdb.mu.recording.logs.GetFirst().(*tracingpb.LogRecord)
r.Time = root.i.crdb.startTime.Add(time.Millisecond)
root.i.crdb.mu.recording.logs.RemoveFirst()
root.i.crdb.mu.recording.logs.AddFirst(r)
}
// Sleep a bit so that everything that comes afterwards has higher timestamps
// than the one we just assigned. Otherwise the sorting will be screwed up.
time.Sleep(10 * time.Millisecond)
carrier := metadataCarrier{MD: metadata.MD{}}
require.NoError(t, tr.InjectMetaInto(root.Meta(), carrier))
wireSpanMeta, err := tr2.ExtractMetaFrom(carrier)
require.NoError(t, err)
remoteChild := tr2.StartSpan("remote child", WithParentAndManualCollection(wireSpanMeta))
root.Record("root 2")
remoteChild.Record("remote child 1")
remoteChild.Finish()
remoteRec := remoteChild.GetRecording()
root.ImportRemoteSpans(remoteRec)
root.Record("root 3")
ch2 := tr.StartSpan("local child", WithParentAndAutoCollection(root))
root.Record("root 4")
ch2.Record("local child 1")
ch2.Finish()
root.Record("root 5")
root.Finish()
rec := root.GetRecording()
// Sanity check that the recording looks like we want. Note that this is not
// its String() representation; this just lists all the spans in order.
require.NoError(t, CheckRecordedSpans(rec, `
span: root
tags: _verbose=1
event: root 1
event: root 2
event: root 3
event: root 4
event: root 5
span: remote child
tags: _verbose=1
event: remote child 1
span: local child
tags: _verbose=1
event: local child 1
`))
require.NoError(t, CheckRecording(rec, `
=== operation:root _verbose:1
event:root 1
=== operation:remote child _verbose:1
event:remote child 1
event:root 2
event:root 3
=== operation:local child _verbose:1
event:local child 1
event:root 4
event:root 5
`))
// Check the timing info on the first two lines.
lines := strings.Split(rec.String(), "\n")
l, err := parseLine(lines[0])
require.NoError(t, err)
require.Equal(t, traceLine{
timeSinceTraceStart: "0.000ms",
timeSincePrev: "0.000ms",
text: "=== operation:root _verbose:1",
}, l)
l, err = parseLine(lines[1])
require.Equal(t, traceLine{
timeSinceTraceStart: "1.000ms",
timeSincePrev: "1.000ms",
text: "event:root 1",
}, l)
require.NoError(t, err)
}
type traceLine struct {
timeSinceTraceStart, timeSincePrev string
text string
}
func parseLine(s string) (traceLine, error) {
// Parse lines like:
// 0.007ms 0.007ms event:root 1
re := regexp.MustCompile(`\s*(.*s)\s*(.*s)\s{4}(.*)`)
match := re.FindStringSubmatch(s)
if match == nil {
return traceLine{}, errors.Newf("line doesn't match: %s", s)
}
return traceLine{
timeSinceTraceStart: match[1],
timeSincePrev: match[2],
text: match[3],
}, nil
}
func TestRecordingInRecording(t *testing.T) {
tr := NewTracer()
root := tr.StartSpan("root", WithForceRealSpan())
root.SetVerbose(true)
child := tr.StartSpan("child", WithParentAndAutoCollection(root), WithForceRealSpan())
child.SetVerbose(true)
// The remote grandchild is also recording, however since it's remote the spans
// have to be imported into the parent manually (this would usually happen via
// code at the RPC boundaries).
grandChild := tr.StartSpan("grandchild", WithParentAndManualCollection(child.Meta()))
grandChild.Finish()
child.ImportRemoteSpans(grandChild.GetRecording())
child.Finish()
root.Finish()
rootRec := root.GetRecording()
require.NoError(t, CheckRecordedSpans(rootRec, `
span: root
tags: _verbose=1
span: child
tags: _verbose=1
span: grandchild
tags: _verbose=1
`))
childRec := child.GetRecording()
require.NoError(t, CheckRecordedSpans(childRec, `
span: child
tags: _verbose=1
span: grandchild
tags: _verbose=1
`))
require.NoError(t, CheckRecording(childRec, `
=== operation:child _verbose:1
=== operation:grandchild _verbose:1
`))
}
func TestSpan_ImportRemoteSpans(t *testing.T) {
// Verify that GetRecording propagates the recording even when the receiving
// Span isn't verbose during import.
tr := NewTracer()
sp := tr.StartSpan("root", WithForceRealSpan())
ch := tr.StartSpan("child", WithParentAndManualCollection(sp.Meta()))
ch.SetVerbose(true)
ch.Record("foo")
ch.SetVerbose(false)
ch.Finish()
sp.ImportRemoteSpans(ch.GetRecording())
sp.Finish()
require.NoError(t, CheckRecordedSpans(sp.GetRecording(), `
span: root
span: child
event: foo
`))
}
func TestSpanRecordStructured(t *testing.T) {
tr := NewTracer()
sp := tr.StartSpan("root", WithForceRealSpan())
defer sp.Finish()
sp.RecordStructured(&types.Int32Value{Value: 4})
rec := sp.GetRecording()
require.Len(t, rec, 1)
require.Len(t, rec[0].StructuredRecords, 1)
item := rec[0].StructuredRecords[0]
var d1 types.DynamicAny
require.NoError(t, types.UnmarshalAny(item.Payload, &d1))
require.IsType(t, (*types.Int32Value)(nil), d1.Message)
require.NoError(t, CheckRecordedSpans(rec, `
span: root
`))
require.NoError(t, CheckRecording(rec, `
=== operation:root
structured:{"@type":"type.googleapis.com/google.protobuf.Int32Value","value":4}
`))
}
// TestSpanRecordStructuredLimit tests recording behavior when the size of
// structured data recorded into the span exceeds the configured limit.
func TestSpanRecordStructuredLimit(t *testing.T) {
tr := NewTracer()
sp := tr.StartSpan("root", WithForceRealSpan())
defer sp.Finish()
pad := func(i int) string { return fmt.Sprintf("%06d", i) }
payload := func(i int) Structured { return &types.StringValue{Value: pad(i)} }
anyPayload, err := types.MarshalAny(payload(42))
require.NoError(t, err)
structuredRecord := &tracingpb.StructuredRecord{
Time: timeutil.Now(),
Payload: anyPayload,
}
numStructuredRecordings := maxStructuredBytesPerSpan / structuredRecord.Size()
const extra = 10
for i := 1; i <= numStructuredRecordings+extra; i++ {
sp.RecordStructured(payload(i))
}
sp.SetVerbose(true)
rec := sp.GetRecording()
require.Len(t, rec, 1)
require.Len(t, rec[0].StructuredRecords, numStructuredRecordings)
require.Equal(t, "1", rec[0].Tags["_dropped"])
first := rec[0].StructuredRecords[0]
last := rec[0].StructuredRecords[len(rec[0].StructuredRecords)-1]
var d1 types.DynamicAny
require.NoError(t, types.UnmarshalAny(first.Payload, &d1))
require.IsType(t, (*types.StringValue)(nil), d1.Message)
var res string
require.NoError(t, types.StdStringUnmarshal(&res, first.Payload.Value))
require.Equal(t, pad(extra+1), res)
var d2 types.DynamicAny
require.NoError(t, types.UnmarshalAny(last.Payload, &d2))
require.IsType(t, (*types.StringValue)(nil), d2.Message)
require.NoError(t, types.StdStringUnmarshal(&res, last.Payload.Value))
require.Equal(t, pad(numStructuredRecordings+extra), res)
}
// TestSpanRecordLimit tests recording behavior when the amount of data logged
// into the span exceeds the configured limit.
func TestSpanRecordLimit(t *testing.T) {
// Logs include the timestamp, and we want to fix them so they're not
// variably sized (needed for the test below).
clock := &timeutil.ManualTime{}
tr := NewTracerWithOpt(context.Background(), WithTestingKnobs(TracerTestingKnobs{Clock: clock}))
sp := tr.StartSpan("root", WithForceRealSpan())
defer sp.Finish()
sp.SetVerbose(true)
msg := func(i int) string { return fmt.Sprintf("msg: %10d", i) }
// Determine the size of a log record by actually recording once.
sp.Recordf("%s", msg(42))
logSize := sp.GetRecording()[0].Logs[0].Size()
sp.ResetRecording()
numLogs := maxLogBytesPerSpan / logSize
const extra = 10
for i := 1; i <= numLogs+extra; i++ {
sp.Recordf("%s", msg(i))
}
rec := sp.GetRecording()
require.Len(t, rec, 1)
require.Len(t, rec[0].Logs, numLogs)
require.Equal(t, rec[0].Tags["_dropped"], "1")
first := rec[0].Logs[0]
last := rec[0].Logs[len(rec[0].Logs)-1]
require.Equal(t, first.Msg().StripMarkers(), msg(extra+1))
require.Equal(t, last.Msg().StripMarkers(), msg(numLogs+extra))
}
// testStructuredImpl is a testing implementation of Structured event.
type testStructuredImpl struct {
*types.Int32Value
}
var _ Structured = &testStructuredImpl{}
func (t *testStructuredImpl) String() string {
return fmt.Sprintf("structured=%d", t.Value)
}
func newTestStructured(i int) *testStructuredImpl {
return &testStructuredImpl{
&types.Int32Value{Value: int32(i)},
}
}
// TestSpanReset checks that resetting a span clears out existing recordings.
func TestSpanReset(t *testing.T) {
// Logs include the timestamp, and we want to fix them so they're not
// variably sized (needed for the test below).
clock := &timeutil.ManualTime{}
tr := NewTracerWithOpt(context.Background(), WithTestingKnobs(TracerTestingKnobs{Clock: clock}))
sp := tr.StartSpan("root", WithForceRealSpan())
defer sp.Finish()
sp.SetVerbose(true)
for i := 1; i <= 10; i++ {
if i%2 == 0 {
sp.RecordStructured(newTestStructured(i))
} else {
sp.Recordf("%d", i)
}
}
require.NoError(t, CheckRecordedSpans(sp.GetRecording(), `
span: root
tags: _unfinished=1 _verbose=1
event: 1
event: structured=2
event: 3
event: structured=4
event: 5
event: structured=6
event: 7
event: structured=8
event: 9
event: structured=10
`))
require.NoError(t, CheckRecording(sp.GetRecording(), `
=== operation:root _unfinished:1 _verbose:1
event:1
event:structured=2
event:3
event:structured=4
event:5
event:structured=6
event:7
event:structured=8
event:9
event:structured=10
`))
sp.ResetRecording()
require.NoError(t, CheckRecordedSpans(sp.GetRecording(), `
span: root
tags: _unfinished=1 _verbose=1
`))
require.NoError(t, CheckRecording(sp.GetRecording(), `
=== operation:root _unfinished:1 _verbose:1
`))
msg := func(i int) string { return fmt.Sprintf("msg: %010d", i) }
sp.Record(msg(42))
logSize := sp.GetRecording()[0].Logs[0].Size()
numLogs := maxLogBytesPerSpan / logSize
const extra = 10
for i := 1; i <= numLogs+extra; i++ {
sp.Record(msg(i))
}
require.Equal(t, sp.GetRecording()[0].Tags["_dropped"], "1")
sp.ResetRecording()
_, found := sp.GetRecording()[0].Tags["_dropped"]
require.False(t, found)
}
func TestNonVerboseChildSpanRegisteredWithParent(t *testing.T) {
tr := NewTracer()
sp := tr.StartSpan("root", WithForceRealSpan())
defer sp.Finish()
ch := tr.StartSpan("child", WithParentAndAutoCollection(sp))
defer ch.Finish()
require.Equal(t, 1, sp.i.crdb.mu.recording.children.len())
require.Equal(t, ch.i.crdb, sp.i.crdb.mu.recording.children.get(0))
ch.RecordStructured(&types.Int32Value{Value: 5})
// Check that the child span (incl its payload) is in the recording.
rec := sp.GetRecording()
require.Len(t, rec, 2)
require.Len(t, rec[1].StructuredRecords, 1)
}
// TestSpanMaxChildren verifies that a Span can
// track at most maxChildrenPerSpan direct children.
func TestSpanMaxChildren(t *testing.T) {
tr := NewTracer()
sp := tr.StartSpan("root", WithForceRealSpan())
defer sp.Finish()
for i := 0; i < maxChildrenPerSpan+123; i++ {
ch := tr.StartSpan(fmt.Sprintf("child %d", i), WithParentAndAutoCollection(sp), WithForceRealSpan())
ch.Finish()
exp := i + 1
if exp > maxChildrenPerSpan {
exp = maxChildrenPerSpan
}
require.Equal(t, exp, sp.i.crdb.mu.recording.children.len())
}
}
type explodyNetTr struct {
trace.Trace
}
func (tr *explodyNetTr) Finish() {
if tr.Trace == nil {
panic("(*trace.Trace).Finish called twice")
}
tr.Trace.Finish()
tr.Trace = nil
}
// TestSpan_UseAfterFinish finishes a Span multiple times and
// calls all of its methods multiple times as well. This is
// to check that `Span.done` is called in the right places,
// and serves as a regression test for issues such as:
//
// https://github.com/cockroachdb/cockroach/issues/58489#issuecomment-781263005
func TestSpan_UseAfterFinish(t *testing.T) {
tr := NewTracer()
tr._useNetTrace = 1
sp := tr.StartSpan("foo", WithForceRealSpan())
require.NotNil(t, sp.i.netTr)
// Set up netTr to reliably explode if Finish'ed twice. We
// expect `sp.Finish` to not let it come to that.
sp.i.netTr = &explodyNetTr{Trace: sp.i.netTr}
sp.Finish()
require.True(t, sp.done())
sp.Finish()
require.EqualValues(t, 2, sp.numFinishCalled)
netTrT := reflect.TypeOf(sp)
for i := 0; i < netTrT.NumMethod(); i++ {
f := netTrT.Method(i)
t.Run(f.Name, func(t *testing.T) {
// The receiver is the first argument.
args := []reflect.Value{reflect.ValueOf(sp)}
for i := 1; i < f.Type.NumIn(); i++ {
// Zeroes for the rest. It would be nice to do something
// like `quick.Check` here (or even just call quick.Check!)
// but that's for another day. It should be doable!
args = append(args, reflect.Zero(f.Type.In(i)))
}
// NB: on an impl of Span that calls through to `trace.Trace.Finish`, and
// on my machine, and at the time of writing, `tr.Finish` would reliably
// deadlock on exactly the 10th call. This motivates the choice of 20
// below.
for i := 0; i < 20; i++ {
t.Run("invoke", func(t *testing.T) {
if i == 9 {
f.Func.Call(args)
} else {
f.Func.Call(args)
}
})
}
})
}
}
type countingStringer int32
func (i *countingStringer) String() string {
*i++ // not for concurrent use
return fmt.Sprint(*i)
}
// TestSpanTagsInRecordings verifies that tags added before a recording started
// are part of the recording.
func TestSpanTagsInRecordings(t *testing.T) {
tr := NewTracer()
var counter countingStringer
logTags := logtags.SingleTagBuffer("tagfoo", "tagbar")
logTags = logTags.Add("foo1", &counter)
sp := tr.StartSpan("root",
WithForceRealSpan(),
WithLogTags(logTags),
)
defer sp.Finish()
require.False(t, sp.IsVerbose())
sp.SetTag("foo2", attribute.StringValue("bar2"))
sp.Record("dummy recording")
rec := sp.GetRecording()
require.Len(t, rec, 0)
// We didn't stringify the log tag.
require.Zero(t, int(counter))
sp.SetVerbose(true)
rec = sp.GetRecording()
require.Len(t, rec, 1)
require.Len(t, rec[0].Tags, 5) // _unfinished:1 _verbose:1 tagfoo:tagbar foo1:1 foor2:bar2
_, ok := rec[0].Tags["foo2"]
require.True(t, ok)
require.Equal(t, 1, int(counter))
// Verify that subsequent tags are also captured.
sp.SetTag("foo3", attribute.StringValue("bar3"))
rec = sp.GetRecording()
require.Len(t, rec, 1)
require.Len(t, rec[0].Tags, 6)
_, ok = rec[0].Tags["foo3"]
require.True(t, ok)
require.Equal(t, 2, int(counter))
}
| pkg/util/tracing/span_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/9d55d63b9132b4917e2864581f66cc426836f488 | [
0.987246036529541,
0.018697576597332954,
0.0001653398503549397,
0.00017250579548999667,
0.13304239511489868
] |
{
"id": 0,
"code_window": [
"\t\t// are unlikely to happen.\n",
"\t\treturn\n",
"\t}\n",
"\tsr := &tracingpb.StructuredRecord{\n"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\n",
"\tvar now time.Time\n",
"\tif clock := s.testing.Clock; clock != nil {\n",
"\t\tnow = clock.Now()\n",
"\t} else {\n",
"\t\tnow = time.Now()\n",
"\t}\n"
],
"file_path": "pkg/util/tracing/crdbspan.go",
"type": "add",
"edit_start_line_idx": 312
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package stats
import (
"math"
"sort"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/sql/opt/cat"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/errors"
)
// HistogramClusterMode controls the cluster setting for enabling
// histogram collection.
var HistogramClusterMode = settings.RegisterBoolSetting(
"sql.stats.histogram_collection.enabled",
"histogram collection mode",
true,
).WithPublic()
// HistogramVersion identifies histogram versions.
type HistogramVersion uint32
// histVersion is the current histogram version.
//
// ATTENTION: When updating this field, add a brief description of what
// changed to the version history below.
const histVersion HistogramVersion = 1
/*
** VERSION HISTORY **
Please add new entries at the top.
- Version: 1
- The histogram creation logic was changed so the number of distinct values in
the histogram matched the estimated distinct count from the HyperLogLog sketch.
- Version: 0
- Histogram implementations up to and including 21.1.x. The version field is
omitted on Version 0 histograms.
*/
// EquiDepthHistogram creates a histogram where each bucket contains roughly
// the same number of samples (though it can vary when a boundary value has
// high frequency).
//
// numRows is the total number of rows from which values were sampled
// (excluding rows that have NULL values on the histogram column).
//
// In addition to building the histogram buckets, EquiDepthHistogram also
// estimates the number of distinct values in each bucket. It distributes the
// known number of distinct values (distinctCount) among the buckets, in
// proportion with the number of rows in each bucket.
func EquiDepthHistogram(
evalCtx *tree.EvalContext,
colType *types.T,
samples tree.Datums,
numRows, distinctCount int64,
maxBuckets int,
) (HistogramData, error) {
numSamples := len(samples)
if numSamples == 0 {
return HistogramData{ColumnType: colType}, nil
}
if maxBuckets < 2 {
return HistogramData{}, errors.Errorf("histogram requires at least two buckets")
}
if numRows < int64(numSamples) {
return HistogramData{}, errors.Errorf("more samples than rows")
}
if distinctCount == 0 {
return HistogramData{}, errors.Errorf("histogram requires distinctCount > 0")
}
for _, d := range samples {
if d == tree.DNull {
return HistogramData{}, errors.Errorf("NULL values not allowed in histogram")
}
}
sort.Slice(samples, func(i, j int) bool {
return samples[i].Compare(evalCtx, samples[j]) < 0
})
numBuckets := maxBuckets
if maxBuckets > numSamples {
numBuckets = numSamples
}
h := histogram{buckets: make([]cat.HistogramBucket, 0, numBuckets)}
lowerBound := samples[0]
// i keeps track of the current sample and advances as we form buckets.
for i, b := 0, 0; b < numBuckets && i < numSamples; b++ {
// numSamplesInBucket is the number of samples in this bucket. The first
// bucket has numSamplesInBucket=1 so the histogram has a clear lower bound.
numSamplesInBucket := (numSamples - i) / (numBuckets - b)
if i == 0 || numSamplesInBucket < 1 {
numSamplesInBucket = 1
}
upper := samples[i+numSamplesInBucket-1]
// numLess is the number of samples less than upper (in this bucket).
numLess := 0
for ; numLess < numSamplesInBucket-1; numLess++ {
if c := samples[i+numLess].Compare(evalCtx, upper); c == 0 {
break
} else if c > 0 {
return HistogramData{}, errors.AssertionFailedf("%+v", "samples not sorted")
}
}
// Advance the boundary of the bucket to cover all samples equal to upper.
for ; i+numSamplesInBucket < numSamples; numSamplesInBucket++ {
if samples[i+numSamplesInBucket].Compare(evalCtx, upper) != 0 {
break
}
}
// Estimate the number of rows equal to the upper bound and less than the
// upper bound, as well as the number of distinct values less than the upper
// bound. These estimates may be adjusted later based on the total distinct
// count.
numEq := float64(numSamplesInBucket-numLess) * float64(numRows) / float64(numSamples)
numRange := float64(numLess) * float64(numRows) / float64(numSamples)
distinctRange := estimatedDistinctValuesInRange(evalCtx, numRange, lowerBound, upper)
i += numSamplesInBucket
h.buckets = append(h.buckets, cat.HistogramBucket{
NumEq: numEq,
NumRange: numRange,
DistinctRange: distinctRange,
UpperBound: upper,
})
lowerBound = getNextLowerBound(evalCtx, upper)
}
h.adjustCounts(evalCtx, float64(numRows), float64(distinctCount))
return h.toHistogramData(colType)
}
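// Worked example (added for exposition; hypothetical inputs, not part of the
// original commit): with 6 sorted samples {1, 1, 2, 3, 5, 8}, numRows=600, and
// maxBuckets=3, the first bucket holds the single sample 1 (pinning the lower
// bound), each later bucket takes roughly (numSamples-i)/(numBuckets-b)
// samples, and every per-bucket count is scaled by numRows/numSamples = 100
// before adjustCounts reconciles the totals against distinctCount.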
type histogram struct {
buckets []cat.HistogramBucket
}
// adjustCounts adjusts the row count and number of distinct values per bucket
// based on the total row count and estimated distinct count.
func (h *histogram) adjustCounts(
evalCtx *tree.EvalContext, rowCountTotal, distinctCountTotal float64,
) {
// Calculate the current state of the histogram so we can adjust it as needed.
// The number of rows and distinct values represented by the histogram should
// be adjusted so they equal rowCountTotal and distinctCountTotal.
var rowCountRange, rowCountEq float64
// Total distinct count for values strictly inside bucket boundaries.
var distinctCountRange float64
// Number of bucket boundaries with at least one row on the boundary.
var distinctCountEq float64
for i := range h.buckets {
rowCountRange += h.buckets[i].NumRange
rowCountEq += h.buckets[i].NumEq
distinctCountRange += h.buckets[i].DistinctRange
if h.buckets[i].NumEq > 0 {
distinctCountEq++
}
}
if rowCountEq <= 0 {
panic(errors.AssertionFailedf("expected a positive value for rowCountEq"))
}
// If the upper bounds account for all distinct values (as estimated by the
// sketch), make the histogram consistent by clearing the ranges and adjusting
// the NumEq values to add up to the row count.
if distinctCountEq >= distinctCountTotal {
adjustmentFactorNumEq := rowCountTotal / rowCountEq
for i := range h.buckets {
h.buckets[i].NumRange = 0
h.buckets[i].DistinctRange = 0
h.buckets[i].NumEq *= adjustmentFactorNumEq
}
return
}
// The upper bounds do not account for all distinct values, so adjust the
// NumEq values if needed so they add up to less than the row count.
remDistinctCount := distinctCountTotal - distinctCountEq
if rowCountEq+remDistinctCount >= rowCountTotal {
targetRowCountEq := rowCountTotal - remDistinctCount
adjustmentFactorNumEq := targetRowCountEq / rowCountEq
for i := range h.buckets {
h.buckets[i].NumEq *= adjustmentFactorNumEq
}
rowCountEq = targetRowCountEq
}
// If the ranges do not account for the remaining distinct values, increment
// them so they add up to the remaining distinct count.
if remDistinctCount > distinctCountRange {
remDistinctCount -= distinctCountRange
// Calculate the maximum possible number of distinct values that can be
// added to the histogram.
maxDistinctCountRange := float64(math.MaxInt64)
lowerBound := h.buckets[0].UpperBound
upperBound := h.buckets[len(h.buckets)-1].UpperBound
if maxDistinct, ok := tree.MaxDistinctCount(evalCtx, lowerBound, upperBound); ok {
// Subtract distinctCountEq to account for the upper bounds of the
// buckets, along with the current range distinct count which has already
// been accounted for.
maxDistinctCountRange = float64(maxDistinct) - distinctCountEq - distinctCountRange
}
// Add distinct values into the histogram if there is space. Increment the
// distinct count of each bucket except the first one.
if maxDistinctCountRange > 0 {
if remDistinctCount > maxDistinctCountRange {
// There isn't enough space in the entire histogram for these distinct
// values. Add what we can now, and we will add extra buckets below.
remDistinctCount = maxDistinctCountRange
}
avgRemPerBucket := remDistinctCount / float64(len(h.buckets)-1)
for i := 1; i < len(h.buckets); i++ {
lowerBound := h.buckets[i-1].UpperBound
upperBound := h.buckets[i].UpperBound
maxDistRange, countable := maxDistinctRange(evalCtx, lowerBound, upperBound)
inc := avgRemPerBucket
if countable {
maxDistRange -= h.buckets[i].DistinctRange
// Set the increment proportional to the remaining number of
// distinct values in the bucket.
inc = remDistinctCount * (maxDistRange / maxDistinctCountRange)
}
h.buckets[i].NumRange += inc
h.buckets[i].DistinctRange += inc
rowCountRange += inc
distinctCountRange += inc
}
}
}
// If there are still some distinct values that are unaccounted for, this is
// probably because the samples did not cover the full domain of possible
// values. Add buckets above and below the existing buckets to contain these
// values.
remDistinctCount = distinctCountTotal - distinctCountRange - distinctCountEq
if remDistinctCount > 0 {
h.addOuterBuckets(
evalCtx, remDistinctCount, &rowCountEq, &distinctCountEq, &rowCountRange, &distinctCountRange,
)
}
// Adjust the values so the row counts and distinct counts add up correctly.
adjustmentFactorDistinctRange := float64(1)
if distinctCountRange > 0 {
adjustmentFactorDistinctRange = (distinctCountTotal - distinctCountEq) / distinctCountRange
}
adjustmentFactorRowCount := rowCountTotal / (rowCountRange + rowCountEq)
for i := range h.buckets {
h.buckets[i].DistinctRange *= adjustmentFactorDistinctRange
h.buckets[i].NumRange *= adjustmentFactorRowCount
h.buckets[i].NumEq *= adjustmentFactorRowCount
}
}
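// Illustrative sketch (added for exposition; not part of the original file):
// if the sketch reports distinctCountTotal=2 but three bucket boundaries each
// have NumEq > 0 (so distinctCountEq=3 >= 2), the first branch above clears
// all ranges and rescales the NumEq values so they sum to rowCountTotal.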
// addOuterBuckets adds buckets above and below the existing buckets in the
// histogram to include the remaining distinct values in remDistinctCount. It
// also increments the counters rowCountEq, distinctCountEq, rowCountRange, and
// distinctCountRange as needed.
func (h *histogram) addOuterBuckets(
evalCtx *tree.EvalContext,
remDistinctCount float64,
rowCountEq, distinctCountEq, rowCountRange, distinctCountRange *float64,
) {
var maxDistinctCountExtraBuckets float64
var addedMin, addedMax bool
var newBuckets int
if !h.buckets[0].UpperBound.IsMin(evalCtx) {
if minVal, ok := h.buckets[0].UpperBound.Min(evalCtx); ok {
lowerBound := minVal
upperBound := h.buckets[0].UpperBound
maxDistRange, _ := maxDistinctRange(evalCtx, lowerBound, upperBound)
maxDistinctCountExtraBuckets += maxDistRange
h.buckets = append([]cat.HistogramBucket{{UpperBound: minVal}}, h.buckets...)
addedMin = true
newBuckets++
}
}
if !h.buckets[len(h.buckets)-1].UpperBound.IsMax(evalCtx) {
if maxVal, ok := h.buckets[len(h.buckets)-1].UpperBound.Max(evalCtx); ok {
lowerBound := h.buckets[len(h.buckets)-1].UpperBound
upperBound := maxVal
maxDistRange, _ := maxDistinctRange(evalCtx, lowerBound, upperBound)
maxDistinctCountExtraBuckets += maxDistRange
h.buckets = append(h.buckets, cat.HistogramBucket{UpperBound: maxVal})
addedMax = true
newBuckets++
}
}
if newBuckets == 0 {
// No new buckets added.
return
}
// If this is an enum or bool histogram, increment numEq for the upper
// bounds.
if typFam := h.buckets[0].UpperBound.ResolvedType().Family(); typFam == types.EnumFamily ||
typFam == types.BoolFamily {
if addedMin {
h.buckets[0].NumEq++
}
if addedMax {
h.buckets[len(h.buckets)-1].NumEq++
}
*rowCountEq += float64(newBuckets)
*distinctCountEq += float64(newBuckets)
remDistinctCount -= float64(newBuckets)
}
if remDistinctCount <= 0 {
// All distinct values accounted for.
return
}
// Account for the remaining values in the new bucket ranges.
bucIndexes := make([]int, 0, newBuckets)
if addedMin {
// We'll be incrementing the range of the second bucket.
bucIndexes = append(bucIndexes, 1)
}
if addedMax {
bucIndexes = append(bucIndexes, len(h.buckets)-1)
}
avgRemPerBucket := remDistinctCount / float64(newBuckets)
for _, i := range bucIndexes {
lowerBound := h.buckets[i-1].UpperBound
upperBound := h.buckets[i].UpperBound
maxDistRange, countable := maxDistinctRange(evalCtx, lowerBound, upperBound)
inc := avgRemPerBucket
if countable && h.buckets[0].UpperBound.ResolvedType().Family() == types.EnumFamily {
// Set the increment proportional to the remaining number of
// distinct values in the bucket. This only really matters for
// enums.
inc = remDistinctCount * (maxDistRange / maxDistinctCountExtraBuckets)
}
h.buckets[i].NumRange += inc
h.buckets[i].DistinctRange += inc
*rowCountRange += inc
*distinctCountRange += inc
}
}
// toHistogramData converts a histogram to a HistogramData protobuf with the
// given type.
func (h histogram) toHistogramData(colType *types.T) (HistogramData, error) {
histogramData := HistogramData{
Buckets: make([]HistogramData_Bucket, len(h.buckets)),
ColumnType: colType,
Version: histVersion,
}
for i := range h.buckets {
encoded, err := rowenc.EncodeTableKey(nil, h.buckets[i].UpperBound, encoding.Ascending)
if err != nil {
return HistogramData{}, err
}
histogramData.Buckets[i] = HistogramData_Bucket{
NumEq: int64(math.Round(h.buckets[i].NumEq)),
NumRange: int64(math.Round(h.buckets[i].NumRange)),
DistinctRange: h.buckets[i].DistinctRange,
UpperBound: encoded,
}
}
return histogramData, nil
}
// estimatedDistinctValuesInRange returns the estimated number of distinct
// values in the range [lowerBound, upperBound), given that the total number
// of values is numRange.
//
// If lowerBound and upperBound are not countable, the distinct count is just
// equal to numRange. If they are countable, we can estimate the distinct count
// based on the total number of distinct values in the range.
func estimatedDistinctValuesInRange(
evalCtx *tree.EvalContext, numRange float64, lowerBound, upperBound tree.Datum,
) float64 {
if numRange == 0 {
return 0
}
rangeUpperBound, ok := upperBound.Prev(evalCtx)
if !ok {
rangeUpperBound = upperBound
}
if maxDistinct, ok := tree.MaxDistinctCount(evalCtx, lowerBound, rangeUpperBound); ok {
return expectedDistinctCount(numRange, float64(maxDistinct))
}
return numRange
}
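// Worked example (added for exposition; hypothetical, not part of the
// original file): for an INT8 range [1, 10) with numRange=20 rows, Prev(10)=9
// and tree.MaxDistinctCount(1, 9)=9, so the estimate is
// expectedDistinctCount(20, 9) ≈ 8.1 rather than 20.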
func getNextLowerBound(evalCtx *tree.EvalContext, currentUpperBound tree.Datum) tree.Datum {
nextLowerBound, ok := currentUpperBound.Next(evalCtx)
if !ok {
nextLowerBound = currentUpperBound
}
return nextLowerBound
}
// maxDistinctRange returns the maximum number of distinct values in the given
// range, excluding both lowerBound and upperBound. Returns countable=true if
// the returned value is countable.
func maxDistinctRange(
evalCtx *tree.EvalContext, lowerBound, upperBound tree.Datum,
) (_ float64, countable bool) {
if maxDistinct, ok := tree.MaxDistinctCount(evalCtx, lowerBound, upperBound); ok {
// Remove 2 for the upper and lower boundaries.
if maxDistinct < 2 {
return 0, true
}
return float64(maxDistinct - 2), true
}
return float64(math.MaxInt64), false
}
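// Worked example (added for exposition; hypothetical, not part of the
// original file): for INT8 bounds 5 and 10, tree.MaxDistinctCount reports 6
// values in [5, 10]; excluding both bounds leaves 4 distinct values strictly
// inside the range.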
// expectedDistinctCount returns the expected number of distinct values
// among k random numbers selected from n possible values. We assume the
// values are chosen using uniform random sampling with replacement.
func expectedDistinctCount(k, n float64) float64 {
if n == 0 || k == 0 {
return 0
}
// The probability that one specific value (out of the n possible values)
// does not appear in any of the k selections is:
//
// ⎛ n-1 ⎞ k
// p = ⎜-----⎟
// ⎝ n ⎠
//
// Therefore, the probability that a specific value appears at least once is
// 1-p. Over all n values, the expected number that appear at least once is
// n * (1-p). In other words, the expected distinct count is:
//
// ⎛ ⎛ n-1 ⎞ k ⎞
// E[distinct count] = n * ⎜ 1 - ⎜-----⎟ ⎟
// ⎝ ⎝ n ⎠ ⎠
//
// See https://math.stackexchange.com/questions/72223/finding-expected-
// number-of-distinct-values-selected-from-a-set-of-integers for more info.
count := n * (1 - math.Pow((n-1)/n, k))
// It's possible that if n is very large, floating point precision errors
// will cause count to be 0. In that case, just return min(n, k).
if count == 0 {
count = k
if n < k {
count = n
}
}
return count
}
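// Worked example (added for exposition; not part of the original file): for
// k=10 selections from n=4 possible values,
//
//	E[distinct count] = 4 * (1 - (3/4)^10) ≈ 4 * 0.944 ≈ 3.77,
//
// i.e. nearly every possible value is expected to appear at least once.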
| pkg/sql/stats/histogram.go | 0 | https://github.com/cockroachdb/cockroach/commit/9d55d63b9132b4917e2864581f66cc426836f488 | [
0.0005028066807426512,
0.00019005492504220456,
0.00016363858594559133,
0.0001691733777988702,
0.00006843107257736847
] |
{
"id": 0,
"code_window": [
"\t\t// are unlikely to happen.\n",
"\t\treturn\n",
"\t}\n",
"\tsr := &tracingpb.StructuredRecord{\n"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\n",
"\tvar now time.Time\n",
"\tif clock := s.testing.Clock; clock != nil {\n",
"\t\tnow = clock.Now()\n",
"\t} else {\n",
"\t\tnow = time.Now()\n",
"\t}\n"
],
"file_path": "pkg/util/tracing/crdbspan.go",
"type": "add",
"edit_start_line_idx": 312
} | [32 0 0 0 0 0 0 0] | pkg/util/json/testdata/encoded/structure_lonely_null.json.bytes | 0 | https://github.com/cockroachdb/cockroach/commit/9d55d63b9132b4917e2864581f66cc426836f488 | [
0.00017589092021808028,
0.00017589092021808028,
0.00017589092021808028,
0.00017589092021808028,
0
] |
{
"id": 0,
"code_window": [
"\t\t// are unlikely to happen.\n",
"\t\treturn\n",
"\t}\n",
"\tsr := &tracingpb.StructuredRecord{\n"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\n",
"\tvar now time.Time\n",
"\tif clock := s.testing.Clock; clock != nil {\n",
"\t\tnow = clock.Now()\n",
"\t} else {\n",
"\t\tnow = time.Now()\n",
"\t}\n"
],
"file_path": "pkg/util/tracing/crdbspan.go",
"type": "add",
"edit_start_line_idx": 312
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package storage
// TestingKnobs can be passed when instantiating a storage engine. Settings here
// are used to change behavior in tests.
type TestingKnobs struct {
// DisableSeparatedIntents disables the writing of separated intents. Only
// used in tests to check handling of interleaved intents.
DisableSeparatedIntents bool
}
| pkg/storage/testing_knobs.go | 0 | https://github.com/cockroachdb/cockroach/commit/9d55d63b9132b4917e2864581f66cc426836f488 | [
0.00017568760085850954,
0.0001718679122859612,
0.00016804822371341288,
0.0001718679122859612,
0.00000381968857254833
] |
{
"id": 1,
"code_window": [
"\tsr := &tracingpb.StructuredRecord{\n",
"\t\tTime: time.Now(),\n",
"\t\tPayload: p,\n",
"\t}\n",
"\ts.recordInternal(sr, &s.mu.recording.structured)\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tTime: now,\n"
],
"file_path": "pkg/util/tracing/crdbspan.go",
"type": "replace",
"edit_start_line_idx": 313
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tracing
import (
"fmt"
"sort"
"sync"
"sync/atomic"
"time"
"github.com/cockroachdb/cockroach/pkg/util/ring"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb"
"github.com/cockroachdb/logtags"
"github.com/cockroachdb/redact"
"github.com/gogo/protobuf/types"
"go.opentelemetry.io/otel/attribute"
)
// crdbSpan is a span for internal crdb usage. This is used to power SQL session
// tracing.
type crdbSpan struct {
rootSpan *crdbSpan // root span of the containing trace; could be itself
// traceEmpty indicates whether or not the trace rooted at this span
// (provided it is a root span) contains any recordings or baggage. All
// spans hold a reference to the rootSpan; this field is accessed
// through that reference.
traceEmpty int32 // accessed atomically, through markTraceAsNonEmpty and inAnEmptyTrace
traceID uint64 // probabilistically unique
spanID uint64 // probabilistically unique
parentSpanID uint64
goroutineID uint64
startTime time.Time
// logTags are set to the log tags that were available when this Span was
// created, so that there's no need to eagerly copy all of those log tags
// into this Span's tags. If the Span's tags are actually requested, these
// logTags will be copied out at that point.
//
// Note that these tags have not gone through the log tag -> Span tag
// remapping procedure; tagName() needs to be called before exposing each
// tag's key to a user.
logTags *logtags.Buffer
mu crdbSpanMu
testing *TracerTestingKnobs
}
type crdbSpanMu struct {
syncutil.Mutex
// duration is initialized to -1 and set on Finish().
duration time.Duration
operation string // name of operation associated with the span
recording struct {
// recordingType is the recording type of the ongoing recording, if any.
// Its 'load' method may be called without holding the surrounding mutex,
// but its 'swap' method requires the mutex.
recordingType atomicRecordingType
logs sizeLimitedBuffer // of *tracingpb.LogRecords
structured sizeLimitedBuffer // of Structured events
		// dropped is true if the span has capped out its memory limits for
// logs and structured events, and has had to drop some. It's used to
// annotate recordings with the _dropped tag, when applicable.
dropped bool
// children contains the list of child spans started after this Span
// started recording.
children childSpanRefs
// remoteSpan contains the list of remote child span recordings that
// were manually imported.
remoteSpans []tracingpb.RecordedSpan
}
// The Span's associated baggage.
baggage map[string]string
// tags are only captured when recording. These are tags that have been
// added to this Span, and will be appended to the tags in logTags when
// someone needs to actually observe the total set of tags that is a part of
// this Span.
// TODO(radu): perhaps we want a recording to capture all the tags (even
// those that were set before recording started)?
tags []attribute.KeyValue
}
type childSpanRefs struct {
refCount int
preAllocated [4]*crdbSpan
overflow []*crdbSpan
}
func (c *childSpanRefs) len() int {
return c.refCount
}
func (c *childSpanRefs) add(ref *crdbSpan) {
if c.refCount < len(c.preAllocated) {
c.preAllocated[c.refCount] = ref
c.refCount++
return
}
// Only record the child if the parent still has room.
if c.refCount < maxChildrenPerSpan {
c.overflow = append(c.overflow, ref)
c.refCount++
}
}
func (c *childSpanRefs) get(idx int) *crdbSpan {
if idx < len(c.preAllocated) {
ref := c.preAllocated[idx]
if ref == nil {
panic(fmt.Sprintf("idx %d out of bounds", idx))
}
return ref
}
return c.overflow[idx-len(c.preAllocated)]
}
func (c *childSpanRefs) reset() {
for i := 0; i < len(c.preAllocated); i++ {
c.preAllocated[i] = nil
}
c.overflow = nil
c.refCount = 0
}
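// Illustrative sketch (added for exposition; not part of the original file):
// the first four children land in the preAllocated array without a heap
// allocation; children five through maxChildrenPerSpan spill into overflow,
// and anything past maxChildrenPerSpan is silently not recorded.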
func newSizeLimitedBuffer(limit int64) sizeLimitedBuffer {
return sizeLimitedBuffer{
limit: limit,
}
}
type sizeLimitedBuffer struct {
ring.Buffer
size int64 // in bytes
limit int64 // in bytes
}
func (b *sizeLimitedBuffer) Reset() {
b.Buffer.Reset()
b.size = 0
}
func (s *crdbSpan) recordingType() RecordingType {
if s == nil {
return RecordingOff
}
return s.mu.recording.recordingType.load()
}
// enableRecording starts recording on the Span. From now on, log events and
// child spans will be stored.
func (s *crdbSpan) enableRecording(recType RecordingType) {
if recType == RecordingOff || s.recordingType() == recType {
return
}
s.mu.Lock()
defer s.mu.Unlock()
s.mu.recording.recordingType.swap(recType)
if recType == RecordingVerbose {
s.setBaggageItemLocked(verboseTracingBaggageKey, "1")
}
}
// resetRecording clears any previously recorded info.
//
// NB: This is needed by SQL SessionTracing, who likes to start and stop
// recording repeatedly on the same Span, and collect the (separate) recordings
// every time.
func (s *crdbSpan) resetRecording() {
s.mu.Lock()
defer s.mu.Unlock()
s.mu.recording.logs.Reset()
s.mu.recording.structured.Reset()
s.mu.recording.dropped = false
s.mu.recording.children.reset()
s.mu.recording.remoteSpans = nil
}
func (s *crdbSpan) disableRecording() {
if s.recordingType() == RecordingOff {
return
}
s.mu.Lock()
defer s.mu.Unlock()
oldRecType := s.mu.recording.recordingType.swap(RecordingOff)
// We test the duration as a way to check if the Span has been finished. If it
// has, we don't want to do the call below as it might crash (at least if
// there's a netTr).
if (s.mu.duration == -1) && (oldRecType == RecordingVerbose) {
// Clear the verboseTracingBaggageKey baggage item, assuming that it was set by
// enableRecording().
s.setBaggageItemLocked(verboseTracingBaggageKey, "")
}
}
func (s *crdbSpan) getRecording(wantTags bool) Recording {
if s == nil {
return nil // noop span
}
// Return early (without allocating) if the trace is empty, i.e. there are
// no recordings or baggage. If the trace is verbose, we'll still recurse in
// order to pick up all the operations that were part of the trace, despite
// nothing having any actual data in them.
if s.recordingType() != RecordingVerbose && s.inAnEmptyTrace() && !s.testing.RecordEmptyTraces {
return nil
}
s.mu.Lock()
// The capacity here is approximate since we don't know how many
// grandchildren there are.
result := make(Recording, 0, 1+s.mu.recording.children.len()+len(s.mu.recording.remoteSpans))
// Shallow-copy the children so we can process them without the lock.
var children []*crdbSpan
for i := 0; i < s.mu.recording.children.len(); i++ {
children = append(children, s.mu.recording.children.get(i))
}
result = append(result, s.getRecordingLocked(wantTags))
result = append(result, s.mu.recording.remoteSpans...)
s.mu.Unlock()
for _, child := range children {
result = append(result, child.getRecording(wantTags)...)
}
// Sort the spans by StartTime, except the first Span (the root of this
// recording) which stays in place.
toSort := sortPool.Get().(*Recording) // avoids allocations in sort.Sort
*toSort = result[1:]
sort.Sort(toSort)
*toSort = nil
sortPool.Put(toSort)
return result
}
func (s *crdbSpan) importRemoteSpans(remoteSpans []tracingpb.RecordedSpan) {
if len(remoteSpans) == 0 {
return
}
s.markTraceAsNonEmpty()
// Change the root of the remote recording to be a child of this Span. This is
// usually already the case, except with DistSQL traces where remote
// processors run in spans that FollowFrom an RPC Span that we don't collect.
remoteSpans[0].ParentSpanID = s.spanID
s.mu.Lock()
defer s.mu.Unlock()
s.mu.recording.remoteSpans = append(s.mu.recording.remoteSpans, remoteSpans...)
}
func (s *crdbSpan) setTagLocked(key string, value attribute.Value) {
k := attribute.Key(key)
for i := range s.mu.tags {
if s.mu.tags[i].Key == k {
s.mu.tags[i].Value = value
return
}
}
s.mu.tags = append(s.mu.tags, attribute.KeyValue{Key: k, Value: value})
}
func (s *crdbSpan) record(msg redact.RedactableString) {
if s.recordingType() != RecordingVerbose {
return
}
var now time.Time
if clock := s.testing.Clock; clock != nil {
now = clock.Now()
} else {
now = time.Now()
}
logRecord := &tracingpb.LogRecord{
Time: now,
Message: msg,
// Compatibility with 21.2.
DeprecatedFields: []tracingpb.LogRecord_Field{
{Key: tracingpb.LogMessageField, Value: msg},
},
}
s.recordInternal(logRecord, &s.mu.recording.logs)
}
func (s *crdbSpan) recordStructured(item Structured) {
p, err := types.MarshalAny(item)
if err != nil {
// An error here is an error from Marshal; these
// are unlikely to happen.
return
}
sr := &tracingpb.StructuredRecord{
Time: time.Now(),
Payload: p,
}
s.recordInternal(sr, &s.mu.recording.structured)
}
// sizable is a subset for protoutil.Message, for payloads (log records and
// structured events) that can be recorded.
type sizable interface {
Size() int
}
// inAnEmptyTrace indicates whether or not the containing trace is "empty" (i.e.
// has any recordings or baggage).
func (s *crdbSpan) inAnEmptyTrace() bool {
val := atomic.LoadInt32(&s.rootSpan.traceEmpty)
return val == 0
}
func (s *crdbSpan) markTraceAsNonEmpty() {
atomic.StoreInt32(&s.rootSpan.traceEmpty, 1)
}
func (s *crdbSpan) recordInternal(payload sizable, buffer *sizeLimitedBuffer) {
s.markTraceAsNonEmpty()
s.mu.Lock()
defer s.mu.Unlock()
size := int64(payload.Size())
if size > buffer.limit {
// The incoming payload alone blows past the memory limit. Let's just
// drop it.
s.mu.recording.dropped = true
return
}
buffer.size += size
if buffer.size > buffer.limit {
s.mu.recording.dropped = true
}
for buffer.size > buffer.limit {
first := buffer.GetFirst().(sizable)
buffer.RemoveFirst()
buffer.size -= int64(first.Size())
}
buffer.AddLast(payload)
}
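// Worked example (added for exposition; hypothetical sizes, not part of the
// original file): with limit=10 and a buffer currently holding payloads of
// sizes {3, 4} (size=7), recording a 4-byte payload pushes size to 11, marks
// the recording as dropped, and evicts the 3-byte payload from the front,
// leaving {4, 4} with size=8.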
func (s *crdbSpan) setBaggageItemAndTag(restrictedKey, value string) {
s.markTraceAsNonEmpty()
s.mu.Lock()
defer s.mu.Unlock()
s.setBaggageItemLocked(restrictedKey, value)
// Don't set the tag if this is the special cased baggage item indicating
// span verbosity, as it is named nondescriptly and the recording knows
// how to display its verbosity independently.
if restrictedKey != verboseTracingBaggageKey {
s.setTagLocked(restrictedKey, attribute.StringValue(value))
}
}
func (s *crdbSpan) setBaggageItemLocked(restrictedKey, value string) {
if oldVal, ok := s.mu.baggage[restrictedKey]; ok && oldVal == value {
// No-op.
return
}
if s.mu.baggage == nil {
s.mu.baggage = make(map[string]string)
}
s.mu.baggage[restrictedKey] = value
}
// getRecordingLocked returns the Span's recording. This does not include
// children.
//
// When wantTags is false, no tags will be added. This is a performance
// optimization as stringifying the tag values can be expensive.
func (s *crdbSpan) getRecordingLocked(wantTags bool) tracingpb.RecordedSpan {
rs := tracingpb.RecordedSpan{
TraceID: s.traceID,
SpanID: s.spanID,
ParentSpanID: s.parentSpanID,
GoroutineID: s.goroutineID,
Operation: s.mu.operation,
StartTime: s.startTime,
Duration: s.mu.duration,
RedactableLogs: true,
}
if rs.Duration == -1 {
// -1 indicates an unfinished Span. For a recording it's better to put some
// duration in it, otherwise tools get confused. For example, we export
// recordings to Jaeger, and spans with a zero duration don't look nice.
rs.Duration = timeutil.Now().Sub(rs.StartTime)
rs.Finished = false
} else {
rs.Finished = true
}
addTag := func(k, v string) {
if rs.Tags == nil {
rs.Tags = make(map[string]string)
}
rs.Tags[k] = v
}
if wantTags {
if s.mu.duration == -1 {
addTag("_unfinished", "1")
}
if s.mu.recording.recordingType.load() == RecordingVerbose {
addTag("_verbose", "1")
}
if s.mu.recording.dropped {
addTag("_dropped", "1")
}
}
if numEvents := s.mu.recording.structured.Len(); numEvents != 0 {
rs.StructuredRecords = make([]tracingpb.StructuredRecord, numEvents)
for i := 0; i < numEvents; i++ {
event := s.mu.recording.structured.Get(i).(*tracingpb.StructuredRecord)
rs.StructuredRecords[i] = *event
}
}
if len(s.mu.baggage) > 0 {
rs.Baggage = make(map[string]string)
for k, v := range s.mu.baggage {
rs.Baggage[k] = v
}
}
if wantTags {
if s.logTags != nil {
setLogTags(s.logTags.Get(), func(remappedKey string, tag *logtags.Tag) {
addTag(remappedKey, tag.ValueStr())
})
}
for _, kv := range s.mu.tags {
// We encode the tag values as strings.
addTag(string(kv.Key), kv.Value.Emit())
}
}
if numLogs := s.mu.recording.logs.Len(); numLogs != 0 {
rs.Logs = make([]tracingpb.LogRecord, numLogs)
for i := 0; i < numLogs; i++ {
lr := s.mu.recording.logs.Get(i).(*tracingpb.LogRecord)
rs.Logs[i] = *lr
}
}
return rs
}
func (s *crdbSpan) addChild(child *crdbSpan) {
s.mu.Lock()
defer s.mu.Unlock()
s.mu.recording.children.add(child)
}
// setVerboseRecursively sets the verbosity of the crdbSpan appropriately and
// recurses on its list of children.
func (s *crdbSpan) setVerboseRecursively(to bool) {
if to {
s.enableRecording(RecordingVerbose)
} else {
s.disableRecording()
}
s.mu.Lock()
var children []*crdbSpan
for i := 0; i < s.mu.recording.children.len(); i++ {
children = append(children, s.mu.recording.children.get(i))
}
s.mu.Unlock()
for _, child := range children {
child.setVerboseRecursively(to)
}
}
var sortPool = sync.Pool{
New: func() interface{} {
return &Recording{}
},
}
// Less implements sort.Interface.
func (r Recording) Less(i, j int) bool {
return r[i].StartTime.Before(r[j].StartTime)
}
// Swap implements sort.Interface.
func (r Recording) Swap(i, j int) {
r[i], r[j] = r[j], r[i]
}
// Len implements sort.Interface.
func (r Recording) Len() int {
return len(r)
}
type atomicRecordingType RecordingType
// load returns the recording type.
func (art *atomicRecordingType) load() RecordingType {
return RecordingType(atomic.LoadInt32((*int32)(art)))
}
// swap stores the new recording type and returns the old one.
func (art *atomicRecordingType) swap(recType RecordingType) RecordingType {
return RecordingType(atomic.SwapInt32((*int32)(art), int32(recType)))
}
| pkg/util/tracing/crdbspan.go | 1 | https://github.com/cockroachdb/cockroach/commit/9d55d63b9132b4917e2864581f66cc426836f488 | [
0.9981752634048462,
0.019653908908367157,
0.00016356869309674948,
0.0002596335834823549,
0.1357024610042572
] |
{
"id": 1,
"code_window": [
"\tsr := &tracingpb.StructuredRecord{\n",
"\t\tTime: time.Now(),\n",
"\t\tPayload: p,\n",
"\t}\n",
"\ts.recordInternal(sr, &s.mu.recording.structured)\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tTime: now,\n"
],
"file_path": "pkg/util/tracing/crdbspan.go",
"type": "replace",
"edit_start_line_idx": 313
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package recording
import (
"bytes"
"fmt"
"io"
"strings"
)
// Recording can be used to play back a set of operations (defined only by a
// "command" and an "expected output"). It provides a handy way to mock out the
// components being recorded.
type Recording struct {
// scanner is where we're replaying the recording from. op is the
// scratch space used to parse out the current operation being read.
scanner *scanner
op Operation
}
// WithReplayFrom is used to configure a Recording to play back from the given
// reader. The provided name is used only for diagnostic purposes, it's
// typically the name of the file being read.
func WithReplayFrom(r io.Reader, name string) *Recording {
re := &Recording{}
re.scanner = newScanner(r, name)
return re
}
// Next is used to step through the next operation found in the recording, if
// any.
func (r *Recording) Next(f func(Operation) error) (found bool, err error) {
parsed, err := r.parseOperation()
if err != nil {
return false, err
}
if !parsed {
return false, nil
}
if err := f(r.op); err != nil {
return false, fmt.Errorf("%s: %w", r.scanner.pos(), err)
}
return true, nil
}
// parseOperation parses out the next Operation from the internal scanner. See
// type-level comment on Operation to understand the grammar we're parsing
// against.
func (r *Recording) parseOperation() (parsed bool, err error) {
for r.scanner.Scan() {
r.op = Operation{}
line := r.scanner.Text()
line = strings.TrimSpace(line)
if strings.HasPrefix(line, "#") {
// Skip comment lines.
continue
}
// Support wrapping command directive lines using "\".
for strings.HasSuffix(line, `\`) && r.scanner.Scan() {
nextLine := r.scanner.Text()
line = strings.TrimSuffix(line, `\`)
line = strings.TrimSpace(line)
line = fmt.Sprintf("%s %s", line, strings.TrimSpace(nextLine))
}
command, err := r.parseCommand(line)
if err != nil {
return false, err
}
if command == "" {
// Nothing to do here.
continue
}
r.op.Command = command
if err := r.parseSeparator(); err != nil {
return false, err
}
if err := r.parseOutput(); err != nil {
return false, err
}
return true, nil
}
return false, nil
}
// parseCommand parses a <command> line and returns it if parsed correctly. See
// type-level comment on Operation to understand the grammar we're parsing
// against.
func (r *Recording) parseCommand(line string) (cmd string, err error) {
line = strings.TrimSpace(line)
if line == "" {
return "", nil
}
origLine := line
cmd = strings.TrimSpace(line)
if cmd == "" {
column := len(origLine) - len(line) + 1
return "", fmt.Errorf("%s: cannot parse command at col %d: %s", r.scanner.pos(), column, origLine)
}
return cmd, nil
}
// parseSeparator parses a separator ('----'), erroring out if it's not parsed
// correctly. See type-level comment on Operation to understand the grammar
// we're parsing against.
func (r *Recording) parseSeparator() error {
if !r.scanner.Scan() {
return fmt.Errorf("%s: expected to find separator after command", r.scanner.pos())
}
line := r.scanner.Text()
if line != "----" {
return fmt.Errorf("%s: expected to find separator after command, found %q instead", r.scanner.pos(), line)
}
return nil
}
// parseOutput parses an <output>. See type-level comment on Operation to
// understand the grammar we're parsing against.
func (r *Recording) parseOutput() error {
var buf bytes.Buffer
var line string
var allowBlankLines bool
if r.scanner.Scan() {
line = r.scanner.Text()
if line == "----" {
allowBlankLines = true
}
}
if !allowBlankLines {
// Terminate on first blank line.
for {
if strings.TrimSpace(line) == "" {
break
}
if _, err := fmt.Fprintln(&buf, line); err != nil {
return err
}
if !r.scanner.Scan() {
break
}
line = r.scanner.Text()
}
r.op.Output = buf.String()
return nil
}
// Look for two successive lines of "----" before terminating.
for r.scanner.Scan() {
line = r.scanner.Text()
if line != "----" {
// We just picked up a regular line that's part of the command
// output.
if _, err := fmt.Fprintln(&buf, line); err != nil {
return err
}
continue
}
// We picked up a separator. We could either be part of the
// command output, or it was actually intended by the user as a
// separator. Let's check to see if we can parse a second one.
if err := r.parseSeparator(); err == nil {
// We just saw the second separator, the output portion is done.
// Read the following blank line.
if r.scanner.Scan() && r.scanner.Text() != "" {
return fmt.Errorf("%s: non-blank line after end of double ---- separator section", r.scanner.pos())
}
break
}
// The separator we saw was part of the command output.
// Let's collect both lines (the first separator, and the
// new one).
if _, err := fmt.Fprintln(&buf, line); err != nil {
return err
}
line2 := r.scanner.Text()
if _, err := fmt.Fprintln(&buf, line2); err != nil {
return err
}
}
r.op.Output = buf.String()
return nil
}
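// Illustrative sketch (added for exposition; hypothetical input, not part of
// the original file): a minimal recording this parser accepts looks like
//
//	echo hello
//	----
//	hello
//
// and an output section that itself contains blank lines is wrapped in
// doubled separators:
//
//	cat notes
//	----
//	----
//	first line
//
//	third line
//	----
//	----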
| pkg/cmd/dev/recording/recording.go | 0 | https://github.com/cockroachdb/cockroach/commit/9d55d63b9132b4917e2864581f66cc426836f488 | [
0.0018273958703503013,
0.00032172646024264395,
0.00016788036737125367,
0.00018102256581187248,
0.0003627690894063562
] |
{
"id": 1,
"code_window": [
"\tsr := &tracingpb.StructuredRecord{\n",
"\t\tTime: time.Now(),\n",
"\t\tPayload: p,\n",
"\t}\n",
"\ts.recordInternal(sr, &s.mu.recording.structured)\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tTime: now,\n"
],
"file_path": "pkg/util/tracing/crdbspan.go",
"type": "replace",
"edit_start_line_idx": 313
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package passesutil_test
import (
"path/filepath"
"testing"
"github.com/cockroachdb/cockroach/pkg/build/bazel"
"github.com/cockroachdb/cockroach/pkg/testutils/lint/passes/forbiddenmethod"
"github.com/cockroachdb/cockroach/pkg/testutils/lint/passes/unconvert"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/stretchr/testify/require"
"golang.org/x/tools/go/analysis/analysistest"
)
func init() {
if bazel.BuiltWithBazel() {
bazel.SetGoEnv()
}
}
// Use tests from other packages to also test this package. This ensures
// that if that code changes, somebody will look here. Also it allows for
// coverage checking here.
func requireNotEmpty(t *testing.T, path string) {
t.Helper()
files, err := filepath.Glob(path)
require.NoError(t, err)
require.NotEmpty(t, files)
}
func getTestdataForPackage(t *testing.T, pkg string) string {
if bazel.BuiltWithBazel() {
runfiles, err := bazel.RunfilesPath()
require.NoError(t, err)
return filepath.Join(runfiles, "pkg", "testutils", "lint", "passes", pkg, "testdata")
}
return filepath.Join("..", pkg, "testdata")
}
func TestDescriptorMarshal(t *testing.T) {
skip.UnderStress(t)
testdata, err := filepath.Abs(getTestdataForPackage(t, "forbiddenmethod"))
require.NoError(t, err)
requireNotEmpty(t, testdata)
analysistest.TestData = func() string { return testdata }
analysistest.Run(t, testdata, forbiddenmethod.DescriptorMarshalAnalyzer, "descmarshaltest")
}
func TestUnconvert(t *testing.T) {
skip.UnderStress(t)
testdata, err := filepath.Abs(getTestdataForPackage(t, "unconvert"))
require.NoError(t, err)
requireNotEmpty(t, testdata)
analysistest.TestData = func() string { return testdata }
analysistest.Run(t, testdata, unconvert.Analyzer, "a")
}
| pkg/testutils/lint/passes/passesutil/passes_util_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/9d55d63b9132b4917e2864581f66cc426836f488 | [
0.0001774191769072786,
0.00017277283768635243,
0.000169723920407705,
0.00017209799261763692,
0.0000025204583380400436
] |
{
"id": 1,
"code_window": [
"\tsr := &tracingpb.StructuredRecord{\n",
"\t\tTime: time.Now(),\n",
"\t\tPayload: p,\n",
"\t}\n",
"\ts.recordInternal(sr, &s.mu.recording.structured)\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tTime: now,\n"
],
"file_path": "pkg/util/tracing/crdbspan.go",
"type": "replace",
"edit_start_line_idx": 313
} | // Code generated by TestPretty. DO NOT EDIT.
// GENERATED FILE DO NOT EDIT
1:
-
IMPORT
TABLE
t (
i
INT8,
s
STRING
)
CSV
DATA (
$1
)
6:
------
IMPORT
TABLE t (
i
INT8,
s
STRING
)
CSV DATA (
$1
)
15:
---------------
IMPORT
TABLE t (
i INT8,
s STRING
)
CSV DATA (
$1
)
16:
----------------
IMPORT
TABLE t (
i INT8,
s STRING
)
CSV DATA ($1)
24:
------------------------
IMPORT
TABLE t (
i INT8, s STRING
)
CSV DATA ($1)
27:
---------------------------
IMPORT
TABLE t (i INT8, s STRING)
CSV DATA ($1)
47:
-----------------------------------------------
IMPORT TABLE t (i INT8, s STRING) CSV DATA ($1)
| pkg/sql/sem/tree/testdata/pretty/import4.align-only.golden | 0 | https://github.com/cockroachdb/cockroach/commit/9d55d63b9132b4917e2864581f66cc426836f488 | [
0.00017114426009356976,
0.00016702016000635922,
0.0001646658347453922,
0.00016676230006851256,
0.000002012264303630218
] |
{
"id": 2,
"code_window": [
"}\n",
"\n",
"// TestSpanRecordStructuredLimit tests recording behavior when the size of\n",
"// structured data recorded into the span exceeds the configured limit.\n",
"func TestSpanRecordStructuredLimit(t *testing.T) {\n",
"\ttr := NewTracer()\n",
"\tsp := tr.StartSpan(\"root\", WithForceRealSpan())\n",
"\tdefer sp.Finish()\n",
"\n",
"\tpad := func(i int) string { return fmt.Sprintf(\"%06d\", i) }\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tnow := timeutil.Now()\n",
"\tclock := timeutil.NewManualTime(now)\n",
"\ttr := NewTracerWithOpt(context.Background(), WithTestingKnobs(TracerTestingKnobs{Clock: clock}))\n",
"\n"
],
"file_path": "pkg/util/tracing/span_test.go",
"type": "replace",
"edit_start_line_idx": 230
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tracing
import (
"fmt"
"sort"
"sync"
"sync/atomic"
"time"
"github.com/cockroachdb/cockroach/pkg/util/ring"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb"
"github.com/cockroachdb/logtags"
"github.com/cockroachdb/redact"
"github.com/gogo/protobuf/types"
"go.opentelemetry.io/otel/attribute"
)
// crdbSpan is a span for internal crdb usage. This is used to power SQL session
// tracing.
type crdbSpan struct {
rootSpan *crdbSpan // root span of the containing trace; could be itself
// traceEmpty indicates whether or not the trace rooted at this span
// (provided it is a root span) contains any recordings or baggage. All
// spans hold a reference to the rootSpan; this field is accessed
// through that reference.
traceEmpty int32 // accessed atomically, through markTraceAsNonEmpty and inAnEmptyTrace
traceID uint64 // probabilistically unique
spanID uint64 // probabilistically unique
parentSpanID uint64
goroutineID uint64
startTime time.Time
// logTags are set to the log tags that were available when this Span was
// created, so that there's no need to eagerly copy all of those log tags
// into this Span's tags. If the Span's tags are actually requested, these
// logTags will be copied out at that point.
//
// Note that these tags have not gone through the log tag -> Span tag
// remapping procedure; tagName() needs to be called before exposing each
// tag's key to a user.
logTags *logtags.Buffer
mu crdbSpanMu
testing *TracerTestingKnobs
}
type crdbSpanMu struct {
syncutil.Mutex
// duration is initialized to -1 and set on Finish().
duration time.Duration
operation string // name of operation associated with the span
recording struct {
// recordingType is the recording type of the ongoing recording, if any.
// Its 'load' method may be called without holding the surrounding mutex,
// but its 'swap' method requires the mutex.
recordingType atomicRecordingType
logs sizeLimitedBuffer // of *tracingpb.LogRecords
structured sizeLimitedBuffer // of Structured events
		// dropped is true if the span has capped out its memory limits for
// logs and structured events, and has had to drop some. It's used to
// annotate recordings with the _dropped tag, when applicable.
dropped bool
// children contains the list of child spans started after this Span
// started recording.
children childSpanRefs
// remoteSpan contains the list of remote child span recordings that
// were manually imported.
remoteSpans []tracingpb.RecordedSpan
}
// The Span's associated baggage.
baggage map[string]string
// tags are only captured when recording. These are tags that have been
// added to this Span, and will be appended to the tags in logTags when
// someone needs to actually observe the total set of tags that is a part of
// this Span.
// TODO(radu): perhaps we want a recording to capture all the tags (even
// those that were set before recording started)?
tags []attribute.KeyValue
}
type childSpanRefs struct {
refCount int
preAllocated [4]*crdbSpan
overflow []*crdbSpan
}
func (c *childSpanRefs) len() int {
return c.refCount
}
func (c *childSpanRefs) add(ref *crdbSpan) {
if c.refCount < len(c.preAllocated) {
c.preAllocated[c.refCount] = ref
c.refCount++
return
}
// Only record the child if the parent still has room.
if c.refCount < maxChildrenPerSpan {
c.overflow = append(c.overflow, ref)
c.refCount++
}
}
func (c *childSpanRefs) get(idx int) *crdbSpan {
if idx < len(c.preAllocated) {
ref := c.preAllocated[idx]
if ref == nil {
panic(fmt.Sprintf("idx %d out of bounds", idx))
}
return ref
}
return c.overflow[idx-len(c.preAllocated)]
}
func (c *childSpanRefs) reset() {
for i := 0; i < len(c.preAllocated); i++ {
c.preAllocated[i] = nil
}
c.overflow = nil
c.refCount = 0
}
func newSizeLimitedBuffer(limit int64) sizeLimitedBuffer {
return sizeLimitedBuffer{
limit: limit,
}
}
type sizeLimitedBuffer struct {
ring.Buffer
size int64 // in bytes
limit int64 // in bytes
}
func (b *sizeLimitedBuffer) Reset() {
b.Buffer.Reset()
b.size = 0
}
func (s *crdbSpan) recordingType() RecordingType {
if s == nil {
return RecordingOff
}
return s.mu.recording.recordingType.load()
}
// enableRecording starts recording on the Span. From now on, log events and
// child spans will be stored.
func (s *crdbSpan) enableRecording(recType RecordingType) {
if recType == RecordingOff || s.recordingType() == recType {
return
}
s.mu.Lock()
defer s.mu.Unlock()
s.mu.recording.recordingType.swap(recType)
if recType == RecordingVerbose {
s.setBaggageItemLocked(verboseTracingBaggageKey, "1")
}
}
// resetRecording clears any previously recorded info.
//
// NB: This is needed by SQL SessionTracing, who likes to start and stop
// recording repeatedly on the same Span, and collect the (separate) recordings
// every time.
func (s *crdbSpan) resetRecording() {
s.mu.Lock()
defer s.mu.Unlock()
s.mu.recording.logs.Reset()
s.mu.recording.structured.Reset()
s.mu.recording.dropped = false
s.mu.recording.children.reset()
s.mu.recording.remoteSpans = nil
}
func (s *crdbSpan) disableRecording() {
if s.recordingType() == RecordingOff {
return
}
s.mu.Lock()
defer s.mu.Unlock()
oldRecType := s.mu.recording.recordingType.swap(RecordingOff)
// We test the duration as a way to check if the Span has been finished. If it
// has, we don't want to do the call below as it might crash (at least if
// there's a netTr).
if (s.mu.duration == -1) && (oldRecType == RecordingVerbose) {
// Clear the verboseTracingBaggageKey baggage item, assuming that it was set by
// enableRecording().
s.setBaggageItemLocked(verboseTracingBaggageKey, "")
}
}
func (s *crdbSpan) getRecording(wantTags bool) Recording {
if s == nil {
return nil // noop span
}
// Return early (without allocating) if the trace is empty, i.e. there are
// no recordings or baggage. If the trace is verbose, we'll still recurse in
// order to pick up all the operations that were part of the trace, despite
// nothing having any actual data in them.
if s.recordingType() != RecordingVerbose && s.inAnEmptyTrace() && !s.testing.RecordEmptyTraces {
return nil
}
s.mu.Lock()
// The capacity here is approximate since we don't know how many
// grandchildren there are.
result := make(Recording, 0, 1+s.mu.recording.children.len()+len(s.mu.recording.remoteSpans))
// Shallow-copy the children so we can process them without the lock.
var children []*crdbSpan
for i := 0; i < s.mu.recording.children.len(); i++ {
children = append(children, s.mu.recording.children.get(i))
}
result = append(result, s.getRecordingLocked(wantTags))
result = append(result, s.mu.recording.remoteSpans...)
s.mu.Unlock()
for _, child := range children {
result = append(result, child.getRecording(wantTags)...)
}
// Sort the spans by StartTime, except the first Span (the root of this
// recording) which stays in place.
toSort := sortPool.Get().(*Recording) // avoids allocations in sort.Sort
*toSort = result[1:]
sort.Sort(toSort)
*toSort = nil
sortPool.Put(toSort)
return result
}
func (s *crdbSpan) importRemoteSpans(remoteSpans []tracingpb.RecordedSpan) {
if len(remoteSpans) == 0 {
return
}
s.markTraceAsNonEmpty()
// Change the root of the remote recording to be a child of this Span. This is
// usually already the case, except with DistSQL traces where remote
// processors run in spans that FollowFrom an RPC Span that we don't collect.
remoteSpans[0].ParentSpanID = s.spanID
s.mu.Lock()
defer s.mu.Unlock()
s.mu.recording.remoteSpans = append(s.mu.recording.remoteSpans, remoteSpans...)
}
func (s *crdbSpan) setTagLocked(key string, value attribute.Value) {
k := attribute.Key(key)
for i := range s.mu.tags {
if s.mu.tags[i].Key == k {
s.mu.tags[i].Value = value
return
}
}
s.mu.tags = append(s.mu.tags, attribute.KeyValue{Key: k, Value: value})
}
func (s *crdbSpan) record(msg redact.RedactableString) {
if s.recordingType() != RecordingVerbose {
return
}
var now time.Time
if clock := s.testing.Clock; clock != nil {
now = clock.Now()
} else {
now = time.Now()
}
logRecord := &tracingpb.LogRecord{
Time: now,
Message: msg,
// Compatibility with 21.2.
DeprecatedFields: []tracingpb.LogRecord_Field{
{Key: tracingpb.LogMessageField, Value: msg},
},
}
s.recordInternal(logRecord, &s.mu.recording.logs)
}
func (s *crdbSpan) recordStructured(item Structured) {
p, err := types.MarshalAny(item)
if err != nil {
// An error here is an error from Marshal; these
// are unlikely to happen.
return
}
sr := &tracingpb.StructuredRecord{
Time: time.Now(),
Payload: p,
}
s.recordInternal(sr, &s.mu.recording.structured)
}
// sizable is a subset for protoutil.Message, for payloads (log records and
// structured events) that can be recorded.
type sizable interface {
Size() int
}
// inAnEmptyTrace indicates whether or not the containing trace is "empty" (i.e.
// has any recordings or baggage).
func (s *crdbSpan) inAnEmptyTrace() bool {
val := atomic.LoadInt32(&s.rootSpan.traceEmpty)
return val == 0
}
func (s *crdbSpan) markTraceAsNonEmpty() {
atomic.StoreInt32(&s.rootSpan.traceEmpty, 1)
}
func (s *crdbSpan) recordInternal(payload sizable, buffer *sizeLimitedBuffer) {
s.markTraceAsNonEmpty()
s.mu.Lock()
defer s.mu.Unlock()
size := int64(payload.Size())
if size > buffer.limit {
// The incoming payload alone blows past the memory limit. Let's just
// drop it.
s.mu.recording.dropped = true
return
}
buffer.size += size
if buffer.size > buffer.limit {
s.mu.recording.dropped = true
}
for buffer.size > buffer.limit {
first := buffer.GetFirst().(sizable)
buffer.RemoveFirst()
buffer.size -= int64(first.Size())
}
buffer.AddLast(payload)
}
func (s *crdbSpan) setBaggageItemAndTag(restrictedKey, value string) {
s.markTraceAsNonEmpty()
s.mu.Lock()
defer s.mu.Unlock()
s.setBaggageItemLocked(restrictedKey, value)
// Don't set the tag if this is the special cased baggage item indicating
// span verbosity, as it is named nondescriptly and the recording knows
// how to display its verbosity independently.
if restrictedKey != verboseTracingBaggageKey {
s.setTagLocked(restrictedKey, attribute.StringValue(value))
}
}
func (s *crdbSpan) setBaggageItemLocked(restrictedKey, value string) {
if oldVal, ok := s.mu.baggage[restrictedKey]; ok && oldVal == value {
// No-op.
return
}
if s.mu.baggage == nil {
s.mu.baggage = make(map[string]string)
}
s.mu.baggage[restrictedKey] = value
}
// getRecordingLocked returns the Span's recording. This does not include
// children.
//
// When wantTags is false, no tags will be added. This is a performance
// optimization as stringifying the tag values can be expensive.
func (s *crdbSpan) getRecordingLocked(wantTags bool) tracingpb.RecordedSpan {
rs := tracingpb.RecordedSpan{
TraceID: s.traceID,
SpanID: s.spanID,
ParentSpanID: s.parentSpanID,
GoroutineID: s.goroutineID,
Operation: s.mu.operation,
StartTime: s.startTime,
Duration: s.mu.duration,
RedactableLogs: true,
}
if rs.Duration == -1 {
// -1 indicates an unfinished Span. For a recording it's better to put some
// duration in it, otherwise tools get confused. For example, we export
// recordings to Jaeger, and spans with a zero duration don't look nice.
rs.Duration = timeutil.Now().Sub(rs.StartTime)
rs.Finished = false
} else {
rs.Finished = true
}
addTag := func(k, v string) {
if rs.Tags == nil {
rs.Tags = make(map[string]string)
}
rs.Tags[k] = v
}
if wantTags {
if s.mu.duration == -1 {
addTag("_unfinished", "1")
}
if s.mu.recording.recordingType.load() == RecordingVerbose {
addTag("_verbose", "1")
}
if s.mu.recording.dropped {
addTag("_dropped", "1")
}
}
if numEvents := s.mu.recording.structured.Len(); numEvents != 0 {
rs.StructuredRecords = make([]tracingpb.StructuredRecord, numEvents)
for i := 0; i < numEvents; i++ {
event := s.mu.recording.structured.Get(i).(*tracingpb.StructuredRecord)
rs.StructuredRecords[i] = *event
}
}
if len(s.mu.baggage) > 0 {
rs.Baggage = make(map[string]string)
for k, v := range s.mu.baggage {
rs.Baggage[k] = v
}
}
if wantTags {
if s.logTags != nil {
setLogTags(s.logTags.Get(), func(remappedKey string, tag *logtags.Tag) {
addTag(remappedKey, tag.ValueStr())
})
}
for _, kv := range s.mu.tags {
// We encode the tag values as strings.
addTag(string(kv.Key), kv.Value.Emit())
}
}
if numLogs := s.mu.recording.logs.Len(); numLogs != 0 {
rs.Logs = make([]tracingpb.LogRecord, numLogs)
for i := 0; i < numLogs; i++ {
lr := s.mu.recording.logs.Get(i).(*tracingpb.LogRecord)
rs.Logs[i] = *lr
}
}
return rs
}
func (s *crdbSpan) addChild(child *crdbSpan) {
s.mu.Lock()
defer s.mu.Unlock()
s.mu.recording.children.add(child)
}
// setVerboseRecursively sets the verbosity of the crdbSpan appropriately and
// recurses on its list of children.
func (s *crdbSpan) setVerboseRecursively(to bool) {
if to {
s.enableRecording(RecordingVerbose)
} else {
s.disableRecording()
}
s.mu.Lock()
var children []*crdbSpan
for i := 0; i < s.mu.recording.children.len(); i++ {
children = append(children, s.mu.recording.children.get(i))
}
s.mu.Unlock()
for _, child := range children {
child.setVerboseRecursively(to)
}
}
var sortPool = sync.Pool{
New: func() interface{} {
return &Recording{}
},
}
// Less implements sort.Interface.
func (r Recording) Less(i, j int) bool {
return r[i].StartTime.Before(r[j].StartTime)
}
// Swap implements sort.Interface.
func (r Recording) Swap(i, j int) {
r[i], r[j] = r[j], r[i]
}
// Len implements sort.Interface.
func (r Recording) Len() int {
return len(r)
}
type atomicRecordingType RecordingType
// load returns the recording type.
func (art *atomicRecordingType) load() RecordingType {
return RecordingType(atomic.LoadInt32((*int32)(art)))
}
// swap stores the new recording type and returns the old one.
func (art *atomicRecordingType) swap(recType RecordingType) RecordingType {
return RecordingType(atomic.SwapInt32((*int32)(art), int32(recType)))
}
| pkg/util/tracing/crdbspan.go | 1 | https://github.com/cockroachdb/cockroach/commit/9d55d63b9132b4917e2864581f66cc426836f488 | [
0.009846220724284649,
0.0013422550400719047,
0.00016360785230062902,
0.0002555058163125068,
0.0022077925968915224
] |
{
"id": 2,
"code_window": [
"}\n",
"\n",
"// TestSpanRecordStructuredLimit tests recording behavior when the size of\n",
"// structured data recorded into the span exceeds the configured limit.\n",
"func TestSpanRecordStructuredLimit(t *testing.T) {\n",
"\ttr := NewTracer()\n",
"\tsp := tr.StartSpan(\"root\", WithForceRealSpan())\n",
"\tdefer sp.Finish()\n",
"\n",
"\tpad := func(i int) string { return fmt.Sprintf(\"%06d\", i) }\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tnow := timeutil.Now()\n",
"\tclock := timeutil.NewManualTime(now)\n",
"\ttr := NewTracerWithOpt(context.Background(), WithTestingKnobs(TracerTestingKnobs{Clock: clock}))\n",
"\n"
],
"file_path": "pkg/util/tracing/span_test.go",
"type": "replace",
"edit_start_line_idx": 230
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// Package pgcode defines the PostgreSQL 5-character support codes
// used throughout the CockroachDB source tree.
//
// Note that CockroachDB may not use the same codes as PostgreSQL for
// the same situations.
package pgcode
| pkg/sql/pgwire/pgcode/doc.go | 0 | https://github.com/cockroachdb/cockroach/commit/9d55d63b9132b4917e2864581f66cc426836f488 | [
0.00017880670202430338,
0.00017176626715809107,
0.00016472581773996353,
0.00017176626715809107,
0.000007040442142169923
] |
{
"id": 2,
"code_window": [
"}\n",
"\n",
"// TestSpanRecordStructuredLimit tests recording behavior when the size of\n",
"// structured data recorded into the span exceeds the configured limit.\n",
"func TestSpanRecordStructuredLimit(t *testing.T) {\n",
"\ttr := NewTracer()\n",
"\tsp := tr.StartSpan(\"root\", WithForceRealSpan())\n",
"\tdefer sp.Finish()\n",
"\n",
"\tpad := func(i int) string { return fmt.Sprintf(\"%06d\", i) }\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tnow := timeutil.Now()\n",
"\tclock := timeutil.NewManualTime(now)\n",
"\ttr := NewTracerWithOpt(context.Background(), WithTestingKnobs(TracerTestingKnobs{Clock: clock}))\n",
"\n"
],
"file_path": "pkg/util/tracing/span_test.go",
"type": "replace",
"edit_start_line_idx": 230
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package schemaexpr
import (
"bytes"
"fmt"
"sort"
"testing"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/datadriven"
)
func TestParseComputedColumnRewrites(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
path := "testdata/computed_column_rewrites"
datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string {
switch d.Cmd {
case "parse":
rewrites, err := ParseComputedColumnRewrites(d.Input)
if err != nil {
return fmt.Sprintf("error: %v", err)
}
var keys []string
for k := range rewrites {
keys = append(keys, k)
}
sort.Strings(keys)
var buf bytes.Buffer
for _, k := range keys {
fmt.Fprintf(&buf, "(%v) -> (%v)\n", k, tree.Serialize(rewrites[k]))
}
return buf.String()
default:
t.Fatalf("unsupported command %s", d.Cmd)
return ""
}
})
}
| pkg/sql/catalog/schemaexpr/computed_column_rewrites_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/9d55d63b9132b4917e2864581f66cc426836f488 | [
0.00017917210061568767,
0.00017277018923778087,
0.00016729965864215046,
0.00017221990856342018,
0.0000035245795970695326
] |
{
"id": 2,
"code_window": [
"}\n",
"\n",
"// TestSpanRecordStructuredLimit tests recording behavior when the size of\n",
"// structured data recorded into the span exceeds the configured limit.\n",
"func TestSpanRecordStructuredLimit(t *testing.T) {\n",
"\ttr := NewTracer()\n",
"\tsp := tr.StartSpan(\"root\", WithForceRealSpan())\n",
"\tdefer sp.Finish()\n",
"\n",
"\tpad := func(i int) string { return fmt.Sprintf(\"%06d\", i) }\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tnow := timeutil.Now()\n",
"\tclock := timeutil.NewManualTime(now)\n",
"\ttr := NewTracerWithOpt(context.Background(), WithTestingKnobs(TracerTestingKnobs{Clock: clock}))\n",
"\n"
],
"file_path": "pkg/util/tracing/span_test.go",
"type": "replace",
"edit_start_line_idx": 230
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package geogfn
import (
"math"
"github.com/cockroachdb/cockroach/pkg/geo"
"github.com/cockroachdb/errors"
"github.com/golang/geo/s2"
"github.com/twpayne/go-geom"
)
// Azimuth returns the azimuth in radians of the segment defined by the given point geometries.
// The azimuth angle is referenced from north, and is positive clockwise.
// North = 0; East = π/2; South = π; West = 3π/2.
// Returns nil if the two points are the same.
// Returns an error if any of the two Geography items are not points.
func Azimuth(a geo.Geography, b geo.Geography) (*float64, error) {
if a.SRID() != b.SRID() {
return nil, geo.NewMismatchingSRIDsError(a.SpatialObject(), b.SpatialObject())
}
aGeomT, err := a.AsGeomT()
if err != nil {
return nil, err
}
aPoint, ok := aGeomT.(*geom.Point)
if !ok {
return nil, errors.Newf("arguments must be POINT geometries")
}
bGeomT, err := b.AsGeomT()
if err != nil {
return nil, err
}
bPoint, ok := bGeomT.(*geom.Point)
if !ok {
return nil, errors.Newf("arguments must be POINT geometries")
}
if aPoint.Empty() || bPoint.Empty() {
return nil, errors.Newf("cannot call ST_Azimuth with POINT EMPTY")
}
if aPoint.X() == bPoint.X() && aPoint.Y() == bPoint.Y() {
return nil, nil
}
s, err := a.Spheroid()
if err != nil {
return nil, err
}
_, az1, _ := s.Inverse(
s2.LatLngFromDegrees(aPoint.Y(), aPoint.X()),
s2.LatLngFromDegrees(bPoint.Y(), bPoint.X()),
)
// Convert to radians.
az1 = az1 * math.Pi / 180
return &az1, nil
}
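
// exampleAzimuthSketch is an illustrative usage sketch with assumed WKT
// inputs: for a point due east of another on the equator, the returned
// azimuth is approximately math.Pi/2 radians.
func exampleAzimuthSketch() (*float64, error) {
	a, err := geo.ParseGeography("POINT(0 0)")
	if err != nil {
		return nil, err
	}
	b, err := geo.ParseGeography("POINT(1 0)")
	if err != nil {
		return nil, err
	}
	return Azimuth(a, b) // ≈ math.Pi / 2 for due east
}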
| pkg/geo/geogfn/azimuth.go | 0 | https://github.com/cockroachdb/cockroach/commit/9d55d63b9132b4917e2864581f66cc426836f488 | [
0.00017939363897312433,
0.00017202337039634585,
0.00016324275929946452,
0.00017185014439746737,
0.000004193988843326224
] |
{
"id": 3,
"code_window": [
"\tpad := func(i int) string { return fmt.Sprintf(\"%06d\", i) }\n",
"\tpayload := func(i int) Structured { return &types.StringValue{Value: pad(i)} }\n",
"\tanyPayload, err := types.MarshalAny(payload(42))\n",
"\trequire.NoError(t, err)\n",
"\tstructuredRecord := &tracingpb.StructuredRecord{\n",
"\t\tTime: timeutil.Now(),\n",
"\t\tPayload: anyPayload,\n",
"\t}\n",
"\n",
"\tnumStructuredRecordings := maxStructuredBytesPerSpan / structuredRecord.Size()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tTime: now,\n"
],
"file_path": "pkg/util/tracing/span_test.go",
"type": "replace",
"edit_start_line_idx": 239
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tracing
import (
"fmt"
"sort"
"sync"
"sync/atomic"
"time"
"github.com/cockroachdb/cockroach/pkg/util/ring"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb"
"github.com/cockroachdb/logtags"
"github.com/cockroachdb/redact"
"github.com/gogo/protobuf/types"
"go.opentelemetry.io/otel/attribute"
)
// crdbSpan is a span for internal crdb usage. This is used to power SQL session
// tracing.
type crdbSpan struct {
rootSpan *crdbSpan // root span of the containing trace; could be itself
// traceEmpty indicates whether or not the trace rooted at this span
// (provided it is a root span) contains any recordings or baggage. All
// spans hold a reference to the rootSpan; this field is accessed
// through that reference.
traceEmpty int32 // accessed atomically, through markTraceAsNonEmpty and inAnEmptyTrace
traceID uint64 // probabilistically unique
spanID uint64 // probabilistically unique
parentSpanID uint64
goroutineID uint64
startTime time.Time
// logTags are set to the log tags that were available when this Span was
// created, so that there's no need to eagerly copy all of those log tags
// into this Span's tags. If the Span's tags are actually requested, these
// logTags will be copied out at that point.
//
// Note that these tags have not gone through the log tag -> Span tag
// remapping procedure; tagName() needs to be called before exposing each
// tag's key to a user.
logTags *logtags.Buffer
mu crdbSpanMu
testing *TracerTestingKnobs
}
type crdbSpanMu struct {
syncutil.Mutex
// duration is initialized to -1 and set on Finish().
duration time.Duration
operation string // name of operation associated with the span
recording struct {
// recordingType is the recording type of the ongoing recording, if any.
// Its 'load' method may be called without holding the surrounding mutex,
// but its 'swap' method requires the mutex.
recordingType atomicRecordingType
logs sizeLimitedBuffer // of *tracingpb.LogRecords
structured sizeLimitedBuffer // of Structured events
		// dropped is true if the span has capped out its memory limits for
// logs and structured events, and has had to drop some. It's used to
// annotate recordings with the _dropped tag, when applicable.
dropped bool
// children contains the list of child spans started after this Span
// started recording.
children childSpanRefs
		// remoteSpans contains the list of remote child span recordings that
		// were manually imported.
remoteSpans []tracingpb.RecordedSpan
}
// The Span's associated baggage.
baggage map[string]string
// tags are only captured when recording. These are tags that have been
// added to this Span, and will be appended to the tags in logTags when
// someone needs to actually observe the total set of tags that is a part of
// this Span.
// TODO(radu): perhaps we want a recording to capture all the tags (even
// those that were set before recording started)?
tags []attribute.KeyValue
}
type childSpanRefs struct {
refCount int
preAllocated [4]*crdbSpan
overflow []*crdbSpan
}
func (c *childSpanRefs) len() int {
return c.refCount
}
func (c *childSpanRefs) add(ref *crdbSpan) {
if c.refCount < len(c.preAllocated) {
c.preAllocated[c.refCount] = ref
c.refCount++
return
}
// Only record the child if the parent still has room.
if c.refCount < maxChildrenPerSpan {
c.overflow = append(c.overflow, ref)
c.refCount++
}
}
func (c *childSpanRefs) get(idx int) *crdbSpan {
if idx < len(c.preAllocated) {
ref := c.preAllocated[idx]
if ref == nil {
panic(fmt.Sprintf("idx %d out of bounds", idx))
}
return ref
}
return c.overflow[idx-len(c.preAllocated)]
}
func (c *childSpanRefs) reset() {
for i := 0; i < len(c.preAllocated); i++ {
c.preAllocated[i] = nil
}
c.overflow = nil
c.refCount = 0
}
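
// exampleChildRefsSketch is an illustrative sketch of the two-tier storage
// above: the first four children land in the fixed preAllocated array and
// later ones spill into the overflow slice, so get(5) reads overflow[1].
func exampleChildRefsSketch() {
	var refs childSpanRefs
	for i := 0; i < 6; i++ {
		refs.add(&crdbSpan{})
	}
	_ = refs.len()  // 6
	_ = refs.get(5) // served from refs.overflow[5-len(refs.preAllocated)]
}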
func newSizeLimitedBuffer(limit int64) sizeLimitedBuffer {
return sizeLimitedBuffer{
limit: limit,
}
}
type sizeLimitedBuffer struct {
ring.Buffer
size int64 // in bytes
limit int64 // in bytes
}
func (b *sizeLimitedBuffer) Reset() {
b.Buffer.Reset()
b.size = 0
}
func (s *crdbSpan) recordingType() RecordingType {
if s == nil {
return RecordingOff
}
return s.mu.recording.recordingType.load()
}
// enableRecording starts recording on the Span. From now on, log events and
// child spans will be stored.
func (s *crdbSpan) enableRecording(recType RecordingType) {
if recType == RecordingOff || s.recordingType() == recType {
return
}
s.mu.Lock()
defer s.mu.Unlock()
s.mu.recording.recordingType.swap(recType)
if recType == RecordingVerbose {
s.setBaggageItemLocked(verboseTracingBaggageKey, "1")
}
}
// resetRecording clears any previously recorded info.
//
// NB: This is needed by SQL SessionTracing, which likes to start and stop
// recording repeatedly on the same Span, and collect the (separate) recordings
// every time.
func (s *crdbSpan) resetRecording() {
s.mu.Lock()
defer s.mu.Unlock()
s.mu.recording.logs.Reset()
s.mu.recording.structured.Reset()
s.mu.recording.dropped = false
s.mu.recording.children.reset()
s.mu.recording.remoteSpans = nil
}
func (s *crdbSpan) disableRecording() {
if s.recordingType() == RecordingOff {
return
}
s.mu.Lock()
defer s.mu.Unlock()
oldRecType := s.mu.recording.recordingType.swap(RecordingOff)
// We test the duration as a way to check if the Span has been finished. If it
// has, we don't want to do the call below as it might crash (at least if
// there's a netTr).
if (s.mu.duration == -1) && (oldRecType == RecordingVerbose) {
// Clear the verboseTracingBaggageKey baggage item, assuming that it was set by
// enableRecording().
s.setBaggageItemLocked(verboseTracingBaggageKey, "")
}
}
func (s *crdbSpan) getRecording(wantTags bool) Recording {
if s == nil {
return nil // noop span
}
// Return early (without allocating) if the trace is empty, i.e. there are
// no recordings or baggage. If the trace is verbose, we'll still recurse in
// order to pick up all the operations that were part of the trace, despite
// nothing having any actual data in them.
if s.recordingType() != RecordingVerbose && s.inAnEmptyTrace() && !s.testing.RecordEmptyTraces {
return nil
}
s.mu.Lock()
// The capacity here is approximate since we don't know how many
// grandchildren there are.
result := make(Recording, 0, 1+s.mu.recording.children.len()+len(s.mu.recording.remoteSpans))
// Shallow-copy the children so we can process them without the lock.
var children []*crdbSpan
for i := 0; i < s.mu.recording.children.len(); i++ {
children = append(children, s.mu.recording.children.get(i))
}
result = append(result, s.getRecordingLocked(wantTags))
result = append(result, s.mu.recording.remoteSpans...)
s.mu.Unlock()
for _, child := range children {
result = append(result, child.getRecording(wantTags)...)
}
// Sort the spans by StartTime, except the first Span (the root of this
// recording) which stays in place.
toSort := sortPool.Get().(*Recording) // avoids allocations in sort.Sort
*toSort = result[1:]
sort.Sort(toSort)
*toSort = nil
sortPool.Put(toSort)
return result
}
func (s *crdbSpan) importRemoteSpans(remoteSpans []tracingpb.RecordedSpan) {
if len(remoteSpans) == 0 {
return
}
s.markTraceAsNonEmpty()
// Change the root of the remote recording to be a child of this Span. This is
// usually already the case, except with DistSQL traces where remote
// processors run in spans that FollowFrom an RPC Span that we don't collect.
remoteSpans[0].ParentSpanID = s.spanID
s.mu.Lock()
defer s.mu.Unlock()
s.mu.recording.remoteSpans = append(s.mu.recording.remoteSpans, remoteSpans...)
}
func (s *crdbSpan) setTagLocked(key string, value attribute.Value) {
k := attribute.Key(key)
for i := range s.mu.tags {
if s.mu.tags[i].Key == k {
s.mu.tags[i].Value = value
return
}
}
s.mu.tags = append(s.mu.tags, attribute.KeyValue{Key: k, Value: value})
}
func (s *crdbSpan) record(msg redact.RedactableString) {
if s.recordingType() != RecordingVerbose {
return
}
var now time.Time
if clock := s.testing.Clock; clock != nil {
now = clock.Now()
} else {
now = time.Now()
}
logRecord := &tracingpb.LogRecord{
Time: now,
Message: msg,
// Compatibility with 21.2.
DeprecatedFields: []tracingpb.LogRecord_Field{
{Key: tracingpb.LogMessageField, Value: msg},
},
}
s.recordInternal(logRecord, &s.mu.recording.logs)
}
func (s *crdbSpan) recordStructured(item Structured) {
p, err := types.MarshalAny(item)
if err != nil {
// An error here is an error from Marshal; these
// are unlikely to happen.
return
}
sr := &tracingpb.StructuredRecord{
Time: time.Now(),
Payload: p,
}
s.recordInternal(sr, &s.mu.recording.structured)
}
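
// exampleRecordStructuredSketch is an illustrative sketch: any proto
// message can be recorded as a structured event; types.StringValue is used
// here as an assumed example payload.
func exampleRecordStructuredSketch(s *crdbSpan) {
	s.recordStructured(&types.StringValue{Value: "hello"})
}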
// sizable is a subset of protoutil.Message, for payloads (log records and
// structured events) that can be recorded.
type sizable interface {
Size() int
}
// inAnEmptyTrace indicates whether or not the containing trace is "empty"
// (i.e. does not contain any recordings or baggage).
func (s *crdbSpan) inAnEmptyTrace() bool {
val := atomic.LoadInt32(&s.rootSpan.traceEmpty)
return val == 0
}
func (s *crdbSpan) markTraceAsNonEmpty() {
atomic.StoreInt32(&s.rootSpan.traceEmpty, 1)
}
func (s *crdbSpan) recordInternal(payload sizable, buffer *sizeLimitedBuffer) {
s.markTraceAsNonEmpty()
s.mu.Lock()
defer s.mu.Unlock()
size := int64(payload.Size())
if size > buffer.limit {
// The incoming payload alone blows past the memory limit. Let's just
// drop it.
s.mu.recording.dropped = true
return
}
buffer.size += size
if buffer.size > buffer.limit {
s.mu.recording.dropped = true
}
for buffer.size > buffer.limit {
first := buffer.GetFirst().(sizable)
buffer.RemoveFirst()
buffer.size -= int64(first.Size())
}
buffer.AddLast(payload)
}
func (s *crdbSpan) setBaggageItemAndTag(restrictedKey, value string) {
s.markTraceAsNonEmpty()
s.mu.Lock()
defer s.mu.Unlock()
s.setBaggageItemLocked(restrictedKey, value)
// Don't set the tag if this is the special cased baggage item indicating
// span verbosity, as it is named nondescriptly and the recording knows
// how to display its verbosity independently.
if restrictedKey != verboseTracingBaggageKey {
s.setTagLocked(restrictedKey, attribute.StringValue(value))
}
}
func (s *crdbSpan) setBaggageItemLocked(restrictedKey, value string) {
if oldVal, ok := s.mu.baggage[restrictedKey]; ok && oldVal == value {
// No-op.
return
}
if s.mu.baggage == nil {
s.mu.baggage = make(map[string]string)
}
s.mu.baggage[restrictedKey] = value
}
// getRecordingLocked returns the Span's recording. This does not include
// children.
//
// When wantTags is false, no tags will be added. This is a performance
// optimization as stringifying the tag values can be expensive.
func (s *crdbSpan) getRecordingLocked(wantTags bool) tracingpb.RecordedSpan {
rs := tracingpb.RecordedSpan{
TraceID: s.traceID,
SpanID: s.spanID,
ParentSpanID: s.parentSpanID,
GoroutineID: s.goroutineID,
Operation: s.mu.operation,
StartTime: s.startTime,
Duration: s.mu.duration,
RedactableLogs: true,
}
if rs.Duration == -1 {
// -1 indicates an unfinished Span. For a recording it's better to put some
// duration in it, otherwise tools get confused. For example, we export
// recordings to Jaeger, and spans with a zero duration don't look nice.
rs.Duration = timeutil.Now().Sub(rs.StartTime)
rs.Finished = false
} else {
rs.Finished = true
}
addTag := func(k, v string) {
if rs.Tags == nil {
rs.Tags = make(map[string]string)
}
rs.Tags[k] = v
}
if wantTags {
if s.mu.duration == -1 {
addTag("_unfinished", "1")
}
if s.mu.recording.recordingType.load() == RecordingVerbose {
addTag("_verbose", "1")
}
if s.mu.recording.dropped {
addTag("_dropped", "1")
}
}
if numEvents := s.mu.recording.structured.Len(); numEvents != 0 {
rs.StructuredRecords = make([]tracingpb.StructuredRecord, numEvents)
for i := 0; i < numEvents; i++ {
event := s.mu.recording.structured.Get(i).(*tracingpb.StructuredRecord)
rs.StructuredRecords[i] = *event
}
}
if len(s.mu.baggage) > 0 {
rs.Baggage = make(map[string]string)
for k, v := range s.mu.baggage {
rs.Baggage[k] = v
}
}
if wantTags {
if s.logTags != nil {
setLogTags(s.logTags.Get(), func(remappedKey string, tag *logtags.Tag) {
addTag(remappedKey, tag.ValueStr())
})
}
for _, kv := range s.mu.tags {
// We encode the tag values as strings.
addTag(string(kv.Key), kv.Value.Emit())
}
}
if numLogs := s.mu.recording.logs.Len(); numLogs != 0 {
rs.Logs = make([]tracingpb.LogRecord, numLogs)
for i := 0; i < numLogs; i++ {
lr := s.mu.recording.logs.Get(i).(*tracingpb.LogRecord)
rs.Logs[i] = *lr
}
}
return rs
}
func (s *crdbSpan) addChild(child *crdbSpan) {
s.mu.Lock()
defer s.mu.Unlock()
s.mu.recording.children.add(child)
}
// setVerboseRecursively sets the verbosity of the crdbSpan appropriately and
// recurses on its list of children.
func (s *crdbSpan) setVerboseRecursively(to bool) {
if to {
s.enableRecording(RecordingVerbose)
} else {
s.disableRecording()
}
s.mu.Lock()
var children []*crdbSpan
for i := 0; i < s.mu.recording.children.len(); i++ {
children = append(children, s.mu.recording.children.get(i))
}
s.mu.Unlock()
for _, child := range children {
child.setVerboseRecursively(to)
}
}
var sortPool = sync.Pool{
New: func() interface{} {
return &Recording{}
},
}
// Less implements sort.Interface.
func (r Recording) Less(i, j int) bool {
return r[i].StartTime.Before(r[j].StartTime)
}
// Swap implements sort.Interface.
func (r Recording) Swap(i, j int) {
r[i], r[j] = r[j], r[i]
}
// Len implements sort.Interface.
func (r Recording) Len() int {
return len(r)
}
type atomicRecordingType RecordingType
// load returns the recording type.
func (art *atomicRecordingType) load() RecordingType {
return RecordingType(atomic.LoadInt32((*int32)(art)))
}
// swap stores the new recording type and returns the old one.
func (art *atomicRecordingType) swap(recType RecordingType) RecordingType {
return RecordingType(atomic.SwapInt32((*int32)(art), int32(recType)))
}
| pkg/util/tracing/crdbspan.go | 1 | https://github.com/cockroachdb/cockroach/commit/9d55d63b9132b4917e2864581f66cc426836f488 | [
0.013701115734875202,
0.0006765119032934308,
0.00016207006410695612,
0.00017231833771802485,
0.0020210538059473038
] |
{
"id": 3,
"code_window": [
"\tpad := func(i int) string { return fmt.Sprintf(\"%06d\", i) }\n",
"\tpayload := func(i int) Structured { return &types.StringValue{Value: pad(i)} }\n",
"\tanyPayload, err := types.MarshalAny(payload(42))\n",
"\trequire.NoError(t, err)\n",
"\tstructuredRecord := &tracingpb.StructuredRecord{\n",
"\t\tTime: timeutil.Now(),\n",
"\t\tPayload: anyPayload,\n",
"\t}\n",
"\n",
"\tnumStructuredRecordings := maxStructuredBytesPerSpan / structuredRecord.Size()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tTime: now,\n"
],
"file_path": "pkg/util/tracing/span_test.go",
"type": "replace",
"edit_start_line_idx": 239
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexec
import (
"context"
"fmt"
"testing"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/col/coldatatestutils"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecargs"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexectestutils"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/memsize"
"github.com/cockroachdb/cockroach/pkg/sql/randgen"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/cockroachdb/errors"
)
func TestColumnarizeMaterialize(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
rng, _ := randutil.NewPseudoRand()
nCols := 1 + rng.Intn(4)
var typs []*types.T
for len(typs) < nCols {
typs = append(typs, randgen.RandType(rng))
}
nRows := 10000
rows := randgen.RandEncDatumRowsOfTypes(rng, nRows, typs)
input := execinfra.NewRepeatableRowSource(typs, rows)
ctx := context.Background()
st := cluster.MakeTestingClusterSettings()
evalCtx := tree.MakeTestingEvalContext(st)
defer evalCtx.Stop(ctx)
flowCtx := &execinfra.FlowCtx{
Cfg: &execinfra.ServerConfig{Settings: st},
EvalCtx: &evalCtx,
}
c := NewBufferingColumnarizer(testAllocator, flowCtx, 0, input)
m := NewMaterializer(
flowCtx,
1, /* processorID */
colexecargs.OpWithMetaInfo{Root: c},
typs,
)
m.Start(ctx)
for i := 0; i < nRows; i++ {
row, meta := m.Next()
if meta != nil {
t.Fatalf("unexpected meta %+v", meta)
}
if row == nil {
t.Fatal("unexpected nil row")
}
for j := range typs {
if row[j].Datum.Compare(&evalCtx, rows[i][j].Datum) != 0 {
t.Fatal("unequal rows", row, rows[i])
}
}
}
row, meta := m.Next()
if meta != nil {
t.Fatalf("unexpected meta %+v", meta)
}
if row != nil {
t.Fatal("unexpected not nil row", row)
}
}
func BenchmarkMaterializer(b *testing.B) {
defer log.Scope(b).Close(b)
ctx := context.Background()
st := cluster.MakeTestingClusterSettings()
evalCtx := tree.MakeTestingEvalContext(st)
defer evalCtx.Stop(ctx)
flowCtx := &execinfra.FlowCtx{
Cfg: &execinfra.ServerConfig{Settings: st},
EvalCtx: &evalCtx,
}
rng, _ := randutil.NewPseudoRand()
nBatches := 10
nRows := nBatches * coldata.BatchSize()
for _, typ := range []*types.T{types.Int, types.Float, types.Bytes} {
typs := []*types.T{typ}
nCols := len(typs)
for _, hasNulls := range []bool{false, true} {
for _, useSelectionVector := range []bool{false, true} {
b.Run(fmt.Sprintf("%s/hasNulls=%t/useSel=%t", typ, hasNulls, useSelectionVector), func(b *testing.B) {
nullProb := 0.0
if hasNulls {
nullProb = nullProbability
}
batch := testAllocator.NewMemBatchWithMaxCapacity(typs)
for _, colVec := range batch.ColVecs() {
coldatatestutils.RandomVec(coldatatestutils.RandomVecArgs{
Rand: rng,
Vec: colVec,
N: coldata.BatchSize(),
NullProbability: nullProb,
BytesFixedLength: 8,
})
}
batch.SetLength(coldata.BatchSize())
if useSelectionVector {
batch.SetSelection(true)
sel := batch.Selection()
for i := 0; i < coldata.BatchSize(); i++ {
sel[i] = i
}
}
input := colexectestutils.NewFiniteBatchSource(testAllocator, batch, typs, nBatches)
b.SetBytes(int64(nRows * nCols * int(memsize.Int64)))
for i := 0; i < b.N; i++ {
m := NewMaterializer(
flowCtx,
0, /* processorID */
colexecargs.OpWithMetaInfo{Root: input},
typs,
)
m.Start(ctx)
foundRows := 0
for {
row, meta := m.Next()
if meta != nil {
b.Fatalf("unexpected metadata %v", meta)
}
if row == nil {
break
}
foundRows++
}
if foundRows != nRows {
b.Fatalf("expected %d rows, found %d", nRows, foundRows)
}
input.Reset(nBatches)
}
})
}
}
}
}
func TestMaterializerNextErrorAfterConsumerDone(t *testing.T) {
defer leaktest.AfterTest(t)()
testError := errors.New("test-induced error")
metadataSource := &colexectestutils.CallbackMetadataSource{DrainMetaCb: func() []execinfrapb.ProducerMetadata {
colexecerror.InternalError(testError)
// Unreachable
return nil
}}
ctx := context.Background()
st := cluster.MakeTestingClusterSettings()
evalCtx := tree.MakeTestingEvalContext(st)
defer evalCtx.Stop(ctx)
flowCtx := &execinfra.FlowCtx{
EvalCtx: &evalCtx,
}
m := NewMaterializer(
flowCtx,
0, /* processorID */
colexecargs.OpWithMetaInfo{
Root: &colexecop.CallbackOperator{},
MetadataSources: colexecop.MetadataSources{metadataSource},
},
nil, /* typ */
)
m.Start(ctx)
// Call ConsumerDone.
m.ConsumerDone()
	// We expect Next to panic since DrainMeta panics are currently not caught by
	// the materializer, and it's not clear whether they should be, since
	// implementers of DrainMeta do not return errors as panics.
testutils.IsError(
colexecerror.CatchVectorizedRuntimeError(func() {
m.Next()
}),
testError.Error(),
)
}
func BenchmarkColumnarizeMaterialize(b *testing.B) {
defer log.Scope(b).Close(b)
types := []*types.T{types.Int, types.Int}
nRows := 10000
nCols := 2
rows := randgen.MakeIntRows(nRows, nCols)
input := execinfra.NewRepeatableRowSource(types, rows)
ctx := context.Background()
st := cluster.MakeTestingClusterSettings()
evalCtx := tree.MakeTestingEvalContext(st)
defer evalCtx.Stop(ctx)
flowCtx := &execinfra.FlowCtx{
Cfg: &execinfra.ServerConfig{Settings: st},
EvalCtx: &evalCtx,
}
c := NewBufferingColumnarizer(testAllocator, flowCtx, 0, input)
b.SetBytes(int64(nRows * nCols * int(memsize.Int64)))
for i := 0; i < b.N; i++ {
m := NewMaterializer(
flowCtx,
1, /* processorID */
colexecargs.OpWithMetaInfo{Root: c},
types,
)
m.Start(ctx)
foundRows := 0
for {
row, meta := m.Next()
if meta != nil {
b.Fatalf("unexpected metadata %v", meta)
}
if row == nil {
break
}
foundRows++
}
if foundRows != nRows {
b.Fatalf("expected %d rows, found %d", nRows, foundRows)
}
input.Reset()
}
}
| pkg/sql/colexec/materializer_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/9d55d63b9132b4917e2864581f66cc426836f488 | [
0.0005343774682842195,
0.00019921721832361072,
0.00016028333629947156,
0.00017227957141585648,
0.00009522926120553166
] |
{
"id": 3,
"code_window": [
"\tpad := func(i int) string { return fmt.Sprintf(\"%06d\", i) }\n",
"\tpayload := func(i int) Structured { return &types.StringValue{Value: pad(i)} }\n",
"\tanyPayload, err := types.MarshalAny(payload(42))\n",
"\trequire.NoError(t, err)\n",
"\tstructuredRecord := &tracingpb.StructuredRecord{\n",
"\t\tTime: timeutil.Now(),\n",
"\t\tPayload: anyPayload,\n",
"\t}\n",
"\n",
"\tnumStructuredRecordings := maxStructuredBytesPerSpan / structuredRecord.Size()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tTime: now,\n"
],
"file_path": "pkg/util/tracing/span_test.go",
"type": "replace",
"edit_start_line_idx": 239
} | show_savepoint_stmt ::=
'SHOW' 'SAVEPOINT' 'STATUS'
| docs/generated/sql/bnf/show_savepoint_status.bnf | 0 | https://github.com/cockroachdb/cockroach/commit/9d55d63b9132b4917e2864581f66cc426836f488 | [
0.00016758768470026553,
0.00016758768470026553,
0.00016758768470026553,
0.00016758768470026553,
0
] |
{
"id": 3,
"code_window": [
"\tpad := func(i int) string { return fmt.Sprintf(\"%06d\", i) }\n",
"\tpayload := func(i int) Structured { return &types.StringValue{Value: pad(i)} }\n",
"\tanyPayload, err := types.MarshalAny(payload(42))\n",
"\trequire.NoError(t, err)\n",
"\tstructuredRecord := &tracingpb.StructuredRecord{\n",
"\t\tTime: timeutil.Now(),\n",
"\t\tPayload: anyPayload,\n",
"\t}\n",
"\n",
"\tnumStructuredRecordings := maxStructuredBytesPerSpan / structuredRecord.Size()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tTime: now,\n"
],
"file_path": "pkg/util/tracing/span_test.go",
"type": "replace",
"edit_start_line_idx": 239
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
syntax = "proto3";
package cockroach.sql.schemachanger.scpb;
option go_package = "scpb";
import "sql/catalog/descpb/structured.proto";
import "gogoproto/gogo.proto";
message ElementProto {
option (gogoproto.onlyone) = true;
Column column = 1;
PrimaryIndex primary_index = 2;
SecondaryIndex secondary_index = 3;
SequenceDependency sequence_dependency = 4;
UniqueConstraint unique_constraint = 5;
CheckConstraint check_constraint = 6;
Sequence sequence = 7;
DefaultExpression default_expression = 8;
View view = 9;
TypeReference typeRef = 10;
Table table = 11;
OutboundForeignKey outForeignKey = 12;
InboundForeignKey inForeignKey = 13;
RelationDependedOnBy relationDependedOnBy = 14;
SequenceOwnedBy sequenceOwner = 15;
Type type = 16;
Schema schema = 17;
Database database = 18;
}
message Target {
enum Direction {
UNKNOWN = 0;
ADD = 1;
DROP = 2;
}
ElementProto element_proto = 1 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
Direction direction = 2;
}
enum Status {
UNKNOWN = 0;
ABSENT = 1;
DELETE_ONLY = 2;
DELETE_AND_WRITE_ONLY = 3;
BACKFILLED = 4;
VALIDATED = 5;
PUBLIC = 6;
}
message Column {
option (gogoproto.equal) = true;
uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
uint32 family_id = 2 [(gogoproto.customname) = "FamilyID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.FamilyID"];
string family_name = 3;
cockroach.sql.sqlbase.ColumnDescriptor column = 4 [(gogoproto.nullable) = false];
}
message PrimaryIndex {
option (gogoproto.equal) = true;
uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
cockroach.sql.sqlbase.IndexDescriptor index = 2 [(gogoproto.nullable) = false];
uint32 other_primary_index_id = 3 [(gogoproto.customname) = "OtherPrimaryIndexID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.IndexID"];
}
message SecondaryIndex {
option (gogoproto.equal) = true;
uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
cockroach.sql.sqlbase.IndexDescriptor index = 2 [(gogoproto.nullable) = false];
uint32 primary_index = 3 [(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.IndexID"];
}
message SequenceDependency {
option (gogoproto.equal) = true;
enum Type {
UNKNOWN = 0;
USES = 1;
OWNS = 2;
}
uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
uint32 column_id = 2 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"];
uint32 sequence_id = 3 [(gogoproto.customname) = "SequenceID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
Type type = 4;
}
message UniqueConstraint {
option (gogoproto.equal) = true;
uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
uint32 index_id = 2 [(gogoproto.customname) = "IndexID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.IndexID"];
repeated uint32 column_ids = 3 [(gogoproto.customname) = "ColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"];
}
message CheckConstraint {
option (gogoproto.equal) = true;
uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
string name = 2;
string expr = 3;
repeated uint32 column_ids = 4 [(gogoproto.customname) = "ColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"];
bool validated = 5;
}
message Sequence {
option (gogoproto.equal) = true;
uint32 sequence_id = 1 [(gogoproto.customname) = "SequenceID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
}
message DefaultExpression {
option (gogoproto.equal) = true;
uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
uint32 column_id = 2 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"];
repeated uint32 usesSequenceIDs =3 [(gogoproto.customname) = "UsesSequenceIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
string default_expr = 4;
}
message View {
option (gogoproto.equal) = true;
uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
repeated uint32 dependedOnBy = 2 [(gogoproto.customname) = "DependedOnBy", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
repeated uint32 dependsOn = 3 [(gogoproto.customname) = "DependsOn", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
}
message Table {
option (gogoproto.equal) = true;
uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
repeated uint32 dependedOnBy = 2 [(gogoproto.customname) = "DependedOnBy", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
}
// TypeReference is a reference to a type on a descriptor.
// This corresponds to an element in DescriptorID pointing to TypeID.
message TypeReference {
option (gogoproto.equal) = true;
uint32 descriptor_id = 2 [(gogoproto.customname) = "DescID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
uint32 type_id = 1 [(gogoproto.customname) = "TypeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
}
message OutboundForeignKey {
option (gogoproto.equal) = true;
uint32 origin_id = 1 [(gogoproto.customname) = "OriginID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
repeated uint32 origin_columns = 3 [(gogoproto.customname) = "OriginColumns", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"];
uint32 reference_id = 4 [(gogoproto.customname) = "ReferenceID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
repeated uint32 reference_columns = 5 [(gogoproto.customname) = "ReferenceColumns", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"];
string name = 6;
}
message InboundForeignKey {
option (gogoproto.equal) = true;
uint32 origin_id = 1 [(gogoproto.customname) = "OriginID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
repeated uint32 origin_columns = 3 [(gogoproto.customname) = "OriginColumns", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"];
uint32 reference_id = 4 [(gogoproto.customname) = "ReferenceID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
repeated uint32 reference_columns = 5 [(gogoproto.customname) = "ReferenceColumns", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"];
string name = 6;
}
message SequenceOwnedBy {
uint32 sequence_id = 1 [(gogoproto.customname) = "SequenceID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
uint32 owner_table_id = 2 [(gogoproto.customname) = "OwnerTableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
}
message RelationDependedOnBy {
option (gogoproto.equal) = true;
uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
uint32 dependedOn = 2 [(gogoproto.customname) = "DependedOnBy", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
}
message Type {
uint32 type_id = 1 [(gogoproto.customname) = "TypeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
}
message Schema {
uint32 schema_id = 1 [(gogoproto.customname) = "SchemaID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
repeated uint32 dependentObjects = 2 [(gogoproto.customname) = "DependentObjects", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
}
message Database {
uint32 database_id = 1 [(gogoproto.customname) = "DatabaseID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
repeated uint32 dependentObjects = 2 [(gogoproto.customname) = "DependentObjects", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
}
| pkg/sql/schemachanger/scpb/scpb.proto | 0 | https://github.com/cockroachdb/cockroach/commit/9d55d63b9132b4917e2864581f66cc426836f488 | [
0.00017816611216403544,
0.00016742211300879717,
0.0001611499465070665,
0.0001664734591031447,
0.000003934645974368323
] |
{
"id": 0,
"code_window": [
"\t\"github.com/photoprism/photoprism/internal/forms\"\n",
"\t\"github.com/photoprism/photoprism/internal/photoprism\"\n",
")\n",
"\n",
"// `GET /api/v1/photos`\n",
"//\n",
"// Query:\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"// GET /api/v1/photos\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 13
} | package api
import (
"log"
"net/http"
"strconv"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"github.com/photoprism/photoprism/internal/forms"
"github.com/photoprism/photoprism/internal/photoprism"
)
// `GET /api/v1/photos`
//
// Query:
// - `q`: string Query string `form:""`
// - `tags`: string Tags string `form:"tags"`
// - `cat`: string Category
// - `country`: string Country code
// - `camera`: int Camera ID
// - `order`: string Sort order
// - `count`: int Max result count (required)
// - `offset`: int Result offset
// - `before`: date Find photos taken before (format: "2006-01-02")
// - `after`: date Find photos taken after (format: "2006-01-02")
// - `favorites`: bool Find favorites only
func GetPhotos(router *gin.RouterGroup, conf *photoprism.Config) {
router.GET("/photos", func(c *gin.Context) {
var form forms.PhotoSearchForm
search := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())
c.MustBindWith(&form, binding.Form)
result, err := search.Photos(form)
if err != nil {
c.AbortWithStatusJSON(400, gin.H{"error": err.Error()})
}
c.Header("x-result-count", strconv.Itoa(form.Count))
c.Header("x-result-offset", strconv.Itoa(form.Offset))
c.JSON(http.StatusOK, result)
})
}
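
// Example request (illustrative; host and parameter values assumed):
//
//	GET /api/v1/photos?q=tree&country=de&count=50&offset=0
//
// The handler binds the query string into a forms.PhotoSearchForm, runs the
// search, and echoes count/offset back in the x-result-count and
// x-result-offset response headers.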
// `POST /api/v1/photos/:photoId/like`
//
// Parameters:
// - `photoId`: Photo ID as returned by the API
func LikePhoto(router *gin.RouterGroup, conf *photoprism.Config) {
router.POST("/photos/:photoId/like", func(c *gin.Context) {
search := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())
photoId, err := strconv.ParseUint(c.Param("photoId"), 10, 64)
if err == nil {
photo := search.FindPhotoByID(photoId)
photo.PhotoFavorite = true
conf.GetDb().Save(&photo)
c.JSON(http.StatusAccepted, http.Response{})
} else {
log.Printf("could not find image for id: %s", err.Error())
c.Data(http.StatusNotFound, "image", []byte(""))
}
})
}
// `DELETE /api/v1/photos/:photoId/like`
//
// Parameters:
// - `photoId`: Photo ID as returned by the API
func DislikePhoto(router *gin.RouterGroup, conf *photoprism.Config) {
router.DELETE("/photos/:photoId/like", func(c *gin.Context) {
search := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())
photoId, err := strconv.ParseUint(c.Param("photoId"), 10, 64)
if err == nil {
photo := search.FindPhotoByID(photoId)
photo.PhotoFavorite = false
conf.GetDb().Save(&photo)
c.JSON(http.StatusAccepted, http.Response{})
} else {
log.Printf("could not find image for id: %s", err.Error())
c.Data(http.StatusNotFound, "image", []byte(""))
}
})
}
| internal/api/photos.go | 1 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.31110820174217224,
0.037396594882011414,
0.00016873719869181514,
0.007006804458796978,
0.09144072979688644
] |
{
"id": 0,
"code_window": [
"\t\"github.com/photoprism/photoprism/internal/forms\"\n",
"\t\"github.com/photoprism/photoprism/internal/photoprism\"\n",
")\n",
"\n",
"// `GET /api/v1/photos`\n",
"//\n",
"// Query:\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"// GET /api/v1/photos\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 13
} | package models
import (
"github.com/gosimple/slug"
"github.com/jinzhu/gorm"
)
type Lens struct {
gorm.Model
LensSlug string
LensModel string
LensMake string
LensType string
LensOwner string
LensDescription string `gorm:"type:text;"`
LensNotes string `gorm:"type:text;"`
}
func (Lens) TableName() string {
return "lenses"
}
func NewLens(modelName string, makeName string) *Lens {
if modelName == "" {
modelName = "Unknown"
}
lensSlug := slug.MakeLang(modelName, "en")
result := &Lens{
LensModel: modelName,
LensMake: makeName,
LensSlug: lensSlug,
}
return result
}
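
// exampleLensSketch is an illustrative sketch with assumed model and make
// strings: NewLens normalizes the model name into a slug, and FirstOrCreate
// then fetches or inserts the matching row.
func exampleLensSketch(db *gorm.DB) *Lens {
	return NewLens("EF50mm f/1.8 STM", "Canon").FirstOrCreate(db)
}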
func (c *Lens) FirstOrCreate(db *gorm.DB) *Lens {
db.FirstOrCreate(c, "lens_model = ? AND lens_make = ?", c.LensModel, c.LensMake)
return c
}
| internal/models/lens.go | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.00017798002227209508,
0.00017410558939445764,
0.00016650886391289532,
0.00017596589168533683,
0.000004358635123935528
] |
{
"id": 0,
"code_window": [
"\t\"github.com/photoprism/photoprism/internal/forms\"\n",
"\t\"github.com/photoprism/photoprism/internal/photoprism\"\n",
")\n",
"\n",
"// `GET /api/v1/photos`\n",
"//\n",
"// Query:\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"// GET /api/v1/photos\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 13
} | package photoprism
import (
"bufio"
"errors"
"io/ioutil"
"log"
"os"
"sort"
tf "github.com/tensorflow/tensorflow/tensorflow/go"
"github.com/tensorflow/tensorflow/tensorflow/go/op"
)
// TensorFlow is a tensorflow wrapper holding a graph, labels and a modelPath.
type TensorFlow struct {
modelPath string
graph *tf.Graph
labels []string
}
// NewTensorFlow returns a new TensorFlow.
func NewTensorFlow(tensorFlowModelPath string) *TensorFlow {
return &TensorFlow{modelPath: tensorFlowModelPath}
}
// TensorFlowLabel defines a JSON struct with label and probability.
type TensorFlowLabel struct {
Label string `json:"label"`
Probability float32 `json:"probability"`
}
// TensorFlowLabels is a slice of tensorflow labels.
type TensorFlowLabels []TensorFlowLabel
func (a TensorFlowLabels) Len() int { return len(a) }
func (a TensorFlowLabels) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a TensorFlowLabels) Less(i, j int) bool { return a[i].Probability > a[j].Probability }
// GetImageTagsFromFile returns a slice of tags given a mediafile filename.
func (t *TensorFlow) GetImageTagsFromFile(filename string) (result []TensorFlowLabel, err error) {
imageBuffer, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
return t.GetImageTags(string(imageBuffer))
}
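
// exampleTagsSketch is an illustrative usage sketch; the model and image
// paths are assumed values.
func exampleTagsSketch() {
	t := NewTensorFlow("/srv/photoprism/tensorflow")
	tags, err := t.GetImageTagsFromFile("/photos/example.jpg")
	if err != nil {
		log.Fatal(err)
	}
	for _, tag := range tags {
		log.Printf("%s: %.2f", tag.Label, tag.Probability)
	}
}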
// GetImageTags returns the tags for a given image.
func (t *TensorFlow) GetImageTags(image string) (result []TensorFlowLabel, err error) {
if err := t.loadModel(); err != nil {
return nil, err
}
// Make tensor
tensor, err := t.makeTensorFromImage(image, "jpeg")
if err != nil {
return nil, errors.New("invalid image")
}
// Run inference
session, err := tf.NewSession(t.graph, nil)
if err != nil {
log.Fatal(err)
}
defer session.Close()
output, err := session.Run(
map[tf.Output]*tf.Tensor{
t.graph.Operation("input").Output(0): tensor,
},
[]tf.Output{
t.graph.Operation("output").Output(0),
},
nil)
if err != nil {
return nil, errors.New("could not run inference")
}
// Return best labels
return t.findBestLabels(output[0].Value().([][]float32)[0]), nil
}
func (t *TensorFlow) loadModel() error {
if t.graph != nil {
// Already loaded
return nil
}
// Load inception model
model, err := ioutil.ReadFile(t.modelPath + "/tensorflow_inception_graph.pb")
if err != nil {
return err
}
t.graph = tf.NewGraph()
if err := t.graph.Import(model, ""); err != nil {
return err
}
// Load labels
labelsFile, err := os.Open(t.modelPath + "/imagenet_comp_graph_label_strings.txt")
if err != nil {
return err
}
defer labelsFile.Close()
scanner := bufio.NewScanner(labelsFile)
// Labels are separated by newlines
for scanner.Scan() {
t.labels = append(t.labels, scanner.Text())
}
if err := scanner.Err(); err != nil {
return err
}
return nil
}
func (t *TensorFlow) findBestLabels(probabilities []float32) []TensorFlowLabel {
// Make a list of label/probability pairs
var resultLabels []TensorFlowLabel
for i, p := range probabilities {
if i >= len(t.labels) {
break
}
resultLabels = append(resultLabels, TensorFlowLabel{Label: t.labels[i], Probability: p})
}
// Sort by probability
sort.Sort(TensorFlowLabels(resultLabels))
// Return top 5 labels
return resultLabels[:5]
}
func (t *TensorFlow) makeTensorFromImage(image string, imageFormat string) (*tf.Tensor, error) {
tensor, err := tf.NewTensor(image)
if err != nil {
return nil, err
}
graph, input, output, err := t.makeTransformImageGraph(imageFormat)
if err != nil {
return nil, err
}
session, err := tf.NewSession(graph, nil)
if err != nil {
return nil, err
}
defer session.Close()
normalized, err := session.Run(
map[tf.Output]*tf.Tensor{input: tensor},
[]tf.Output{output},
nil)
if err != nil {
return nil, err
}
return normalized[0], nil
}
// Creates a graph to decode, resize and normalize an image
func (t *TensorFlow) makeTransformImageGraph(imageFormat string) (graph *tf.Graph, input, output tf.Output, err error) {
const (
H, W = 224, 224
Mean = float32(117)
Scale = float32(1)
)
s := op.NewScope()
input = op.Placeholder(s, tf.String)
// Decode PNG or JPEG
var decode tf.Output
if imageFormat == "png" {
decode = op.DecodePng(s, input, op.DecodePngChannels(3))
} else {
decode = op.DecodeJpeg(s, input, op.DecodeJpegChannels(3))
}
// Div and Sub perform (value-Mean)/Scale for each pixel
output = op.Div(s,
op.Sub(s,
// Resize to 224x224 with bilinear interpolation
op.ResizeBilinear(s,
// Create a batch containing a single image
op.ExpandDims(s,
// Use decoded pixel values
op.Cast(s, decode, tf.Float),
op.Const(s.SubScope("make_batch"), int32(0))),
op.Const(s.SubScope("size"), []int32{H, W})),
op.Const(s.SubScope("mean"), Mean)),
op.Const(s.SubScope("scale"), Scale))
graph, err = s.Finalize()
return graph, input, output, err
}
| internal/photoprism/tensorflow.go | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.0025461751502007246,
0.0002913888019975275,
0.0001665217278059572,
0.00017436774214729667,
0.0005172952078282833
] |
{
"id": 0,
"code_window": [
"\t\"github.com/photoprism/photoprism/internal/forms\"\n",
"\t\"github.com/photoprism/photoprism/internal/photoprism\"\n",
")\n",
"\n",
"// `GET /api/v1/photos`\n",
"//\n",
"// Query:\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"// GET /api/v1/photos\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 13
} | @import url("../node_modules/vuetify/dist/vuetify.min.css");
@import url("../node_modules/material-design-icons-iconfont/dist/material-design-icons.css");
@import url("photo.css");
#app div.loading {
text-align: center;
margin: 50px 20px;
}
#busy-overlay {
display: none;
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
z-index: 100;
background-color: rgba(0,0,0,0.2);
}
body {
background: rgb(250, 250, 250);
color: #333333;
font-family: Helvetica, Arial, sans-serif;
}
footer {
clear: both;
padding: 1rem 2rem;
}
main {
padding: 0;
margin: 0;
}
.v-badge__badge {
font-size: 12px;
height: 19px;
width: 19px;
right: -20px;
top: -8px;
} | frontend/css/app.css | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.00017629707872401923,
0.00017213515820913017,
0.00016592023894190788,
0.0001750865631038323,
0.00000429096235166071
] |
{
"id": 1,
"code_window": [
"//\n",
"// Query:\n",
"// - `q`: string Query string `form:\"\"`\n",
"//\t - `tags`: string Tags string `form:\"tags\"`\n",
"//\t - `cat`: string Category\n",
"//\t - `country`: string Country code\n",
"//\t - `camera`: int Camera ID\n",
"//\t - `order`: string Sort order\n",
"//\t - `count`: int Max result count (required)\n",
"//\t - `offset`: int Result offset\n",
"//\t - `before`: date Find photos taken before (format: \"2006-01-02\")\n",
"//\t - `after`: date Find photos taken after (format: \"2006-01-02\")\n",
"//\t - `favorites`: bool Find favorites only\n",
"func GetPhotos(router *gin.RouterGroup, conf *photoprism.Config) {\n",
"\trouter.GET(\"/photos\", func(c *gin.Context) {\n",
"\t\tvar form forms.PhotoSearchForm\n",
"\n",
"\t\tsearch := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// q: string Query string\n",
"// tags: string Tags\n",
"// cat: string Category\n",
"// country: string Country code\n",
"// camera: int Camera ID\n",
"// order: string Sort order\n",
"// count: int Max result count (required)\n",
"// offset: int Result offset\n",
"// before: date Find photos taken before (format: \"2006-01-02\")\n",
"// after: date Find photos taken after (format: \"2006-01-02\")\n",
"// favorites: bool Find favorites only\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 16
} | package api
import (
"log"
"net/http"
"strconv"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"github.com/photoprism/photoprism/internal/forms"
"github.com/photoprism/photoprism/internal/photoprism"
)
// `GET /api/v1/photos`
//
// Query:
// - `q`: string Query string `form:""`
// - `tags`: string Tags string `form:"tags"`
// - `cat`: string Category
// - `country`: string Country code
// - `camera`: int Camera ID
// - `order`: string Sort order
// - `count`: int Max result count (required)
// - `offset`: int Result offset
// - `before`: date Find photos taken before (format: "2006-01-02")
// - `after`: date Find photos taken after (format: "2006-01-02")
// - `favorites`: bool Find favorites only
func GetPhotos(router *gin.RouterGroup, conf *photoprism.Config) {
router.GET("/photos", func(c *gin.Context) {
var form forms.PhotoSearchForm
search := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())
c.MustBindWith(&form, binding.Form)
result, err := search.Photos(form)
if err != nil {
c.AbortWithStatusJSON(400, gin.H{"error": err.Error()})
}
c.Header("x-result-count", strconv.Itoa(form.Count))
c.Header("x-result-offset", strconv.Itoa(form.Offset))
c.JSON(http.StatusOK, result)
})
}
// `POST /api/v1/photos/:photoId/like`
//
// Parameters:
// - `photoId`: Photo ID as returned by the API
func LikePhoto(router *gin.RouterGroup, conf *photoprism.Config) {
router.POST("/photos/:photoId/like", func(c *gin.Context) {
search := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())
photoId, err := strconv.ParseUint(c.Param("photoId"), 10, 64)
if err == nil {
photo := search.FindPhotoByID(photoId)
photo.PhotoFavorite = true
conf.GetDb().Save(&photo)
c.JSON(http.StatusAccepted, http.Response{})
} else {
log.Printf("could not find image for id: %s", err.Error())
c.Data(http.StatusNotFound, "image", []byte(""))
}
})
}
// `DELETE /api/v1/photos/:photoId/like`
//
// Parameters:
// - `photoId`: Photo ID as returned by the API
func DislikePhoto(router *gin.RouterGroup, conf *photoprism.Config) {
router.DELETE("/photos/:photoId/like", func(c *gin.Context) {
search := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())
photoId, err := strconv.ParseUint(c.Param("photoId"), 10, 64)
if err == nil {
photo := search.FindPhotoByID(photoId)
photo.PhotoFavorite = false
conf.GetDb().Save(&photo)
c.JSON(http.StatusAccepted, http.Response{})
} else {
log.Printf("could not find image for id: %s", err.Error())
c.Data(http.StatusNotFound, "image", []byte(""))
}
})
}
| internal/api/photos.go | 1 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.9767905473709106,
0.10966609418392181,
0.000165429271874018,
0.008417998440563679,
0.2896370589733124
] |
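A minimal client-side sketch of the `GET /api/v1/photos` call documented in the record above. The base URL is an assumption for illustration only (host and port come from the server configuration), while the query parameter names and the `x-result-count` / `x-result-offset` response headers mirror what the `GetPhotos` handler actually binds and sets:

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/url"
)

func main() {
	// Assumption: the real address depends on the configured server host and
	// port; "http://localhost:80" is only a placeholder.
	baseURL := "http://localhost:80"

	q := url.Values{}
	q.Set("q", "sunset")     // full-text query string
	q.Set("country", "de")   // country code filter
	q.Set("order", "newest") // sort order
	q.Set("count", "50")     // max result count (required)
	q.Set("offset", "0")     // result offset

	resp, err := http.Get(baseURL + "/api/v1/photos?" + q.Encode())
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// GetPhotos mirrors the paging parameters back as response headers.
	fmt.Println("count:", resp.Header.Get("x-result-count"))
	fmt.Println("offset:", resp.Header.Get("x-result-offset"))
}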
{
"id": 1,
"code_window": [
"//\n",
"// Query:\n",
"// - `q`: string Query string `form:\"\"`\n",
"//\t - `tags`: string Tags string `form:\"tags\"`\n",
"//\t - `cat`: string Category\n",
"//\t - `country`: string Country code\n",
"//\t - `camera`: int Camera ID\n",
"//\t - `order`: string Sort order\n",
"//\t - `count`: int Max result count (required)\n",
"//\t - `offset`: int Result offset\n",
"//\t - `before`: date Find photos taken before (format: \"2006-01-02\")\n",
"//\t - `after`: date Find photos taken after (format: \"2006-01-02\")\n",
"//\t - `favorites`: bool Find favorites only\n",
"func GetPhotos(router *gin.RouterGroup, conf *photoprism.Config) {\n",
"\trouter.GET(\"/photos\", func(c *gin.Context) {\n",
"\t\tvar form forms.PhotoSearchForm\n",
"\n",
"\t\tsearch := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// q: string Query string\n",
"// tags: string Tags\n",
"// cat: string Category\n",
"// country: string Country code\n",
"// camera: int Camera ID\n",
"// order: string Sort order\n",
"// count: int Max result count (required)\n",
"// offset: int Result offset\n",
"// before: date Find photos taken before (format: \"2006-01-02\")\n",
"// after: date Find photos taken after (format: \"2006-01-02\")\n",
"// favorites: bool Find favorites only\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 16
} | /*
This package contains commands and flags to be used by the main application.
See https://photoprism.org/ for more information about PhotoPrism.
*/
package commands
| internal/commands/doc.go | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.00019380691810511053,
0.00019380691810511053,
0.00019380691810511053,
0.00019380691810511053,
0
] |
{
"id": 1,
"code_window": [
"//\n",
"// Query:\n",
"// - `q`: string Query string `form:\"\"`\n",
"//\t - `tags`: string Tags string `form:\"tags\"`\n",
"//\t - `cat`: string Category\n",
"//\t - `country`: string Country code\n",
"//\t - `camera`: int Camera ID\n",
"//\t - `order`: string Sort order\n",
"//\t - `count`: int Max result count (required)\n",
"//\t - `offset`: int Result offset\n",
"//\t - `before`: date Find photos taken before (format: \"2006-01-02\")\n",
"//\t - `after`: date Find photos taken after (format: \"2006-01-02\")\n",
"//\t - `favorites`: bool Find favorites only\n",
"func GetPhotos(router *gin.RouterGroup, conf *photoprism.Config) {\n",
"\trouter.GET(\"/photos\", func(c *gin.Context) {\n",
"\t\tvar form forms.PhotoSearchForm\n",
"\n",
"\t\tsearch := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// q: string Query string\n",
"// tags: string Tags\n",
"// cat: string Category\n",
"// country: string Country code\n",
"// camera: int Camera ID\n",
"// order: string Sort order\n",
"// count: int Max result count (required)\n",
"// offset: int Result offset\n",
"// before: date Find photos taken before (format: \"2006-01-02\")\n",
"// after: date Find photos taken after (format: \"2006-01-02\")\n",
"// favorites: bool Find favorites only\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 16
} | /*
Package forms contains tagged structs for input value validation.
See https://photoprism.org/ for more information about PhotoPrism.
*/
package forms
| internal/forms/doc.go | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.00042150975787080824,
0.00042150975787080824,
0.00042150975787080824,
0.00042150975787080824,
0
] |
{
"id": 1,
"code_window": [
"//\n",
"// Query:\n",
"// - `q`: string Query string `form:\"\"`\n",
"//\t - `tags`: string Tags string `form:\"tags\"`\n",
"//\t - `cat`: string Category\n",
"//\t - `country`: string Country code\n",
"//\t - `camera`: int Camera ID\n",
"//\t - `order`: string Sort order\n",
"//\t - `count`: int Max result count (required)\n",
"//\t - `offset`: int Result offset\n",
"//\t - `before`: date Find photos taken before (format: \"2006-01-02\")\n",
"//\t - `after`: date Find photos taken after (format: \"2006-01-02\")\n",
"//\t - `favorites`: bool Find favorites only\n",
"func GetPhotos(router *gin.RouterGroup, conf *photoprism.Config) {\n",
"\trouter.GET(\"/photos\", func(c *gin.Context) {\n",
"\t\tvar form forms.PhotoSearchForm\n",
"\n",
"\t\tsearch := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// q: string Query string\n",
"// tags: string Tags\n",
"// cat: string Category\n",
"// country: string Country code\n",
"// camera: int Camera ID\n",
"// order: string Sort order\n",
"// count: int Max result count (required)\n",
"// offset: int Result offset\n",
"// before: date Find photos taken before (format: \"2006-01-02\")\n",
"// after: date Find photos taken after (format: \"2006-01-02\")\n",
"// favorites: bool Find favorites only\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 16
} | import Api from 'common/api';
import User from 'model/user';
class Session {
/**
* @param {Storage} storage
*/
constructor(storage) {
this.storage = storage;
this.session_token = this.storage.getItem('session_token');
const userJson = this.storage.getItem('user');
this.user = userJson !== 'undefined' ? new User(JSON.parse(userJson)) : null;
}
setToken(token) {
this.session_token = token;
this.storage.setItem('session_token', token);
Api.defaults.headers.common['X-Session-Token'] = token;
}
getToken() {
return this.session_token;
}
deleteToken() {
this.session_token = null;
this.storage.removeItem('session_token');
Api.defaults.headers.common['X-Session-Token'] = '';
this.deleteUser();
}
setUser(user) {
this.user = user;
this.storage.setItem('user', JSON.stringify(user.getValues()));
}
getUser() {
return this.user;
}
getEmail() {
if (this.isUser()) {
return this.user.userEmail;
}
return '';
}
getFullName() {
if (this.isUser()) {
return this.user.userFirstName + ' ' + this.user.userLastName;
}
return '';
}
getFirstName() {
if (this.isUser()) {
return this.user.userFirstName;
}
return '';
}
isUser() {
return this.user.hasId();
}
isAdmin() {
return this.user.hasId() && this.user.userRole === 'admin';
}
isAnonymous() {
return !this.user.hasId();
}
deleteUser() {
this.user = null;
this.storage.removeItem('user');
}
login(email, password) {
this.deleteToken();
return Api.post('session', { email: email, password: password }).then(
(result) => {
this.setToken(result.data.token);
this.setUser(new User(result.data.user));
}
);
}
logout() {
const token = this.getToken();
this.deleteToken();
Api.delete('session/' + token).then(
() => {
window.location = '/';
}
);
}
}
export default Session;
| frontend/src/common/session.js | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.00017542278510518372,
0.00017155254317913204,
0.0001678630942478776,
0.0001709399075480178,
0.000002371359414610197
] |
{
"id": 2,
"code_window": [
"\n",
"\t\tc.JSON(http.StatusOK, result)\n",
"\t})\n",
"}\n",
"\n",
"// `POST /api/v1/photos/:photoId/like`\n",
"//\n",
"// Parameters:\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"// POST /api/v1/photos/:photoId/like\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 48
} | package api
import (
"fmt"
"log"
"strconv"
"github.com/gin-gonic/gin"
"github.com/photoprism/photoprism/internal/photoprism"
)
var photoIconSvg = []byte(`
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24">
<path d="M0 0h24v24H0z" fill="none"/>
<path d="M21 19V5c0-1.1-.9-2-2-2H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2zM8.5 13.5l2.5 3.01L14.5 12l4.5 6H5l3.5-4.5z"/>
</svg>`)
// `GET /api/v1/thumbnails/:type/:size/:hash`
//
// Parameters:
// - `type`: string Format, either "fit" or "square"
// - `size`: int Size in pixels
// - `hash`: string The file hash as returned by the search API
func GetThumbnail(router *gin.RouterGroup, conf *photoprism.Config) {
router.GET("/thumbnails/:type/:size/:hash", func(c *gin.Context) {
fileHash := c.Param("hash")
thumbnailType := c.Param("type")
size, err := strconv.Atoi(c.Param("size"))
if err != nil {
log.Printf("invalid size: %s", c.Param("size"))
c.Data(400, "image/svg+xml", photoIconSvg)
}
search := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())
file := search.FindFileByHash(fileHash)
fileName := fmt.Sprintf("%s/%s", conf.OriginalsPath, file.FileName)
if mediaFile, err := photoprism.NewMediaFile(fileName); err == nil {
switch thumbnailType {
case "fit":
if thumbnail, err := mediaFile.GetThumbnail(conf.ThumbnailsPath, size); err == nil {
c.File(thumbnail.GetFilename())
} else {
log.Printf("could not create thumbnail: %s", err.Error())
c.Data(400, "image/svg+xml", photoIconSvg)
}
case "square":
if thumbnail, err := mediaFile.GetSquareThumbnail(conf.ThumbnailsPath, size); err == nil {
c.File(thumbnail.GetFilename())
} else {
log.Printf("could not create square thumbnail: %s", err.Error())
c.Data(400, "image/svg+xml", photoIconSvg)
}
default:
log.Printf("unknown thumbnail type: %s", thumbnailType)
c.Data(400, "image/svg+xml", photoIconSvg)
}
} else {
log.Printf("could not find image for thumbnail: %s", err.Error())
c.Data(404, "image/svg+xml", photoIconSvg)
// Set missing flag so that the file doesn't show up in search results anymore
file.FileMissing = true
conf.GetDb().Save(&file)
}
})
}
| internal/api/thumbnails.go | 1 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.000327931105857715,
0.0002170884981751442,
0.0001632564380997792,
0.00018078202265314758,
0.00006206661782925949
] |
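For the thumbnail endpoint in the record above, a hedged fetch-and-save sketch; the base URL and file hash are placeholders (a real hash comes from the FileHash field of a search result), while the "fit"/"square" format and pixel size follow the handler's documented parameters:

package main

import (
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	// Placeholders for illustration; substitute real values.
	baseURL := "http://localhost:80"
	hash := "0123456789abcdef" // FileHash of a search result

	// Format is "fit" or "square", size is in pixels, per the doc comment.
	resp, err := http.Get(baseURL + "/api/v1/thumbnails/fit/350/" + hash)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	out, err := os.Create("thumbnail.jpg")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, resp.Body); err != nil {
		log.Fatal(err)
	}
}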
{
"id": 2,
"code_window": [
"\n",
"\t\tc.JSON(http.StatusOK, result)\n",
"\t})\n",
"}\n",
"\n",
"// `POST /api/v1/photos/:photoId/like`\n",
"//\n",
"// Parameters:\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"// POST /api/v1/photos/:photoId/like\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 48
} | package models
import (
"github.com/jinzhu/gorm"
)
type Location struct {
gorm.Model
LocDisplayName string
LocLat float64
LocLong float64
LocCategory string
LocType string
LocName string
LocHouseNr string
LocStreet string
LocSuburb string
LocCity string
LocPostcode string
LocCounty string
LocState string
LocCountry string
LocCountryCode string
LocDescription string `gorm:"type:text;"`
LocNotes string `gorm:"type:text;"`
LocPhoto *Photo
LocPhotoID uint
LocFavorite bool
}
| internal/models/location.go | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.00017668548389337957,
0.0001725093461573124,
0.00016828184016048908,
0.00017256071441806853,
0.0000034309655347897206
] |
{
"id": 2,
"code_window": [
"\n",
"\t\tc.JSON(http.StatusOK, result)\n",
"\t})\n",
"}\n",
"\n",
"// `POST /api/v1/photos/:photoId/like`\n",
"//\n",
"// Parameters:\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"// POST /api/v1/photos/:photoId/like\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 48
} | package commands
import (
"fmt"
"log"
"github.com/photoprism/photoprism/internal/photoprism"
"github.com/urfave/cli"
)
var IndexCommand = cli.Command{
Name: "index",
Usage: "Re-indexes all originals",
Action: indexAction,
}
// Indexes original photos; called by IndexCommand
func indexAction(context *cli.Context) error {
conf := photoprism.NewConfig(context)
if err := conf.CreateDirectories(); err != nil {
log.Fatal(err)
}
conf.MigrateDb()
fmt.Printf("Indexing photos in %s...\n", conf.OriginalsPath)
tensorFlow := photoprism.NewTensorFlow(conf.GetTensorFlowModelPath())
indexer := photoprism.NewIndexer(conf.OriginalsPath, tensorFlow, conf.GetDb())
indexer.IndexAll()
fmt.Println("Done.")
return nil
}
| internal/commands/index.go | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.002712571993470192,
0.000904124986846,
0.00016703394067008048,
0.00036844704300165176,
0.0010566256241872907
] |
{
"id": 2,
"code_window": [
"\n",
"\t\tc.JSON(http.StatusOK, result)\n",
"\t})\n",
"}\n",
"\n",
"// `POST /api/v1/photos/:photoId/like`\n",
"//\n",
"// Parameters:\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"// POST /api/v1/photos/:photoId/like\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 48
} | export GO111MODULE=on
GOCMD=go
GOINSTALL=$(GOCMD) install
GOBUILD=$(GOCMD) build
GOMOD=$(GOCMD) mod
GORUN=$(GOCMD) run
GOCLEAN=$(GOCMD) clean
GOTEST=$(GOCMD) test
GOGET=$(GOCMD) get
GOFMT=$(GOCMD) fmt
GOIMPORTS=goimports
BINARY_NAME=photoprism
all: tensorflow-model dep js build
install: install-bin install-assets install-config
install-bin:
$(GOINSTALL) cmd/photoprism/photoprism.go
install-assets:
mkdir -p /var/photoprism
mkdir -p /var/photoprism/photos
mkdir -p /var/photoprism/thumbnails
cp -r assets/favicons /var/photoprism
cp -r assets/public /var/photoprism
cp -r assets/templates /var/photoprism
cp -r assets/tensorflow /var/photoprism
install-config:
mkdir -p /etc/photoprism
test -e /etc/photoprism/photoprism.yml || cp -n configs/photoprism.yml /etc/photoprism/photoprism.yml
build:
$(GOBUILD) cmd/photoprism/photoprism.go
js:
(cd frontend && yarn install --prod)
(cd frontend && env NODE_ENV=production npm run build)
start:
$(GORUN) cmd/photoprism/photoprism.go start
migrate:
$(GORUN) cmd/photoprism/photoprism.go migrate
test:
$(GOTEST) -v ./internal/...
clean:
$(GOCLEAN)
rm -f $(BINARY_NAME)
tensorflow-model:
scripts/download-tf-model.sh
docker-push:
scripts/docker-push.sh
fmt:
$(GOIMPORTS) -w internal cmd
$(GOFMT) ./internal/... ./cmd/...
dep:
$(GOBUILD) -v ./...
$(GOMOD) tidy
upgrade:
$(GOGET) -u | Makefile | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.0024718905333429575,
0.0005526244058273733,
0.00016480154590681195,
0.0001694973325356841,
0.0008583244634792209
] |
{
"id": 3,
"code_window": [
"//\n",
"// Parameters:\n",
"// - `photoId`: Photo ID as returned by the API\n",
"func LikePhoto(router *gin.RouterGroup, conf *photoprism.Config) {\n",
"\trouter.POST(\"/photos/:photoId/like\", func(c *gin.Context) {\n",
"\t\tsearch := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())\n",
"\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// photoId: int Photo ID as returned by the API\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 51
} | package api
import (
"log"
"net/http"
"strconv"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"github.com/photoprism/photoprism/internal/forms"
"github.com/photoprism/photoprism/internal/photoprism"
)
// `GET /api/v1/photos`
//
// Query:
// - `q`: string Query string `form:""`
// - `tags`: string Tags string `form:"tags"`
// - `cat`: string Category
// - `country`: string Country code
// - `camera`: int Camera ID
// - `order`: string Sort order
// - `count`: int Max result count (required)
// - `offset`: int Result offset
// - `before`: date Find photos taken before (format: "2006-01-02")
// - `after`: date Find photos taken after (format: "2006-01-02")
// - `favorites`: bool Find favorites only
func GetPhotos(router *gin.RouterGroup, conf *photoprism.Config) {
router.GET("/photos", func(c *gin.Context) {
var form forms.PhotoSearchForm
search := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())
c.MustBindWith(&form, binding.Form)
result, err := search.Photos(form)
if err != nil {
c.AbortWithStatusJSON(400, gin.H{"error": err.Error()})
}
c.Header("x-result-count", strconv.Itoa(form.Count))
c.Header("x-result-offset", strconv.Itoa(form.Offset))
c.JSON(http.StatusOK, result)
})
}
// `POST /api/v1/photos/:photoId/like`
//
// Parameters:
// - `photoId`: Photo ID as returned by the API
func LikePhoto(router *gin.RouterGroup, conf *photoprism.Config) {
router.POST("/photos/:photoId/like", func(c *gin.Context) {
search := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())
photoId, err := strconv.ParseUint(c.Param("photoId"), 10, 64)
if err == nil {
photo := search.FindPhotoByID(photoId)
photo.PhotoFavorite = true
conf.GetDb().Save(&photo)
c.JSON(http.StatusAccepted, http.Response{})
} else {
log.Printf("could not find image for id: %s", err.Error())
c.Data(http.StatusNotFound, "image", []byte(""))
}
})
}
// `DELETE /api/v1/photos/:photoId/like`
//
// Parameters:
// - `photoId`: Photo ID as returned by the API
func DislikePhoto(router *gin.RouterGroup, conf *photoprism.Config) {
router.DELETE("/photos/:photoId/like", func(c *gin.Context) {
search := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())
photoId, err := strconv.ParseUint(c.Param("photoId"), 10, 64)
if err == nil {
photo := search.FindPhotoByID(photoId)
photo.PhotoFavorite = false
conf.GetDb().Save(&photo)
c.JSON(http.StatusAccepted, http.Response{})
} else {
log.Printf("could not find image for id: %s", err.Error())
c.Data(http.StatusNotFound, "image", []byte(""))
}
})
}
| internal/api/photos.go | 1 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.9970912933349609,
0.4089927077293396,
0.00017332303104922175,
0.05181700363755226,
0.4799380302429199
] |
{
"id": 3,
"code_window": [
"//\n",
"// Parameters:\n",
"// - `photoId`: Photo ID as returned by the API\n",
"func LikePhoto(router *gin.RouterGroup, conf *photoprism.Config) {\n",
"\trouter.POST(\"/photos/:photoId/like\", func(c *gin.Context) {\n",
"\t\tsearch := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())\n",
"\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// photoId: int Photo ID as returned by the API\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 51
} | package photoprism
import (
"log"
"os"
"os/user"
"path/filepath"
"time"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/mssql" // Import gorm drivers
_ "github.com/jinzhu/gorm/dialects/mysql"
_ "github.com/jinzhu/gorm/dialects/postgres"
_ "github.com/jinzhu/gorm/dialects/sqlite"
"github.com/kylelemons/go-gypsy/yaml"
"github.com/photoprism/photoprism/internal/models"
"github.com/urfave/cli"
)
// Config provides a struct in which application configuration is stored.
type Config struct {
Debug bool
ConfigFile string
ServerIP string
ServerPort int
ServerMode string
AssetsPath string
ThumbnailsPath string
OriginalsPath string
ImportPath string
ExportPath string
DarktableCli string
DatabaseDriver string
DatabaseDsn string
db *gorm.DB
}
type configValues map[string]interface{}
// NewConfig creates a new configuration entity by using two methods.
// 1: SetValuesFromFile: This will initialize values from a yaml config file.
// 2: SetValuesFromCliContext: Which comes after SetValuesFromFile and overrides
// any previous values, giving an option to override file configs through the CLI.
func NewConfig(context *cli.Context) *Config {
c := &Config{}
c.SetValuesFromFile(GetExpandedFilename(context.GlobalString("config-file")))
c.SetValuesFromCliContext(context)
return c
}
// SetValuesFromFile uses a yaml config file to initialize the configuration entity.
func (c *Config) SetValuesFromFile(fileName string) error {
yamlConfig, err := yaml.ReadFile(fileName)
if err != nil {
return err
}
c.ConfigFile = fileName
if debug, err := yamlConfig.GetBool("debug"); err == nil {
c.Debug = debug
}
if serverIP, err := yamlConfig.Get("server-host"); err == nil {
c.ServerIP = serverIP
}
if serverPort, err := yamlConfig.GetInt("server-port"); err == nil {
c.ServerPort = int(serverPort)
}
if serverMode, err := yamlConfig.Get("server-mode"); err == nil {
c.ServerMode = serverMode
}
if assetsPath, err := yamlConfig.Get("assets-path"); err == nil {
c.AssetsPath = GetExpandedFilename(assetsPath)
}
if thumbnailsPath, err := yamlConfig.Get("thumbnails-path"); err == nil {
c.ThumbnailsPath = GetExpandedFilename(thumbnailsPath)
}
if originalsPath, err := yamlConfig.Get("originals-path"); err == nil {
c.OriginalsPath = GetExpandedFilename(originalsPath)
}
if importPath, err := yamlConfig.Get("import-path"); err == nil {
c.ImportPath = GetExpandedFilename(importPath)
}
if exportPath, err := yamlConfig.Get("export-path"); err == nil {
c.ExportPath = GetExpandedFilename(exportPath)
}
if darktableCli, err := yamlConfig.Get("darktable-cli"); err == nil {
c.DarktableCli = GetExpandedFilename(darktableCli)
}
if databaseDriver, err := yamlConfig.Get("database-driver"); err == nil {
c.DatabaseDriver = databaseDriver
}
if databaseDsn, err := yamlConfig.Get("database-dsn"); err == nil {
c.DatabaseDsn = databaseDsn
}
return nil
}
// SetValuesFromCliContext uses values from the CLI to set up configuration
// overrides for the entity.
func (c *Config) SetValuesFromCliContext(context *cli.Context) error {
if context.GlobalBool("debug") {
c.Debug = context.GlobalBool("debug")
}
if context.GlobalIsSet("assets-path") || c.AssetsPath == "" {
c.AssetsPath = GetExpandedFilename(context.GlobalString("assets-path"))
}
if context.GlobalIsSet("thumbnails-path") || c.ThumbnailsPath == "" {
c.ThumbnailsPath = GetExpandedFilename(context.GlobalString("thumbnails-path"))
}
if context.GlobalIsSet("originals-path") || c.OriginalsPath == "" {
c.OriginalsPath = GetExpandedFilename(context.GlobalString("originals-path"))
}
if context.GlobalIsSet("import-path") || c.ImportPath == "" {
c.ImportPath = GetExpandedFilename(context.GlobalString("import-path"))
}
if context.GlobalIsSet("export-path") || c.ExportPath == "" {
c.ExportPath = GetExpandedFilename(context.GlobalString("export-path"))
}
if context.GlobalIsSet("darktable-cli") || c.DarktableCli == "" {
c.DarktableCli = GetExpandedFilename(context.GlobalString("darktable-cli"))
}
if context.GlobalIsSet("database-driver") || c.DatabaseDriver == "" {
c.DatabaseDriver = context.GlobalString("database-driver")
}
if context.GlobalIsSet("database-dsn") || c.DatabaseDsn == "" {
c.DatabaseDsn = context.GlobalString("database-dsn")
}
return nil
}
// CreateDirectories creates all the folders that photoprism needs. These are:
// OriginalsPath
// ThumbnailsPath
// ImportPath
// ExportPath
func (c *Config) CreateDirectories() error {
if err := os.MkdirAll(c.OriginalsPath, os.ModePerm); err != nil {
return err
}
if err := os.MkdirAll(c.ThumbnailsPath, os.ModePerm); err != nil {
return err
}
if err := os.MkdirAll(c.ImportPath, os.ModePerm); err != nil {
return err
}
if err := os.MkdirAll(c.ExportPath, os.ModePerm); err != nil {
return err
}
return nil
}
// connectToDatabase establishes a connection to a database given a driver.
// It tries to do this 12 times with a 5-second sleep interval in between.
func (c *Config) connectToDatabase() error {
db, err := gorm.Open(c.DatabaseDriver, c.DatabaseDsn)
if err != nil || db == nil {
for i := 1; i <= 12; i++ {
time.Sleep(5 * time.Second)
db, err = gorm.Open(c.DatabaseDriver, c.DatabaseDsn)
if db != nil && err == nil {
break
}
}
if err != nil || db == nil {
log.Fatal(err)
}
}
c.db = db
return err
}
// GetAssetsPath returns the path to the assets.
func (c *Config) GetAssetsPath() string {
return c.AssetsPath
}
// GetTensorFlowModelPath returns the tensorflow model path.
func (c *Config) GetTensorFlowModelPath() string {
return c.GetAssetsPath() + "/tensorflow"
}
// GetTemplatesPath returns the templates path.
func (c *Config) GetTemplatesPath() string {
return c.GetAssetsPath() + "/templates"
}
// GetFaviconsPath returns the favicons path.
func (c *Config) GetFaviconsPath() string {
return c.GetAssetsPath() + "/favicons"
}
// GetPublicPath returns the public path.
func (c *Config) GetPublicPath() string {
return c.GetAssetsPath() + "/public"
}
// GetPublicBuildPath returns the public build path.
func (c *Config) GetPublicBuildPath() string {
return c.GetPublicPath() + "/build"
}
// GetDb gets a db connection. If one is already established, it is returned.
func (c *Config) GetDb() *gorm.DB {
if c.db == nil {
c.connectToDatabase()
}
return c.db
}
// MigrateDb will start a migration process.
func (c *Config) MigrateDb() {
db := c.GetDb()
db.AutoMigrate(&models.File{},
&models.Photo{},
&models.Tag{},
&models.Album{},
&models.Location{},
&models.Camera{},
&models.Lens{},
&models.Country{})
if !db.Dialect().HasIndex("photos", "photos_fulltext") {
db.Exec("CREATE FULLTEXT INDEX photos_fulltext ON photos (photo_title, photo_description, photo_artist, photo_colors)")
}
}
// GetClientConfig returns the configuration values that are passed on to the web client.
func (c *Config) GetClientConfig() map[string]interface{} {
db := c.GetDb()
var cameras []*models.Camera
type country struct {
LocCountry string
LocCountryCode string
}
var countries []country
db.Model(&models.Location{}).Select("DISTINCT loc_country_code, loc_country").Scan(&countries)
db.Where("deleted_at IS NULL").Limit(1000).Order("camera_model").Find(&cameras)
jsHash := fileHash(c.GetPublicBuildPath() + "/app.js")
cssHash := fileHash(c.GetPublicBuildPath() + "/app.css")
result := configValues{
"title": "PhotoPrism",
"debug": c.Debug,
"cameras": cameras,
"countries": countries,
"jsHash": jsHash,
"cssHash": cssHash,
}
return result
}
// GetExpandedFilename returns the absolute form of a filename, expanding a leading "~/" to the user's home directory.
func GetExpandedFilename(filename string) string {
usr, _ := user.Current()
dir := usr.HomeDir
if filename == "" {
panic("filename was empty")
}
if len(filename) > 2 && filename[:2] == "~/" {
filename = filepath.Join(dir, filename[2:])
}
result, _ := filepath.Abs(filename)
return result
}
| internal/photoprism/config.go | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.008406909182667732,
0.0011168294586241245,
0.00016424464411102235,
0.00017182627925649285,
0.002070925198495388
] |
{
"id": 3,
"code_window": [
"//\n",
"// Parameters:\n",
"// - `photoId`: Photo ID as returned by the API\n",
"func LikePhoto(router *gin.RouterGroup, conf *photoprism.Config) {\n",
"\trouter.POST(\"/photos/:photoId/like\", func(c *gin.Context) {\n",
"\t\tsearch := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())\n",
"\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// photoId: int Photo ID as returned by the API\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 51
} | module.exports = {
env: {
browser: true,
commonjs: true,
es6: true,
node: true,
mocha: true,
},
extends: 'eslint:recommended',
parserOptions: {
sourceType: 'module',
},
rules: {
'comma-dangle': ['error', 'always-multiline'],
indent: ['error', 4],
'linebreak-style': ['error', 'unix'],
quotes: ['error', 'single'],
semi: ['error', 'always'],
'no-unused-vars': ['warn'],
'no-console': 0,
},
}; | frontend/.eslintrc.js | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.00017463654512539506,
0.00017420202493667603,
0.00017335046140942723,
0.00017461908282712102,
6.021919602972048e-7
] |
{
"id": 3,
"code_window": [
"//\n",
"// Parameters:\n",
"// - `photoId`: Photo ID as returned by the API\n",
"func LikePhoto(router *gin.RouterGroup, conf *photoprism.Config) {\n",
"\trouter.POST(\"/photos/:photoId/like\", func(c *gin.Context) {\n",
"\t\tsearch := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())\n",
"\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// photoId: int Photo ID as returned by the API\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 51
} | package photoprism
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestMediaFile_GetThumbnail(t *testing.T) {
conf := NewTestConfig()
conf.CreateDirectories()
conf.InitializeTestData(t)
image1, err := NewMediaFile(conf.ImportPath + "/iphone/IMG_6788.JPG")
assert.Nil(t, err)
thumbnail1, err := image1.GetThumbnail(conf.ThumbnailsPath, 350)
assert.Empty(t, err)
assert.IsType(t, &MediaFile{}, thumbnail1)
}
func TestMediaFile_GetSquareThumbnail(t *testing.T) {
conf := NewTestConfig()
conf.CreateDirectories()
conf.InitializeTestData(t)
image1, err := NewMediaFile(conf.ImportPath + "/iphone/IMG_6788.JPG")
assert.Nil(t, err)
thumbnail1, err := image1.GetSquareThumbnail(conf.ThumbnailsPath, 350)
assert.Empty(t, err)
assert.IsType(t, &MediaFile{}, thumbnail1)
}
func TestCreateThumbnailsFromOriginals(t *testing.T) {
conf := NewTestConfig()
conf.CreateDirectories()
conf.InitializeTestData(t)
tensorFlow := NewTensorFlow(conf.GetTensorFlowModelPath())
indexer := NewIndexer(conf.OriginalsPath, tensorFlow, conf.GetDb())
converter := NewConverter(conf.DarktableCli)
importer := NewImporter(conf.OriginalsPath, indexer, converter)
importer.ImportPhotosFromDirectory(conf.ImportPath)
CreateThumbnailsFromOriginals(conf.OriginalsPath, conf.ThumbnailsPath, 600, false)
CreateThumbnailsFromOriginals(conf.OriginalsPath, conf.ThumbnailsPath, 300, true)
}
| internal/photoprism/thumbnails_test.go | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.005033720284700394,
0.0019555387552827597,
0.00016594411863479763,
0.001763737527653575,
0.001566398423165083
] |
{
"id": 4,
"code_window": [
"\t})\n",
"}\n",
"\n",
"// `DELETE /api/v1/photos/:photoId/like`\n",
"//\n",
"// Parameters:\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"// DELETE /api/v1/photos/:photoId/like\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 70
} | package api
import (
"log"
"net/http"
"strconv"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"github.com/photoprism/photoprism/internal/forms"
"github.com/photoprism/photoprism/internal/photoprism"
)
// `GET /api/v1/photos`
//
// Query:
// - `q`: string Query string `form:""`
// - `tags`: string Tags string `form:"tags"`
// - `cat`: string Category
// - `country`: string Country code
// - `camera`: int Camera ID
// - `order`: string Sort order
// - `count`: int Max result count (required)
// - `offset`: int Result offset
// - `before`: date Find photos taken before (format: "2006-01-02")
// - `after`: date Find photos taken after (format: "2006-01-02")
// - `favorites`: bool Find favorites only
func GetPhotos(router *gin.RouterGroup, conf *photoprism.Config) {
router.GET("/photos", func(c *gin.Context) {
var form forms.PhotoSearchForm
search := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())
c.MustBindWith(&form, binding.Form)
result, err := search.Photos(form)
if err != nil {
c.AbortWithStatusJSON(400, gin.H{"error": err.Error()})
}
c.Header("x-result-count", strconv.Itoa(form.Count))
c.Header("x-result-offset", strconv.Itoa(form.Offset))
c.JSON(http.StatusOK, result)
})
}
// `POST /api/v1/photos/:photoId/like`
//
// Parameters:
// - `photoId`: Photo ID as returned by the API
func LikePhoto(router *gin.RouterGroup, conf *photoprism.Config) {
router.POST("/photos/:photoId/like", func(c *gin.Context) {
search := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())
photoId, err := strconv.ParseUint(c.Param("photoId"), 10, 64)
if err == nil {
photo := search.FindPhotoByID(photoId)
photo.PhotoFavorite = true
conf.GetDb().Save(&photo)
c.JSON(http.StatusAccepted, http.Response{})
} else {
log.Printf("could not find image for id: %s", err.Error())
c.Data(http.StatusNotFound, "image", []byte(""))
}
})
}
// `DELETE /api/v1/photos/:photoId/like`
//
// Parameters:
// - `photoId`: Photo ID as returned by the API
func DislikePhoto(router *gin.RouterGroup, conf *photoprism.Config) {
router.DELETE("/photos/:photoId/like", func(c *gin.Context) {
search := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())
photoId, err := strconv.ParseUint(c.Param("photoId"), 10, 64)
if err == nil {
photo := search.FindPhotoByID(photoId)
photo.PhotoFavorite = false
conf.GetDb().Save(&photo)
c.JSON(http.StatusAccepted, http.Response{})
} else {
log.Printf("could not find image for id: %s", err.Error())
c.Data(http.StatusNotFound, "image", []byte(""))
}
})
}
| internal/api/photos.go | 1 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.02425936982035637,
0.0061126030050218105,
0.00015871957293711603,
0.0011193312238901854,
0.008426300249993801
] |
{
"id": 4,
"code_window": [
"\t})\n",
"}\n",
"\n",
"// `DELETE /api/v1/photos/:photoId/like`\n",
"//\n",
"// Parameters:\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"// DELETE /api/v1/photos/:photoId/like\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 70
} | version: '3.3'
services:
photoprism:
build: .
image: photoprism/photoprism
command: tail -f /dev/null
database:
image: mysql:latest
command: mysqld --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci --max-connections=1024
environment:
MYSQL_ROOT_PASSWORD: photoprism
MYSQL_USER: photoprism
MYSQL_PASSWORD: photoprism
MYSQL_DATABASE: photoprism | docker-compose.travis.yml | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.0001717075501801446,
0.00016987956769298762,
0.00016805158520583063,
0.00016987956769298762,
0.0000018279824871569872
] |
{
"id": 4,
"code_window": [
"\t})\n",
"}\n",
"\n",
"// `DELETE /api/v1/photos/:photoId/like`\n",
"//\n",
"// Parameters:\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"// DELETE /api/v1/photos/:photoId/like\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 70
} | const path = require('path');
const ExtractTextPlugin = require('extract-text-webpack-plugin');
const webpack = require('webpack');
const PATHS = {
app: path.join(__dirname, 'src/app.js'),
css: path.join(__dirname, 'css'),
build: path.join(__dirname, '../assets/public/build'),
};
const cssPlugin = new ExtractTextPlugin({
filename: '[name].css',
});
// See https://github.com/webpack/loader-utils/issues/56
process.noDeprecation = true;
const config = {
devtool: false,
entry: {
app: PATHS.app,
},
output: {
path: PATHS.build,
filename: '[name].js',
},
resolve: {
modules: [
path.join(__dirname, 'src'),
path.join(__dirname, 'node_modules'),
],
alias: {
vue: 'vue/dist/vue.js',
},
},
plugins: [
cssPlugin
],
node: {
fs: 'empty',
},
module: {
rules: [
{
test: /\.(js)$/,
include: PATHS.app,
enforce: 'pre',
loader: 'eslint-loader',
},
{
test: /\.js$/,
loader: 'babel-loader',
query: {
presets: ['es2015'],
},
},
{
test: /\.vue$/,
loader: 'vue-loader',
options: {
loaders: {
js: 'babel-loader?presets[]=es2015',
},
},
},
{
test: /\.css$/,
include: PATHS.css,
exclude: /node_modules/,
use: cssPlugin.extract({
use: 'css-loader',
fallback: 'style-loader',
}),
},
{
test: /\.css$/,
include: /node_modules/,
loaders: ['style-loader', 'css-loader']
},
{
test: /\.scss$/,
loaders: ['style-loader', 'css-loader', 'sass-loader']
},
{
test: /\.(png|jpg|jpeg|gif|svg|woff|woff2)$/,
loader: 'url-loader',
},
{
test: /\.(wav|mp3|eot|ttf)$/,
loader: 'file-loader',
},
{
test: /\.svg/,
use: {
loader: 'svg-url-loader',
options: {},
},
},
],
},
};
// No sourcemap for production
if (process.env.NODE_ENV !== "production") {
const devToolPlugin = new webpack.SourceMapDevToolPlugin({
filename: '[name].map',
});
config.plugins.push(devToolPlugin);
}
module.exports = config; | frontend/webpack.config.js | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.00017292339180130512,
0.00016950703866314143,
0.00016648726887069643,
0.0001696045364951715,
0.0000019667993456096156
] |
{
"id": 4,
"code_window": [
"\t})\n",
"}\n",
"\n",
"// `DELETE /api/v1/photos/:photoId/like`\n",
"//\n",
"// Parameters:\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"// DELETE /api/v1/photos/:photoId/like\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 70
} | package commands
import (
"fmt"
"log"
"github.com/photoprism/photoprism/internal/photoprism"
"github.com/urfave/cli"
)
var IndexCommand = cli.Command{
Name: "index",
Usage: "Re-indexes all originals",
Action: indexAction,
}
// Indexes original photos; called by IndexCommand
func indexAction(context *cli.Context) error {
conf := photoprism.NewConfig(context)
if err := conf.CreateDirectories(); err != nil {
log.Fatal(err)
}
conf.MigrateDb()
fmt.Printf("Indexing photos in %s...\n", conf.OriginalsPath)
tensorFlow := photoprism.NewTensorFlow(conf.GetTensorFlowModelPath())
indexer := photoprism.NewIndexer(conf.OriginalsPath, tensorFlow, conf.GetDb())
indexer.IndexAll()
fmt.Println("Done.")
return nil
}
| internal/commands/index.go | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.002670127199962735,
0.0009126901859417558,
0.0001676989340921864,
0.0004064673848915845,
0.001032995292916894
] |
{
"id": 5,
"code_window": [
"//\n",
"// Parameters:\n",
"// - `photoId`: Photo ID as returned by the API\n",
"func DislikePhoto(router *gin.RouterGroup, conf *photoprism.Config) {\n",
"\trouter.DELETE(\"/photos/:photoId/like\", func(c *gin.Context) {\n",
"\t\tsearch := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())\n",
"\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// photoId: int Photo ID as returned by the API\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 73
} | package api
import (
"fmt"
"log"
"strconv"
"github.com/gin-gonic/gin"
"github.com/photoprism/photoprism/internal/photoprism"
)
var photoIconSvg = []byte(`
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24">
<path d="M0 0h24v24H0z" fill="none"/>
<path d="M21 19V5c0-1.1-.9-2-2-2H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2zM8.5 13.5l2.5 3.01L14.5 12l4.5 6H5l3.5-4.5z"/>
</svg>`)
// `GET /api/v1/thumbnails/:type/:size/:hash`
//
// Parameters:
// - `type`: string Format, either "fit" or "square"
// - `size`: int Size in pixels
// - `hash`: string The file hash as returned by the search API
func GetThumbnail(router *gin.RouterGroup, conf *photoprism.Config) {
router.GET("/thumbnails/:type/:size/:hash", func(c *gin.Context) {
fileHash := c.Param("hash")
thumbnailType := c.Param("type")
size, err := strconv.Atoi(c.Param("size"))
if err != nil {
log.Printf("invalid size: %s", c.Param("size"))
c.Data(400, "image/svg+xml", photoIconSvg)
}
search := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())
file := search.FindFileByHash(fileHash)
fileName := fmt.Sprintf("%s/%s", conf.OriginalsPath, file.FileName)
if mediaFile, err := photoprism.NewMediaFile(fileName); err == nil {
switch thumbnailType {
case "fit":
if thumbnail, err := mediaFile.GetThumbnail(conf.ThumbnailsPath, size); err == nil {
c.File(thumbnail.GetFilename())
} else {
log.Printf("could not create thumbnail: %s", err.Error())
c.Data(400, "image/svg+xml", photoIconSvg)
}
case "square":
if thumbnail, err := mediaFile.GetSquareThumbnail(conf.ThumbnailsPath, size); err == nil {
c.File(thumbnail.GetFilename())
} else {
log.Printf("could not create square thumbnail: %s", err.Error())
c.Data(400, "image/svg+xml", photoIconSvg)
}
default:
log.Printf("unknown thumbnail type: %s", thumbnailType)
c.Data(400, "image/svg+xml", photoIconSvg)
}
} else {
log.Printf("could not find image for thumbnail: %s", err.Error())
c.Data(404, "image/svg+xml", photoIconSvg)
// Set missing flag so that the file doesn't show up in search results anymore
file.FileMissing = true
conf.GetDb().Save(&file)
}
})
}
| internal/api/thumbnails.go | 1 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.9880561828613281,
0.22440820932388306,
0.00016820163000375032,
0.0011127120815217495,
0.3900347948074341
] |
{
"id": 5,
"code_window": [
"//\n",
"// Parameters:\n",
"// - `photoId`: Photo ID as returned by the API\n",
"func DislikePhoto(router *gin.RouterGroup, conf *photoprism.Config) {\n",
"\trouter.DELETE(\"/photos/:photoId/like\", func(c *gin.Context) {\n",
"\t\tsearch := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())\n",
"\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// photoId: int Photo ID as returned by the API\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 73
} | package server
import (
"fmt"
"github.com/gin-gonic/gin"
"github.com/photoprism/photoprism/internal/photoprism"
)
func Start(conf *photoprism.Config) {
if conf.ServerMode != "" {
gin.SetMode(conf.ServerMode)
} else if conf.Debug == false {
gin.SetMode(gin.ReleaseMode)
}
app := gin.Default()
// Set template directory
app.LoadHTMLGlob(conf.GetTemplatesPath() + "/*")
registerRoutes(app, conf)
app.Run(fmt.Sprintf("%s:%d", conf.ServerIP, conf.ServerPort))
}
| internal/server/server.go | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.004545351956039667,
0.001636109664104879,
0.00016571122978348285,
0.00019726587925106287,
0.0020571851637214422
] |
{
"id": 5,
"code_window": [
"//\n",
"// Parameters:\n",
"// - `photoId`: Photo ID as returned by the API\n",
"func DislikePhoto(router *gin.RouterGroup, conf *photoprism.Config) {\n",
"\trouter.DELETE(\"/photos/:photoId/like\", func(c *gin.Context) {\n",
"\t\tsearch := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())\n",
"\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// photoId: int Photo ID as returned by the API\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 73
} | package commands
import (
"fmt"
"github.com/photoprism/photoprism/internal/photoprism"
"github.com/urfave/cli"
)
var ConfigCommand = cli.Command{
Name: "config",
Usage: "Displays global configuration values",
Action: configAction,
}
// Prints current configuration; called by ConfigCommand
func configAction(context *cli.Context) error {
conf := photoprism.NewConfig(context)
fmt.Printf("NAME VALUE\n")
fmt.Printf("debug %t\n", conf.Debug)
fmt.Printf("config-file %s\n", conf.ConfigFile)
fmt.Printf("assets-path %s\n", conf.AssetsPath)
fmt.Printf("originals-path %s\n", conf.OriginalsPath)
fmt.Printf("thumbnails-path %s\n", conf.ThumbnailsPath)
fmt.Printf("import-path %s\n", conf.ImportPath)
fmt.Printf("export-path %s\n", conf.ExportPath)
fmt.Printf("darktable-cli %s\n", conf.DarktableCli)
fmt.Printf("database-driver %s\n", conf.DatabaseDriver)
fmt.Printf("database-dsn %s\n", conf.DatabaseDsn)
return nil
}
| internal/commands/config.go | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.00308084674179554,
0.0011221308959648013,
0.00017118931282311678,
0.0006182438228279352,
0.001147483242675662
] |
{
"id": 5,
"code_window": [
"//\n",
"// Parameters:\n",
"// - `photoId`: Photo ID as returned by the API\n",
"func DislikePhoto(router *gin.RouterGroup, conf *photoprism.Config) {\n",
"\trouter.DELETE(\"/photos/:photoId/like\", func(c *gin.Context) {\n",
"\t\tsearch := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())\n",
"\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// photoId: int Photo ID as returned by the API\n"
],
"file_path": "internal/api/photos.go",
"type": "replace",
"edit_start_line_idx": 73
} | package photoprism
import (
"image"
"os"
"sort"
"github.com/RobCherry/vibrant"
"github.com/lucasb-eyer/go-colorful"
"golang.org/x/image/colornames"
)
func getColorNames(actualColor colorful.Color) (result []string) {
var maxDistance = 0.22
for colorName, colorRGBA := range colornames.Map {
colorColorful, _ := colorful.MakeColor(colorRGBA)
currentDistance := colorColorful.DistanceRgb(actualColor)
if maxDistance >= currentDistance {
result = append(result, colorName)
}
}
return result
}
// GetColors returns color information for a given media file.
func (m *MediaFile) GetColors() (colors []string, vibrantHex string, mutedHex string) {
file, _ := os.Open(m.filename)
defer file.Close()
decodedImage, _, _ := image.Decode(file)
palette := vibrant.NewPaletteBuilder(decodedImage).Generate()
if vibrantSwatch := palette.VibrantSwatch(); vibrantSwatch != nil {
color, _ := colorful.MakeColor(vibrantSwatch.Color())
colors = append(colors, getColorNames(color)...)
vibrantHex = color.Hex()
}
if mutedSwatch := palette.MutedSwatch(); mutedSwatch != nil {
color, _ := colorful.MakeColor(mutedSwatch.Color())
colors = append(colors, getColorNames(color)...)
mutedHex = color.Hex()
}
sort.Strings(colors)
return colors, vibrantHex, mutedHex
}
| internal/photoprism/colors.go | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.0014531450578942895,
0.0003862683952320367,
0.00016489335393998772,
0.000174839558894746,
0.00047714009997434914
] |
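The getColorNames helper in the record above maps a color to every SVG 1.1 color keyword within an RGB distance of 0.22. A small self-contained sketch of the same lookup for one sample color, using the same packages and threshold as the file; the sample color itself is an arbitrary choice for illustration:

package main

import (
	"fmt"

	"github.com/lucasb-eyer/go-colorful"
	"golang.org/x/image/colornames"
)

func main() {
	// A saturated red, expressed in the same RGB space used by DistanceRgb.
	actual := colorful.Color{R: 0.9, G: 0.1, B: 0.1}

	const maxDistance = 0.22 // same threshold as getColorNames
	for name, rgba := range colornames.Map {
		c, _ := colorful.MakeColor(rgba)
		if c.DistanceRgb(actual) <= maxDistance {
			fmt.Println(name) // prints names such as "red" or "firebrick"
		}
	}
}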
{
"id": 6,
"code_window": [
"<path d=\"M21 19V5c0-1.1-.9-2-2-2H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2zM8.5 13.5l2.5 3.01L14.5 12l4.5 6H5l3.5-4.5z\"/>\n",
"</svg>`)\n",
"\n",
"// `GET /api/v1/thumbnails/:type/:size/:hash`\n",
"//\n",
"// Parameters:\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"// GET /api/v1/thumbnails/:type/:size/:hash\n"
],
"file_path": "internal/api/thumbnails.go",
"type": "replace",
"edit_start_line_idx": 17
} | package api
import (
"log"
"net/http"
"strconv"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"github.com/photoprism/photoprism/internal/forms"
"github.com/photoprism/photoprism/internal/photoprism"
)
// `GET /api/v1/photos`
//
// Query:
// - `q`: string Query string `form:""`
// - `tags`: string Tags string `form:"tags"`
// - `cat`: string Category
// - `country`: string Country code
// - `camera`: int Camera ID
// - `order`: string Sort order
// - `count`: int Max result count (required)
// - `offset`: int Result offset
// - `before`: date Find photos taken before (format: "2006-01-02")
// - `after`: date Find photos taken after (format: "2006-01-02")
// - `favorites`: bool Find favorites only
func GetPhotos(router *gin.RouterGroup, conf *photoprism.Config) {
router.GET("/photos", func(c *gin.Context) {
var form forms.PhotoSearchForm
search := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())
c.MustBindWith(&form, binding.Form)
result, err := search.Photos(form)
if err != nil {
c.AbortWithStatusJSON(400, gin.H{"error": err.Error()})
}
c.Header("x-result-count", strconv.Itoa(form.Count))
c.Header("x-result-offset", strconv.Itoa(form.Offset))
c.JSON(http.StatusOK, result)
})
}
// `POST /api/v1/photos/:photoId/like`
//
// Parameters:
// - `photoId`: Photo ID as returned by the API
func LikePhoto(router *gin.RouterGroup, conf *photoprism.Config) {
router.POST("/photos/:photoId/like", func(c *gin.Context) {
search := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())
photoId, err := strconv.ParseUint(c.Param("photoId"), 10, 64)
if err == nil {
photo := search.FindPhotoByID(photoId)
photo.PhotoFavorite = true
conf.GetDb().Save(&photo)
c.JSON(http.StatusAccepted, http.Response{})
} else {
log.Printf("could not find image for id: %s", err.Error())
c.Data(http.StatusNotFound, "image", []byte(""))
}
})
}
// `DELETE /api/v1/photos/:photoId/like`
//
// Parameters:
// - `photoId`: Photo ID as returned by the API
func DislikePhoto(router *gin.RouterGroup, conf *photoprism.Config) {
router.DELETE("/photos/:photoId/like", func(c *gin.Context) {
search := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())
photoId, err := strconv.ParseUint(c.Param("photoId"), 10, 64)
if err == nil {
photo := search.FindPhotoByID(photoId)
photo.PhotoFavorite = false
conf.GetDb().Save(&photo)
c.JSON(http.StatusAccepted, http.Response{})
} else {
log.Printf("could not find image for id: %s", err.Error())
c.Data(http.StatusNotFound, "image", []byte(""))
}
})
}
| internal/api/photos.go | 1 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.00018151324184145778,
0.00017152352666016668,
0.00015974199050106108,
0.000171209845575504,
0.000005706357114831917
] |
{
"id": 6,
"code_window": [
"<path d=\"M21 19V5c0-1.1-.9-2-2-2H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2zM8.5 13.5l2.5 3.01L14.5 12l4.5 6H5l3.5-4.5z\"/>\n",
"</svg>`)\n",
"\n",
"// `GET /api/v1/thumbnails/:type/:size/:hash`\n",
"//\n",
"// Parameters:\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"// GET /api/v1/thumbnails/:type/:size/:hash\n"
],
"file_path": "internal/api/thumbnails.go",
"type": "replace",
"edit_start_line_idx": 17
} | <template>
<v-container fluid>
<h1>TODO</h1>
<p class="md-subheading">
This page is not implemented yet
</p>
</v-container>
</template>
<script>
export default {
name: 'todo',
data() {
return {
};
},
methods: {
}
};
</script>
<style scoped>
h1, h2 {
font-weight: normal;
}
ul {
list-style-type: none;
padding: 0;
}
li {
display: inline-block;
margin: 0 10px;
}
</style>
| frontend/src/app/pages/todo.vue | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.00017471666797064245,
0.00017122812278103083,
0.00016669345495756716,
0.00017175116227008402,
0.000002965483190564555
] |
{
"id": 6,
"code_window": [
"<path d=\"M21 19V5c0-1.1-.9-2-2-2H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2zM8.5 13.5l2.5 3.01L14.5 12l4.5 6H5l3.5-4.5z\"/>\n",
"</svg>`)\n",
"\n",
"// `GET /api/v1/thumbnails/:type/:size/:hash`\n",
"//\n",
"// Parameters:\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"// GET /api/v1/thumbnails/:type/:size/:hash\n"
],
"file_path": "internal/api/thumbnails.go",
"type": "replace",
"edit_start_line_idx": 17
} | module.exports = {
env: {
browser: true,
commonjs: true,
es6: true,
node: true,
mocha: true,
},
extends: 'eslint:recommended',
parserOptions: {
sourceType: 'module',
},
rules: {
'comma-dangle': ['error', 'always-multiline'],
indent: ['error', 4],
'linebreak-style': ['error', 'unix'],
quotes: ['error', 'single'],
semi: ['error', 'always'],
'no-unused-vars': ['warn'],
'no-console': 0,
},
}; | frontend/.eslintrc.js | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.0001745673071127385,
0.0001710012584226206,
0.00016786405467428267,
0.00017057244258467108,
0.0000027533394586498616
] |
{
"id": 6,
"code_window": [
"<path d=\"M21 19V5c0-1.1-.9-2-2-2H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2zM8.5 13.5l2.5 3.01L14.5 12l4.5 6H5l3.5-4.5z\"/>\n",
"</svg>`)\n",
"\n",
"// `GET /api/v1/thumbnails/:type/:size/:hash`\n",
"//\n",
"// Parameters:\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"// GET /api/v1/thumbnails/:type/:size/:hash\n"
],
"file_path": "internal/api/thumbnails.go",
"type": "replace",
"edit_start_line_idx": 17
} | package photoprism
import (
"strings"
"time"
"github.com/jinzhu/gorm"
"github.com/photoprism/photoprism/internal/forms"
"github.com/photoprism/photoprism/internal/models"
)
// Search provides search across photos and files, given an originals path and a db instance.
type Search struct {
originalsPath string
db *gorm.DB
}
// SearchCount is the total number of search hits.
type SearchCount struct {
Total int
}
// PhotoSearchResult is a found media file with its photo, camera, lens, and location metadata.
type PhotoSearchResult struct {
// Photo
ID uint
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt time.Time
TakenAt time.Time
PhotoTitle string
PhotoDescription string
PhotoArtist string
PhotoKeywords string
PhotoColors string
PhotoVibrantColor string
PhotoMutedColor string
PhotoCanonicalName string
PhotoLat float64
PhotoLong float64
PhotoFavorite bool
// Camera
CameraID uint
CameraModel string
CameraMake string
// Lens
LensID uint
LensModel string
LensMake string
// Country
CountryID string
CountryName string
// Location
LocationID uint
LocDisplayName string
LocName string
LocCity string
LocPostcode string
LocCounty string
LocState string
LocCountry string
LocCountryCode string
LocCategory string
LocType string
// File
FileID uint
FilePrimary bool
FileMissing bool
FileName string
FileHash string
FilePerceptualHash string
FileType string
FileMime string
FileWidth int
FileHeight int
FileOrientation int
FileAspectRatio float64
// Tags
Tags string
}
// NewSearch returns a new Search type with a given path and db instance.
func NewSearch(originalsPath string, db *gorm.DB) *Search {
instance := &Search{
originalsPath: originalsPath,
db: db,
}
return instance
}
// Photos searches for photos based on a Form and returns a PhotoSearchResult slice.
func (s *Search) Photos(form forms.PhotoSearchForm) ([]PhotoSearchResult, error) {
q := s.db.NewScope(nil).DB()
q = q.Table("photos").
Select(`SQL_CALC_FOUND_ROWS photos.*,
files.id AS file_id, files.file_primary, files.file_missing, files.file_name, files.file_hash, files.file_perceptual_hash, files.file_type, files.file_mime, files.file_width, files.file_height, files.file_aspect_ratio, files.file_orientation,
cameras.camera_make, cameras.camera_model,
lenses.lens_make, lenses.lens_model,
countries.country_name,
locations.loc_display_name, locations.loc_name, locations.loc_city, locations.loc_postcode, locations.loc_county, locations.loc_state, locations.loc_country, locations.loc_country_code, locations.loc_category, locations.loc_type,
GROUP_CONCAT(tags.tag_label) AS tags`).
Joins("JOIN files ON files.photo_id = photos.id AND files.file_primary AND files.deleted_at IS NULL").
Joins("JOIN cameras ON cameras.id = photos.camera_id").
Joins("JOIN lenses ON lenses.id = photos.lens_id").
Joins("LEFT JOIN countries ON countries.id = photos.country_id").
Joins("LEFT JOIN locations ON locations.id = photos.location_id").
Joins("LEFT JOIN photo_tags ON photo_tags.photo_id = photos.id").
Joins("LEFT JOIN tags ON photo_tags.tag_id = tags.id").
Where("photos.deleted_at IS NULL AND files.file_missing = 0").
Group("photos.id, files.id")
if form.Query != "" {
q = q.Where("tags.tag_label LIKE ? OR MATCH (photo_title, photo_description, photo_artist, photo_colors) AGAINST (?)", "%"+strings.ToLower(form.Query)+"%", form.Query)
}
if form.CameraID > 0 {
q = q.Where("photos.camera_id = ?", form.CameraID)
}
if form.Country != "" {
q = q.Where("locations.loc_country_code = ?", form.Country)
}
switch form.Cat {
case "amenity":
q = q.Where("locations.loc_category = 'amenity'")
case "bank":
q = q.Where("locations.loc_type = 'bank'")
case "building":
q = q.Where("locations.loc_category = 'building'")
case "school":
q = q.Where("locations.loc_type = 'school'")
case "supermarket":
q = q.Where("locations.loc_type = 'supermarket'")
case "shop":
q = q.Where("locations.loc_category = 'shop'")
case "hotel":
q = q.Where("locations.loc_type = 'hotel'")
case "bar":
q = q.Where("locations.loc_type = 'bar'")
case "parking":
q = q.Where("locations.loc_type = 'parking'")
case "airport":
q = q.Where("locations.loc_category = 'aeroway'")
case "historic":
q = q.Where("locations.loc_category = 'historic'")
case "tourism":
q = q.Where("locations.loc_category = 'tourism'")
default:
}
switch form.Order {
case "newest":
q = q.Order("taken_at DESC")
case "oldest":
q = q.Order("taken_at")
case "imported":
q = q.Order("created_at DESC")
default:
q = q.Order("taken_at DESC")
}
if form.Count > 0 && form.Count <= 1000 {
q = q.Limit(form.Count).Offset(form.Offset)
} else {
q = q.Limit(100).Offset(0)
}
var results []PhotoSearchResult
if result := q.Scan(&results); result.Error != nil {
return results, result.Error
}
return results, nil
}
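// A minimal usage sketch (hypothetical originals path and filter values;
// the form fields match the filters applied above):
//
//	search := NewSearch("/photos/originals", db)
//	form := forms.PhotoSearchForm{Query: "beach", Order: "newest", Count: 50}
//	results, err := search.Photos(form)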
// FindFiles returns up to `limit` files, starting at the position
// given by `offset`.
func (s *Search) FindFiles(limit int, offset int) (files []models.File) {
s.db.Where(&models.File{}).Limit(limit).Offset(offset).Find(&files)
return files
}
// FindFileByID returns a mediafile given a certain ID.
func (s *Search) FindFileByID(id string) (file models.File) {
s.db.Where("id = ?", id).First(&file)
return file
}
// FindFileByHash finds a file with a given hash string.
func (s *Search) FindFileByHash(fileHash string) (file models.File) {
s.db.Where("file_hash = ?", fileHash).First(&file)
return file
}
// FindPhotoByID returns a Photo based on the ID.
func (s *Search) FindPhotoByID(photoID uint64) (photo models.Photo) {
s.db.Where("id = ?", photoID).First(&photo)
return photo
}
| internal/photoprism/search.go | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.000245540781179443,
0.00017335143638774753,
0.00016318967391271144,
0.00017134653171524405,
0.000016208901797654107
] |
{
"id": 7,
"code_window": [
"//\n",
"// Parameters:\n",
"// - `type`: string Format, either \"fit\" or \"square\"\n",
"// - `size`: int Size in pixels\n",
"// - `hash`: string The file hash as returned by the search API\n",
"func GetThumbnail(router *gin.RouterGroup, conf *photoprism.Config) {\n",
"\trouter.GET(\"/thumbnails/:type/:size/:hash\", func(c *gin.Context) {\n",
"\t\tfileHash := c.Param(\"hash\")\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// type: string Format, either \"fit\" or \"square\"\n",
"// size: int Size in pixels\n",
"// hash: string The file hash as returned by the search API\n"
],
"file_path": "internal/api/thumbnails.go",
"type": "replace",
"edit_start_line_idx": 20
} | package api
import (
"fmt"
"log"
"strconv"
"github.com/gin-gonic/gin"
"github.com/photoprism/photoprism/internal/photoprism"
)
var photoIconSvg = []byte(`
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24">
<path d="M0 0h24v24H0z" fill="none"/>
<path d="M21 19V5c0-1.1-.9-2-2-2H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2zM8.5 13.5l2.5 3.01L14.5 12l4.5 6H5l3.5-4.5z"/>
</svg>`)
// `GET /api/v1/thumbnails/:type/:size/:hash`
//
// Parameters:
// - `type`: string Format, either "fit" or "square"
// - `size`: int Size in pixels
// - `hash`: string The file hash as returned by the search API
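//
// Example (hypothetical hash): GET /api/v1/thumbnails/square/200/a1b2c3
// returns the thumbnail image, or the SVG placeholder with status 400/404
// when the size is invalid or the file cannot be found.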
func GetThumbnail(router *gin.RouterGroup, conf *photoprism.Config) {
router.GET("/thumbnails/:type/:size/:hash", func(c *gin.Context) {
fileHash := c.Param("hash")
thumbnailType := c.Param("type")
size, err := strconv.Atoi(c.Param("size"))
		if err != nil {
			log.Printf("invalid size: %s", c.Param("size"))
			c.Data(400, "image/svg+xml", photoIconSvg)
			return // without this, the handler would continue with size == 0
		}
search := photoprism.NewSearch(conf.OriginalsPath, conf.GetDb())
file := search.FindFileByHash(fileHash)
fileName := fmt.Sprintf("%s/%s", conf.OriginalsPath, file.FileName)
if mediaFile, err := photoprism.NewMediaFile(fileName); err == nil {
switch thumbnailType {
case "fit":
if thumbnail, err := mediaFile.GetThumbnail(conf.ThumbnailsPath, size); err == nil {
c.File(thumbnail.GetFilename())
} else {
log.Printf("could not create thumbnail: %s", err.Error())
c.Data(400, "image/svg+xml", photoIconSvg)
}
case "square":
if thumbnail, err := mediaFile.GetSquareThumbnail(conf.ThumbnailsPath, size); err == nil {
c.File(thumbnail.GetFilename())
} else {
log.Printf("could not create square thumbnail: %s", err.Error())
c.Data(400, "image/svg+xml", photoIconSvg)
}
default:
log.Printf("unknown thumbnail type: %s", thumbnailType)
c.Data(400, "image/svg+xml", photoIconSvg)
}
} else {
log.Printf("could not find image for thumbnail: %s", err.Error())
c.Data(404, "image/svg+xml", photoIconSvg)
// Set missing flag so that the file doesn't show up in search results anymore
file.FileMissing = true
conf.GetDb().Save(&file)
}
})
}
| internal/api/thumbnails.go | 1 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.999065101146698,
0.5017451047897339,
0.00020451472664717585,
0.5027450323104858,
0.4947584867477417
] |
{
"id": 7,
"code_window": [
"//\n",
"// Parameters:\n",
"// - `type`: string Format, either \"fit\" or \"square\"\n",
"// - `size`: int Size in pixels\n",
"// - `hash`: string The file hash as returned by the search API\n",
"func GetThumbnail(router *gin.RouterGroup, conf *photoprism.Config) {\n",
"\trouter.GET(\"/thumbnails/:type/:size/:hash\", func(c *gin.Context) {\n",
"\t\tfileHash := c.Param(\"hash\")\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// type: string Format, either \"fit\" or \"square\"\n",
"// size: int Size in pixels\n",
"// hash: string The file hash as returned by the search API\n"
],
"file_path": "internal/api/thumbnails.go",
"type": "replace",
"edit_start_line_idx": 20
} | <!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title>{{ .title }}</title>
<meta name="description" content="">
<meta name="author" content="">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="apple-touch-icon" href="/favicon.png">
<link rel="shortcut icon" href="/favicon.ico">
<link rel="icon" type="image/png" href="/favicon.png" />
<link rel="stylesheet" href="/assets/build/app.css?{{ .cssHash }}">
<script>
window.appConfig = {
appName: "{{ .title }}",
appVersion: "1.0.0",
debug: {{ .debug }},
cameras: {{ .cameras }},
countries: {{ .countries }}
};
</script>
</head>
<body>
<!--[if lt IE 8]>
<p class="browserupgrade">You are using an <strong>outdated</strong> browser. Please <a href="http://browsehappy.com/">upgrade your browser</a> to improve your experience.</p>
<![endif]-->
<div id="app" class="container">
<div class="loading">
<div class="v-progress-linear" style="height: 7px;"><div class="v-progress-linear__background amber" style="height: 7px; opacity: 0.3; width: 100%;"></div><div class="v-progress-linear__bar"><div class="v-progress-linear__bar__indeterminate v-progress-linear__bar__indeterminate--active"><div class="v-progress-linear__bar__indeterminate long amber"></div><div class="v-progress-linear__bar__indeterminate short amber"></div></div><!----></div></div>
</div>
</div>
<footer>
</footer>
<script src="/assets/build/app.js?{{ .jsHash }}"></script>
</body>
</html> | assets/templates/index.tmpl | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.00020451472664717585,
0.00017706802464090288,
0.00016862461052369326,
0.0001707345072645694,
0.000013751445294474252
] |
{
"id": 7,
"code_window": [
"//\n",
"// Parameters:\n",
"// - `type`: string Format, either \"fit\" or \"square\"\n",
"// - `size`: int Size in pixels\n",
"// - `hash`: string The file hash as returned by the search API\n",
"func GetThumbnail(router *gin.RouterGroup, conf *photoprism.Config) {\n",
"\trouter.GET(\"/thumbnails/:type/:size/:hash\", func(c *gin.Context) {\n",
"\t\tfileHash := c.Param(\"hash\")\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// type: string Format, either \"fit\" or \"square\"\n",
"// size: int Size in pixels\n",
"// hash: string The file hash as returned by the search API\n"
],
"file_path": "internal/api/thumbnails.go",
"type": "replace",
"edit_start_line_idx": 20
} | import axios from 'axios';
import Event from 'pubsub-js';
import 'babel-polyfill';
const Api = axios.create({
baseURL: '/api/v1',
headers: {common: {
'X-Session-Token': window.localStorage.getItem('session_token'),
}},
});
Api.interceptors.request.use(function (config) {
    // Notify subscribers that a request has started (e.g. to show a progress bar)
Event.publish('ajax.start', config);
return config;
}, function (error) {
    // Pass request setup errors through to the caller
return Promise.reject(error);
});
Api.interceptors.response.use(function (response) {
Event.publish('ajax.end', response);
return response;
}, function (error) {
if(console && console.log) {
console.log(error);
}
let errorMessage = 'An error occurred - are you offline?';
let code = error.code;
if(error.response && error.response.data) {
let data = error.response.data;
code = data.code;
errorMessage = data.message ? data.message : data.error;
}
Event.publish('ajax.end');
Event.publish('alert.error', errorMessage);
if(code === 401) {
window.location = '/';
}
return Promise.reject(error);
});
export default Api;
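// Usage sketch (hypothetical endpoint and params):
//
//   import Api from 'common/api';
//   Api.get('/photos', {params: {count: 50}})
//       .then(response => console.log(response.data));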
| frontend/src/common/api.js | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.00017129071056842804,
0.0001691685029072687,
0.00016694239457137883,
0.00016946374671533704,
0.000001575307578605134
] |
{
"id": 7,
"code_window": [
"//\n",
"// Parameters:\n",
"// - `type`: string Format, either \"fit\" or \"square\"\n",
"// - `size`: int Size in pixels\n",
"// - `hash`: string The file hash as returned by the search API\n",
"func GetThumbnail(router *gin.RouterGroup, conf *photoprism.Config) {\n",
"\trouter.GET(\"/thumbnails/:type/:size/:hash\", func(c *gin.Context) {\n",
"\t\tfileHash := c.Param(\"hash\")\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// type: string Format, either \"fit\" or \"square\"\n",
"// size: int Size in pixels\n",
"// hash: string The file hash as returned by the search API\n"
],
"file_path": "internal/api/thumbnails.go",
"type": "replace",
"edit_start_line_idx": 20
} | package photoprism
import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"github.com/photoprism/photoprism/internal/models"
"github.com/pkg/errors"
)
type openstreetmapAddress struct {
HouseNumber string `json:"house_number"`
Road string `json:"road"`
Suburb string `json:"suburb"`
Town string `json:"town"`
City string `json:"city"`
Postcode string `json:"postcode"`
County string `json:"county"`
State string `json:"state"`
Country string `json:"country"`
CountryCode string `json:"country_code"`
}
type openstreetmapLocation struct {
PlaceID string `json:"place_id"`
Lat string `json:"lat"`
Lon string `json:"lon"`
Name string `json:"name"`
Category string `json:"category"`
Type string `json:"type"`
DisplayName string `json:"display_name"`
Address *openstreetmapAddress `json:"address"`
}
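// For reference, a Nominatim reverse-geocoding response decodes into the
// structs above roughly like this (abridged, hypothetical values):
//
//	{
//	  "place_id": "87654321",
//	  "lat": "52.5200", "lon": "13.4050",
//	  "category": "building", "type": "yes",
//	  "display_name": "Alexanderplatz, Berlin, Germany",
//	  "address": {"city": "Berlin", "country_code": "de"}
//	}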
// GetLocation reverse-geocodes the file's Exif coordinates via the Nominatim API.
// See https://wiki.openstreetmap.org/wiki/Nominatim#Reverse_Geocoding
func (m *MediaFile) GetLocation() (*models.Location, error) {
if m.location != nil {
return m.location, nil
}
location := &models.Location{}
openstreetmapLocation := &openstreetmapLocation{
Address: &openstreetmapAddress{},
}
if exifData, err := m.GetExifData(); err == nil {
url := fmt.Sprintf("https://nominatim.openstreetmap.org/reverse?lat=%f&lon=%f&format=jsonv2", exifData.Lat, exifData.Long)
		if res, err := http.Get(url); err == nil {
			err = json.NewDecoder(res.Body).Decode(openstreetmapLocation)
			res.Body.Close()
			if err != nil {
				return nil, err
			}
		} else {
			return nil, err
		}
} else {
return nil, err
}
if id, err := strconv.Atoi(openstreetmapLocation.PlaceID); err == nil && id > 0 {
location.ID = uint(id)
} else {
return nil, errors.New("no location found")
}
if openstreetmapLocation.Address.City != "" {
location.LocCity = openstreetmapLocation.Address.City
} else {
location.LocCity = openstreetmapLocation.Address.Town
}
if lat, err := strconv.ParseFloat(openstreetmapLocation.Lat, 64); err == nil {
location.LocLat = lat
}
if lon, err := strconv.ParseFloat(openstreetmapLocation.Lon, 64); err == nil {
location.LocLong = lon
}
location.LocName = strings.Title(openstreetmapLocation.Name)
location.LocHouseNr = openstreetmapLocation.Address.HouseNumber
location.LocStreet = openstreetmapLocation.Address.Road
location.LocSuburb = openstreetmapLocation.Address.Suburb
location.LocPostcode = openstreetmapLocation.Address.Postcode
location.LocCounty = openstreetmapLocation.Address.County
location.LocState = openstreetmapLocation.Address.State
location.LocCountry = openstreetmapLocation.Address.Country
location.LocCountryCode = openstreetmapLocation.Address.CountryCode
location.LocDisplayName = openstreetmapLocation.DisplayName
location.LocCategory = openstreetmapLocation.Category
if openstreetmapLocation.Type != "yes" && openstreetmapLocation.Type != "unclassified" {
location.LocType = openstreetmapLocation.Type
}
m.location = location
return m.location, nil
}
| internal/photoprism/openstreetmap.go | 0 | https://github.com/photoprism/photoprism/commit/4a33d430e9d765b502cd1d08d1c88f7b0d88e9fc | [
0.00018806137086357921,
0.0001723563182167709,
0.00016627262812107801,
0.00017024211410898715,
0.0000067803957790602
] |
{
"id": 0,
"code_window": [
"\ts.txn.SetOption(tikvstore.EnableAsyncCommit, s.GetSessionVars().EnableAsyncCommit)\n",
"\ts.txn.SetOption(tikvstore.Enable1PC, s.GetSessionVars().Enable1PC)\n",
"\t// priority of the sysvar is lower than `start transaction with causal consistency only`\n",
"\tif s.txn.GetOption(tikvstore.GuaranteeLinearizability) == nil {\n",
"\t\t// We needn't ask the TiKV client to guarantee linearizability for auto-commit transactions\n",
"\t\t// because the property is naturally holds:\n",
"\t\t// We guarantee the commitTS of any transaction must not exceed the next timestamp from the TSO.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif val := s.txn.GetOption(tikvstore.GuaranteeLinearizability); val == nil || val.(bool) {\n"
],
"file_path": "session/session.go",
"type": "replace",
"edit_start_line_idx": 519
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv_test
import (
"bytes"
"context"
"fmt"
"math"
"sync/atomic"
"testing"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/store/mockstore/unistore"
"github.com/pingcap/tidb/store/tikv"
tikverr "github.com/pingcap/tidb/store/tikv/error"
"github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/store/tikv/util"
)
func TestT(t *testing.T) {
CustomVerboseFlag = true
TestingT(t)
}
// testAsyncCommitCommon holds the parts shared by testAsyncCommitSuite
// and testAsyncCommitFailSuite.
type testAsyncCommitCommon struct {
cluster cluster.Cluster
store *tikv.KVStore
}
func (s *testAsyncCommitCommon) setUpTest(c *C) {
if *WithTiKV {
s.store = NewTestStore(c)
return
}
client, pdClient, cluster, err := unistore.New("")
c.Assert(err, IsNil)
unistore.BootstrapWithSingleStore(cluster)
s.cluster = cluster
store, err := tikv.NewTestTiKVStore(client, pdClient, nil, nil, 0)
c.Assert(err, IsNil)
s.store = store
}
func (s *testAsyncCommitCommon) putAlphabets(c *C, enableAsyncCommit bool) {
for ch := byte('a'); ch <= byte('z'); ch++ {
s.putKV(c, []byte{ch}, []byte{ch}, enableAsyncCommit)
}
}
func (s *testAsyncCommitCommon) putKV(c *C, key, value []byte, enableAsyncCommit bool) (uint64, uint64) {
txn := s.beginAsyncCommit(c)
err := txn.Set(key, value)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
return txn.StartTS(), txn.GetCommitTS()
}
func (s *testAsyncCommitCommon) mustGetFromTxn(c *C, txn tikv.TxnProbe, key, expectedValue []byte) {
v, err := txn.Get(context.Background(), key)
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, expectedValue)
}
func (s *testAsyncCommitCommon) mustGetLock(c *C, key []byte) *tikv.Lock {
ver, err := s.store.CurrentTimestamp(oracle.GlobalTxnScope)
c.Assert(err, IsNil)
req := tikvrpc.NewRequest(tikvrpc.CmdGet, &kvrpcpb.GetRequest{
Key: key,
Version: ver,
})
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, key)
c.Assert(err, IsNil)
resp, err := s.store.SendReq(bo, req, loc.Region, time.Second*10)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
keyErr := resp.Resp.(*kvrpcpb.GetResponse).GetError()
c.Assert(keyErr, NotNil)
var lockutil tikv.LockProbe
lock, err := lockutil.ExtractLockFromKeyErr(keyErr)
c.Assert(err, IsNil)
return lock
}
func (s *testAsyncCommitCommon) mustPointGet(c *C, key, expectedValue []byte) {
snap := s.store.GetSnapshot(math.MaxUint64)
value, err := snap.Get(context.Background(), key)
c.Assert(err, IsNil)
c.Assert(value, BytesEquals, expectedValue)
}
func (s *testAsyncCommitCommon) mustGetFromSnapshot(c *C, version uint64, key, expectedValue []byte) {
snap := s.store.GetSnapshot(version)
value, err := snap.Get(context.Background(), key)
c.Assert(err, IsNil)
c.Assert(value, BytesEquals, expectedValue)
}
func (s *testAsyncCommitCommon) mustGetNoneFromSnapshot(c *C, version uint64, key []byte) {
snap := s.store.GetSnapshot(version)
_, err := snap.Get(context.Background(), key)
c.Assert(errors.Cause(err), Equals, tikverr.ErrNotExist)
}
func (s *testAsyncCommitCommon) beginAsyncCommitWithLinearizability(c *C) tikv.TxnProbe {
txn := s.beginAsyncCommit(c)
txn.SetOption(kv.GuaranteeLinearizability, true)
return txn
}
func (s *testAsyncCommitCommon) beginAsyncCommit(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.SetEnableAsyncCommit(true)
return tikv.TxnProbe{KVTxn: txn}
}
func (s *testAsyncCommitCommon) begin(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
return tikv.TxnProbe{KVTxn: txn}
}
type testAsyncCommitSuite struct {
OneByOneSuite
testAsyncCommitCommon
bo *tikv.Backoffer
}
var _ = SerialSuites(&testAsyncCommitSuite{})
func (s *testAsyncCommitSuite) SetUpTest(c *C) {
s.testAsyncCommitCommon.setUpTest(c)
s.bo = tikv.NewBackofferWithVars(context.Background(), 5000, nil)
}
func (s *testAsyncCommitSuite) lockKeysWithAsyncCommit(c *C, keys, values [][]byte, primaryKey, primaryValue []byte, commitPrimary bool) (uint64, uint64) {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.SetEnableAsyncCommit(true)
for i, k := range keys {
if len(values[i]) > 0 {
err = txn.Set(k, values[i])
} else {
err = txn.Delete(k)
}
c.Assert(err, IsNil)
}
if len(primaryValue) > 0 {
err = txn.Set(primaryKey, primaryValue)
} else {
err = txn.Delete(primaryKey)
}
c.Assert(err, IsNil)
txnProbe := tikv.TxnProbe{KVTxn: txn}
tpc, err := txnProbe.NewCommitter(0)
c.Assert(err, IsNil)
tpc.SetPrimaryKey(primaryKey)
ctx := context.Background()
err = tpc.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
if commitPrimary {
commitTS, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
tpc.SetCommitTS(commitTS)
err = tpc.CommitMutations(ctx)
c.Assert(err, IsNil)
}
return txn.StartTS(), tpc.GetCommitTS()
}
func (s *testAsyncCommitSuite) TestCheckSecondaries(c *C) {
// This test doesn't support tikv mode.
if *WithTiKV {
return
}
s.putAlphabets(c, true)
loc, err := s.store.GetRegionCache().LocateKey(s.bo, []byte("a"))
c.Assert(err, IsNil)
newRegionID, peerID := s.cluster.AllocID(), s.cluster.AllocID()
s.cluster.Split(loc.Region.GetID(), newRegionID, []byte("e"), []uint64{peerID}, peerID)
s.store.GetRegionCache().InvalidateCachedRegion(loc.Region)
// No locks to check, only primary key is locked, should be successful.
s.lockKeysWithAsyncCommit(c, [][]byte{}, [][]byte{}, []byte("z"), []byte("z"), false)
lock := s.mustGetLock(c, []byte("z"))
lock.UseAsyncCommit = true
ts, err := s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
var lockutil tikv.LockProbe
status := lockutil.NewLockStatus(nil, true, ts)
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
err = resolver.ResolveLockAsync(s.bo, lock, status)
c.Assert(err, IsNil)
currentTS, err := s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
status, err = resolver.GetTxnStatus(s.bo, lock.TxnID, []byte("z"), currentTS, currentTS, true, false, nil)
c.Assert(err, IsNil)
c.Assert(status.IsCommitted(), IsTrue)
c.Assert(status.CommitTS(), Equals, ts)
// One key is committed (i), one key is locked (a). Should get committed.
ts, err = s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
commitTs := ts + 10
gotCheckA := int64(0)
gotCheckB := int64(0)
gotResolve := int64(0)
gotOther := int64(0)
mock := mockResolveClient{
inner: s.store.GetTiKVClient(),
onCheckSecondaries: func(req *kvrpcpb.CheckSecondaryLocksRequest) (*tikvrpc.Response, error) {
if req.StartVersion != ts {
return nil, errors.Errorf("Bad start version: %d, expected: %d", req.StartVersion, ts)
}
var resp kvrpcpb.CheckSecondaryLocksResponse
for _, k := range req.Keys {
if bytes.Equal(k, []byte("a")) {
atomic.StoreInt64(&gotCheckA, 1)
resp = kvrpcpb.CheckSecondaryLocksResponse{
Locks: []*kvrpcpb.LockInfo{{Key: []byte("a"), PrimaryLock: []byte("z"), LockVersion: ts, UseAsyncCommit: true}},
CommitTs: commitTs,
}
} else if bytes.Equal(k, []byte("i")) {
atomic.StoreInt64(&gotCheckB, 1)
resp = kvrpcpb.CheckSecondaryLocksResponse{
Locks: []*kvrpcpb.LockInfo{},
CommitTs: commitTs,
}
} else {
fmt.Printf("Got other key: %s\n", k)
atomic.StoreInt64(&gotOther, 1)
}
}
return &tikvrpc.Response{Resp: &resp}, nil
},
onResolveLock: func(req *kvrpcpb.ResolveLockRequest) (*tikvrpc.Response, error) {
if req.StartVersion != ts {
return nil, errors.Errorf("Bad start version: %d, expected: %d", req.StartVersion, ts)
}
if req.CommitVersion != commitTs {
return nil, errors.Errorf("Bad commit version: %d, expected: %d", req.CommitVersion, commitTs)
}
for _, k := range req.Keys {
if bytes.Equal(k, []byte("a")) || bytes.Equal(k, []byte("z")) {
atomic.StoreInt64(&gotResolve, 1)
} else {
atomic.StoreInt64(&gotOther, 1)
}
}
resp := kvrpcpb.ResolveLockResponse{}
return &tikvrpc.Response{Resp: &resp}, nil
},
}
s.store.SetTiKVClient(&mock)
status = lockutil.NewLockStatus([][]byte{[]byte("a"), []byte("i")}, true, 0)
lock = &tikv.Lock{
Key: []byte("a"),
Primary: []byte("z"),
TxnID: ts,
LockType: kvrpcpb.Op_Put,
UseAsyncCommit: true,
MinCommitTS: ts + 5,
}
_ = s.beginAsyncCommit(c)
err = resolver.ResolveLockAsync(s.bo, lock, status)
c.Assert(err, IsNil)
c.Assert(gotCheckA, Equals, int64(1))
c.Assert(gotCheckB, Equals, int64(1))
c.Assert(gotOther, Equals, int64(0))
c.Assert(gotResolve, Equals, int64(1))
	// One key has been rolled back (i), one is locked (a). Should be rolled back.
ts, err = s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
commitTs = ts + 10
gotCheckA = int64(0)
gotCheckB = int64(0)
gotResolve = int64(0)
gotOther = int64(0)
mock.onResolveLock = func(req *kvrpcpb.ResolveLockRequest) (*tikvrpc.Response, error) {
if req.StartVersion != ts {
return nil, errors.Errorf("Bad start version: %d, expected: %d", req.StartVersion, ts)
}
		if req.CommitVersion != 0 {
return nil, errors.Errorf("Bad commit version: %d, expected: 0", req.CommitVersion)
}
for _, k := range req.Keys {
if bytes.Equal(k, []byte("a")) || bytes.Equal(k, []byte("z")) {
atomic.StoreInt64(&gotResolve, 1)
} else {
atomic.StoreInt64(&gotOther, 1)
}
}
resp := kvrpcpb.ResolveLockResponse{}
return &tikvrpc.Response{Resp: &resp}, nil
}
lock.TxnID = ts
lock.MinCommitTS = ts + 5
err = resolver.ResolveLockAsync(s.bo, lock, status)
c.Assert(err, IsNil)
c.Assert(gotCheckA, Equals, int64(1))
c.Assert(gotCheckB, Equals, int64(1))
c.Assert(gotResolve, Equals, int64(1))
c.Assert(gotOther, Equals, int64(0))
}
func (s *testAsyncCommitSuite) TestRepeatableRead(c *C) {
var sessionID uint64 = 0
test := func(isPessimistic bool) {
s.putKV(c, []byte("k1"), []byte("v1"), true)
sessionID++
ctx := context.WithValue(context.Background(), util.SessionID, sessionID)
txn1 := s.beginAsyncCommit(c)
txn1.SetPessimistic(isPessimistic)
s.mustGetFromTxn(c, txn1, []byte("k1"), []byte("v1"))
txn1.Set([]byte("k1"), []byte("v2"))
for i := 0; i < 20; i++ {
_, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
}
txn2 := s.beginAsyncCommit(c)
s.mustGetFromTxn(c, txn2, []byte("k1"), []byte("v1"))
err := txn1.Commit(ctx)
c.Assert(err, IsNil)
// Check txn1 is committed in async commit.
c.Assert(txn1.IsAsyncCommit(), IsTrue)
s.mustGetFromTxn(c, txn2, []byte("k1"), []byte("v1"))
err = txn2.Rollback()
c.Assert(err, IsNil)
txn3 := s.beginAsyncCommit(c)
s.mustGetFromTxn(c, txn3, []byte("k1"), []byte("v2"))
err = txn3.Rollback()
c.Assert(err, IsNil)
}
test(false)
test(true)
}
// This is only a basic sanity check of linearizability.
// Testing the feature thoroughly requires finer-grained control over the TiKV cluster.
func (s *testAsyncCommitSuite) TestAsyncCommitLinearizability(c *C) {
t1 := s.beginAsyncCommitWithLinearizability(c)
t2 := s.beginAsyncCommitWithLinearizability(c)
err := t1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = t2.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
ctx := context.WithValue(context.Background(), util.SessionID, uint64(1))
// t2 commits earlier than t1
err = t2.Commit(ctx)
c.Assert(err, IsNil)
err = t1.Commit(ctx)
c.Assert(err, IsNil)
commitTS1 := t1.GetCommitTS()
commitTS2 := t2.GetCommitTS()
c.Assert(commitTS2, Less, commitTS1)
}
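// The assertion above is the linearizability property itself: t2 finished
// committing before t1 committed, so its commitTS must be smaller even
// though the two transactions touch disjoint keys.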
// TestAsyncCommitWithMultiDC tests that async commit can only be enabled in global transactions
func (s *testAsyncCommitSuite) TestAsyncCommitWithMultiDC(c *C) {
// It requires setting placement rules to run with TiKV
if *WithTiKV {
return
}
localTxn := s.beginAsyncCommit(c)
err := localTxn.Set([]byte("a"), []byte("a1"))
localTxn.SetScope("bj")
c.Assert(err, IsNil)
ctx := context.WithValue(context.Background(), util.SessionID, uint64(1))
err = localTxn.Commit(ctx)
c.Assert(err, IsNil)
c.Assert(localTxn.IsAsyncCommit(), IsFalse)
globalTxn := s.beginAsyncCommit(c)
err = globalTxn.Set([]byte("b"), []byte("b1"))
globalTxn.SetScope(oracle.GlobalTxnScope)
c.Assert(err, IsNil)
err = globalTxn.Commit(ctx)
c.Assert(err, IsNil)
c.Assert(globalTxn.IsAsyncCommit(), IsTrue)
}
func (s *testAsyncCommitSuite) TestResolveTxnFallbackFromAsyncCommit(c *C) {
keys := [][]byte{[]byte("k0"), []byte("k1")}
values := [][]byte{[]byte("v00"), []byte("v10")}
initTest := func() tikv.CommitterProbe {
t0 := s.begin(c)
err := t0.Set(keys[0], values[0])
c.Assert(err, IsNil)
err = t0.Set(keys[1], values[1])
c.Assert(err, IsNil)
err = t0.Commit(context.Background())
c.Assert(err, IsNil)
t1 := s.beginAsyncCommit(c)
err = t1.Set(keys[0], []byte("v01"))
c.Assert(err, IsNil)
err = t1.Set(keys[1], []byte("v11"))
c.Assert(err, IsNil)
committer, err := t1.NewCommitter(1)
c.Assert(err, IsNil)
committer.SetLockTTL(1)
committer.SetUseAsyncCommit()
return committer
}
prewriteKey := func(committer tikv.CommitterProbe, idx int, fallback bool) {
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, keys[idx])
c.Assert(err, IsNil)
req := committer.BuildPrewriteRequest(loc.Region.GetID(), loc.Region.GetConfVer(), loc.Region.GetVer(),
committer.GetMutations().Slice(idx, idx+1), 1)
if fallback {
req.Req.(*kvrpcpb.PrewriteRequest).MaxCommitTs = 1
}
resp, err := s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
}
readKey := func(idx int) {
t2 := s.begin(c)
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
val, err := t2.Get(ctx, keys[idx])
c.Assert(err, IsNil)
c.Assert(val, DeepEquals, values[idx])
}
// Case 1: Fallback primary, read primary
committer := initTest()
prewriteKey(committer, 0, true)
prewriteKey(committer, 1, false)
readKey(0)
readKey(1)
// Case 2: Fallback primary, read secondary
committer = initTest()
prewriteKey(committer, 0, true)
prewriteKey(committer, 1, false)
readKey(1)
readKey(0)
// Case 3: Fallback secondary, read primary
committer = initTest()
prewriteKey(committer, 0, false)
prewriteKey(committer, 1, true)
readKey(0)
readKey(1)
// Case 4: Fallback secondary, read secondary
committer = initTest()
prewriteKey(committer, 0, false)
prewriteKey(committer, 1, true)
readKey(1)
readKey(0)
// Case 5: Fallback both, read primary
committer = initTest()
prewriteKey(committer, 0, true)
prewriteKey(committer, 1, true)
readKey(0)
readKey(1)
// Case 6: Fallback both, read secondary
committer = initTest()
prewriteKey(committer, 0, true)
prewriteKey(committer, 1, true)
readKey(1)
readKey(0)
}
type mockResolveClient struct {
inner tikv.Client
onResolveLock func(*kvrpcpb.ResolveLockRequest) (*tikvrpc.Response, error)
onCheckSecondaries func(*kvrpcpb.CheckSecondaryLocksRequest) (*tikvrpc.Response, error)
}
func (m *mockResolveClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
// Intercept check secondary locks and resolve lock messages if the callback is non-nil.
// If the callback returns (nil, nil), forward to the inner client.
if cr, ok := req.Req.(*kvrpcpb.CheckSecondaryLocksRequest); ok && m.onCheckSecondaries != nil {
result, err := m.onCheckSecondaries(cr)
if result != nil || err != nil {
return result, err
}
} else if rr, ok := req.Req.(*kvrpcpb.ResolveLockRequest); ok && m.onResolveLock != nil {
result, err := m.onResolveLock(rr)
if result != nil || err != nil {
return result, err
}
}
return m.inner.SendRequest(ctx, addr, req, timeout)
}
func (m *mockResolveClient) Close() error {
return m.inner.Close()
}
| store/tikv/tests/async_commit_test.go | 1 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.004583071451634169,
0.0008054801146499813,
0.00016112705634441227,
0.00020522321574389935,
0.001162746106274426
] |
{
"id": 0,
"code_window": [
"\ts.txn.SetOption(tikvstore.EnableAsyncCommit, s.GetSessionVars().EnableAsyncCommit)\n",
"\ts.txn.SetOption(tikvstore.Enable1PC, s.GetSessionVars().Enable1PC)\n",
"\t// priority of the sysvar is lower than `start transaction with causal consistency only`\n",
"\tif s.txn.GetOption(tikvstore.GuaranteeLinearizability) == nil {\n",
"\t\t// We needn't ask the TiKV client to guarantee linearizability for auto-commit transactions\n",
"\t\t// because the property is naturally holds:\n",
"\t\t// We guarantee the commitTS of any transaction must not exceed the next timestamp from the TSO.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif val := s.txn.GetOption(tikvstore.GuaranteeLinearizability); val == nil || val.(bool) {\n"
],
"file_path": "session/session.go",
"type": "replace",
"edit_start_line_idx": 519
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"flag"
"fmt"
"math/rand"
"strconv"
"strings"
"time"
"github.com/pingcap/log"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/store"
"github.com/pingcap/tidb/store/driver"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/util/logutil"
"go.uber.org/zap"
)
var (
addr = flag.String("addr", "127.0.0.1:2379", "pd address")
tableName = flag.String("table", "benchdb", "name of the table")
batchSize = flag.Int("batch", 100, "number of statements in a transaction, used for insert and update-random only")
blobSize = flag.Int("blob", 1000, "size of the blob column in the row")
logLevel = flag.String("L", "warn", "log level")
runJobs = flag.String("run", strings.Join([]string{
"create",
"truncate",
"insert:0_10000",
"update-random:0_10000:100000",
"select:0_10000:10",
"update-range:5000_5100:1000",
"select:0_10000:10",
"gc",
"select:0_10000:10",
}, "|"), "jobs to run")
)
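// Example invocation (hypothetical PD endpoint; binary built from cmd/benchdb),
// overriding the default job list:
//
//	benchdb -addr 127.0.0.1:2379 -run "create|truncate|insert:0_10000|select:0_10000:10"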
func main() {
flag.Parse()
flag.PrintDefaults()
err := logutil.InitZapLogger(logutil.NewLogConfig(*logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
terror.MustNil(err)
err = store.Register("tikv", driver.TiKVDriver{})
terror.MustNil(err)
ut := newBenchDB()
works := strings.Split(*runJobs, "|")
for _, v := range works {
work := strings.ToLower(strings.TrimSpace(v))
name, spec := ut.mustParseWork(work)
switch name {
case "create":
ut.createTable()
case "truncate":
ut.truncateTable()
case "insert":
ut.insertRows(spec)
case "update-random", "update_random":
ut.updateRandomRows(spec)
case "update-range", "update_range":
ut.updateRangeRows(spec)
case "select":
ut.selectRows(spec)
case "query":
ut.query(spec)
default:
cLog("Unknown job ", v)
return
}
}
}
type benchDB struct {
store tikv.Storage
session session.Session
}
func newBenchDB() *benchDB {
// Create TiKV store and disable GC as we will trigger GC manually.
store, err := store.New("tikv://" + *addr + "?disableGC=true")
terror.MustNil(err)
_, err = session.BootstrapSession(store)
terror.MustNil(err)
se, err := session.CreateSession(store)
terror.MustNil(err)
_, err = se.ExecuteInternal(context.Background(), "use test")
terror.MustNil(err)
return &benchDB{
store: store.(tikv.Storage),
session: se,
}
}
func (ut *benchDB) mustExec(sql string, args ...interface{}) {
	// ExecuteInternal returns at most one resultSet for the statements used here.
rs, err := ut.session.ExecuteInternal(context.Background(), sql, args...)
defer func() {
if rs != nil {
err = rs.Close()
if err != nil {
log.Fatal(err.Error())
}
}
}()
if err != nil {
log.Fatal(err.Error())
return
}
if rs != nil {
ctx := context.Background()
req := rs.NewChunk()
for {
err := rs.Next(ctx, req)
if err != nil {
log.Fatal(err.Error())
}
if req.NumRows() == 0 {
break
}
}
}
}
func (ut *benchDB) mustParseWork(work string) (name string, spec string) {
strs := strings.Split(work, ":")
if len(strs) == 1 {
return strs[0], ""
}
return strs[0], strings.Join(strs[1:], ":")
}
func (ut *benchDB) mustParseInt(s string) int {
i, err := strconv.Atoi(s)
if err != nil {
log.Fatal(err.Error())
}
return i
}
func (ut *benchDB) mustParseRange(s string) (start, end int) {
strs := strings.Split(s, "_")
if len(strs) != 2 {
log.Fatal("parse range failed", zap.String("invalid range", s))
}
startStr, endStr := strs[0], strs[1]
start = ut.mustParseInt(startStr)
end = ut.mustParseInt(endStr)
if start < 0 || end < start {
log.Fatal("parse range failed", zap.String("invalid range", s))
}
return
}
func (ut *benchDB) mustParseSpec(s string) (start, end, count int) {
strs := strings.Split(s, ":")
start, end = ut.mustParseRange(strs[0])
if len(strs) == 1 {
count = 1
return
}
count = ut.mustParseInt(strs[1])
return
}
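// For example, the spec "0_10000:10" parses to start=0, end=10000, count=10,
// while "5000_5100" parses to start=5000, end=5100, count=1.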
func (ut *benchDB) createTable() {
cLog("create table")
createSQL := `CREATE TABLE IF NOT EXISTS %n (
id bigint(20) NOT NULL,
name varchar(32) NOT NULL,
exp bigint(20) NOT NULL DEFAULT '0',
data blob,
PRIMARY KEY (id),
UNIQUE KEY name (name)
)`
ut.mustExec(createSQL, *tableName)
}
func (ut *benchDB) truncateTable() {
cLog("truncate table")
ut.mustExec("truncate table %n", *tableName)
}
func (ut *benchDB) runCountTimes(name string, count int, f func()) {
var (
sum, first, last time.Duration
min = time.Minute
max = time.Nanosecond
)
cLogf("%s started", name)
for i := 0; i < count; i++ {
before := time.Now()
f()
dur := time.Since(before)
if first == 0 {
first = dur
}
last = dur
if dur < min {
min = dur
}
if dur > max {
max = dur
}
sum += dur
}
cLogf("%s done, avg %s, count %d, sum %s, first %s, last %s, max %s, min %s\n\n",
name, sum/time.Duration(count), count, sum, first, last, max, min)
}
func (ut *benchDB) insertRows(spec string) {
start, end, _ := ut.mustParseSpec(spec)
loopCount := (end - start + *batchSize - 1) / *batchSize
id := start
ut.runCountTimes("insert", loopCount, func() {
ut.mustExec("begin")
buf := make([]byte, *blobSize/2)
for i := 0; i < *batchSize; i++ {
if id == end {
break
}
rand.Read(buf)
insertQuery := "insert %n (id, name, data) values(%?, %?, %?)"
ut.mustExec(insertQuery, *tableName, id, id, buf)
id++
}
ut.mustExec("commit")
})
}
func (ut *benchDB) updateRandomRows(spec string) {
start, end, totalCount := ut.mustParseSpec(spec)
loopCount := (totalCount + *batchSize - 1) / *batchSize
var runCount = 0
ut.runCountTimes("update-random", loopCount, func() {
ut.mustExec("begin")
for i := 0; i < *batchSize; i++ {
if runCount == totalCount {
break
}
id := rand.Intn(end-start) + start
updateQuery := "update %n set exp = exp + 1 where id = %?"
ut.mustExec(updateQuery, *tableName, id)
runCount++
}
ut.mustExec("commit")
})
}
func (ut *benchDB) updateRangeRows(spec string) {
start, end, count := ut.mustParseSpec(spec)
ut.runCountTimes("update-range", count, func() {
ut.mustExec("begin")
updateQuery := "update %n set exp = exp + 1 where id >= %? and id < %?"
ut.mustExec(updateQuery, *tableName, start, end)
ut.mustExec("commit")
})
}
func (ut *benchDB) selectRows(spec string) {
start, end, count := ut.mustParseSpec(spec)
ut.runCountTimes("select", count, func() {
selectQuery := "select * from %n where id >= %? and id < %?"
ut.mustExec(selectQuery, *tableName, start, end)
})
}
func (ut *benchDB) query(spec string) {
strs := strings.Split(spec, ":")
sql := strs[0]
count, err := strconv.Atoi(strs[1])
terror.MustNil(err)
ut.runCountTimes("query", count, func() {
ut.mustExec(sql)
})
}
func cLogf(format string, args ...interface{}) {
str := fmt.Sprintf(format, args...)
fmt.Println("\033[0;32m" + str + "\033[0m\n")
}
func cLog(args ...interface{}) {
str := fmt.Sprint(args...)
fmt.Println("\033[0;32m" + str + "\033[0m\n")
}
| cmd/benchdb/main.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.00028536797617562115,
0.00017807450785767287,
0.00016349650104530156,
0.00017148174811154604,
0.00002794576357700862
] |
{
"id": 0,
"code_window": [
"\ts.txn.SetOption(tikvstore.EnableAsyncCommit, s.GetSessionVars().EnableAsyncCommit)\n",
"\ts.txn.SetOption(tikvstore.Enable1PC, s.GetSessionVars().Enable1PC)\n",
"\t// priority of the sysvar is lower than `start transaction with causal consistency only`\n",
"\tif s.txn.GetOption(tikvstore.GuaranteeLinearizability) == nil {\n",
"\t\t// We needn't ask the TiKV client to guarantee linearizability for auto-commit transactions\n",
"\t\t// because the property is naturally holds:\n",
"\t\t// We guarantee the commitTS of any transaction must not exceed the next timestamp from the TSO.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif val := s.txn.GetOption(tikvstore.GuaranteeLinearizability); val == nil || val.(bool) {\n"
],
"file_path": "session/session.go",
"type": "replace",
"edit_start_line_idx": 519
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package expression_test
import (
. "github.com/pingcap/check"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/testkit"
"github.com/pingcap/tidb/util/testutil"
)
var _ = Suite(&testFlagSimplifySuite{})
type testFlagSimplifySuite struct {
store kv.Storage
dom *domain.Domain
ctx sessionctx.Context
testData testutil.TestData
}
func (s *testFlagSimplifySuite) SetUpSuite(c *C) {
var err error
s.store, s.dom, err = newStoreWithBootstrap()
c.Assert(err, IsNil)
s.ctx = mock.NewContext()
s.testData, err = testutil.LoadTestSuiteData("testdata", "flag_simplify")
c.Assert(err, IsNil)
}
func (s *testFlagSimplifySuite) TearDownSuite(c *C) {
c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil)
s.dom.Close()
s.store.Close()
}
func (s *testFlagSimplifySuite) TestSimplifyExpressionByFlag(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id int primary key, a bigint unsigned not null, b bigint unsigned)")
var input []string
var output []struct {
SQL string
Plan []string
}
s.testData.GetTestCases(c, &input, &output)
for i, tt := range input {
s.testData.OnRecord(func() {
output[i].SQL = tt
output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Rows())
})
tk.MustQuery(tt).Check(testkit.Rows(output[i].Plan...))
}
}
| expression/flag_simplify_test.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.00017726175428833812,
0.0001703081652522087,
0.00016282378055620939,
0.0001719819993013516,
0.000005398880148277385
] |
{
"id": 0,
"code_window": [
"\ts.txn.SetOption(tikvstore.EnableAsyncCommit, s.GetSessionVars().EnableAsyncCommit)\n",
"\ts.txn.SetOption(tikvstore.Enable1PC, s.GetSessionVars().Enable1PC)\n",
"\t// priority of the sysvar is lower than `start transaction with causal consistency only`\n",
"\tif s.txn.GetOption(tikvstore.GuaranteeLinearizability) == nil {\n",
"\t\t// We needn't ask the TiKV client to guarantee linearizability for auto-commit transactions\n",
"\t\t// because the property is naturally holds:\n",
"\t\t// We guarantee the commitTS of any transaction must not exceed the next timestamp from the TSO.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif val := s.txn.GetOption(tikvstore.GuaranteeLinearizability); val == nil || val.(bool) {\n"
],
"file_path": "session/session.go",
"type": "replace",
"edit_start_line_idx": 519
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv_test
import (
. "github.com/pingcap/check"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/store/mockstore/unistore"
"github.com/pingcap/tidb/store/tikv"
)
type testPrewriteSuite struct {
store *tikv.KVStore
}
var _ = Suite(&testPrewriteSuite{})
func (s *testPrewriteSuite) SetUpTest(c *C) {
client, pdClient, cluster, err := unistore.New("")
c.Assert(err, IsNil)
unistore.BootstrapWithSingleStore(cluster)
store, err := tikv.NewTestTiKVStore(client, pdClient, nil, nil, 0)
c.Assert(err, IsNil)
s.store = store
}
func (s *testPrewriteSuite) TestSetMinCommitTSInAsyncCommit(c *C) {
t, err := s.store.Begin()
c.Assert(err, IsNil)
txn := tikv.TxnProbe{KVTxn: t}
err = txn.Set([]byte("k"), []byte("v"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
committer.SetUseAsyncCommit()
buildRequest := func() *pb.PrewriteRequest {
req := committer.BuildPrewriteRequest(1, 1, 1, committer.GetMutations(), 1)
return req.Req.(*pb.PrewriteRequest)
}
// no forUpdateTS
req := buildRequest()
c.Assert(req.MinCommitTs, Equals, txn.StartTS()+1)
// forUpdateTS is set
committer.SetForUpdateTS(txn.StartTS() + (5 << 18))
req = buildRequest()
c.Assert(req.MinCommitTs, Equals, committer.GetForUpdateTS()+1)
// minCommitTS is set
committer.SetMinCommitTS(txn.StartTS() + (10 << 18))
req = buildRequest()
c.Assert(req.MinCommitTs, Equals, committer.GetMinCommitTS())
}
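// The three cases above mirror how the committer chooses MinCommitTs for
// async commit: one past startTS, one past forUpdateTS when that is larger,
// or exactly the explicitly set minCommitTS.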
| store/tikv/tests/prewrite_test.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.0018848096951842308,
0.0007151690078899264,
0.0001706697221379727,
0.00028926675440743566,
0.0006540819304063916
] |
{
"id": 1,
"code_window": [
"\t\ttxn.SetEnableAsyncCommit(val.(bool))\n",
"\tcase tikvstore.Enable1PC:\n",
"\t\ttxn.SetEnable1PC(val.(bool))\n",
"\tcase tikvstore.TxnScope:\n",
"\t\ttxn.SetScope(val.(string))\n",
"\tcase tikvstore.IsStalenessReadOnly:\n",
"\t\ttxn.KVTxn.GetSnapshot().SetIsStatenessReadOnly(val.(bool))\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcase tikvstore.GuaranteeLinearizability:\n",
"\t\ttxn.SetCausalConsistency(!val.(bool))\n"
],
"file_path": "store/driver/txn/txn_driver.go",
"type": "add",
"edit_start_line_idx": 164
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"bytes"
"context"
"encoding/hex"
"math"
"math/rand"
"strings"
"sync"
"sync/atomic"
"time"
"unsafe"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/store/tikv/config"
tikverr "github.com/pingcap/tidb/store/tikv/error"
"github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/logutil"
"github.com/pingcap/tidb/store/tikv/metrics"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/store/tikv/unionstore"
"github.com/pingcap/tidb/store/tikv/util"
"github.com/prometheus/client_golang/prometheus"
zap "go.uber.org/zap"
)
type twoPhaseCommitAction interface {
handleSingleBatch(*twoPhaseCommitter, *Backoffer, batchMutations) error
tiKVTxnRegionsNumHistogram() prometheus.Observer
String() string
}
// Global variable set by config file.
var (
ManagedLockTTL uint64 = 20000 // 20s
)
// twoPhaseCommitter executes a two-phase commit protocol.
type twoPhaseCommitter struct {
store *KVStore
txn *KVTxn
startTS uint64
mutations *memBufferMutations
lockTTL uint64
commitTS uint64
priority pb.CommandPri
sessionID uint64 // sessionID is used for log.
cleanWg sync.WaitGroup
detail unsafe.Pointer
txnSize int
hasNoNeedCommitKeys bool
primaryKey []byte
forUpdateTS uint64
mu struct {
sync.RWMutex
		undeterminedErr error // undeterminedErr saves the RPC error encountered when committing the primary key.
committed bool
}
syncLog bool
// For pessimistic transaction
isPessimistic bool
isFirstLock bool
// regionTxnSize stores the number of keys involved in each region
regionTxnSize map[uint64]int
// Used by pessimistic transaction and large transaction.
ttlManager
testingKnobs struct {
acAfterCommitPrimary chan struct{}
bkAfterCommitPrimary chan struct{}
noFallBack bool
}
useAsyncCommit uint32
minCommitTS uint64
maxCommitTS uint64
prewriteStarted bool
prewriteCancelled uint32
useOnePC uint32
onePCCommitTS uint64
hasTriedAsyncCommit bool
hasTriedOnePC bool
// doingAmend means the amend prewrite is ongoing.
doingAmend bool
binlog BinlogExecutor
}
type memBufferMutations struct {
storage *unionstore.MemDB
handles []unionstore.MemKeyHandle
}
func newMemBufferMutations(sizeHint int, storage *unionstore.MemDB) *memBufferMutations {
return &memBufferMutations{
handles: make([]unionstore.MemKeyHandle, 0, sizeHint),
storage: storage,
}
}
func (m *memBufferMutations) Len() int {
return len(m.handles)
}
func (m *memBufferMutations) GetKey(i int) []byte {
return m.storage.GetKeyByHandle(m.handles[i])
}
func (m *memBufferMutations) GetKeys() [][]byte {
ret := make([][]byte, m.Len())
for i := range ret {
ret[i] = m.GetKey(i)
}
return ret
}
func (m *memBufferMutations) GetValue(i int) []byte {
v, _ := m.storage.GetValueByHandle(m.handles[i])
return v
}
func (m *memBufferMutations) GetOp(i int) pb.Op {
return pb.Op(m.handles[i].UserData >> 1)
}
func (m *memBufferMutations) IsPessimisticLock(i int) bool {
return m.handles[i].UserData&1 != 0
}
func (m *memBufferMutations) Slice(from, to int) CommitterMutations {
return &memBufferMutations{
handles: m.handles[from:to],
storage: m.storage,
}
}
func (m *memBufferMutations) Push(op pb.Op, isPessimisticLock bool, handle unionstore.MemKeyHandle) {
aux := uint16(op) << 1
if isPessimisticLock {
aux |= 1
}
handle.UserData = aux
m.handles = append(m.handles, handle)
}
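// UserData packs each mutation into one uint16: bit 0 stores the
// pessimistic-lock flag and the higher bits store the pb.Op value,
// i.e. uint16(op)<<1 | flag; GetOp and IsPessimisticLock decode it by
// shifting right and masking bit 0.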
// CommitterMutations contains the mutations to be submitted.
type CommitterMutations interface {
Len() int
GetKey(i int) []byte
GetKeys() [][]byte
GetOp(i int) pb.Op
GetValue(i int) []byte
IsPessimisticLock(i int) bool
Slice(from, to int) CommitterMutations
}
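// Two implementations live in this file: memBufferMutations above, which
// reads keys and values lazily from the transaction's MemDB, and
// PlainMutations below, which owns plain slices and is convenient for
// building and merging mutation sets.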
// PlainMutations contains transaction operations.
type PlainMutations struct {
ops []pb.Op
keys [][]byte
values [][]byte
isPessimisticLock []bool
}
// NewPlainMutations creates a PlainMutations object with sizeHint reserved.
func NewPlainMutations(sizeHint int) PlainMutations {
return PlainMutations{
ops: make([]pb.Op, 0, sizeHint),
keys: make([][]byte, 0, sizeHint),
values: make([][]byte, 0, sizeHint),
isPessimisticLock: make([]bool, 0, sizeHint),
}
}
// Slice returns the sub-slice of mutations in the range [from, to).
func (c *PlainMutations) Slice(from, to int) CommitterMutations {
var res PlainMutations
res.keys = c.keys[from:to]
if c.ops != nil {
res.ops = c.ops[from:to]
}
if c.values != nil {
res.values = c.values[from:to]
}
if c.isPessimisticLock != nil {
res.isPessimisticLock = c.isPessimisticLock[from:to]
}
return &res
}
// Push another mutation into mutations.
func (c *PlainMutations) Push(op pb.Op, key []byte, value []byte, isPessimisticLock bool) {
c.ops = append(c.ops, op)
c.keys = append(c.keys, key)
c.values = append(c.values, value)
c.isPessimisticLock = append(c.isPessimisticLock, isPessimisticLock)
}
// Len returns the count of mutations.
func (c *PlainMutations) Len() int {
return len(c.keys)
}
// GetKey returns the key at index.
func (c *PlainMutations) GetKey(i int) []byte {
return c.keys[i]
}
// GetKeys returns the keys.
func (c *PlainMutations) GetKeys() [][]byte {
return c.keys
}
// GetOps returns the key ops.
func (c *PlainMutations) GetOps() []pb.Op {
return c.ops
}
// GetValues returns the key values.
func (c *PlainMutations) GetValues() [][]byte {
return c.values
}
// GetPessimisticFlags returns the key pessimistic flags.
func (c *PlainMutations) GetPessimisticFlags() []bool {
return c.isPessimisticLock
}
// GetOp returns the key op at index.
func (c *PlainMutations) GetOp(i int) pb.Op {
return c.ops[i]
}
// GetValue returns the key value at index.
func (c *PlainMutations) GetValue(i int) []byte {
if len(c.values) <= i {
return nil
}
return c.values[i]
}
// IsPessimisticLock returns the key pessimistic flag at index.
func (c *PlainMutations) IsPessimisticLock(i int) bool {
return c.isPessimisticLock[i]
}
// PlainMutation represents a single transaction operation.
type PlainMutation struct {
KeyOp pb.Op
Key []byte
Value []byte
IsPessimisticLock bool
}
// MergeMutations appends the input mutations to the current mutations.
func (c *PlainMutations) MergeMutations(mutations PlainMutations) {
c.ops = append(c.ops, mutations.ops...)
c.keys = append(c.keys, mutations.keys...)
c.values = append(c.values, mutations.values...)
c.isPessimisticLock = append(c.isPessimisticLock, mutations.isPessimisticLock...)
}
// AppendMutation appends a single PlainMutation to the current mutations.
func (c *PlainMutations) AppendMutation(mutation PlainMutation) {
c.ops = append(c.ops, mutation.KeyOp)
c.keys = append(c.keys, mutation.Key)
c.values = append(c.values, mutation.Value)
c.isPessimisticLock = append(c.isPessimisticLock, mutation.IsPessimisticLock)
}
// newTwoPhaseCommitter creates a twoPhaseCommitter.
func newTwoPhaseCommitter(txn *KVTxn, sessionID uint64) (*twoPhaseCommitter, error) {
return &twoPhaseCommitter{
store: txn.store,
txn: txn,
startTS: txn.StartTS(),
sessionID: sessionID,
regionTxnSize: map[uint64]int{},
ttlManager: ttlManager{
ch: make(chan struct{}),
},
isPessimistic: txn.IsPessimistic(),
binlog: txn.binlog,
}, nil
}
func (c *twoPhaseCommitter) extractKeyExistsErr(err *tikverr.ErrKeyExist) error {
if !c.txn.us.HasPresumeKeyNotExists(err.GetKey()) {
return errors.Errorf("session %d, existErr for key:%s should not be nil", c.sessionID, err.GetKey())
}
return errors.Trace(err)
}
// KVFilter is a filter that filters out unnecessary KV pairs.
type KVFilter interface {
// IsUnnecessaryKeyValue returns whether this KV pair should be committed.
IsUnnecessaryKeyValue(key, value []byte, flags kv.KeyFlags) bool
}
func (c *twoPhaseCommitter) initKeysAndMutations() error {
var size, putCnt, delCnt, lockCnt, checkCnt int
txn := c.txn
memBuf := txn.GetMemBuffer()
sizeHint := txn.us.GetMemBuffer().Len()
c.mutations = newMemBufferMutations(sizeHint, memBuf)
c.isPessimistic = txn.IsPessimistic()
filter := txn.kvFilter
var err error
for it := memBuf.IterWithFlags(nil, nil); it.Valid(); err = it.Next() {
_ = err
key := it.Key()
flags := it.Flags()
var value []byte
var op pb.Op
if !it.HasValue() {
if !flags.HasLocked() {
continue
}
op = pb.Op_Lock
lockCnt++
} else {
value = it.Value()
if len(value) > 0 {
isUnnecessaryKV := filter != nil && filter.IsUnnecessaryKeyValue(key, value, flags)
if isUnnecessaryKV {
if !flags.HasLocked() {
continue
}
					// If the key was locked before, we should prewrite the lock even if
					// the KV needn't be committed according to the filter. Otherwise, we
					// would forget to remove the pessimistic locks added before.
op = pb.Op_Lock
lockCnt++
} else {
op = pb.Op_Put
if flags.HasPresumeKeyNotExists() {
op = pb.Op_Insert
}
putCnt++
}
} else {
if !txn.IsPessimistic() && flags.HasPresumeKeyNotExists() {
					// Delete-your-writes keys in an optimistic txn must be checked for
					// non-existence in the prewrite phase. Op_CheckNotExists does not leave a
					// prewrite lock, so mark these keys to be skipped in the commit phase.
op = pb.Op_CheckNotExists
checkCnt++
memBuf.UpdateFlags(key, kv.SetPrewriteOnly)
} else {
					// Normal deletes in an optimistic txn can skip the not-exists check, and
					// delete-your-writes keys in a pessimistic txn are guaranteed not to exist,
					// so both can be deleted directly.
op = pb.Op_Del
delCnt++
}
}
}
var isPessimistic bool
if flags.HasLocked() {
isPessimistic = c.isPessimistic
}
c.mutations.Push(op, isPessimistic, it.Handle())
size += len(key) + len(value)
if len(c.primaryKey) == 0 && op != pb.Op_CheckNotExists {
c.primaryKey = key
}
}
if c.mutations.Len() == 0 {
return nil
}
c.txnSize = size
const logEntryCount = 10000
const logSize = 4 * 1024 * 1024 // 4MB
if c.mutations.Len() > logEntryCount || size > logSize {
logutil.BgLogger().Info("[BIG_TXN]",
zap.Uint64("session", c.sessionID),
zap.String("key sample", kv.StrKey(c.mutations.GetKey(0))),
zap.Int("size", size),
zap.Int("keys", c.mutations.Len()),
zap.Int("puts", putCnt),
zap.Int("dels", delCnt),
zap.Int("locks", lockCnt),
zap.Int("checks", checkCnt),
zap.Uint64("txnStartTS", txn.startTS))
}
// Sanity check for startTS.
if txn.StartTS() == math.MaxUint64 {
err = errors.Errorf("try to commit with invalid txnStartTS: %d", txn.StartTS())
logutil.BgLogger().Error("commit failed",
zap.Uint64("session", c.sessionID),
zap.Error(err))
return errors.Trace(err)
}
commitDetail := &util.CommitDetails{WriteSize: size, WriteKeys: c.mutations.Len()}
metrics.TiKVTxnWriteKVCountHistogram.Observe(float64(commitDetail.WriteKeys))
metrics.TiKVTxnWriteSizeHistogram.Observe(float64(commitDetail.WriteSize))
c.hasNoNeedCommitKeys = checkCnt > 0
c.lockTTL = txnLockTTL(txn.startTime, size)
c.priority = txn.priority.ToPB()
c.syncLog = txn.syncLog
c.setDetail(commitDetail)
return nil
}
func (c *twoPhaseCommitter) primary() []byte {
if len(c.primaryKey) == 0 {
return c.mutations.GetKey(0)
}
return c.primaryKey
}
// asyncSecondaries returns all keys that must be checked in the recovery phase of an async commit.
func (c *twoPhaseCommitter) asyncSecondaries() [][]byte {
secondaries := make([][]byte, 0, c.mutations.Len())
for i := 0; i < c.mutations.Len(); i++ {
k := c.mutations.GetKey(i)
if bytes.Equal(k, c.primary()) || c.mutations.GetOp(i) == pb.Op_CheckNotExists {
continue
}
secondaries = append(secondaries, k)
}
return secondaries
}
const bytesPerMiB = 1024 * 1024
func txnLockTTL(startTime time.Time, txnSize int) uint64 {
// Increase lockTTL for large transactions.
// The formula is `ttl = ttlFactor * sqrt(sizeInMiB)`.
// When writeSize is less than 256KB, the base ttl is defaultTTL (3s);
	// when writeSize is 1MiB, 4MiB, or 10MiB, ttl is roughly 6s, 12s, and 19s respectively.
lockTTL := defaultLockTTL
if txnSize >= txnCommitBatchSize {
sizeMiB := float64(txnSize) / bytesPerMiB
lockTTL = uint64(float64(ttlFactor) * math.Sqrt(sizeMiB))
if lockTTL < defaultLockTTL {
lockTTL = defaultLockTTL
}
if lockTTL > ManagedLockTTL {
lockTTL = ManagedLockTTL
}
}
// Increase lockTTL by the transaction's read time.
	// When resolving a lock, we compare the current ts and startTS+lockTTL to decide whether to clean up. If a txn
	// takes a long time to read, increasing its TTL helps to prevent it from being aborted soon after prewrite.
elapsed := time.Since(startTime) / time.Millisecond
return lockTTL + uint64(elapsed)
}
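// Worked example for txnLockTTL (a sketch assuming the usual constants
// defaultLockTTL=3000ms, ttlFactor=6000 and ManagedLockTTL=20000ms; the exact
// values may differ per build), ignoring the elapsed-time term:
//
//	txnLockTTL(t0, 8<<10)   // below txnCommitBatchSize: 3000ms
//	txnLockTTL(t0, 1<<20)   // 6000 * sqrt(1)   = 6000ms
//	txnLockTTL(t0, 4<<20)   // 6000 * sqrt(4)   = 12000ms
//	txnLockTTL(t0, 100<<20) // 6000 * sqrt(100) = 60000ms, capped at 20000ms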
var preSplitDetectThreshold uint32 = 100000
var preSplitSizeThreshold uint32 = 32 << 20
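// Both thresholds are read atomically, so they can be tuned at runtime. A
// hypothetical tuning hook (for illustration only; not part of this file's API)
// would look like:
//
//	atomic.StoreUint32(&preSplitDetectThreshold, 50000)
//	atomic.StoreUint32(&preSplitSizeThreshold, 16<<20)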
// doActionOnMutations groups keys into a primary batch and secondary batches. If the primary batch exists,
// it performs the action on the primary batch first, then on the secondary batches. If the action is commit,
// the secondary batches are processed in a background goroutine.
func (c *twoPhaseCommitter) doActionOnMutations(bo *Backoffer, action twoPhaseCommitAction, mutations CommitterMutations) error {
if mutations.Len() == 0 {
return nil
}
groups, err := c.groupMutations(bo, mutations)
if err != nil {
return errors.Trace(err)
}
// This is redundant since `doActionOnGroupMutations` will still split groups into batches and
	// check the number of batches. However, we don't want the check to fail after any future code changes.
c.checkOnePCFallBack(action, len(groups))
return c.doActionOnGroupMutations(bo, action, groups)
}
// groupMutations groups mutations by region, then checks for any large groups and in that case pre-splits the region.
func (c *twoPhaseCommitter) groupMutations(bo *Backoffer, mutations CommitterMutations) ([]groupedMutations, error) {
groups, err := c.store.regionCache.groupSortedMutationsByRegion(bo, mutations)
if err != nil {
return nil, errors.Trace(err)
}
	// Pre-split regions to avoid putting too much write workload into a single region.
	// In the large-transaction case, this operation is important to avoid the TiKV 'server is busy' error.
var didPreSplit bool
preSplitDetectThresholdVal := atomic.LoadUint32(&preSplitDetectThreshold)
for _, group := range groups {
if uint32(group.mutations.Len()) >= preSplitDetectThresholdVal {
logutil.BgLogger().Info("2PC detect large amount of mutations on a single region",
zap.Uint64("region", group.region.GetID()),
zap.Int("mutations count", group.mutations.Len()))
			// Use context.Background; this time should not count toward the Backoffer.
if c.store.preSplitRegion(context.Background(), group) {
didPreSplit = true
}
}
}
// Reload region cache again.
if didPreSplit {
groups, err = c.store.regionCache.groupSortedMutationsByRegion(bo, mutations)
if err != nil {
return nil, errors.Trace(err)
}
}
return groups, nil
}
// doActionOnGroupMutations splits groups into batches (there is one group per region, and potentially many
// batches per group, but all mutations in a batch belong to the same region).
func (c *twoPhaseCommitter) doActionOnGroupMutations(bo *Backoffer, action twoPhaseCommitAction, groups []groupedMutations) error {
action.tiKVTxnRegionsNumHistogram().Observe(float64(len(groups)))
var sizeFunc = c.keySize
switch act := action.(type) {
case actionPrewrite:
// Do not update regionTxnSize on retries. They are not used when building a PrewriteRequest.
if len(bo.errors) == 0 {
for _, group := range groups {
c.regionTxnSize[group.region.id] = group.mutations.Len()
}
}
sizeFunc = c.keyValueSize
atomic.AddInt32(&c.getDetail().PrewriteRegionNum, int32(len(groups)))
case actionPessimisticLock:
if act.LockCtx.Stats != nil {
act.LockCtx.Stats.RegionNum = int32(len(groups))
}
}
batchBuilder := newBatched(c.primary())
for _, group := range groups {
batchBuilder.appendBatchMutationsBySize(group.region, group.mutations, sizeFunc, txnCommitBatchSize)
}
firstIsPrimary := batchBuilder.setPrimary()
actionCommit, actionIsCommit := action.(actionCommit)
_, actionIsCleanup := action.(actionCleanup)
	_, actionIsPessimisticLock := action.(actionPessimisticLock)
c.checkOnePCFallBack(action, len(batchBuilder.allBatches()))
var err error
failpoint.Inject("skipKeyReturnOK", func(val failpoint.Value) {
valStr, ok := val.(string)
if ok && c.sessionID > 0 {
			if firstIsPrimary && actionIsPessimisticLock {
logutil.Logger(bo.ctx).Warn("pessimisticLock failpoint", zap.String("valStr", valStr))
switch valStr {
case "pessimisticLockSkipPrimary":
err = c.doActionOnBatches(bo, action, batchBuilder.allBatches())
failpoint.Return(err)
case "pessimisticLockSkipSecondary":
err = c.doActionOnBatches(bo, action, batchBuilder.primaryBatch())
failpoint.Return(err)
}
}
}
})
failpoint.Inject("pessimisticRollbackDoNth", func() {
_, actionIsPessimisticRollback := action.(actionPessimisticRollback)
if actionIsPessimisticRollback && c.sessionID > 0 {
logutil.Logger(bo.ctx).Warn("pessimisticRollbackDoNth failpoint")
failpoint.Return(nil)
}
})
if firstIsPrimary &&
		((actionIsCommit && !c.isAsyncCommit()) || actionIsCleanup || actionIsPessimisticLock) {
		// The primary batch should be committed (non-async commit), cleaned up, or pessimistically locked first.
err = c.doActionOnBatches(bo, action, batchBuilder.primaryBatch())
if err != nil {
return errors.Trace(err)
}
if actionIsCommit && c.testingKnobs.bkAfterCommitPrimary != nil && c.testingKnobs.acAfterCommitPrimary != nil {
c.testingKnobs.acAfterCommitPrimary <- struct{}{}
<-c.testingKnobs.bkAfterCommitPrimary
}
batchBuilder.forgetPrimary()
}
	// An async commit transaction has already spawned a goroutine to commit the keys, so it is skipped here.
if actionIsCommit && !actionCommit.retry && !c.isAsyncCommit() {
secondaryBo := NewBackofferWithVars(context.Background(), int(atomic.LoadUint64(&CommitMaxBackoff)), c.txn.vars)
go func() {
if c.sessionID > 0 {
failpoint.Inject("beforeCommitSecondaries", func(v failpoint.Value) {
if s, ok := v.(string); !ok {
logutil.Logger(bo.ctx).Info("[failpoint] sleep 2s before commit secondary keys",
zap.Uint64("sessionID", c.sessionID), zap.Uint64("txnStartTS", c.startTS), zap.Uint64("txnCommitTS", c.commitTS))
time.Sleep(2 * time.Second)
} else if s == "skip" {
logutil.Logger(bo.ctx).Info("[failpoint] injected skip committing secondaries",
zap.Uint64("sessionID", c.sessionID), zap.Uint64("txnStartTS", c.startTS), zap.Uint64("txnCommitTS", c.commitTS))
failpoint.Return()
}
})
}
e := c.doActionOnBatches(secondaryBo, action, batchBuilder.allBatches())
if e != nil {
logutil.BgLogger().Debug("2PC async doActionOnBatches",
zap.Uint64("session", c.sessionID),
zap.Stringer("action type", action),
zap.Error(e))
metrics.SecondaryLockCleanupFailureCounterCommit.Inc()
}
}()
} else {
err = c.doActionOnBatches(bo, action, batchBuilder.allBatches())
}
return errors.Trace(err)
}
// doActionOnBatches performs the action on the batches in parallel.
func (c *twoPhaseCommitter) doActionOnBatches(bo *Backoffer, action twoPhaseCommitAction, batches []batchMutations) error {
if len(batches) == 0 {
return nil
}
noNeedFork := len(batches) == 1
if !noNeedFork {
if ac, ok := action.(actionCommit); ok && ac.retry {
noNeedFork = true
}
}
if noNeedFork {
for _, b := range batches {
e := action.handleSingleBatch(c, bo, b)
if e != nil {
logutil.BgLogger().Debug("2PC doActionOnBatches failed",
zap.Uint64("session", c.sessionID),
zap.Stringer("action type", action),
zap.Error(e),
zap.Uint64("txnStartTS", c.startTS))
return errors.Trace(e)
}
}
return nil
}
rateLim := len(batches)
	// Set rateLim here for large transactions.
	// If the rate limit is too high, TiKV will report that the service is busy.
	// If the rate limit is too low, we can't fully utilize TiKV's throughput.
// TODO: Find a self-adaptive way to control the rate limit here.
if rateLim > config.GetGlobalConfig().CommitterConcurrency {
rateLim = config.GetGlobalConfig().CommitterConcurrency
}
batchExecutor := newBatchExecutor(rateLim, c, action, bo)
err := batchExecutor.process(batches)
return errors.Trace(err)
}
func (c *twoPhaseCommitter) keyValueSize(key, value []byte) int {
return len(key) + len(value)
}
func (c *twoPhaseCommitter) keySize(key, value []byte) int {
return len(key)
}
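// Why both size functions exist (a sketch): prewrite requests carry both keys
// and values, so prewrite batches are sized with keyValueSize; commit and
// cleanup requests carry only keys, so they are sized with keySize (see
// doActionOnGroupMutations, which picks keyValueSize only for actionPrewrite).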
type ttlManagerState uint32
const (
stateUninitialized ttlManagerState = iota
stateRunning
stateClosed
)
type ttlManager struct {
state ttlManagerState
ch chan struct{}
lockCtx *kv.LockCtx
}
func (tm *ttlManager) run(c *twoPhaseCommitter, lockCtx *kv.LockCtx) {
// Run only once.
if !atomic.CompareAndSwapUint32((*uint32)(&tm.state), uint32(stateUninitialized), uint32(stateRunning)) {
return
}
tm.lockCtx = lockCtx
noKeepAlive := false
failpoint.Inject("doNotKeepAlive", func() {
noKeepAlive = true
})
if !noKeepAlive {
go tm.keepAlive(c)
}
}
func (tm *ttlManager) close() {
if !atomic.CompareAndSwapUint32((*uint32)(&tm.state), uint32(stateRunning), uint32(stateClosed)) {
return
}
close(tm.ch)
}
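// The ttlManager state machine is one-way and CAS-guarded, so run and close
// are each effective at most once (a sketch of the transitions above):
//
//	run:   CAS(stateUninitialized -> stateRunning), otherwise a no-op
//	close: CAS(stateRunning -> stateClosed), otherwise a no-op (in particular,
//	       close does nothing if run was never called)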
func (tm *ttlManager) keepAlive(c *twoPhaseCommitter) {
// Ticker is set to 1/2 of the ManagedLockTTL.
ticker := time.NewTicker(time.Duration(atomic.LoadUint64(&ManagedLockTTL)) * time.Millisecond / 2)
defer ticker.Stop()
for {
select {
case <-tm.ch:
return
case <-ticker.C:
			// If a kill signal is received, the ttlManager should exit.
if tm.lockCtx != nil && tm.lockCtx.Killed != nil && atomic.LoadUint32(tm.lockCtx.Killed) != 0 {
return
}
bo := NewBackofferWithVars(context.Background(), pessimisticLockMaxBackoff, c.txn.vars)
now, err := c.store.GetOracle().GetTimestamp(bo.ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
if err != nil {
err1 := bo.Backoff(BoPDRPC, err)
if err1 != nil {
logutil.Logger(bo.ctx).Warn("keepAlive get tso fail",
zap.Error(err))
return
}
continue
}
uptime := uint64(oracle.ExtractPhysical(now) - oracle.ExtractPhysical(c.startTS))
if uptime > config.GetGlobalConfig().MaxTxnTTL {
				// Check the maximum lifetime of the ttlManager, so that when something goes wrong
				// the key will not be locked forever.
logutil.Logger(bo.ctx).Info("ttlManager live up to its lifetime",
zap.Uint64("txnStartTS", c.startTS),
zap.Uint64("uptime", uptime),
zap.Uint64("maxTxnTTL", config.GetGlobalConfig().MaxTxnTTL))
metrics.TiKVTTLLifeTimeReachCounter.Inc()
				// The pessimistic locks may expire if the ttl manager has timed out. Set the `LockExpired` flag
				// so that this transaction can only commit or roll back, with no more statement executions.
if c.isPessimistic && tm.lockCtx != nil && tm.lockCtx.LockExpired != nil {
atomic.StoreUint32(tm.lockCtx.LockExpired, 1)
}
return
}
newTTL := uptime + atomic.LoadUint64(&ManagedLockTTL)
logutil.Logger(bo.ctx).Info("send TxnHeartBeat",
zap.Uint64("startTS", c.startTS), zap.Uint64("newTTL", newTTL))
startTime := time.Now()
_, err = sendTxnHeartBeat(bo, c.store, c.primary(), c.startTS, newTTL)
if err != nil {
metrics.TxnHeartBeatHistogramError.Observe(time.Since(startTime).Seconds())
logutil.Logger(bo.ctx).Warn("send TxnHeartBeat failed",
zap.Error(err),
zap.Uint64("txnStartTS", c.startTS))
return
}
metrics.TxnHeartBeatHistogramOK.Observe(time.Since(startTime).Seconds())
}
}
}
func sendTxnHeartBeat(bo *Backoffer, store *KVStore, primary []byte, startTS, ttl uint64) (uint64, error) {
req := tikvrpc.NewRequest(tikvrpc.CmdTxnHeartBeat, &pb.TxnHeartBeatRequest{
PrimaryLock: primary,
StartVersion: startTS,
AdviseLockTtl: ttl,
})
for {
loc, err := store.GetRegionCache().LocateKey(bo, primary)
if err != nil {
return 0, errors.Trace(err)
}
resp, err := store.SendReq(bo, req, loc.Region, ReadTimeoutShort)
if err != nil {
return 0, errors.Trace(err)
}
regionErr, err := resp.GetRegionError()
if err != nil {
return 0, errors.Trace(err)
}
if regionErr != nil {
err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String()))
if err != nil {
return 0, errors.Trace(err)
}
continue
}
if resp.Resp == nil {
return 0, errors.Trace(tikverr.ErrBodyMissing)
}
cmdResp := resp.Resp.(*pb.TxnHeartBeatResponse)
if keyErr := cmdResp.GetError(); keyErr != nil {
return 0, errors.Errorf("txn %d heartbeat fail, primary key = %v, err = %s", startTS, hex.EncodeToString(primary), extractKeyErr(keyErr))
}
return cmdResp.GetLockTtl(), nil
}
}
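// Typical use of sendTxnHeartBeat (a sketch mirroring keepAlive above; bo and
// newTTL come from the caller):
//
//	remainingTTL, err := sendTxnHeartBeat(bo, c.store, c.primary(), c.startTS, newTTL)
//	if err != nil {
//		return // the primary lock may already be resolved; stop renewing
//	}
//	_ = remainingTTL // TiKV reports the lock's current TTL back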
// checkAsyncCommit checks whether the async commit protocol is available for the current transaction commit;
// it returns true if async commit is possible.
func (c *twoPhaseCommitter) checkAsyncCommit() bool {
// Disable async commit in local transactions
if c.txn.GetScope() != oracle.GlobalTxnScope {
return false
}
asyncCommitCfg := config.GetGlobalConfig().TiKVClient.AsyncCommit
	// TODO: the keys limit needs more tests; this value makes the unit tests pass for now.
	// Async commit is not compatible with Binlog because of the non-unique timestamp issue.
if c.sessionID > 0 && c.txn.enableAsyncCommit &&
uint(c.mutations.Len()) <= asyncCommitCfg.KeysLimit &&
!c.shouldWriteBinlog() {
totalKeySize := uint64(0)
for i := 0; i < c.mutations.Len(); i++ {
totalKeySize += uint64(len(c.mutations.GetKey(i)))
if totalKeySize > asyncCommitCfg.TotalKeySizeLimit {
return false
}
}
return true
}
return false
}
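// Illustrative limits for checkAsyncCommit (a sketch; the real values come
// from config.GetGlobalConfig().TiKVClient.AsyncCommit): with KeysLimit=256
// and TotalKeySizeLimit=4096, a txn with 300 keys, or whose keys total more
// than 4KiB, falls back to normal 2PC even when async commit is enabled.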
// checkOnePC checks whether the 1PC protocol is available for the current transaction.
func (c *twoPhaseCommitter) checkOnePC() bool {
// Disable 1PC in local transactions
if c.txn.GetScope() != oracle.GlobalTxnScope {
return false
}
return c.sessionID > 0 && !c.shouldWriteBinlog() && c.txn.enable1PC
}
func (c *twoPhaseCommitter) needLinearizability() bool {
	guaranteeLinearizabilityOption := c.txn.us.GetOption(kv.GuaranteeLinearizability)
	// Linearizability is guaranteed by default.
	return guaranteeLinearizabilityOption == nil || guaranteeLinearizabilityOption.(bool)
}
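// Callers control this through the transaction option; for example (a sketch,
// see also the async commit tests):
//
//	txn.SetOption(kv.GuaranteeLinearizability, false) // opt out, allowing causal consistency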
func (c *twoPhaseCommitter) isAsyncCommit() bool {
return atomic.LoadUint32(&c.useAsyncCommit) > 0
}
func (c *twoPhaseCommitter) setAsyncCommit(val bool) {
if val {
atomic.StoreUint32(&c.useAsyncCommit, 1)
} else {
atomic.StoreUint32(&c.useAsyncCommit, 0)
}
}
func (c *twoPhaseCommitter) isOnePC() bool {
return atomic.LoadUint32(&c.useOnePC) > 0
}
func (c *twoPhaseCommitter) setOnePC(val bool) {
if val {
atomic.StoreUint32(&c.useOnePC, 1)
} else {
atomic.StoreUint32(&c.useOnePC, 0)
}
}
func (c *twoPhaseCommitter) checkOnePCFallBack(action twoPhaseCommitAction, batchCount int) {
if _, ok := action.(actionPrewrite); ok {
if batchCount > 1 {
c.setOnePC(false)
}
}
}
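// In other words, 1PC survives only when the whole transaction fits into a
// single prewrite batch; for example (a sketch):
//
//	c.checkOnePCFallBack(actionPrewrite{}, 1) // 1PC stays enabled
//	c.checkOnePCFallBack(actionPrewrite{}, 2) // falls back: setOnePC(false)
//	c.checkOnePCFallBack(actionCommit{}, 2)   // not prewrite, no effect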
func (c *twoPhaseCommitter) cleanup(ctx context.Context) {
c.cleanWg.Add(1)
go func() {
failpoint.Inject("commitFailedSkipCleanup", func() {
logutil.Logger(ctx).Info("[failpoint] injected skip cleanup secondaries on failure",
zap.Uint64("txnStartTS", c.startTS))
c.cleanWg.Done()
failpoint.Return()
})
cleanupKeysCtx := context.WithValue(context.Background(), TxnStartKey, ctx.Value(TxnStartKey))
var err error
if !c.isOnePC() {
err = c.cleanupMutations(NewBackofferWithVars(cleanupKeysCtx, cleanupMaxBackoff, c.txn.vars), c.mutations)
} else if c.isPessimistic {
err = c.pessimisticRollbackMutations(NewBackofferWithVars(cleanupKeysCtx, cleanupMaxBackoff, c.txn.vars), c.mutations)
}
if err != nil {
metrics.SecondaryLockCleanupFailureCounterRollback.Inc()
logutil.Logger(ctx).Info("2PC cleanup failed", zap.Error(err), zap.Uint64("txnStartTS", c.startTS),
zap.Bool("isPessimistic", c.isPessimistic), zap.Bool("isOnePC", c.isOnePC()))
} else {
logutil.Logger(ctx).Debug("2PC clean up done",
zap.Uint64("txnStartTS", c.startTS), zap.Bool("isPessimistic", c.isPessimistic),
zap.Bool("isOnePC", c.isOnePC()))
}
c.cleanWg.Done()
}()
}
// execute executes the two-phase commit protocol.
func (c *twoPhaseCommitter) execute(ctx context.Context) (err error) {
var binlogSkipped bool
defer func() {
if c.isOnePC() {
// The error means the 1PC transaction failed.
if err != nil {
if c.getUndeterminedErr() == nil {
c.cleanup(ctx)
}
metrics.OnePCTxnCounterError.Inc()
} else {
metrics.OnePCTxnCounterOk.Inc()
}
} else if c.isAsyncCommit() {
			// The error means the async commit must not be considered successful.
if err != nil {
if c.getUndeterminedErr() == nil {
c.cleanup(ctx)
}
metrics.AsyncCommitTxnCounterError.Inc()
} else {
metrics.AsyncCommitTxnCounterOk.Inc()
}
} else {
// Always clean up all written keys if the txn does not commit.
c.mu.RLock()
committed := c.mu.committed
undetermined := c.mu.undeterminedErr != nil
c.mu.RUnlock()
if !committed && !undetermined {
c.cleanup(ctx)
metrics.TwoPCTxnCounterError.Inc()
} else {
metrics.TwoPCTxnCounterOk.Inc()
}
c.txn.commitTS = c.commitTS
if binlogSkipped {
c.binlog.Skip()
return
}
if !c.shouldWriteBinlog() {
return
}
if err != nil {
c.binlog.Commit(ctx, 0)
} else {
c.binlog.Commit(ctx, int64(c.commitTS))
}
}
}()
commitTSMayBeCalculated := false
// Check async commit is available or not.
if c.checkAsyncCommit() {
commitTSMayBeCalculated = true
c.setAsyncCommit(true)
c.hasTriedAsyncCommit = true
}
// Check if 1PC is enabled.
if c.checkOnePC() {
commitTSMayBeCalculated = true
c.setOnePC(true)
c.hasTriedOnePC = true
}
// If we want to use async commit or 1PC and also want linearizability across
// all nodes, we have to make sure the commit TS of this transaction is greater
// than the snapshot TS of all existent readers. So we get a new timestamp
// from PD as our MinCommitTS.
if commitTSMayBeCalculated && c.needLinearizability() {
failpoint.Inject("getMinCommitTSFromTSO", nil)
minCommitTS, err := c.store.oracle.GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
// If we fail to get a timestamp from PD, we just propagate the failure
// instead of falling back to the normal 2PC because a normal 2PC will
// also be likely to fail due to the same timestamp issue.
if err != nil {
return errors.Trace(err)
}
c.minCommitTS = minCommitTS
}
// Calculate maxCommitTS if necessary
if commitTSMayBeCalculated {
if err = c.calculateMaxCommitTS(ctx); err != nil {
return errors.Trace(err)
}
}
if c.sessionID > 0 {
failpoint.Inject("beforePrewrite", nil)
}
c.prewriteStarted = true
var binlogChan <-chan BinlogWriteResult
if c.shouldWriteBinlog() {
binlogChan = c.binlog.Prewrite(ctx, c.primary())
}
prewriteBo := NewBackofferWithVars(ctx, PrewriteMaxBackoff, c.txn.vars)
start := time.Now()
err = c.prewriteMutations(prewriteBo, c.mutations)
if err != nil {
// TODO: Now we return an undetermined error as long as one of the prewrite
// RPCs fails. However, if there are multiple errors and some of the errors
// are not RPC failures, we can return the actual error instead of undetermined.
if undeterminedErr := c.getUndeterminedErr(); undeterminedErr != nil {
logutil.Logger(ctx).Error("2PC commit result undetermined",
zap.Error(err),
zap.NamedError("rpcErr", undeterminedErr),
zap.Uint64("txnStartTS", c.startTS))
return errors.Trace(terror.ErrResultUndetermined)
}
}
commitDetail := c.getDetail()
commitDetail.PrewriteTime = time.Since(start)
if prewriteBo.totalSleep > 0 {
atomic.AddInt64(&commitDetail.CommitBackoffTime, int64(prewriteBo.totalSleep)*int64(time.Millisecond))
commitDetail.Mu.Lock()
commitDetail.Mu.BackoffTypes = append(commitDetail.Mu.BackoffTypes, prewriteBo.types...)
commitDetail.Mu.Unlock()
}
if binlogChan != nil {
startWaitBinlog := time.Now()
binlogWriteResult := <-binlogChan
commitDetail.WaitPrewriteBinlogTime = time.Since(startWaitBinlog)
if binlogWriteResult != nil {
binlogSkipped = binlogWriteResult.Skipped()
binlogErr := binlogWriteResult.GetError()
if binlogErr != nil {
return binlogErr
}
}
}
if err != nil {
logutil.Logger(ctx).Debug("2PC failed on prewrite",
zap.Error(err),
zap.Uint64("txnStartTS", c.startTS))
return errors.Trace(err)
}
	// Strip check_not_exists keys that don't need to be committed.
c.stripNoNeedCommitKeys()
var commitTS uint64
if c.isOnePC() {
if c.onePCCommitTS == 0 {
err = errors.Errorf("session %d invalid onePCCommitTS for 1PC protocol after prewrite, startTS=%v", c.sessionID, c.startTS)
return errors.Trace(err)
}
c.commitTS = c.onePCCommitTS
c.txn.commitTS = c.commitTS
logutil.Logger(ctx).Debug("1PC protocol is used to commit this txn",
zap.Uint64("startTS", c.startTS), zap.Uint64("commitTS", c.commitTS),
zap.Uint64("session", c.sessionID))
return nil
}
if c.onePCCommitTS != 0 {
logutil.Logger(ctx).Fatal("non 1PC transaction committed in 1PC",
zap.Uint64("session", c.sessionID), zap.Uint64("startTS", c.startTS))
}
if c.isAsyncCommit() {
if c.minCommitTS == 0 {
err = errors.Errorf("session %d invalid minCommitTS for async commit protocol after prewrite, startTS=%v", c.sessionID, c.startTS)
return errors.Trace(err)
}
commitTS = c.minCommitTS
} else {
start = time.Now()
logutil.Event(ctx, "start get commit ts")
commitTS, err = c.store.getTimestampWithRetry(NewBackofferWithVars(ctx, tsoMaxBackoff, c.txn.vars), c.txn.GetScope())
if err != nil {
logutil.Logger(ctx).Warn("2PC get commitTS failed",
zap.Error(err),
zap.Uint64("txnStartTS", c.startTS))
return errors.Trace(err)
}
commitDetail.GetCommitTsTime = time.Since(start)
logutil.Event(ctx, "finish get commit ts")
logutil.SetTag(ctx, "commitTs", commitTS)
}
if !c.isAsyncCommit() {
tryAmend := c.isPessimistic && c.sessionID > 0 && c.txn.schemaAmender != nil
if !tryAmend {
_, _, err = c.checkSchemaValid(ctx, commitTS, c.txn.schemaVer, false)
if err != nil {
return errors.Trace(err)
}
} else {
relatedSchemaChange, memAmended, err := c.checkSchemaValid(ctx, commitTS, c.txn.schemaVer, true)
if err != nil {
return errors.Trace(err)
}
if memAmended {
// Get new commitTS and check schema valid again.
newCommitTS, err := c.getCommitTS(ctx, commitDetail)
if err != nil {
return errors.Trace(err)
}
				// If the schema check failed between commitTS and newCommitTS, report a schema change error.
_, _, err = c.checkSchemaValid(ctx, newCommitTS, relatedSchemaChange.LatestInfoSchema, false)
if err != nil {
logutil.Logger(ctx).Info("schema check after amend failed, it means the schema version changed again",
zap.Uint64("startTS", c.startTS),
zap.Uint64("amendTS", commitTS),
zap.Int64("amendedSchemaVersion", relatedSchemaChange.LatestInfoSchema.SchemaMetaVersion()),
zap.Uint64("newCommitTS", newCommitTS))
return errors.Trace(err)
}
commitTS = newCommitTS
}
}
}
atomic.StoreUint64(&c.commitTS, commitTS)
if c.store.oracle.IsExpired(c.startTS, MaxTxnTimeUse, &oracle.Option{TxnScope: oracle.GlobalTxnScope}) {
err = errors.Errorf("session %d txn takes too much time, txnStartTS: %d, comm: %d",
c.sessionID, c.startTS, c.commitTS)
return err
}
if c.sessionID > 0 {
failpoint.Inject("beforeCommit", func(val failpoint.Value) {
// Pass multiple instructions in one string, delimited by commas, to trigger multiple behaviors, like
// `return("delay,fail")`. Then they will be executed sequentially at once.
if v, ok := val.(string); ok {
for _, action := range strings.Split(v, ",") {
					// Async commit transactions cannot return an error here, since the commit is already considered successful.
if action == "fail" && !c.isAsyncCommit() {
logutil.Logger(ctx).Info("[failpoint] injected failure before commit", zap.Uint64("txnStartTS", c.startTS))
failpoint.Return(errors.New("injected failure before commit"))
} else if action == "delay" {
duration := time.Duration(rand.Int63n(int64(time.Second) * 5))
logutil.Logger(ctx).Info("[failpoint] injected delay before commit",
zap.Uint64("txnStartTS", c.startTS), zap.Duration("duration", duration))
time.Sleep(duration)
}
}
}
})
}
if c.isAsyncCommit() {
		// For the async commit protocol, the commit is considered successful here.
c.txn.commitTS = c.commitTS
logutil.Logger(ctx).Debug("2PC will use async commit protocol to commit this txn",
zap.Uint64("startTS", c.startTS), zap.Uint64("commitTS", c.commitTS),
zap.Uint64("sessionID", c.sessionID))
go func() {
failpoint.Inject("asyncCommitDoNothing", func() {
failpoint.Return()
})
commitBo := NewBackofferWithVars(ctx, int(atomic.LoadUint64(&CommitMaxBackoff)), c.txn.vars)
err := c.commitMutations(commitBo, c.mutations)
if err != nil {
logutil.Logger(ctx).Warn("2PC async commit failed", zap.Uint64("sessionID", c.sessionID),
zap.Uint64("startTS", c.startTS), zap.Uint64("commitTS", c.commitTS), zap.Error(err))
}
}()
return nil
}
return c.commitTxn(ctx, commitDetail)
}
func (c *twoPhaseCommitter) commitTxn(ctx context.Context, commitDetail *util.CommitDetails) error {
c.txn.GetMemBuffer().DiscardValues()
start := time.Now()
commitBo := NewBackofferWithVars(ctx, int(atomic.LoadUint64(&CommitMaxBackoff)), c.txn.vars)
err := c.commitMutations(commitBo, c.mutations)
commitDetail.CommitTime = time.Since(start)
if commitBo.totalSleep > 0 {
atomic.AddInt64(&commitDetail.CommitBackoffTime, int64(commitBo.totalSleep)*int64(time.Millisecond))
commitDetail.Mu.Lock()
commitDetail.Mu.BackoffTypes = append(commitDetail.Mu.BackoffTypes, commitBo.types...)
commitDetail.Mu.Unlock()
}
if err != nil {
if undeterminedErr := c.getUndeterminedErr(); undeterminedErr != nil {
logutil.Logger(ctx).Error("2PC commit result undetermined",
zap.Error(err),
zap.NamedError("rpcErr", undeterminedErr),
zap.Uint64("txnStartTS", c.startTS))
err = errors.Trace(terror.ErrResultUndetermined)
}
if !c.mu.committed {
logutil.Logger(ctx).Debug("2PC failed on commit",
zap.Error(err),
zap.Uint64("txnStartTS", c.startTS))
return errors.Trace(err)
}
logutil.Logger(ctx).Debug("got some exceptions, but 2PC was still successful",
zap.Error(err),
zap.Uint64("txnStartTS", c.startTS))
}
return nil
}
func (c *twoPhaseCommitter) stripNoNeedCommitKeys() {
if !c.hasNoNeedCommitKeys {
return
}
m := c.mutations
var newIdx int
for oldIdx := range m.handles {
key := m.GetKey(oldIdx)
flags, err := c.txn.GetMemBuffer().GetFlags(key)
if err == nil && flags.HasPrewriteOnly() {
continue
}
m.handles[newIdx] = m.handles[oldIdx]
newIdx++
}
c.mutations.handles = c.mutations.handles[:newIdx]
}
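// stripNoNeedCommitKeys uses the standard in-place filter idiom, keeping the
// surviving handles in order without extra allocation (a generic sketch):
//
//	n := 0
//	for _, h := range handles {
//		if keep(h) {
//			handles[n] = h
//			n++
//		}
//	}
//	handles = handles[:n]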
// SchemaVer is the infoSchema interface that returns the schema version.
type SchemaVer interface {
// SchemaMetaVersion returns the meta schema version.
SchemaMetaVersion() int64
}
// SchemaLeaseChecker is used to validate that the schema version has not changed during transaction execution.
type SchemaLeaseChecker interface {
	// CheckBySchemaVer checks if the schema has changed for the transaction-related tables between startSchemaVer
	// and the schema version at txnTS; all the related schema changes are returned.
CheckBySchemaVer(txnTS uint64, startSchemaVer SchemaVer) (*RelatedSchemaChange, error)
}
// RelatedSchemaChange contains information about schema diff between two schema versions.
type RelatedSchemaChange struct {
PhyTblIDS []int64
ActionTypes []uint64
LatestInfoSchema SchemaVer
Amendable bool
}
func (c *twoPhaseCommitter) amendPessimisticLock(ctx context.Context, addMutations CommitterMutations) error {
keysNeedToLock := NewPlainMutations(addMutations.Len())
for i := 0; i < addMutations.Len(); i++ {
if addMutations.IsPessimisticLock(i) {
keysNeedToLock.Push(addMutations.GetOp(i), addMutations.GetKey(i), addMutations.GetValue(i), addMutations.IsPessimisticLock(i))
}
}
	// For unique index amend, we need to pessimistically lock the newly generated index keys first.
	// Set doingAmend to true to force the pessimistic lock to do the existence check for these keys.
c.doingAmend = true
defer func() { c.doingAmend = false }()
if keysNeedToLock.Len() > 0 {
lCtx := &kv.LockCtx{
Killed: c.lockCtx.Killed,
ForUpdateTS: c.forUpdateTS,
LockWaitTime: c.lockCtx.LockWaitTime,
WaitStartTime: time.Now(),
}
tryTimes := uint(0)
retryLimit := config.GetGlobalConfig().PessimisticTxn.MaxRetryCount
var err error
for tryTimes < retryLimit {
pessimisticLockBo := NewBackofferWithVars(ctx, pessimisticLockMaxBackoff, c.txn.vars)
err = c.pessimisticLockMutations(pessimisticLockBo, lCtx, &keysNeedToLock)
if err != nil {
				// keysNeedToLock won't change, so we don't asynchronously roll back pessimistic locks here on write conflict.
if _, ok := errors.Cause(err).(*tikverr.ErrWriteConflict); ok {
newForUpdateTSVer, err := c.store.CurrentTimestamp(oracle.GlobalTxnScope)
if err != nil {
return errors.Trace(err)
}
lCtx.ForUpdateTS = newForUpdateTSVer
c.forUpdateTS = newForUpdateTSVer
logutil.Logger(ctx).Info("amend pessimistic lock pessimistic retry lock",
zap.Uint("tryTimes", tryTimes), zap.Uint64("startTS", c.startTS),
zap.Uint64("newForUpdateTS", c.forUpdateTS))
tryTimes++
continue
}
logutil.Logger(ctx).Warn("amend pessimistic lock has failed", zap.Error(err), zap.Uint64("txnStartTS", c.startTS))
return err
}
logutil.Logger(ctx).Info("amend pessimistic lock finished", zap.Uint64("startTS", c.startTS),
zap.Uint64("forUpdateTS", c.forUpdateTS), zap.Int("keys", keysNeedToLock.Len()))
break
}
if err != nil {
logutil.Logger(ctx).Warn("amend pessimistic lock failed after retry",
zap.Uint("tryTimes", tryTimes), zap.Uint64("startTS", c.startTS))
return err
}
}
return nil
}
func (c *twoPhaseCommitter) tryAmendTxn(ctx context.Context, startInfoSchema SchemaVer, change *RelatedSchemaChange) (bool, error) {
addMutations, err := c.txn.schemaAmender.AmendTxn(ctx, startInfoSchema, change, c.mutations)
if err != nil {
return false, err
}
	// Add new mutations to the mutation list, or prewrite them if prewrite has already started.
if addMutations != nil && addMutations.Len() > 0 {
err = c.amendPessimisticLock(ctx, addMutations)
if err != nil {
logutil.Logger(ctx).Info("amendPessimisticLock has failed", zap.Error(err))
return false, err
}
if c.prewriteStarted {
prewriteBo := NewBackofferWithVars(ctx, PrewriteMaxBackoff, c.txn.vars)
err = c.prewriteMutations(prewriteBo, addMutations)
if err != nil {
logutil.Logger(ctx).Warn("amend prewrite has failed", zap.Error(err), zap.Uint64("txnStartTS", c.startTS))
return false, err
}
logutil.Logger(ctx).Info("amend prewrite finished", zap.Uint64("txnStartTS", c.startTS))
return true, nil
}
memBuf := c.txn.GetMemBuffer()
for i := 0; i < addMutations.Len(); i++ {
key := addMutations.GetKey(i)
op := addMutations.GetOp(i)
var err error
if op == pb.Op_Del {
err = memBuf.Delete(key)
} else {
err = memBuf.Set(key, addMutations.GetValue(i))
}
if err != nil {
logutil.Logger(ctx).Warn("amend mutations has failed", zap.Error(err), zap.Uint64("txnStartTS", c.startTS))
return false, err
}
handle := c.txn.GetMemBuffer().IterWithFlags(key, nil).Handle()
c.mutations.Push(op, addMutations.IsPessimisticLock(i), handle)
}
}
return false, nil
}
func (c *twoPhaseCommitter) getCommitTS(ctx context.Context, commitDetail *util.CommitDetails) (uint64, error) {
start := time.Now()
logutil.Event(ctx, "start get commit ts")
commitTS, err := c.store.getTimestampWithRetry(NewBackofferWithVars(ctx, tsoMaxBackoff, c.txn.vars), c.txn.GetScope())
if err != nil {
logutil.Logger(ctx).Warn("2PC get commitTS failed",
zap.Error(err),
zap.Uint64("txnStartTS", c.startTS))
return 0, errors.Trace(err)
}
commitDetail.GetCommitTsTime = time.Since(start)
logutil.Event(ctx, "finish get commit ts")
logutil.SetTag(ctx, "commitTS", commitTS)
// Check commitTS.
if commitTS <= c.startTS {
err = errors.Errorf("session %d invalid transaction tso with txnStartTS=%v while txnCommitTS=%v",
c.sessionID, c.startTS, commitTS)
logutil.BgLogger().Error("invalid transaction", zap.Error(err))
return 0, errors.Trace(err)
}
return commitTS, nil
}
// checkSchemaValid checks if the schema has changed. If tryAmend is set to true, the committer tries to amend
// this transaction using the related schema changes.
func (c *twoPhaseCommitter) checkSchemaValid(ctx context.Context, checkTS uint64, startInfoSchema SchemaVer,
tryAmend bool) (*RelatedSchemaChange, bool, error) {
failpoint.Inject("failCheckSchemaValid", func() {
logutil.Logger(ctx).Info("[failpoint] injected fail schema check",
zap.Uint64("txnStartTS", c.startTS))
err := errors.Errorf("mock check schema valid failure")
failpoint.Return(nil, false, err)
})
if c.txn.schemaLeaseChecker == nil {
if c.sessionID > 0 {
logutil.Logger(ctx).Warn("schemaLeaseChecker is not set for this transaction",
zap.Uint64("sessionID", c.sessionID),
zap.Uint64("startTS", c.startTS),
zap.Uint64("commitTS", checkTS))
}
return nil, false, nil
}
relatedChanges, err := c.txn.schemaLeaseChecker.CheckBySchemaVer(checkTS, startInfoSchema)
if err != nil {
if tryAmend && relatedChanges != nil && relatedChanges.Amendable && c.txn.schemaAmender != nil {
memAmended, amendErr := c.tryAmendTxn(ctx, startInfoSchema, relatedChanges)
if amendErr != nil {
logutil.BgLogger().Info("txn amend has failed", zap.Uint64("sessionID", c.sessionID),
zap.Uint64("startTS", c.startTS), zap.Error(amendErr))
return nil, false, err
}
logutil.Logger(ctx).Info("amend txn successfully",
zap.Uint64("sessionID", c.sessionID), zap.Uint64("txn startTS", c.startTS), zap.Bool("memAmended", memAmended),
zap.Uint64("checkTS", checkTS), zap.Int64("startInfoSchemaVer", startInfoSchema.SchemaMetaVersion()),
zap.Int64s("table ids", relatedChanges.PhyTblIDS), zap.Uint64s("action types", relatedChanges.ActionTypes))
return relatedChanges, memAmended, nil
}
return nil, false, errors.Trace(err)
}
return nil, false, nil
}
func (c *twoPhaseCommitter) calculateMaxCommitTS(ctx context.Context) error {
	// Amend the txn with the current time first, so we can make sure we have another SafeWindow of time to commit.
currentTS := oracle.EncodeTSO(int64(time.Since(c.txn.startTime)/time.Millisecond)) + c.startTS
_, _, err := c.checkSchemaValid(ctx, currentTS, c.txn.schemaVer, true)
if err != nil {
logutil.Logger(ctx).Info("Schema changed for async commit txn",
zap.Error(err),
zap.Uint64("startTS", c.startTS))
return errors.Trace(err)
}
safeWindow := config.GetGlobalConfig().TiKVClient.AsyncCommit.SafeWindow
maxCommitTS := oracle.EncodeTSO(int64(safeWindow/time.Millisecond)) + currentTS
logutil.BgLogger().Debug("calculate MaxCommitTS",
zap.Time("startTime", c.txn.startTime),
zap.Duration("safeWindow", safeWindow),
zap.Uint64("startTS", c.startTS),
zap.Uint64("maxCommitTS", maxCommitTS))
c.maxCommitTS = maxCommitTS
return nil
}
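// Worked example for calculateMaxCommitTS (a sketch): with SafeWindow = 2s and
// a txn that started 100ms ago, currentTS encodes the start physical time plus
// 100ms and maxCommitTS encodes it plus 2100ms. A prewrite whose calculated
// commit TS would exceed maxCommitTS forces a fallback from async commit/1PC.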
func (c *twoPhaseCommitter) shouldWriteBinlog() bool {
return c.binlog != nil
}
// TiKV recommends each RPC packet should be less than ~1MB. We keep each packet's
// Key+Value size below 16KB.
const txnCommitBatchSize = 16 * 1024
type batchMutations struct {
region RegionVerID
mutations CommitterMutations
isPrimary bool
}
type batched struct {
batches []batchMutations
primaryIdx int
primaryKey []byte
}
func newBatched(primaryKey []byte) *batched {
return &batched{
primaryIdx: -1,
primaryKey: primaryKey,
}
}
// appendBatchMutationsBySize appends mutations to b. It may split the keys to make
// sure each batch's size does not exceed the limit.
func (b *batched) appendBatchMutationsBySize(region RegionVerID, mutations CommitterMutations, sizeFn func(k, v []byte) int, limit int) {
failpoint.Inject("twoPCRequestBatchSizeLimit", func() {
limit = 1
})
var start, end int
for start = 0; start < mutations.Len(); start = end {
var size int
for end = start; end < mutations.Len() && size < limit; end++ {
var k, v []byte
k = mutations.GetKey(end)
v = mutations.GetValue(end)
size += sizeFn(k, v)
if b.primaryIdx < 0 && bytes.Equal(k, b.primaryKey) {
b.primaryIdx = len(b.batches)
}
}
b.batches = append(b.batches, batchMutations{
region: region,
mutations: mutations.Slice(start, end),
})
}
}
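// Worked example for appendBatchMutationsBySize (a sketch): with limit=16KiB
// and mutation sizes [10KiB, 10KiB, 1KiB], the inner loop checks size *before*
// appending, so the batches come out as [10KiB, 10KiB] and [1KiB]; a batch may
// therefore overshoot the limit by at most one mutation.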
func (b *batched) setPrimary() bool {
	// If the batches include the primary key, move its batch to the front.
if b.primaryIdx >= 0 {
if len(b.batches) > 0 {
b.batches[b.primaryIdx].isPrimary = true
b.batches[0], b.batches[b.primaryIdx] = b.batches[b.primaryIdx], b.batches[0]
b.primaryIdx = 0
}
return true
}
return false
}
func (b *batched) allBatches() []batchMutations {
return b.batches
}
// primaryBatch returns the batch containing the primary key.
// Precondition: `b.setPrimary() == true`
func (b *batched) primaryBatch() []batchMutations {
return b.batches[:1]
}
func (b *batched) forgetPrimary() {
if len(b.batches) == 0 {
return
}
b.batches = b.batches[1:]
}
// batchExecutor is a txn controller that provides rate-control utilities.
type batchExecutor struct {
	rateLim           int                  // number of concurrent workers
	rateLimiter       *util.RateLimit      // rate limiter for concurrency control; more strategies may be added
	committer         *twoPhaseCommitter   // there may be more committer types in the future
	action            twoPhaseCommitAction // the work action type
	backoffer         *Backoffer           // backoffer
	tokenWaitDuration time.Duration        // time spent waiting for a token
}
// newBatchExecutor creates a processor to handle concurrent batch work (prewrite/commit, etc.).
func newBatchExecutor(rateLimit int, committer *twoPhaseCommitter,
action twoPhaseCommitAction, backoffer *Backoffer) *batchExecutor {
return &batchExecutor{rateLimit, nil, committer,
action, backoffer, 1 * time.Millisecond}
}
// initUtils initializes batchExecutor-related policies, such as the rate-limit util.
func (batchExe *batchExecutor) initUtils() error {
	// Initialize the rateLimiter with the injected rate limit number.
batchExe.rateLimiter = util.NewRateLimit(batchExe.rateLim)
return nil
}
// startWorker concurrently does the work for each batch, subject to the rate limit.
func (batchExe *batchExecutor) startWorker(exitCh chan struct{}, ch chan error, batches []batchMutations) {
for idx, batch1 := range batches {
waitStart := time.Now()
if exit := batchExe.rateLimiter.GetToken(exitCh); !exit {
batchExe.tokenWaitDuration += time.Since(waitStart)
batch := batch1
go func() {
defer batchExe.rateLimiter.PutToken()
var singleBatchBackoffer *Backoffer
if _, ok := batchExe.action.(actionCommit); ok {
// Because the secondary batches of the commit actions are implemented to be
// committed asynchronously in background goroutines, we should not
// fork a child context and call cancel() while the foreground goroutine exits.
					// Otherwise the background goroutines would be canceled unexpectedly.
					// Here we make a new clone of the original backoffer for this goroutine
					// exclusively, to avoid a data race when the same backoffer is used
					// in concurrent goroutines.
singleBatchBackoffer = batchExe.backoffer.Clone()
} else {
var singleBatchCancel context.CancelFunc
singleBatchBackoffer, singleBatchCancel = batchExe.backoffer.Fork()
defer singleBatchCancel()
}
beforeSleep := singleBatchBackoffer.totalSleep
ch <- batchExe.action.handleSingleBatch(batchExe.committer, singleBatchBackoffer, batch)
commitDetail := batchExe.committer.getDetail()
				if commitDetail != nil { // lock operations of a pessimistic txn leave commitDetail nil
					if delta := singleBatchBackoffer.totalSleep - beforeSleep; delta > 0 {
						atomic.AddInt64(&commitDetail.CommitBackoffTime, int64(delta)*int64(time.Millisecond))
commitDetail.Mu.Lock()
commitDetail.Mu.BackoffTypes = append(commitDetail.Mu.BackoffTypes, singleBatchBackoffer.types...)
commitDetail.Mu.Unlock()
}
}
}()
} else {
logutil.Logger(batchExe.backoffer.ctx).Info("break startWorker",
zap.Stringer("action", batchExe.action), zap.Int("batch size", len(batches)),
zap.Int("index", idx))
break
}
}
}
// process starts the worker goroutine and collects the results.
func (batchExe *batchExecutor) process(batches []batchMutations) error {
var err error
err = batchExe.initUtils()
if err != nil {
logutil.Logger(batchExe.backoffer.ctx).Error("batchExecutor initUtils failed", zap.Error(err))
return err
}
	// For prewrite, stop sending other requests after receiving the first error.
var cancel context.CancelFunc
if _, ok := batchExe.action.(actionPrewrite); ok {
batchExe.backoffer, cancel = batchExe.backoffer.Fork()
defer cancel()
}
// concurrently do the work for each batch.
ch := make(chan error, len(batches))
exitCh := make(chan struct{})
go batchExe.startWorker(exitCh, ch, batches)
// check results
for i := 0; i < len(batches); i++ {
if e := <-ch; e != nil {
logutil.Logger(batchExe.backoffer.ctx).Debug("2PC doActionOnBatch failed",
zap.Uint64("session", batchExe.committer.sessionID),
zap.Stringer("action type", batchExe.action),
zap.Error(e),
zap.Uint64("txnStartTS", batchExe.committer.startTS))
// Cancel other requests and return the first error.
if cancel != nil {
logutil.Logger(batchExe.backoffer.ctx).Debug("2PC doActionOnBatch to cancel other actions",
zap.Uint64("session", batchExe.committer.sessionID),
zap.Stringer("action type", batchExe.action),
zap.Uint64("txnStartTS", batchExe.committer.startTS))
atomic.StoreUint32(&batchExe.committer.prewriteCancelled, 1)
cancel()
}
if err == nil {
err = e
}
}
}
close(exitCh)
metrics.TiKVTokenWaitDuration.Observe(float64(batchExe.tokenWaitDuration.Nanoseconds()))
return err
}
func (c *twoPhaseCommitter) setDetail(d *util.CommitDetails) {
atomic.StorePointer(&c.detail, unsafe.Pointer(d))
}
func (c *twoPhaseCommitter) getDetail() *util.CommitDetails {
return (*util.CommitDetails)(atomic.LoadPointer(&c.detail))
}
func (c *twoPhaseCommitter) setUndeterminedErr(err error) {
c.mu.Lock()
defer c.mu.Unlock()
c.mu.undeterminedErr = err
}
func (c *twoPhaseCommitter) getUndeterminedErr() error {
c.mu.RLock()
defer c.mu.RUnlock()
return c.mu.undeterminedErr
}
func (c *twoPhaseCommitter) mutationsOfKeys(keys [][]byte) CommitterMutations {
var res PlainMutations
for i := 0; i < c.mutations.Len(); i++ {
for _, key := range keys {
if bytes.Equal(c.mutations.GetKey(i), key) {
res.Push(c.mutations.GetOp(i), c.mutations.GetKey(i), c.mutations.GetValue(i), c.mutations.IsPessimisticLock(i))
break
}
}
}
return &res
}
| store/tikv/2pc.go | 1 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.010382532142102718,
0.00044745136983692646,
0.00015928478387650102,
0.0001703912712400779,
0.0012507502688094974
] |
{
"id": 1,
"code_window": [
"\t\ttxn.SetEnableAsyncCommit(val.(bool))\n",
"\tcase tikvstore.Enable1PC:\n",
"\t\ttxn.SetEnable1PC(val.(bool))\n",
"\tcase tikvstore.TxnScope:\n",
"\t\ttxn.SetScope(val.(string))\n",
"\tcase tikvstore.IsStalenessReadOnly:\n",
"\t\ttxn.KVTxn.GetSnapshot().SetIsStatenessReadOnly(val.(bool))\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcase tikvstore.GuaranteeLinearizability:\n",
"\t\ttxn.SetCausalConsistency(!val.(bool))\n"
],
"file_path": "store/driver/txn/txn_driver.go",
"type": "add",
"edit_start_line_idx": 164
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
)
// distsql metrics.
var (
DistSQLQueryHistogram = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "tidb",
Subsystem: "distsql",
Name: "handle_query_duration_seconds",
Help: "Bucketed histogram of processing time (s) of handled queries.",
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 29), // 0.5ms ~ 1.5days
}, []string{LblType, LblSQLType})
DistSQLScanKeysPartialHistogram = prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: "tidb",
Subsystem: "distsql",
Name: "scan_keys_partial_num",
Help: "number of scanned keys for each partial result.",
},
)
DistSQLScanKeysHistogram = prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: "tidb",
Subsystem: "distsql",
Name: "scan_keys_num",
Help: "number of scanned keys for each query.",
},
)
DistSQLPartialCountHistogram = prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: "tidb",
Subsystem: "distsql",
Name: "partial_num",
Help: "number of partial results for each query.",
},
)
DistSQLCoprCacheHistogram = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "tidb",
Subsystem: "distsql",
Name: "copr_cache",
Help: "coprocessor cache hit, evict and miss number",
Buckets: prometheus.ExponentialBuckets(1, 2, 16),
}, []string{LblType})
)
| metrics/distsql.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.00017803467926569283,
0.00017335405573248863,
0.0001697998377494514,
0.00017339065379928797,
0.0000030009500733285677
] |
{
"id": 1,
"code_window": [
"\t\ttxn.SetEnableAsyncCommit(val.(bool))\n",
"\tcase tikvstore.Enable1PC:\n",
"\t\ttxn.SetEnable1PC(val.(bool))\n",
"\tcase tikvstore.TxnScope:\n",
"\t\ttxn.SetScope(val.(string))\n",
"\tcase tikvstore.IsStalenessReadOnly:\n",
"\t\ttxn.KVTxn.GetSnapshot().SetIsStatenessReadOnly(val.(bool))\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcase tikvstore.GuaranteeLinearizability:\n",
"\t\ttxn.SetCausalConsistency(!val.(bool))\n"
],
"file_path": "store/driver/txn/txn_driver.go",
"type": "add",
"edit_start_line_idx": 164
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"context"
. "github.com/pingcap/check"
"github.com/pingcap/failpoint"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/types"
)
var _ = SerialSuites(&testPartitionSuite{})
type testPartitionSuite struct {
store kv.Storage
}
func (s *testPartitionSuite) SetUpSuite(c *C) {
s.store = testCreateStore(c, "test_store")
}
func (s *testPartitionSuite) TearDownSuite(c *C) {
err := s.store.Close()
c.Assert(err, IsNil)
}
func (s *testPartitionSuite) TestDropAndTruncatePartition(c *C) {
d := testNewDDLAndStart(
context.Background(),
c,
WithStore(s.store),
WithLease(testLease),
)
defer func() {
err := d.Stop()
c.Assert(err, IsNil)
}()
dbInfo := testSchemaInfo(c, d, "test_partition")
testCreateSchema(c, testNewContext(d), d, dbInfo)
	// Generate 5 partitions in the tableInfo.
tblInfo, partIDs := buildTableInfoWithPartition(c, d)
ctx := testNewContext(d)
testCreateTable(c, ctx, d, dbInfo, tblInfo)
testDropPartition(c, ctx, d, dbInfo, tblInfo, []string{"p0", "p1"})
testTruncatePartition(c, ctx, d, dbInfo, tblInfo, []int64{partIDs[3], partIDs[4]})
}
func buildTableInfoWithPartition(c *C, d *ddl) (*model.TableInfo, []int64) {
tbl := &model.TableInfo{
Name: model.NewCIStr("t"),
}
col := &model.ColumnInfo{
Name: model.NewCIStr("c"),
Offset: 0,
State: model.StatePublic,
FieldType: *types.NewFieldType(mysql.TypeLong),
ID: allocateColumnID(tbl),
}
genIDs, err := d.genGlobalIDs(1)
c.Assert(err, IsNil)
tbl.ID = genIDs[0]
tbl.Columns = []*model.ColumnInfo{col}
tbl.Charset = "utf8"
tbl.Collate = "utf8_bin"
partIDs, err := d.genGlobalIDs(5)
c.Assert(err, IsNil)
partInfo := &model.PartitionInfo{
Type: model.PartitionTypeRange,
Expr: tbl.Columns[0].Name.L,
Enable: true,
Definitions: []model.PartitionDefinition{
{
ID: partIDs[0],
Name: model.NewCIStr("p0"),
LessThan: []string{"100"},
},
{
ID: partIDs[1],
Name: model.NewCIStr("p1"),
LessThan: []string{"200"},
},
{
ID: partIDs[2],
Name: model.NewCIStr("p2"),
LessThan: []string{"300"},
},
{
ID: partIDs[3],
Name: model.NewCIStr("p3"),
LessThan: []string{"400"},
},
{
ID: partIDs[4],
Name: model.NewCIStr("p4"),
LessThan: []string{"500"},
},
},
}
tbl.Partition = partInfo
return tbl, partIDs
}
func buildDropPartitionJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, partNames []string) *model.Job {
return &model.Job{
SchemaID: dbInfo.ID,
TableID: tblInfo.ID,
Type: model.ActionDropTablePartition,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{partNames},
}
}
func testDropPartition(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, partNames []string) *model.Job {
job := buildDropPartitionJob(dbInfo, tblInfo, partNames)
err := d.doDDLJob(ctx, job)
c.Assert(err, IsNil)
v := getSchemaVer(c, ctx)
checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo})
return job
}
func buildTruncatePartitionJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, pids []int64) *model.Job {
return &model.Job{
SchemaID: dbInfo.ID,
TableID: tblInfo.ID,
Type: model.ActionTruncateTablePartition,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{pids},
}
}
func testTruncatePartition(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, pids []int64) *model.Job {
job := buildTruncatePartitionJob(dbInfo, tblInfo, pids)
err := d.doDDLJob(ctx, job)
c.Assert(err, IsNil)
v := getSchemaVer(c, ctx)
checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo})
return job
}
func testAddPartition(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo) error {
ids, err := d.genGlobalIDs(1)
c.Assert(err, IsNil)
partitionInfo := &model.PartitionInfo{
Type: model.PartitionTypeRange,
Expr: tblInfo.Columns[0].Name.L,
Enable: true,
Definitions: []model.PartitionDefinition{
{
ID: ids[0],
Name: model.NewCIStr("p2"),
LessThan: []string{"300"},
},
},
}
addPartitionJob := &model.Job{
SchemaID: dbInfo.ID,
TableID: tblInfo.ID,
Type: model.ActionAddTablePartition,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{partitionInfo},
}
return d.doDDLJob(ctx, addPartitionJob)
}
func (s *testPartitionSuite) TestAddPartitionReplicaBiggerThanTiFlashStores(c *C) {
d := testNewDDLAndStart(
context.Background(),
c,
WithStore(s.store),
WithLease(testLease),
)
defer func() {
err := d.Stop()
c.Assert(err, IsNil)
}()
dbInfo := testSchemaInfo(c, d, "test_partition2")
testCreateSchema(c, testNewContext(d), d, dbInfo)
// Build a tableInfo with replica count = 1 while there is no real tiFlash store.
tblInfo := buildTableInfoWithReplicaInfo(c, d)
ctx := testNewContext(d)
testCreateTable(c, ctx, d, dbInfo, tblInfo)
err := testAddPartition(c, ctx, d, dbInfo, tblInfo)
// Since there is no real TiFlash store (less than replica count), adding a partition will error here.
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[ddl:-1][ddl] the tiflash replica count: 1 should be less than the total tiflash server count: 0")
	// Test that `add partition` waiting for the TiFlash replica can exit when its retry count exceeds the limit.
originErrCountLimit := variable.GetDDLErrorCountLimit()
variable.SetDDLErrorCountLimit(3)
defer func() {
variable.SetDDLErrorCountLimit(originErrCountLimit)
}()
c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/mockWaitTiFlashReplica", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/mockWaitTiFlashReplica"), IsNil)
}()
err = testAddPartition(c, ctx, d, dbInfo, tblInfo)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[ddl:-1]DDL job rollback, error msg: [ddl] add partition wait for tiflash replica to complete")
}
func buildTableInfoWithReplicaInfo(c *C, d *ddl) *model.TableInfo {
tbl := &model.TableInfo{
Name: model.NewCIStr("t1"),
}
col := &model.ColumnInfo{
Name: model.NewCIStr("c"),
Offset: 0,
State: model.StatePublic,
FieldType: *types.NewFieldType(mysql.TypeLong),
ID: allocateColumnID(tbl),
}
genIDs, err := d.genGlobalIDs(1)
c.Assert(err, IsNil)
tbl.ID = genIDs[0]
tbl.Columns = []*model.ColumnInfo{col}
tbl.Charset = "utf8"
tbl.Collate = "utf8_bin"
tbl.TiFlashReplica = &model.TiFlashReplicaInfo{
Count: 1,
Available: true,
}
partIDs, err := d.genGlobalIDs(2)
c.Assert(err, IsNil)
partInfo := &model.PartitionInfo{
Type: model.PartitionTypeRange,
Expr: tbl.Columns[0].Name.L,
Enable: true,
Definitions: []model.PartitionDefinition{
{
ID: partIDs[0],
Name: model.NewCIStr("p0"),
LessThan: []string{"100"},
},
{
ID: partIDs[1],
Name: model.NewCIStr("p1"),
LessThan: []string{"200"},
},
},
}
tbl.Partition = partInfo
return tbl
}
| ddl/partition_test.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.00017906090943142772,
0.00017319137987215072,
0.0001644380099605769,
0.00017349647532682866,
0.0000035210389341955306
] |
{
"id": 1,
"code_window": [
"\t\ttxn.SetEnableAsyncCommit(val.(bool))\n",
"\tcase tikvstore.Enable1PC:\n",
"\t\ttxn.SetEnable1PC(val.(bool))\n",
"\tcase tikvstore.TxnScope:\n",
"\t\ttxn.SetScope(val.(string))\n",
"\tcase tikvstore.IsStalenessReadOnly:\n",
"\t\ttxn.KVTxn.GetSnapshot().SetIsStatenessReadOnly(val.(bool))\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcase tikvstore.GuaranteeLinearizability:\n",
"\t\ttxn.SetCausalConsistency(!val.(bool))\n"
],
"file_path": "store/driver/txn/txn_driver.go",
"type": "add",
"edit_start_line_idx": 164
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sem
import (
"testing"
"github.com/pingcap/parser/mysql"
. "github.com/pingcap/check"
)
func TestT(t *testing.T) {
TestingT(t)
}
var _ = Suite(&testSecurity{})
type testSecurity struct{}
func (s *testSecurity) TestInvisibleSchema(c *C) {
c.Assert(IsInvisibleSchema(metricsSchema), IsTrue)
c.Assert(IsInvisibleSchema("METRICS_ScHEma"), IsTrue)
c.Assert(IsInvisibleSchema("mysql"), IsFalse)
c.Assert(IsInvisibleSchema(informationSchema), IsFalse)
c.Assert(IsInvisibleSchema("Bogusname"), IsFalse)
}
func (s *testSecurity) TestIsInvisibleTable(c *C) {
mysqlTbls := []string{exprPushdownBlacklist, gcDeleteRange, gcDeleteRangeDone, optRuleBlacklist, tidb, globalVariables}
infoSchemaTbls := []string{clusterConfig, clusterHardware, clusterLoad, clusterLog, clusterSystemInfo, inspectionResult,
inspectionRules, inspectionSummary, metricsSummary, metricsSummaryByLabel, metricsTables, tidbHotRegions}
	perfSchemaTbls := []string{pdProfileAllocs, pdProfileBlock, pdProfileCPU, pdProfileGoroutines, pdProfileMemory,
pdProfileMutex, tidbProfileAllocs, tidbProfileBlock, tidbProfileCPU, tidbProfileGoroutines,
tidbProfileMemory, tidbProfileMutex, tikvProfileCPU}
for _, tbl := range mysqlTbls {
c.Assert(IsInvisibleTable(mysql.SystemDB, tbl), IsTrue)
}
for _, tbl := range infoSchemaTbls {
c.Assert(IsInvisibleTable(informationSchema, tbl), IsTrue)
}
	for _, tbl := range perfSchemaTbls {
c.Assert(IsInvisibleTable(performanceSchema, tbl), IsTrue)
}
c.Assert(IsInvisibleTable(metricsSchema, "acdc"), IsTrue)
c.Assert(IsInvisibleTable(metricsSchema, "fdsgfd"), IsTrue)
c.Assert(IsInvisibleTable("test", "t1"), IsFalse)
}
func (s *testSecurity) TestIsRestrictedPrivilege(c *C) {
c.Assert(IsRestrictedPrivilege("RESTRICTED_TABLES_ADMIN"), IsTrue)
c.Assert(IsRestrictedPrivilege("RESTRICTED_STATUS_VARIABLES_ADMIN"), IsTrue)
c.Assert(IsRestrictedPrivilege("CONNECTION_ADMIN"), IsFalse)
c.Assert(IsRestrictedPrivilege("BACKUP_ADMIN"), IsFalse)
c.Assert(IsRestrictedPrivilege("aa"), IsFalse)
}
func (s *testSecurity) TestIsInvisibleStatusVar(c *C) {
c.Assert(IsInvisibleStatusVar(tidbGCLeaderDesc), IsTrue)
c.Assert(IsInvisibleStatusVar("server_id"), IsFalse)
c.Assert(IsInvisibleStatusVar("ddl_schema_version"), IsFalse)
c.Assert(IsInvisibleStatusVar("Ssl_version"), IsFalse)
}
| util/sem/sem_test.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.00030291450093500316,
0.00018843816360458732,
0.00016510557907167822,
0.00017481963732279837,
0.0000434783760283608
] |
{
"id": 2,
"code_window": [
"\t}\n",
"}\n",
"\n",
"func (txn *tikvTxn) GetOption(opt int) interface{} {\n",
"\tswitch opt {\n",
"\tcase tikvstore.TxnScope:\n",
"\t\treturn txn.KVTxn.GetScope()\n",
"\tdefault:\n",
"\t\treturn txn.KVTxn.GetOption(opt)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcase tikvstore.GuaranteeLinearizability:\n",
"\t\treturn !txn.KVTxn.IsCasualConsistency()\n"
],
"file_path": "store/driver/txn/txn_driver.go",
"type": "add",
"edit_start_line_idx": 177
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv_test
import (
"bytes"
"context"
"fmt"
"math"
"sync/atomic"
"testing"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/store/mockstore/unistore"
"github.com/pingcap/tidb/store/tikv"
tikverr "github.com/pingcap/tidb/store/tikv/error"
"github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/store/tikv/util"
)
func TestT(t *testing.T) {
CustomVerboseFlag = true
TestingT(t)
}
// testAsyncCommitCommon holds the common parts shared by both
// testAsyncCommitSuite and testAsyncCommitFailSuite.
type testAsyncCommitCommon struct {
cluster cluster.Cluster
store *tikv.KVStore
}
func (s *testAsyncCommitCommon) setUpTest(c *C) {
if *WithTiKV {
s.store = NewTestStore(c)
return
}
client, pdClient, cluster, err := unistore.New("")
c.Assert(err, IsNil)
unistore.BootstrapWithSingleStore(cluster)
s.cluster = cluster
store, err := tikv.NewTestTiKVStore(client, pdClient, nil, nil, 0)
c.Assert(err, IsNil)
s.store = store
}
func (s *testAsyncCommitCommon) putAlphabets(c *C, enableAsyncCommit bool) {
for ch := byte('a'); ch <= byte('z'); ch++ {
s.putKV(c, []byte{ch}, []byte{ch}, enableAsyncCommit)
}
}
func (s *testAsyncCommitCommon) putKV(c *C, key, value []byte, enableAsyncCommit bool) (uint64, uint64) {
txn := s.beginAsyncCommit(c)
err := txn.Set(key, value)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
return txn.StartTS(), txn.GetCommitTS()
}
func (s *testAsyncCommitCommon) mustGetFromTxn(c *C, txn tikv.TxnProbe, key, expectedValue []byte) {
v, err := txn.Get(context.Background(), key)
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, expectedValue)
}
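// mustGetLock reads the key at the current timestamp so the read hits the
// uncommitted write, then extracts the lock from the returned key error.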
func (s *testAsyncCommitCommon) mustGetLock(c *C, key []byte) *tikv.Lock {
ver, err := s.store.CurrentTimestamp(oracle.GlobalTxnScope)
c.Assert(err, IsNil)
req := tikvrpc.NewRequest(tikvrpc.CmdGet, &kvrpcpb.GetRequest{
Key: key,
Version: ver,
})
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, key)
c.Assert(err, IsNil)
resp, err := s.store.SendReq(bo, req, loc.Region, time.Second*10)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
keyErr := resp.Resp.(*kvrpcpb.GetResponse).GetError()
c.Assert(keyErr, NotNil)
var lockutil tikv.LockProbe
lock, err := lockutil.ExtractLockFromKeyErr(keyErr)
c.Assert(err, IsNil)
return lock
}
func (s *testAsyncCommitCommon) mustPointGet(c *C, key, expectedValue []byte) {
snap := s.store.GetSnapshot(math.MaxUint64)
value, err := snap.Get(context.Background(), key)
c.Assert(err, IsNil)
c.Assert(value, BytesEquals, expectedValue)
}
func (s *testAsyncCommitCommon) mustGetFromSnapshot(c *C, version uint64, key, expectedValue []byte) {
snap := s.store.GetSnapshot(version)
value, err := snap.Get(context.Background(), key)
c.Assert(err, IsNil)
c.Assert(value, BytesEquals, expectedValue)
}
func (s *testAsyncCommitCommon) mustGetNoneFromSnapshot(c *C, version uint64, key []byte) {
snap := s.store.GetSnapshot(version)
_, err := snap.Get(context.Background(), key)
c.Assert(errors.Cause(err), Equals, tikverr.ErrNotExist)
}
func (s *testAsyncCommitCommon) beginAsyncCommitWithLinearizability(c *C) tikv.TxnProbe {
txn := s.beginAsyncCommit(c)
txn.SetOption(kv.GuaranteeLinearizability, true)
return txn
}
func (s *testAsyncCommitCommon) beginAsyncCommit(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.SetEnableAsyncCommit(true)
return tikv.TxnProbe{KVTxn: txn}
}
func (s *testAsyncCommitCommon) begin(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
return tikv.TxnProbe{KVTxn: txn}
}
type testAsyncCommitSuite struct {
OneByOneSuite
testAsyncCommitCommon
bo *tikv.Backoffer
}
var _ = SerialSuites(&testAsyncCommitSuite{})
func (s *testAsyncCommitSuite) SetUpTest(c *C) {
s.testAsyncCommitCommon.setUpTest(c)
s.bo = tikv.NewBackofferWithVars(context.Background(), 5000, nil)
}
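// lockKeysWithAsyncCommit prewrites all the given keys with async commit enabled
// and, if commitPrimary is set, commits only the primary key, leaving the
// secondary keys locked.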
func (s *testAsyncCommitSuite) lockKeysWithAsyncCommit(c *C, keys, values [][]byte, primaryKey, primaryValue []byte, commitPrimary bool) (uint64, uint64) {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.SetEnableAsyncCommit(true)
for i, k := range keys {
if len(values[i]) > 0 {
err = txn.Set(k, values[i])
} else {
err = txn.Delete(k)
}
c.Assert(err, IsNil)
}
if len(primaryValue) > 0 {
err = txn.Set(primaryKey, primaryValue)
} else {
err = txn.Delete(primaryKey)
}
c.Assert(err, IsNil)
txnProbe := tikv.TxnProbe{KVTxn: txn}
tpc, err := txnProbe.NewCommitter(0)
c.Assert(err, IsNil)
tpc.SetPrimaryKey(primaryKey)
ctx := context.Background()
err = tpc.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
if commitPrimary {
commitTS, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
tpc.SetCommitTS(commitTS)
err = tpc.CommitMutations(ctx)
c.Assert(err, IsNil)
}
return txn.StartTS(), tpc.GetCommitTS()
}
func (s *testAsyncCommitSuite) TestCheckSecondaries(c *C) {
// This test doesn't support tikv mode.
if *WithTiKV {
return
}
s.putAlphabets(c, true)
loc, err := s.store.GetRegionCache().LocateKey(s.bo, []byte("a"))
c.Assert(err, IsNil)
newRegionID, peerID := s.cluster.AllocID(), s.cluster.AllocID()
s.cluster.Split(loc.Region.GetID(), newRegionID, []byte("e"), []uint64{peerID}, peerID)
s.store.GetRegionCache().InvalidateCachedRegion(loc.Region)
// No locks to check, only primary key is locked, should be successful.
s.lockKeysWithAsyncCommit(c, [][]byte{}, [][]byte{}, []byte("z"), []byte("z"), false)
lock := s.mustGetLock(c, []byte("z"))
lock.UseAsyncCommit = true
ts, err := s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
var lockutil tikv.LockProbe
status := lockutil.NewLockStatus(nil, true, ts)
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
err = resolver.ResolveLockAsync(s.bo, lock, status)
c.Assert(err, IsNil)
currentTS, err := s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
status, err = resolver.GetTxnStatus(s.bo, lock.TxnID, []byte("z"), currentTS, currentTS, true, false, nil)
c.Assert(err, IsNil)
c.Assert(status.IsCommitted(), IsTrue)
c.Assert(status.CommitTS(), Equals, ts)
// One key is committed (i), one key is locked (a). Should get committed.
ts, err = s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
commitTs := ts + 10
gotCheckA := int64(0)
gotCheckB := int64(0)
gotResolve := int64(0)
gotOther := int64(0)
mock := mockResolveClient{
inner: s.store.GetTiKVClient(),
onCheckSecondaries: func(req *kvrpcpb.CheckSecondaryLocksRequest) (*tikvrpc.Response, error) {
if req.StartVersion != ts {
return nil, errors.Errorf("Bad start version: %d, expected: %d", req.StartVersion, ts)
}
var resp kvrpcpb.CheckSecondaryLocksResponse
for _, k := range req.Keys {
if bytes.Equal(k, []byte("a")) {
atomic.StoreInt64(&gotCheckA, 1)
resp = kvrpcpb.CheckSecondaryLocksResponse{
Locks: []*kvrpcpb.LockInfo{{Key: []byte("a"), PrimaryLock: []byte("z"), LockVersion: ts, UseAsyncCommit: true}},
CommitTs: commitTs,
}
} else if bytes.Equal(k, []byte("i")) {
atomic.StoreInt64(&gotCheckB, 1)
resp = kvrpcpb.CheckSecondaryLocksResponse{
Locks: []*kvrpcpb.LockInfo{},
CommitTs: commitTs,
}
} else {
fmt.Printf("Got other key: %s\n", k)
atomic.StoreInt64(&gotOther, 1)
}
}
return &tikvrpc.Response{Resp: &resp}, nil
},
onResolveLock: func(req *kvrpcpb.ResolveLockRequest) (*tikvrpc.Response, error) {
if req.StartVersion != ts {
return nil, errors.Errorf("Bad start version: %d, expected: %d", req.StartVersion, ts)
}
if req.CommitVersion != commitTs {
return nil, errors.Errorf("Bad commit version: %d, expected: %d", req.CommitVersion, commitTs)
}
for _, k := range req.Keys {
if bytes.Equal(k, []byte("a")) || bytes.Equal(k, []byte("z")) {
atomic.StoreInt64(&gotResolve, 1)
} else {
atomic.StoreInt64(&gotOther, 1)
}
}
resp := kvrpcpb.ResolveLockResponse{}
return &tikvrpc.Response{Resp: &resp}, nil
},
}
s.store.SetTiKVClient(&mock)
status = lockutil.NewLockStatus([][]byte{[]byte("a"), []byte("i")}, true, 0)
lock = &tikv.Lock{
Key: []byte("a"),
Primary: []byte("z"),
TxnID: ts,
LockType: kvrpcpb.Op_Put,
UseAsyncCommit: true,
MinCommitTS: ts + 5,
}
_ = s.beginAsyncCommit(c)
err = resolver.ResolveLockAsync(s.bo, lock, status)
c.Assert(err, IsNil)
c.Assert(gotCheckA, Equals, int64(1))
c.Assert(gotCheckB, Equals, int64(1))
c.Assert(gotOther, Equals, int64(0))
c.Assert(gotResolve, Equals, int64(1))
// One key has been rolled back (b), one is locked (a). Should be rolled back.
ts, err = s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
commitTs = ts + 10
gotCheckA = int64(0)
gotCheckB = int64(0)
gotResolve = int64(0)
gotOther = int64(0)
mock.onResolveLock = func(req *kvrpcpb.ResolveLockRequest) (*tikvrpc.Response, error) {
if req.StartVersion != ts {
return nil, errors.Errorf("Bad start version: %d, expected: %d", req.StartVersion, ts)
}
if req.CommitVersion != commitTs {
return nil, errors.Errorf("Bad commit version: %d, expected: 0", req.CommitVersion)
}
for _, k := range req.Keys {
if bytes.Equal(k, []byte("a")) || bytes.Equal(k, []byte("z")) {
atomic.StoreInt64(&gotResolve, 1)
} else {
atomic.StoreInt64(&gotOther, 1)
}
}
resp := kvrpcpb.ResolveLockResponse{}
return &tikvrpc.Response{Resp: &resp}, nil
}
lock.TxnID = ts
lock.MinCommitTS = ts + 5
err = resolver.ResolveLockAsync(s.bo, lock, status)
c.Assert(err, IsNil)
c.Assert(gotCheckA, Equals, int64(1))
c.Assert(gotCheckB, Equals, int64(1))
c.Assert(gotResolve, Equals, int64(1))
c.Assert(gotOther, Equals, int64(0))
}
func (s *testAsyncCommitSuite) TestRepeatableRead(c *C) {
var sessionID uint64 = 0
test := func(isPessimistic bool) {
s.putKV(c, []byte("k1"), []byte("v1"), true)
sessionID++
ctx := context.WithValue(context.Background(), util.SessionID, sessionID)
txn1 := s.beginAsyncCommit(c)
txn1.SetPessimistic(isPessimistic)
s.mustGetFromTxn(c, txn1, []byte("k1"), []byte("v1"))
txn1.Set([]byte("k1"), []byte("v2"))
for i := 0; i < 20; i++ {
_, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
}
txn2 := s.beginAsyncCommit(c)
s.mustGetFromTxn(c, txn2, []byte("k1"), []byte("v1"))
err := txn1.Commit(ctx)
c.Assert(err, IsNil)
// Check txn1 is committed in async commit.
c.Assert(txn1.IsAsyncCommit(), IsTrue)
s.mustGetFromTxn(c, txn2, []byte("k1"), []byte("v1"))
err = txn2.Rollback()
c.Assert(err, IsNil)
txn3 := s.beginAsyncCommit(c)
s.mustGetFromTxn(c, txn3, []byte("k1"), []byte("v2"))
err = txn3.Rollback()
c.Assert(err, IsNil)
}
test(false)
test(true)
}
// It's just a simple validation of linearizability.
// Extra tests are needed to test this feature with the control of the TiKV cluster.
func (s *testAsyncCommitSuite) TestAsyncCommitLinearizability(c *C) {
t1 := s.beginAsyncCommitWithLinearizability(c)
t2 := s.beginAsyncCommitWithLinearizability(c)
err := t1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = t2.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
ctx := context.WithValue(context.Background(), util.SessionID, uint64(1))
// t2 commits earlier than t1
err = t2.Commit(ctx)
c.Assert(err, IsNil)
err = t1.Commit(ctx)
c.Assert(err, IsNil)
commitTS1 := t1.GetCommitTS()
commitTS2 := t2.GetCommitTS()
c.Assert(commitTS2, Less, commitTS1)
}
// TestAsyncCommitWithMultiDC tests that async commit can only be enabled in global transactions
func (s *testAsyncCommitSuite) TestAsyncCommitWithMultiDC(c *C) {
// It requires setting placement rules to run with TiKV
if *WithTiKV {
return
}
localTxn := s.beginAsyncCommit(c)
err := localTxn.Set([]byte("a"), []byte("a1"))
localTxn.SetScope("bj")
c.Assert(err, IsNil)
ctx := context.WithValue(context.Background(), util.SessionID, uint64(1))
err = localTxn.Commit(ctx)
c.Assert(err, IsNil)
c.Assert(localTxn.IsAsyncCommit(), IsFalse)
globalTxn := s.beginAsyncCommit(c)
err = globalTxn.Set([]byte("b"), []byte("b1"))
globalTxn.SetScope(oracle.GlobalTxnScope)
c.Assert(err, IsNil)
err = globalTxn.Commit(ctx)
c.Assert(err, IsNil)
c.Assert(globalTxn.IsAsyncCommit(), IsTrue)
}
func (s *testAsyncCommitSuite) TestResolveTxnFallbackFromAsyncCommit(c *C) {
keys := [][]byte{[]byte("k0"), []byte("k1")}
values := [][]byte{[]byte("v00"), []byte("v10")}
initTest := func() tikv.CommitterProbe {
t0 := s.begin(c)
err := t0.Set(keys[0], values[0])
c.Assert(err, IsNil)
err = t0.Set(keys[1], values[1])
c.Assert(err, IsNil)
err = t0.Commit(context.Background())
c.Assert(err, IsNil)
t1 := s.beginAsyncCommit(c)
err = t1.Set(keys[0], []byte("v01"))
c.Assert(err, IsNil)
err = t1.Set(keys[1], []byte("v11"))
c.Assert(err, IsNil)
committer, err := t1.NewCommitter(1)
c.Assert(err, IsNil)
committer.SetLockTTL(1)
committer.SetUseAsyncCommit()
return committer
}
prewriteKey := func(committer tikv.CommitterProbe, idx int, fallback bool) {
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, keys[idx])
c.Assert(err, IsNil)
req := committer.BuildPrewriteRequest(loc.Region.GetID(), loc.Region.GetConfVer(), loc.Region.GetVer(),
committer.GetMutations().Slice(idx, idx+1), 1)
if fallback {
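// A MaxCommitTs of 1 is below any feasible commit ts, so TiKV cannot take
// the async-commit path for this mutation and falls back to normal 2PC.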
req.Req.(*kvrpcpb.PrewriteRequest).MaxCommitTs = 1
}
resp, err := s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
}
readKey := func(idx int) {
t2 := s.begin(c)
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
val, err := t2.Get(ctx, keys[idx])
c.Assert(err, IsNil)
c.Assert(val, DeepEquals, values[idx])
}
// Case 1: Fallback primary, read primary
committer := initTest()
prewriteKey(committer, 0, true)
prewriteKey(committer, 1, false)
readKey(0)
readKey(1)
// Case 2: Fallback primary, read secondary
committer = initTest()
prewriteKey(committer, 0, true)
prewriteKey(committer, 1, false)
readKey(1)
readKey(0)
// Case 3: Fallback secondary, read primary
committer = initTest()
prewriteKey(committer, 0, false)
prewriteKey(committer, 1, true)
readKey(0)
readKey(1)
// Case 4: Fallback secondary, read secondary
committer = initTest()
prewriteKey(committer, 0, false)
prewriteKey(committer, 1, true)
readKey(1)
readKey(0)
// Case 5: Fallback both, read primary
committer = initTest()
prewriteKey(committer, 0, true)
prewriteKey(committer, 1, true)
readKey(0)
readKey(1)
// Case 6: Fallback both, read secondary
committer = initTest()
prewriteKey(committer, 0, true)
prewriteKey(committer, 1, true)
readKey(1)
readKey(0)
}
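// mockResolveClient wraps a tikv.Client so tests can intercept CheckSecondaryLocks
// and ResolveLock RPCs; all other requests pass through to the inner client.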
type mockResolveClient struct {
inner tikv.Client
onResolveLock func(*kvrpcpb.ResolveLockRequest) (*tikvrpc.Response, error)
onCheckSecondaries func(*kvrpcpb.CheckSecondaryLocksRequest) (*tikvrpc.Response, error)
}
func (m *mockResolveClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
// Intercept check secondary locks and resolve lock messages if the callback is non-nil.
// If the callback returns (nil, nil), forward to the inner client.
if cr, ok := req.Req.(*kvrpcpb.CheckSecondaryLocksRequest); ok && m.onCheckSecondaries != nil {
result, err := m.onCheckSecondaries(cr)
if result != nil || err != nil {
return result, err
}
} else if rr, ok := req.Req.(*kvrpcpb.ResolveLockRequest); ok && m.onResolveLock != nil {
result, err := m.onResolveLock(rr)
if result != nil || err != nil {
return result, err
}
}
return m.inner.SendRequest(ctx, addr, req, timeout)
}
func (m *mockResolveClient) Close() error {
return m.inner.Close()
}
| store/tikv/tests/async_commit_test.go | 1 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.9984310269355774,
0.19682219624519348,
0.00016755492833908647,
0.001384414848871529,
0.358079731464386
] |
{
"id": 2,
"code_window": [
"\t}\n",
"}\n",
"\n",
"func (txn *tikvTxn) GetOption(opt int) interface{} {\n",
"\tswitch opt {\n",
"\tcase tikvstore.TxnScope:\n",
"\t\treturn txn.KVTxn.GetScope()\n",
"\tdefault:\n",
"\t\treturn txn.KVTxn.GetOption(opt)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcase tikvstore.GuaranteeLinearizability:\n",
"\t\treturn !txn.KVTxn.IsCasualConsistency()\n"
],
"file_path": "store/driver/txn/txn_driver.go",
"type": "add",
"edit_start_line_idx": 177
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package implementation
import (
"math"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/planner/memo"
)
// SortImpl implementation of PhysicalSort.
type SortImpl struct {
baseImpl
}
// NewSortImpl creates a new sort Implementation.
func NewSortImpl(sort *plannercore.PhysicalSort) *SortImpl {
return &SortImpl{baseImpl{plan: sort}}
}
// CalcCost calculates the cost of the sort Implementation.
func (impl *SortImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 {
cnt := math.Min(children[0].GetPlan().Stats().RowCount, impl.plan.GetChildReqProps(0).ExpectedCnt)
sort := impl.plan.(*plannercore.PhysicalSort)
impl.cost = sort.GetCost(cnt, children[0].GetPlan().Schema()) + children[0].GetCost()
return impl.cost
}
// AttachChildren implements Implementation AttachChildren interface.
func (impl *SortImpl) AttachChildren(children ...memo.Implementation) memo.Implementation {
sort := impl.plan.(*plannercore.PhysicalSort)
sort.SetChildren(children[0].GetPlan())
// When the Sort orderByItems contain ScalarFunction, we need
// to inject two Projections below and above the Sort.
impl.plan = plannercore.InjectProjBelowSort(sort, sort.ByItems)
return impl
}
// NominalSortImpl is the implementation of NominalSort.
type NominalSortImpl struct {
baseImpl
}
// AttachChildren implements Implementation AttachChildren interface.
func (impl *NominalSortImpl) AttachChildren(children ...memo.Implementation) memo.Implementation {
return children[0]
}
// NewNominalSortImpl creates a new NominalSort Implementation.
func NewNominalSortImpl(sort *plannercore.NominalSort) *NominalSortImpl {
return &NominalSortImpl{baseImpl{plan: sort}}
}
| planner/implementation/sort.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.0011422841344028711,
0.0004860006447415799,
0.00016833640984259546,
0.00021469152125064284,
0.00038316816790029407
] |
{
"id": 2,
"code_window": [
"\t}\n",
"}\n",
"\n",
"func (txn *tikvTxn) GetOption(opt int) interface{} {\n",
"\tswitch opt {\n",
"\tcase tikvstore.TxnScope:\n",
"\t\treturn txn.KVTxn.GetScope()\n",
"\tdefault:\n",
"\t\treturn txn.KVTxn.GetOption(opt)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcase tikvstore.GuaranteeLinearizability:\n",
"\t\treturn !txn.KVTxn.IsCasualConsistency()\n"
],
"file_path": "store/driver/txn/txn_driver.go",
"type": "add",
"edit_start_line_idx": 177
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"context"
"fmt"
"math"
"sort"
"strconv"
"strings"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/infoschema"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/set"
"github.com/pingcap/tidb/util/sqlexec"
)
type (
// inspectionResult represents an abnormal diagnosis result
inspectionResult struct {
tp string
instance string
statusAddress string
// represents the diagnostics item, e.g: `ddl.lease` `raftstore.cpuusage`
item string
// diagnosis result value base on current cluster status
actual string
expected string
severity string
detail string
// degree only used for sort.
degree float64
}
inspectionName string
inspectionFilter struct {
set set.StringSet
timeRange plannercore.QueryTimeRange
}
inspectionRule interface {
name() string
inspect(ctx context.Context, sctx sessionctx.Context, filter inspectionFilter) []inspectionResult
}
)
func (n inspectionName) name() string {
return string(n)
}
func (f inspectionFilter) enable(name string) bool {
return len(f.set) == 0 || f.set.Exist(name)
}
type (
// configInspection is used to check whether the same configuration item has
// different values on different instances in the cluster
configInspection struct{ inspectionName }
// versionInspection is used to check whether the same component has different
// versions in the cluster
versionInspection struct{ inspectionName }
// nodeLoadInspection is used to check whether the node load of memory/disk/cpu
// has reached a high threshold
nodeLoadInspection struct{ inspectionName }
// criticalErrorInspection is used to check whether any critical errors
// occurred in the past
criticalErrorInspection struct{ inspectionName }
// thresholdCheckInspection is used to check some threshold value, like CPU usage, leader count change.
thresholdCheckInspection struct{ inspectionName }
)
var inspectionRules = []inspectionRule{
&configInspection{inspectionName: "config"},
&versionInspection{inspectionName: "version"},
&nodeLoadInspection{inspectionName: "node-load"},
&criticalErrorInspection{inspectionName: "critical-error"},
&thresholdCheckInspection{inspectionName: "threshold-check"},
}
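// inspectionResultRetriever runs every registered inspection rule over the requested
// time range and flattens the results into rows of the INSPECTION_RESULT memory table.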
type inspectionResultRetriever struct {
dummyCloser
retrieved bool
extractor *plannercore.InspectionResultTableExtractor
timeRange plannercore.QueryTimeRange
instanceToStatusAddress map[string]string
statusToInstanceAddress map[string]string
}
func (e *inspectionResultRetriever) retrieve(ctx context.Context, sctx sessionctx.Context) ([][]types.Datum, error) {
if e.retrieved || e.extractor.SkipInspection {
return nil, nil
}
e.retrieved = true
// Some data of cluster-level memory tables is retrieved many times by different inspection rules,
// and retrieving some of it is expensive. We use the `TableSnapshot` to cache that data
// lazily and to provide a consistent view of the inspection tables across all inspection rules.
// All cached snapshots are released at the end of retrieving.
sctx.GetSessionVars().InspectionTableCache = map[string]variable.TableSnapshot{}
defer func() { sctx.GetSessionVars().InspectionTableCache = nil }()
failpoint.InjectContext(ctx, "mockMergeMockInspectionTables", func() {
// Merge mock snapshots injected from failpoint for test purpose
mockTables, ok := ctx.Value("__mockInspectionTables").(map[string]variable.TableSnapshot)
if ok {
for name, snap := range mockTables {
sctx.GetSessionVars().InspectionTableCache[strings.ToLower(name)] = snap
}
}
})
if e.instanceToStatusAddress == nil {
// Get cluster info.
e.instanceToStatusAddress = make(map[string]string)
e.statusToInstanceAddress = make(map[string]string)
var rows []chunk.Row
exec := sctx.(sqlexec.RestrictedSQLExecutor)
stmt, err := exec.ParseWithParams(ctx, "select instance,status_address from information_schema.cluster_info;")
if err == nil {
rows, _, err = exec.ExecRestrictedStmt(ctx, stmt)
}
if err != nil {
sctx.GetSessionVars().StmtCtx.AppendWarning(fmt.Errorf("get cluster info failed: %v", err))
}
for _, row := range rows {
if row.Len() < 2 {
continue
}
e.instanceToStatusAddress[row.GetString(0)] = row.GetString(1)
e.statusToInstanceAddress[row.GetString(1)] = row.GetString(0)
}
}
rules := inspectionFilter{set: e.extractor.Rules}
items := inspectionFilter{set: e.extractor.Items, timeRange: e.timeRange}
var finalRows [][]types.Datum
for _, r := range inspectionRules {
name := r.name()
if !rules.enable(name) {
continue
}
results := r.inspect(ctx, sctx, items)
if len(results) == 0 {
continue
}
// make result stable
sort.Slice(results, func(i, j int) bool {
if results[i].degree != results[j].degree {
return results[i].degree > results[j].degree
}
if lhs, rhs := results[i].item, results[j].item; lhs != rhs {
return lhs < rhs
}
if results[i].actual != results[j].actual {
return results[i].actual < results[j].actual
}
if lhs, rhs := results[i].tp, results[j].tp; lhs != rhs {
return lhs < rhs
}
return results[i].instance < results[j].instance
})
for _, result := range results {
if len(result.instance) == 0 {
result.instance = e.statusToInstanceAddress[result.statusAddress]
}
if len(result.statusAddress) == 0 {
result.statusAddress = e.instanceToStatusAddress[result.instance]
}
finalRows = append(finalRows, types.MakeDatums(
name,
result.item,
result.tp,
result.instance,
result.statusAddress,
result.actual,
result.expected,
result.severity,
result.detail,
))
}
}
return finalRows, nil
}
func (c configInspection) inspect(ctx context.Context, sctx sessionctx.Context, filter inspectionFilter) []inspectionResult {
var results []inspectionResult
results = append(results, c.inspectDiffConfig(ctx, sctx, filter)...)
results = append(results, c.inspectCheckConfig(ctx, sctx, filter)...)
return results
}
func (configInspection) inspectDiffConfig(ctx context.Context, sctx sessionctx.Context, filter inspectionFilter) []inspectionResult {
// check the configuration consistency
ignoreConfigKey := []string{
// TiDB
"port",
"status.status-port",
"host",
"path",
"advertise-address",
"status.status-port",
"log.file.filename",
"log.slow-query-file",
"tmp-storage-path",
// PD
"advertise-client-urls",
"advertise-peer-urls",
"client-urls",
"data-dir",
"log-file",
"log.file.filename",
"metric.job",
"name",
"peer-urls",
// TiKV
"server.addr",
"server.advertise-addr",
"server.advertise-status-addr",
"server.status-addr",
"log-file",
"raftstore.raftdb-path",
"storage.data-dir",
"storage.block-cache.capacity",
}
var rows []chunk.Row
exec := sctx.(sqlexec.RestrictedSQLExecutor)
stmt, err := exec.ParseWithParams(ctx, "select type, `key`, count(distinct value) as c from information_schema.cluster_config where `key` not in (%?) group by type, `key` having c > 1", ignoreConfigKey)
if err == nil {
rows, _, err = exec.ExecRestrictedStmt(ctx, stmt)
}
if err != nil {
sctx.GetSessionVars().StmtCtx.AppendWarning(fmt.Errorf("check configuration consistency failed: %v", err))
}
generateDetail := func(tp, item string) string {
var rows []chunk.Row
stmt, err := exec.ParseWithParams(ctx, "select value, instance from information_schema.cluster_config where type=%? and `key`=%?;", tp, item)
if err == nil {
rows, _, err = exec.ExecRestrictedStmt(ctx, stmt)
}
if err != nil {
sctx.GetSessionVars().StmtCtx.AppendWarning(fmt.Errorf("check configuration consistency failed: %v", err))
return fmt.Sprintf("the cluster has different config value of %[2]s, execute the sql to see more detail: select * from information_schema.cluster_config where type='%[1]s' and `key`='%[2]s'",
tp, item)
}
m := make(map[string][]string)
for _, row := range rows {
value := row.GetString(0)
instance := row.GetString(1)
m[value] = append(m[value], instance)
}
groups := make([]string, 0, len(m))
for k, v := range m {
sort.Strings(v)
groups = append(groups, fmt.Sprintf("%s config value is %s", strings.Join(v, ","), k))
}
sort.Strings(groups)
return strings.Join(groups, "\n")
}
var results []inspectionResult
for _, row := range rows {
if filter.enable(row.GetString(1)) {
detail := generateDetail(row.GetString(0), row.GetString(1))
results = append(results, inspectionResult{
tp: row.GetString(0),
instance: "",
item: row.GetString(1), // key
actual: "inconsistent",
expected: "consistent",
severity: "warning",
detail: detail,
})
}
}
return results
}
func (c configInspection) inspectCheckConfig(ctx context.Context, sctx sessionctx.Context, filter inspectionFilter) []inspectionResult {
// check that the configuration values are reasonable.
cases := []struct {
table string
tp string
key string
expect string
cond string
detail string
}{
{
table: "cluster_config",
key: "log.slow-threshold",
expect: "> 0",
cond: "type = 'tidb' and `key` = 'log.slow-threshold' and value = '0'",
detail: "slow-threshold = 0 will record every query to slow log, it may affect performance",
},
{
table: "cluster_config",
key: "raftstore.sync-log",
expect: "true",
cond: "type = 'tikv' and `key` = 'raftstore.sync-log' and value = 'false'",
detail: "sync-log should be true to avoid recover region when the machine breaks down",
},
{
table: "cluster_systeminfo",
key: "transparent_hugepage_enabled",
expect: "always madvise [never]",
cond: "system_name = 'kernel' and name = 'transparent_hugepage_enabled' and value not like '%[never]%'",
detail: "Transparent HugePages can cause memory allocation delays during runtime, TiDB recommends that you disable Transparent HugePages on all TiDB servers",
},
}
var results []inspectionResult
var rows []chunk.Row
sql := new(strings.Builder)
exec := sctx.(sqlexec.RestrictedSQLExecutor)
for _, cas := range cases {
if !filter.enable(cas.key) {
continue
}
sql.Reset()
fmt.Fprintf(sql, "select type,instance,value from information_schema.%s where %s", cas.table, cas.cond)
stmt, err := exec.ParseWithParams(ctx, sql.String())
if err == nil {
rows, _, err = exec.ExecRestrictedStmt(ctx, stmt)
}
if err != nil {
sctx.GetSessionVars().StmtCtx.AppendWarning(fmt.Errorf("check configuration in reason failed: %v", err))
}
for _, row := range rows {
results = append(results, inspectionResult{
tp: row.GetString(0),
instance: row.GetString(1),
item: cas.key,
actual: row.GetString(2),
expected: cas.expect,
severity: "warning",
detail: cas.detail,
})
}
}
results = append(results, c.checkTiKVBlockCacheSizeConfig(ctx, sctx, filter)...)
return results
}
func (c configInspection) checkTiKVBlockCacheSizeConfig(ctx context.Context, sctx sessionctx.Context, filter inspectionFilter) []inspectionResult {
item := "storage.block-cache.capacity"
if !filter.enable(item) {
return nil
}
var rows []chunk.Row
exec := sctx.(sqlexec.RestrictedSQLExecutor)
stmt, err := exec.ParseWithParams(ctx, "select instance,value from information_schema.cluster_config where type='tikv' and `key` = 'storage.block-cache.capacity'")
if err == nil {
rows, _, err = exec.ExecRestrictedStmt(ctx, stmt)
}
if err != nil {
sctx.GetSessionVars().StmtCtx.AppendWarning(fmt.Errorf("check configuration in reason failed: %v", err))
}
extractIP := func(addr string) string {
if idx := strings.Index(addr, ":"); idx > -1 {
return addr[0:idx]
}
return addr
}
ipToBlockSize := make(map[string]uint64)
ipToCount := make(map[string]int)
for _, row := range rows {
ip := extractIP(row.GetString(0))
size, err := c.convertReadableSizeToByteSize(row.GetString(1))
if err != nil {
sctx.GetSessionVars().StmtCtx.AppendWarning(fmt.Errorf("check TiKV block-cache configuration in reason failed: %v", err))
return nil
}
ipToBlockSize[ip] += size
ipToCount[ip]++
}
stmt, err = exec.ParseWithParams(ctx, "select instance, value from metrics_schema.node_total_memory where time=now()")
if err == nil {
rows, _, err = exec.ExecRestrictedStmt(ctx, stmt)
}
if err != nil {
sctx.GetSessionVars().StmtCtx.AppendWarning(fmt.Errorf("check configuration in reason failed: %v", err))
}
ipToMemorySize := make(map[string]float64)
for _, row := range rows {
ip := extractIP(row.GetString(0))
size := row.GetFloat64(1)
ipToMemorySize[ip] += size
}
var results []inspectionResult
for ip, blockSize := range ipToBlockSize {
if memorySize, ok := ipToMemorySize[ip]; ok {
if float64(blockSize) > memorySize*0.45 {
detail := fmt.Sprintf("There are %v TiKV server in %v node, the total 'storage.block-cache.capacity' of TiKV is more than (0.45 * total node memory)",
ipToCount[ip], ip)
results = append(results, inspectionResult{
tp: "tikv",
instance: ip,
item: item,
actual: fmt.Sprintf("%v", blockSize),
expected: fmt.Sprintf("< %.0f", memorySize*0.45),
severity: "warning",
detail: detail,
})
}
}
}
return results
}
func (configInspection) convertReadableSizeToByteSize(sizeStr string) (uint64, error) {
const KB = uint64(1024)
const MB = KB * 1024
const GB = MB * 1024
const TB = GB * 1024
const PB = TB * 1024
rate := uint64(1)
if strings.HasSuffix(sizeStr, "KiB") {
rate = KB
} else if strings.HasSuffix(sizeStr, "MiB") {
rate = MB
} else if strings.HasSuffix(sizeStr, "GiB") {
rate = GB
} else if strings.HasSuffix(sizeStr, "TiB") {
rate = TB
} else if strings.HasSuffix(sizeStr, "PiB") {
rate = PB
}
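// Note: only integral sizes parse here; strconv.Atoi below rejects
// fractional values such as "0.5GiB".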
if rate != 1 && len(sizeStr) > 3 {
sizeStr = sizeStr[:len(sizeStr)-3]
}
size, err := strconv.Atoi(sizeStr)
if err != nil {
return 0, errors.Trace(err)
}
return uint64(size) * rate, nil
}
func (versionInspection) inspect(ctx context.Context, sctx sessionctx.Context, filter inspectionFilter) []inspectionResult {
exec := sctx.(sqlexec.RestrictedSQLExecutor)
var rows []chunk.Row
// check the version consistency
stmt, err := exec.ParseWithParams(ctx, "select type, count(distinct git_hash) as c from information_schema.cluster_info group by type having c > 1;")
if err == nil {
rows, _, err = exec.ExecRestrictedStmt(ctx, stmt)
}
if err != nil {
sctx.GetSessionVars().StmtCtx.AppendWarning(fmt.Errorf("check version consistency failed: %v", err))
}
const name = "git_hash"
var results []inspectionResult
for _, row := range rows {
if filter.enable(name) {
results = append(results, inspectionResult{
tp: row.GetString(0),
instance: "",
item: name,
actual: "inconsistent",
expected: "consistent",
severity: "critical",
detail: fmt.Sprintf("the cluster has %[1]v different %[2]s versions, execute the sql to see more detail: select * from information_schema.cluster_info where type='%[2]s'", row.GetUint64(1), row.GetString(0)),
})
}
}
return results
}
func (c nodeLoadInspection) inspect(ctx context.Context, sctx sessionctx.Context, filter inspectionFilter) []inspectionResult {
var rules = []ruleChecker{
inspectCPULoad{item: "load1", tbl: "node_load1"},
inspectCPULoad{item: "load5", tbl: "node_load5"},
inspectCPULoad{item: "load15", tbl: "node_load15"},
inspectVirtualMemUsage{},
inspectSwapMemoryUsed{},
inspectDiskUsage{},
}
return checkRules(ctx, sctx, filter, rules)
}
type inspectVirtualMemUsage struct{}
func (inspectVirtualMemUsage) genSQL(timeRange plannercore.QueryTimeRange) string {
sql := fmt.Sprintf("select instance, max(value) as max_usage from metrics_schema.node_memory_usage %s group by instance having max_usage >= 70", timeRange.Condition())
return sql
}
func (i inspectVirtualMemUsage) genResult(sql string, row chunk.Row) inspectionResult {
return inspectionResult{
tp: "node",
instance: row.GetString(0),
item: i.getItem(),
actual: fmt.Sprintf("%.1f%%", row.GetFloat64(1)),
expected: "< 70%",
severity: "warning",
detail: "the memory-usage is too high",
}
}
func (inspectVirtualMemUsage) getItem() string {
return "virtual-memory-usage"
}
type inspectSwapMemoryUsed struct{}
func (inspectSwapMemoryUsed) genSQL(timeRange plannercore.QueryTimeRange) string {
sql := fmt.Sprintf("select instance, max(value) as max_used from metrics_schema.node_memory_swap_used %s group by instance having max_used > 0", timeRange.Condition())
return sql
}
func (i inspectSwapMemoryUsed) genResult(sql string, row chunk.Row) inspectionResult {
return inspectionResult{
tp: "node",
instance: row.GetString(0),
item: i.getItem(),
actual: fmt.Sprintf("%.1f", row.GetFloat64(1)),
expected: "0",
severity: "warning",
}
}
func (inspectSwapMemoryUsed) getItem() string {
return "swap-memory-used"
}
type inspectDiskUsage struct{}
func (inspectDiskUsage) genSQL(timeRange plannercore.QueryTimeRange) string {
sql := fmt.Sprintf("select instance, device, max(value) as max_usage from metrics_schema.node_disk_usage %v and device like '/%%' group by instance, device having max_usage >= 70", timeRange.Condition())
return sql
}
func (i inspectDiskUsage) genResult(sql string, row chunk.Row) inspectionResult {
return inspectionResult{
tp: "node",
instance: row.GetString(0),
item: i.getItem(),
actual: fmt.Sprintf("%.1f%%", row.GetFloat64(2)),
expected: "< 70%",
severity: "warning",
detail: "the disk-usage of " + row.GetString(1) + " is too high",
}
}
func (inspectDiskUsage) getItem() string {
return "disk-usage"
}
type inspectCPULoad struct {
item string
tbl string
}
func (i inspectCPULoad) genSQL(timeRange plannercore.QueryTimeRange) string {
sql := fmt.Sprintf(`select t1.instance, t1.max_load , 0.7*t2.cpu_count from
(select instance,max(value) as max_load from metrics_schema.%[1]s %[2]s group by instance) as t1 join
(select instance,max(value) as cpu_count from metrics_schema.node_virtual_cpus %[2]s group by instance) as t2
on t1.instance=t2.instance where t1.max_load>(0.7*t2.cpu_count);`, i.tbl, timeRange.Condition())
return sql
}
func (i inspectCPULoad) genResult(sql string, row chunk.Row) inspectionResult {
return inspectionResult{
tp: "node",
instance: row.GetString(0),
item: "cpu-" + i.item,
actual: fmt.Sprintf("%.1f", row.GetFloat64(1)),
expected: fmt.Sprintf("< %.1f", row.GetFloat64(2)),
severity: "warning",
detail: i.getItem() + " should less than (cpu_logical_cores * 0.7)",
}
}
func (i inspectCPULoad) getItem() string {
return "cpu-" + i.item
}
func (c criticalErrorInspection) inspect(ctx context.Context, sctx sessionctx.Context, filter inspectionFilter) []inspectionResult {
results := c.inspectError(ctx, sctx, filter)
results = append(results, c.inspectForServerDown(ctx, sctx, filter)...)
return results
}
func (criticalErrorInspection) inspectError(ctx context.Context, sctx sessionctx.Context, filter inspectionFilter) []inspectionResult {
var rules = []struct {
tp string
item string
tbl string
}{
{tp: "tikv", item: "critical-error", tbl: "tikv_critical_error_total_count"},
{tp: "tidb", item: "panic-count", tbl: "tidb_panic_count_total_count"},
{tp: "tidb", item: "binlog-error", tbl: "tidb_binlog_error_total_count"},
{tp: "tikv", item: "scheduler-is-busy", tbl: "tikv_scheduler_is_busy_total_count"},
{tp: "tikv", item: "coprocessor-is-busy", tbl: "tikv_coprocessor_is_busy_total_count"},
{tp: "tikv", item: "channel-is-full", tbl: "tikv_channel_full_total_count"},
{tp: "tikv", item: "tikv_engine_write_stall", tbl: "tikv_engine_write_stall"},
}
condition := filter.timeRange.Condition()
var results []inspectionResult
var rows []chunk.Row
exec := sctx.(sqlexec.RestrictedSQLExecutor)
sql := new(strings.Builder)
for _, rule := range rules {
if filter.enable(rule.item) {
def, found := infoschema.MetricTableMap[rule.tbl]
if !found {
sctx.GetSessionVars().StmtCtx.AppendWarning(fmt.Errorf("metrics table: %s not found", rule.tbl))
continue
}
sql.Reset()
fmt.Fprintf(sql, "select `%[1]s`,sum(value) as total from `%[2]s`.`%[3]s` %[4]s group by `%[1]s` having total>=1.0",
strings.Join(def.Labels, "`,`"), util.MetricSchemaName.L, rule.tbl, condition)
stmt, err := exec.ParseWithParams(ctx, sql.String())
if err == nil {
rows, _, err = exec.ExecRestrictedStmt(ctx, stmt)
}
if err != nil {
sctx.GetSessionVars().StmtCtx.AppendWarning(fmt.Errorf("execute '%s' failed: %v", sql, err))
continue
}
for _, row := range rows {
var actual, detail string
var degree float64
if rest := def.Labels[1:]; len(rest) > 0 {
values := make([]string, 0, len(rest))
// `i+1` and `1+len(rest)` skip the first field `instance`
for i := range rest {
values = append(values, row.GetString(i+1))
}
// TODO: find a better way to construct the `actual` field
actual = fmt.Sprintf("%.2f(%s)", row.GetFloat64(1+len(rest)), strings.Join(values, ", "))
degree = row.GetFloat64(1 + len(rest))
} else {
actual = fmt.Sprintf("%.2f", row.GetFloat64(1))
degree = row.GetFloat64(1)
}
detail = fmt.Sprintf("the total number of errors about '%s' is too many", rule.item)
result := inspectionResult{
tp: rule.tp,
// NOTE: the first label of every table inspected here must be `instance`
statusAddress: row.GetString(0),
item: rule.item,
actual: actual,
expected: "0",
severity: "critical",
detail: detail,
degree: degree,
}
results = append(results, result)
}
}
}
return results
}
func (criticalErrorInspection) inspectForServerDown(ctx context.Context, sctx sessionctx.Context, filter inspectionFilter) []inspectionResult {
item := "server-down"
if !filter.enable(item) {
return nil
}
condition := filter.timeRange.Condition()
exec := sctx.(sqlexec.RestrictedSQLExecutor)
sql := new(strings.Builder)
fmt.Fprintf(sql, `select t1.job,t1.instance, t2.min_time from
(select instance,job from metrics_schema.up %[1]s group by instance,job having max(value)-min(value)>0) as t1 join
(select instance,min(time) as min_time from metrics_schema.up %[1]s and value=0 group by instance,job) as t2 on t1.instance=t2.instance order by job`, condition)
var rows []chunk.Row
stmt, err := exec.ParseWithParams(ctx, sql.String())
if err == nil {
rows, _, err = exec.ExecRestrictedStmt(ctx, stmt)
}
if err != nil {
sctx.GetSessionVars().StmtCtx.AppendWarning(fmt.Errorf("execute '%s' failed: %v", sql, err))
}
var results []inspectionResult
for _, row := range rows {
if row.Len() < 3 {
continue
}
detail := fmt.Sprintf("%s %s disconnect with prometheus around time '%s'", row.GetString(0), row.GetString(1), row.GetTime(2))
result := inspectionResult{
tp: row.GetString(0),
statusAddress: row.GetString(1),
item: item,
actual: "",
expected: "",
severity: "critical",
detail: detail,
degree: 10000 + float64(len(results)),
}
results = append(results, result)
}
// Check from log.
sql.Reset()
fmt.Fprintf(sql, "select type,instance,time from information_schema.cluster_log %s and level = 'info' and message like '%%Welcome to'", condition)
stmt, err = exec.ParseWithParams(ctx, sql.String())
if err == nil {
rows, _, err = exec.ExecRestrictedStmt(ctx, stmt)
}
if err != nil {
sctx.GetSessionVars().StmtCtx.AppendWarning(fmt.Errorf("execute '%s' failed: %v", sql, err))
}
for _, row := range rows {
if row.Len() < 3 {
continue
}
detail := fmt.Sprintf("%s %s restarted at time '%s'", row.GetString(0), row.GetString(1), row.GetString(2))
result := inspectionResult{
tp: row.GetString(0),
instance: row.GetString(1),
item: item,
actual: "",
expected: "",
severity: "critical",
detail: detail,
degree: 10000 + float64(len(results)),
}
results = append(results, result)
}
return results
}
func (c thresholdCheckInspection) inspect(ctx context.Context, sctx sessionctx.Context, filter inspectionFilter) []inspectionResult {
inspects := []func(context.Context, sessionctx.Context, inspectionFilter) []inspectionResult{
c.inspectThreshold1,
c.inspectThreshold2,
c.inspectThreshold3,
c.inspectForLeaderDrop,
}
var results []inspectionResult
for _, inspect := range inspects {
re := inspect(ctx, sctx, filter)
results = append(results, re...)
}
return results
}
func (thresholdCheckInspection) inspectThreshold1(ctx context.Context, sctx sessionctx.Context, filter inspectionFilter) []inspectionResult {
var rules = []struct {
item string
component string
configKey string
threshold float64
}{
{
item: "coprocessor-normal-cpu",
component: "cop_normal%",
configKey: "readpool.coprocessor.normal-concurrency",
threshold: 0.9},
{
item: "coprocessor-high-cpu",
component: "cop_high%",
configKey: "readpool.coprocessor.high-concurrency",
threshold: 0.9,
},
{
item: "coprocessor-low-cpu",
component: "cop_low%",
configKey: "readpool.coprocessor.low-concurrency",
threshold: 0.9,
},
{
item: "grpc-cpu",
component: "grpc%",
configKey: "server.grpc-concurrency",
threshold: 0.9,
},
{
item: "raftstore-cpu",
component: "raftstore_%",
configKey: "raftstore.store-pool-size",
threshold: 0.8,
},
{
item: "apply-cpu",
component: "apply_%",
configKey: "raftstore.apply-pool-size",
threshold: 0.8,
},
{
item: "storage-readpool-normal-cpu",
component: "store_read_norm%",
configKey: "readpool.storage.normal-concurrency",
threshold: 0.9,
},
{
item: "storage-readpool-high-cpu",
component: "store_read_high%",
configKey: "readpool.storage.high-concurrency",
threshold: 0.9,
},
{
item: "storage-readpool-low-cpu",
component: "store_read_low%",
configKey: "readpool.storage.low-concurrency",
threshold: 0.9,
},
{
item: "scheduler-worker-cpu",
component: "sched_%",
configKey: "storage.scheduler-worker-pool-size",
threshold: 0.85,
},
{
item: "split-check-cpu",
component: "split_check",
threshold: 0.9,
},
}
condition := filter.timeRange.Condition()
var results []inspectionResult
var rows []chunk.Row
exec := sctx.(sqlexec.RestrictedSQLExecutor)
sql := new(strings.Builder)
for _, rule := range rules {
if !filter.enable(rule.item) {
continue
}
sql.Reset()
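// For rules tied to a config item, join the measured thread CPU usage with the
// configured concurrency so the threshold scales with the config value; otherwise
// compare against the static threshold directly.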
if len(rule.configKey) > 0 {
fmt.Fprintf(sql, `select t1.status_address, t1.cpu, (t2.value * %[2]f) as threshold, t2.value from
(select status_address, max(sum_value) as cpu from (select instance as status_address, sum(value) as sum_value from metrics_schema.tikv_thread_cpu %[4]s and name like '%[1]s' group by instance, time) as tmp group by tmp.status_address) as t1 join
(select instance, value from information_schema.cluster_config where type='tikv' and %[5]s = '%[3]s') as t2 join
(select instance,status_address from information_schema.cluster_info where type='tikv') as t3
on t1.status_address=t3.status_address and t2.instance=t3.instance where t1.cpu > (t2.value * %[2]f)`, rule.component, rule.threshold, rule.configKey, condition, "`key`")
} else {
fmt.Fprintf(sql, `select t1.instance, t1.cpu, %[2]f from
(select instance, max(value) as cpu from metrics_schema.tikv_thread_cpu %[3]s and name like '%[1]s' group by instance) as t1
where t1.cpu > %[2]f;`, rule.component, rule.threshold, condition)
}
stmt, err := exec.ParseWithParams(ctx, sql.String())
if err == nil {
rows, _, err = exec.ExecRestrictedStmt(ctx, stmt)
}
if err != nil {
sctx.GetSessionVars().StmtCtx.AppendWarning(fmt.Errorf("execute '%s' failed: %v", sql, err))
continue
}
for _, row := range rows {
actual := fmt.Sprintf("%.2f", row.GetFloat64(1))
degree := math.Abs(row.GetFloat64(1)-row.GetFloat64(2)) / math.Max(row.GetFloat64(1), row.GetFloat64(2))
expected := ""
if len(rule.configKey) > 0 {
expected = fmt.Sprintf("< %.2f, config: %v=%v", row.GetFloat64(2), rule.configKey, row.GetString(3))
} else {
expected = fmt.Sprintf("< %.2f", row.GetFloat64(2))
}
detail := fmt.Sprintf("the '%s' max cpu-usage of %s tikv is too high", rule.item, row.GetString(0))
result := inspectionResult{
tp: "tikv",
statusAddress: row.GetString(0),
item: rule.item,
actual: actual,
expected: expected,
severity: "warning",
detail: detail,
degree: degree,
}
results = append(results, result)
}
}
return results
}
func (thresholdCheckInspection) inspectThreshold2(ctx context.Context, sctx sessionctx.Context, filter inspectionFilter) []inspectionResult {
var rules = []struct {
tp string
item string
tbl string
condition string
threshold float64
factor float64
isMin bool
detail string
}{
{
tp: "tidb",
item: "tso-duration",
tbl: "pd_tso_wait_duration",
condition: "quantile=0.999",
threshold: 0.05,
},
{
tp: "tidb",
item: "get-token-duration",
tbl: "tidb_get_token_duration",
condition: "quantile=0.999",
threshold: 0.001,
factor: 10e5, // the unit is microsecond
},
{
tp: "tidb",
item: "load-schema-duration",
tbl: "tidb_load_schema_duration",
condition: "quantile=0.99",
threshold: 1,
},
{
tp: "tikv",
item: "scheduler-cmd-duration",
tbl: "tikv_scheduler_command_duration",
condition: "quantile=0.99",
threshold: 0.1,
},
{
tp: "tikv",
item: "handle-snapshot-duration",
tbl: "tikv_handle_snapshot_duration",
threshold: 30,
},
{
tp: "tikv",
item: "storage-write-duration",
tbl: "tikv_storage_async_request_duration",
condition: "type='write'",
threshold: 0.1,
},
{
tp: "tikv",
item: "storage-snapshot-duration",
tbl: "tikv_storage_async_request_duration",
condition: "type='snapshot'",
threshold: 0.05,
},
{
tp: "tikv",
item: "rocksdb-write-duration",
tbl: "tikv_engine_write_duration",
condition: "type='write_max'",
threshold: 0.1,
factor: 10e5, // the unit is microsecond
},
{
tp: "tikv",
item: "rocksdb-get-duration",
tbl: "tikv_engine_max_get_duration",
condition: "type='get_max'",
threshold: 0.05,
factor: 10e5,
},
{
tp: "tikv",
item: "rocksdb-seek-duration",
tbl: "tikv_engine_max_seek_duration",
condition: "type='seek_max'",
threshold: 0.05,
factor: 10e5, // the unit is microsecond
},
{
tp: "tikv",
item: "scheduler-pending-cmd-count",
tbl: "tikv_scheduler_pending_commands",
threshold: 1000,
detail: " %s tikv scheduler has too many pending commands",
},
{
tp: "tikv",
item: "index-block-cache-hit",
tbl: "tikv_block_index_cache_hit",
condition: "value > 0",
threshold: 0.95,
isMin: true,
},
{
tp: "tikv",
item: "filter-block-cache-hit",
tbl: "tikv_block_filter_cache_hit",
condition: "value > 0",
threshold: 0.95,
isMin: true,
},
{
tp: "tikv",
item: "data-block-cache-hit",
tbl: "tikv_block_data_cache_hit",
condition: "value > 0",
threshold: 0.80,
isMin: true,
},
}
condition := filter.timeRange.Condition()
var results []inspectionResult
var rows []chunk.Row
sql := new(strings.Builder)
exec := sctx.(sqlexec.RestrictedSQLExecutor)
for _, rule := range rules {
if !filter.enable(rule.item) {
continue
}
cond := condition
if len(rule.condition) > 0 {
cond = fmt.Sprintf("%s and %s", cond, rule.condition)
}
if rule.factor == 0 {
rule.factor = 1
}
sql.Reset()
if rule.isMin {
fmt.Fprintf(sql, "select instance, min(value)/%.0f as min_value from metrics_schema.%s %s group by instance having min_value < %f;", rule.factor, rule.tbl, cond, rule.threshold)
} else {
fmt.Fprintf(sql, "select instance, max(value)/%.0f as max_value from metrics_schema.%s %s group by instance having max_value > %f;", rule.factor, rule.tbl, cond, rule.threshold)
}
stmt, err := exec.ParseWithParams(ctx, sql.String())
if err == nil {
rows, _, err = exec.ExecRestrictedStmt(ctx, stmt)
}
if err != nil {
sctx.GetSessionVars().StmtCtx.AppendWarning(fmt.Errorf("execute '%s' failed: %v", sql, err))
continue
}
for _, row := range rows {
actual := fmt.Sprintf("%.3f", row.GetFloat64(1))
degree := math.Abs(row.GetFloat64(1)-rule.threshold) / math.Max(row.GetFloat64(1), rule.threshold)
expected := ""
if rule.isMin {
expected = fmt.Sprintf("> %.3f", rule.threshold)
} else {
expected = fmt.Sprintf("< %.3f", rule.threshold)
}
detail := rule.detail
if len(detail) == 0 {
if strings.HasSuffix(rule.item, "duration") {
detail = fmt.Sprintf("max duration of %s %s %s is too slow", row.GetString(0), rule.tp, rule.item)
} else if strings.HasSuffix(rule.item, "hit") {
detail = fmt.Sprintf("min %s rate of %s %s is too low", rule.item, row.GetString(0), rule.tp)
}
} else {
detail = fmt.Sprintf(detail, row.GetString(0))
}
result := inspectionResult{
tp: rule.tp,
statusAddress: row.GetString(0),
item: rule.item,
actual: actual,
expected: expected,
severity: "warning",
detail: detail,
degree: degree,
}
results = append(results, result)
}
}
return results
}
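// ruleChecker abstracts a single inspection rule: genSQL renders the diagnostic
// query for a time range and genResult turns each matching row into an inspectionResult.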
type ruleChecker interface {
genSQL(timeRange plannercore.QueryTimeRange) string
genResult(sql string, row chunk.Row) inspectionResult
getItem() string
}
type compareStoreStatus struct {
item string
tp string
threshold float64
}
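// genSQL self-joins pd_scheduler_store_status at identical timestamps to find store
// pairs whose metric differs by more than the threshold, i.e. an unbalanced cluster.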
func (c compareStoreStatus) genSQL(timeRange plannercore.QueryTimeRange) string {
condition := fmt.Sprintf(`where t1.time>='%[1]s' and t1.time<='%[2]s' and
t2.time>='%[1]s' and t2.time<='%[2]s'`, timeRange.From.Format(plannercore.MetricTableTimeFormat),
timeRange.To.Format(plannercore.MetricTableTimeFormat))
return fmt.Sprintf(`
SELECT t1.address,
max(t1.value),
t2.address,
min(t2.value),
max((t1.value-t2.value)/t1.value) AS ratio
FROM metrics_schema.pd_scheduler_store_status t1
JOIN metrics_schema.pd_scheduler_store_status t2 %s
AND t1.type='%s'
AND t1.time = t2.time
AND t1.type=t2.type
AND t1.address != t2.address
AND (t1.value-t2.value)/t1.value>%v
AND t1.value > 0
GROUP BY t1.address,t2.address
ORDER BY ratio desc`, condition, c.tp, c.threshold)
}
func (c compareStoreStatus) genResult(_ string, row chunk.Row) inspectionResult {
addr1 := row.GetString(0)
value1 := row.GetFloat64(1)
addr2 := row.GetString(2)
value2 := row.GetFloat64(3)
ratio := row.GetFloat64(4)
detail := fmt.Sprintf("%v max %s is %.2f, much more than %v min %s %.2f", addr1, c.tp, value1, addr2, c.tp, value2)
return inspectionResult{
tp: "tikv",
instance: addr2,
item: c.item,
actual: fmt.Sprintf("%.2f%%", ratio*100),
expected: fmt.Sprintf("< %.2f%%", c.threshold*100),
severity: "warning",
detail: detail,
degree: ratio,
}
}
func (c compareStoreStatus) getItem() string {
return c.item
}
type checkRegionHealth struct{}
func (c checkRegionHealth) genSQL(timeRange plannercore.QueryTimeRange) string {
condition := timeRange.Condition()
return fmt.Sprintf(`select instance, sum(value) as sum_value from metrics_schema.pd_region_health %s and
type in ('extra-peer-region-count','learner-peer-region-count','pending-peer-region-count') having sum_value>100`, condition)
}
func (c checkRegionHealth) genResult(_ string, row chunk.Row) inspectionResult {
detail := fmt.Sprintf("the count of extra-perr and learner-peer and pending-peer are %v, it means the scheduling is too frequent or too slow", row.GetFloat64(1))
actual := fmt.Sprintf("%.2f", row.GetFloat64(1))
degree := math.Abs(row.GetFloat64(1)-100) / math.Max(row.GetFloat64(1), 100)
return inspectionResult{
tp: "pd",
instance: row.GetString(0),
item: c.getItem(),
actual: actual,
expected: "< 100",
severity: "warning",
detail: detail,
degree: degree,
}
}
func (c checkRegionHealth) getItem() string {
return "region-health"
}
type checkStoreRegionTooMuch struct{}
func (c checkStoreRegionTooMuch) genSQL(timeRange plannercore.QueryTimeRange) string {
condition := timeRange.Condition()
return fmt.Sprintf(`select address, max(value) from metrics_schema.pd_scheduler_store_status %s and type='region_count' and value > 20000 group by address`, condition)
}
func (c checkStoreRegionTooMuch) genResult(sql string, row chunk.Row) inspectionResult {
actual := fmt.Sprintf("%.2f", row.GetFloat64(1))
degree := math.Abs(row.GetFloat64(1)-20000) / math.Max(row.GetFloat64(1), 20000)
return inspectionResult{
tp: "tikv",
instance: row.GetString(0),
item: c.getItem(),
actual: actual,
expected: "<= 20000",
severity: "warning",
detail: fmt.Sprintf("%s tikv has too many regions", row.GetString(0)),
degree: degree,
}
}
func (c checkStoreRegionTooMuch) getItem() string {
return "region-count"
}
func (thresholdCheckInspection) inspectThreshold3(ctx context.Context, sctx sessionctx.Context, filter inspectionFilter) []inspectionResult {
var rules = []ruleChecker{
compareStoreStatus{
item: "leader-score-balance",
tp: "leader_score",
threshold: 0.05,
},
compareStoreStatus{
item: "region-score-balance",
tp: "region_score",
threshold: 0.05,
},
compareStoreStatus{
item: "store-available-balance",
tp: "store_available",
threshold: 0.2,
},
checkRegionHealth{},
checkStoreRegionTooMuch{},
}
return checkRules(ctx, sctx, filter, rules)
}
func checkRules(ctx context.Context, sctx sessionctx.Context, filter inspectionFilter, rules []ruleChecker) []inspectionResult {
var results []inspectionResult
var rows []chunk.Row
exec := sctx.(sqlexec.RestrictedSQLExecutor)
for _, rule := range rules {
if !filter.enable(rule.getItem()) {
continue
}
sql := rule.genSQL(filter.timeRange)
stmt, err := exec.ParseWithParams(ctx, sql)
if err == nil {
rows, _, err = exec.ExecRestrictedStmt(ctx, stmt)
}
if err != nil {
sctx.GetSessionVars().StmtCtx.AppendWarning(fmt.Errorf("execute '%s' failed: %v", sql, err))
continue
}
for _, row := range rows {
results = append(results, rule.genResult(sql, row))
}
}
return results
}
func (c thresholdCheckInspection) inspectForLeaderDrop(ctx context.Context, sctx sessionctx.Context, filter inspectionFilter) []inspectionResult {
condition := filter.timeRange.Condition()
threshold := 50.0
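// First find stores whose leader count fluctuated by more than the threshold within
// the time range, then walk each store's time series to locate the actual drop.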
sql := new(strings.Builder)
fmt.Fprintf(sql, `select address,min(value) as mi,max(value) as mx from metrics_schema.pd_scheduler_store_status %s and type='leader_count' group by address having mx-mi>%v`, condition, threshold)
exec := sctx.(sqlexec.RestrictedSQLExecutor)
var rows []chunk.Row
stmt, err := exec.ParseWithParams(ctx, sql.String())
if err == nil {
rows, _, err = exec.ExecRestrictedStmt(ctx, stmt)
}
if err != nil {
sctx.GetSessionVars().StmtCtx.AppendWarning(fmt.Errorf("execute '%s' failed: %v", sql, err))
return nil
}
var results []inspectionResult
for _, row := range rows {
address := row.GetString(0)
sql.Reset()
fmt.Fprintf(sql, `select time, value from metrics_schema.pd_scheduler_store_status %s and type='leader_count' and address = '%s' order by time`, condition, address)
var subRows []chunk.Row
stmt, err := exec.ParseWithParams(ctx, sql.String())
if err == nil {
subRows, _, err = exec.ExecRestrictedStmt(ctx, stmt)
}
if err != nil {
sctx.GetSessionVars().StmtCtx.AppendWarning(fmt.Errorf("execute '%s' failed: %v", sql, err))
continue
}
lastValue := float64(0)
for i, subRow := range subRows {
v := subRow.GetFloat64(1)
if i == 0 {
lastValue = v
continue
}
if lastValue-v > threshold {
level := "warning"
if v == 0 {
level = "critical"
}
results = append(results, inspectionResult{
tp: "tikv",
instance: address,
item: "leader-drop",
actual: fmt.Sprintf("%.0f", lastValue-v),
expected: fmt.Sprintf("<= %.0f", threshold),
severity: level,
detail: fmt.Sprintf("%s tikv has a sudden leader drop around time %s, leader count dropped from %.0f to %.0f", address, subRow.GetTime(0), lastValue, v),
degree: lastValue - v,
})
break
}
lastValue = v
}
}
return results
}
| executor/inspection_result.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.2692027688026428,
0.003419159445911646,
0.0001649559271754697,
0.000207298799068667,
0.02423463948071003
] |
{
"id": 2,
"code_window": [
"\t}\n",
"}\n",
"\n",
"func (txn *tikvTxn) GetOption(opt int) interface{} {\n",
"\tswitch opt {\n",
"\tcase tikvstore.TxnScope:\n",
"\t\treturn txn.KVTxn.GetScope()\n",
"\tdefault:\n",
"\t\treturn txn.KVTxn.GetOption(opt)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcase tikvstore.GuaranteeLinearizability:\n",
"\t\treturn !txn.KVTxn.IsCasualConsistency()\n"
],
"file_path": "store/driver/txn/txn_driver.go",
"type": "add",
"edit_start_line_idx": 177
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package expression_test
import (
"math"
. "github.com/pingcap/check"
"github.com/pingcap/parser"
"github.com/pingcap/parser/charset"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/domain"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/printer"
"github.com/pingcap/tidb/util/testkit"
"golang.org/x/net/context"
)
var _ = SerialSuites(&testInferTypeSuite{})
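// typeInferTestCase describes one expression together with the field type
// attributes (tp, charset, flag, flen, decimal) it is expected to infer.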
type typeInferTestCase struct {
sql string
tp byte
chs string
flag uint
flen int
decimal int
}
type testInferTypeSuite struct {
*parser.Parser
}
func (s *testInferTypeSuite) SetUpSuite(c *C) {
s.Parser = parser.New()
}
func (s *testInferTypeSuite) TearDownSuite(c *C) {
}
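// TestInferType creates a table covering the common column types, builds a
// logical plan for each test expression, and asserts that the inferred type,
// charset, flag, flen and decimal all match the expectation.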
func (s *testInferTypeSuite) TestInferType(c *C) {
store, dom, err := newStoreWithBootstrap()
c.Assert(err, IsNil)
defer func() {
dom.Close()
store.Close()
}()
se, err := session.CreateSession4Test(store)
c.Assert(err, IsNil)
testKit := testkit.NewTestKit(c, store)
testKit.MustExec("use test")
testKit.MustExec("drop table if exists t")
sql := `create table t (
c_bit bit(10),
c_int_d int,
c_uint_d int unsigned,
c_bigint_d bigint,
c_ubigint_d bigint unsigned,
c_float_d float,
c_ufloat_d float unsigned,
c_double_d double,
c_udouble_d double unsigned,
c_decimal decimal(6, 3),
c_udecimal decimal(10, 3) unsigned,
c_decimal_d decimal,
c_udecimal_d decimal unsigned,
c_datetime datetime(2),
c_datetime_d datetime,
c_time time(3),
c_time_d time,
c_date date,
c_timestamp timestamp(4) DEFAULT CURRENT_TIMESTAMP(4),
c_timestamp_d timestamp DEFAULT CURRENT_TIMESTAMP,
c_char char(20),
c_bchar char(20) binary,
c_varchar varchar(20),
c_bvarchar varchar(20) binary,
c_text_d text,
c_btext_d text binary,
c_binary binary(20),
c_varbinary varbinary(20),
c_blob_d blob,
c_set set('a', 'b', 'c'),
c_enum enum('a', 'b', 'c'),
c_json JSON,
c_year year
)`
testKit.MustExec(sql)
testKit.MustExec(`set tidb_enable_noop_functions=1;`)
var tests []typeInferTestCase
tests = append(tests, s.createTestCase4Constants()...)
tests = append(tests, s.createTestCase4Cast()...)
tests = append(tests, s.createTestCase4Columns()...)
tests = append(tests, s.createTestCase4StrFuncs()...)
tests = append(tests, s.createTestCase4MathFuncs()...)
tests = append(tests, s.createTestCase4ArithmeticFuncs()...)
tests = append(tests, s.createTestCase4LogicalFuncs()...)
tests = append(tests, s.createTestCase4ControlFuncs()...)
tests = append(tests, s.createTestCase4Aggregations()...)
tests = append(tests, s.createTestCase4InfoFunc()...)
tests = append(tests, s.createTestCase4EncryptionFuncs()...)
tests = append(tests, s.createTestCase4CompareFuncs()...)
tests = append(tests, s.createTestCase4Miscellaneous()...)
tests = append(tests, s.createTestCase4OpFuncs()...)
tests = append(tests, s.createTestCase4OtherFuncs()...)
tests = append(tests, s.createTestCase4TimeFuncs()...)
tests = append(tests, s.createTestCase4LikeFuncs()...)
tests = append(tests, s.createTestCase4Literals()...)
tests = append(tests, s.createTestCase4JSONFuncs()...)
tests = append(tests, s.createTestCase4MiscellaneousFunc()...)
sctx := testKit.Se.(sessionctx.Context)
c.Assert(sctx.GetSessionVars().SetSystemVar(variable.CharacterSetConnection, mysql.DefaultCharset), IsNil)
c.Assert(sctx.GetSessionVars().SetSystemVar(variable.CollationConnection, mysql.DefaultCollationName), IsNil)
ctx := context.Background()
for _, tt := range tests {
sql := "select " + tt.sql + " from t"
comment := Commentf("for %s", sql)
stmt, err := s.ParseOneStmt(sql, "", "")
c.Assert(err, IsNil, comment)
err = se.NewTxn(context.Background())
c.Assert(err, IsNil)
is := domain.GetDomain(sctx).InfoSchema()
err = plannercore.Preprocess(sctx, stmt, is)
c.Assert(err, IsNil, comment)
p, _, err := plannercore.BuildLogicalPlan(ctx, sctx, stmt, is)
c.Assert(err, IsNil, comment)
tp := p.Schema().Columns[0].RetType
c.Assert(tp.Tp, Equals, tt.tp, comment)
c.Assert(tp.Charset, Equals, tt.chs, comment)
c.Assert(tp.Flag, Equals, tt.flag, comment)
c.Assert(tp.Flen, Equals, tt.flen, comment)
c.Assert(tp.Decimal, Equals, tt.decimal, comment)
}
}
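// createTestCase4Constants covers literal constants: integer, decimal, float,
// NULL, boolean, string, bit and hex literals.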
func (s *testInferTypeSuite) createTestCase4Constants() []typeInferTestCase {
return []typeInferTestCase{
{"1", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 1, 0},
{"-1", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 2, 0},
{"1.23", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 4, 2},
{"-1.23", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 5, 2},
{"123e5", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 8, types.UnspecifiedLength},
{"-123e5", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 9, types.UnspecifiedLength},
{"123e-5", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 7, types.UnspecifiedLength},
{"-123e-5", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 8, types.UnspecifiedLength},
{"NULL", mysql.TypeNull, charset.CharsetBin, mysql.BinaryFlag, 0, 0},
{"TRUE", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag | mysql.NotNullFlag, 1, 0},
{"FALSE", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag | mysql.NotNullFlag, 1, 0},
{"'1234'", mysql.TypeVarString, charset.CharsetUTF8MB4, 0 | mysql.NotNullFlag, 4, types.UnspecifiedLength},
{"_utf8'1234'", mysql.TypeVarString, charset.CharsetUTF8, 0 | mysql.NotNullFlag, 4, types.UnspecifiedLength},
{"_binary'1234'", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 4, types.UnspecifiedLength},
{"b'0001'", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 3, 0},
{"b'000100001'", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 6, 0},
{"b'0000000000010000'", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 6, 0},
{"x'10'", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag | mysql.NotNullFlag, 3, 0},
{"x'ff10'", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag | mysql.NotNullFlag, 6, 0},
{"x'0000000000000000ff10'", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag | mysql.NotNullFlag, 30, 0},
}
}
func (s *testInferTypeSuite) createTestCase4Cast() []typeInferTestCase {
return []typeInferTestCase{
{"CAST(c_int_d AS BINARY)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, -1, -1}, // TODO: Flen should be 11.
{"CAST(c_int_d AS BINARY(5))", mysql.TypeString, charset.CharsetBin, mysql.BinaryFlag, 5, -1},
{"CAST(c_int_d AS CHAR)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, -1, -1}, // TODO: Flen should be 11.
{"CAST(c_int_d AS CHAR(5))", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 5, -1},
{"CAST(c_int_d AS DATE)", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"CAST(c_int_d AS DATETIME)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 19, 0},
{"CAST(c_int_d AS DECIMAL)", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"CAST(c_int_d AS DECIMAL(10))", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"CAST(c_int_d AS DECIMAL(10,3))", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 10, 3}, // TODO: Flen should be 12
{"CAST(c_int_d AS JSON)", mysql.TypeJSON, charset.CharsetUTF8MB4, mysql.BinaryFlag | mysql.ParseToJSONFlag, 12582912 / 3, 0},
{"CAST(c_int_d AS SIGNED)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 22, 0}, // TODO: Flen should be 11.
{"CAST(c_int_d AS SIGNED INTEGER)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 22, 0}, // TODO: Flen should be 11.
{"CAST(c_int_d AS TIME)", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"CAST(c_int_d AS UNSIGNED)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 22, 0}, // TODO: Flen should be 11.
{"CAST(c_int_d AS UNSIGNED INTEGER)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 22, 0}, // TODO: Flen should be 11.
}
}
func (s *testInferTypeSuite) createTestCase4Columns() []typeInferTestCase {
return []typeInferTestCase{
{"c_bit ", mysql.TypeBit, charset.CharsetBin, mysql.UnsignedFlag, 10, 0},
{"c_year ", mysql.TypeYear, charset.CharsetBin, mysql.UnsignedFlag | mysql.ZerofillFlag, 4, 0},
{"c_int_d ", mysql.TypeLong, charset.CharsetBin, 0, 11, 0},
{"c_uint_d ", mysql.TypeLong, charset.CharsetBin, mysql.UnsignedFlag, 10, 0},
{"c_bigint_d ", mysql.TypeLonglong, charset.CharsetBin, 0, 20, 0},
{"c_ubigint_d ", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag, 20, 0},
{"c_float_d ", mysql.TypeFloat, charset.CharsetBin, 0, 12, types.UnspecifiedLength},
{"c_ufloat_d ", mysql.TypeFloat, charset.CharsetBin, mysql.UnsignedFlag, 12, types.UnspecifiedLength},
{"c_double_d ", mysql.TypeDouble, charset.CharsetBin, 0, 22, types.UnspecifiedLength},
{"c_udouble_d ", mysql.TypeDouble, charset.CharsetBin, mysql.UnsignedFlag, 22, types.UnspecifiedLength},
{"c_decimal ", mysql.TypeNewDecimal, charset.CharsetBin, 0, 6, 3}, // TODO: Flen should be 8
{"c_udecimal ", mysql.TypeNewDecimal, charset.CharsetBin, mysql.UnsignedFlag, 10, 3}, // TODO: Flen should be 11
{"c_decimal_d ", mysql.TypeNewDecimal, charset.CharsetBin, 0, 10, 0},
{"c_udecimal_d ", mysql.TypeNewDecimal, charset.CharsetBin, mysql.UnsignedFlag, 10, 0},
{"c_datetime ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 22, 2},
{"c_datetime_d ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 19, 0},
{"c_time ", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 14, 3},
{"c_time_d ", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"c_timestamp ", mysql.TypeTimestamp, charset.CharsetBin, mysql.BinaryFlag, 24, 4},
{"c_timestamp_d", mysql.TypeTimestamp, charset.CharsetBin, mysql.BinaryFlag, 19, 0},
{"c_char ", mysql.TypeString, charset.CharsetUTF8MB4, 0, 20, 0},
{"c_bchar ", mysql.TypeString, charset.CharsetUTF8MB4, 0, 20, 0},
{"c_varchar ", mysql.TypeVarchar, charset.CharsetUTF8MB4, 0, 20, 0}, // TODO: tp should be TypeVarString
{"c_bvarchar ", mysql.TypeVarchar, charset.CharsetUTF8MB4, 0, 20, 0}, // TODO: tp should be TypeVarString
{"c_text_d ", mysql.TypeBlob, charset.CharsetUTF8MB4, 0, 65535, 0}, // TODO: BlobFlag
{"c_btext_d ", mysql.TypeBlob, charset.CharsetUTF8MB4, 0, 65535, 0}, // TODO: BlobFlag
{"c_binary ", mysql.TypeString, charset.CharsetBin, mysql.BinaryFlag, 20, 0},
{"c_varbinary ", mysql.TypeVarchar, charset.CharsetBin, mysql.BinaryFlag, 20, 0}, // TODO: tp should be TypeVarString
{"c_blob_d ", mysql.TypeBlob, charset.CharsetBin, mysql.BinaryFlag, 65535, 0}, // TODO: BlobFlag
{"c_set ", mysql.TypeSet, charset.CharsetUTF8MB4, 0, 5, 0}, // TODO: SetFlag
{"c_enum ", mysql.TypeEnum, charset.CharsetUTF8MB4, 0, 1, 0}, // TODO: EnumFlag
}
}
func (s *testInferTypeSuite) createTestCase4StrFuncs() []typeInferTestCase {
return []typeInferTestCase{
{"strcmp(c_char, c_char)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"space(c_int_d)", mysql.TypeLongBlob, mysql.DefaultCharset, 0, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"CONCAT(c_binary, c_int_d)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 40, types.UnspecifiedLength},
{"CONCAT(c_bchar, c_int_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"CONCAT(c_bchar, 0x80)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 23, types.UnspecifiedLength},
{"CONCAT('T', 'i', 'DB')", mysql.TypeVarString, charset.CharsetUTF8MB4, 0 | mysql.NotNullFlag, 4, types.UnspecifiedLength},
{"CONCAT('T', 'i', 'DB', c_binary)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 24, types.UnspecifiedLength},
{"CONCAT_WS('-', 'T', 'i', 'DB')", mysql.TypeVarString, charset.CharsetUTF8MB4, 0 | mysql.NotNullFlag, 6, types.UnspecifiedLength},
{"CONCAT_WS(',', 'TiDB', c_binary)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 25, types.UnspecifiedLength},
{"left(c_int_d, c_int_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"right(c_int_d, c_int_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"lower(c_int_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"lower(c_binary)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"upper(c_int_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"upper(c_binary)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"replace(1234, 2, 55)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0 | mysql.NotNullFlag, 20, types.UnspecifiedLength},
{"replace(c_binary, 1, 2)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"to_base64(c_binary)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 28, types.UnspecifiedLength},
{"substr(c_int_d, c_int_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"substr(c_binary, c_int_d)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"uuid()", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 36, types.UnspecifiedLength},
{"bit_length(c_char)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"substring_index(c_int_d, '.', 1)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"substring_index(c_binary, '.', 1)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"hex(c_char)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 120, types.UnspecifiedLength},
{"hex(c_int_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 22, types.UnspecifiedLength},
{"unhex(c_int_d)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 6, types.UnspecifiedLength},
{"unhex(c_char)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 30, types.UnspecifiedLength},
{"ltrim(c_char)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"ltrim(c_binary)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"rtrim(c_char)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"rtrim(c_binary)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"trim(c_char)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"trim(c_binary)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"ascii(c_char)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"ord(c_char)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{`c_int_d like 'abc%'`, mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"tidb_version()", mysql.TypeVarString, charset.CharsetUTF8MB4, 0 | mysql.NotNullFlag, len(printer.GetTiDBInfo()), types.UnspecifiedLength},
{"tidb_is_ddl_owner()", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, mysql.MaxIntWidth, 0},
{"password(c_char)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, mysql.PWDHashLen + 1, types.UnspecifiedLength},
{"elt(c_int_d, c_char, c_char, c_char)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"elt(c_int_d, c_char, c_char, c_binary)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"elt(c_int_d, c_char, c_int_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"elt(c_int_d, c_char, c_double_d, c_int_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"elt(c_int_d, c_char, c_double_d, c_int_d, c_binary)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"locate(c_char, c_char)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"locate(c_binary, c_binary)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"locate(c_char, c_binary)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"locate(c_binary, c_char)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"locate(c_char, c_char, c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"locate(c_char, c_binary, c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"locate(c_binary, c_char, c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"locate(c_binary, c_binary, c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"lpad('TiDB', 12, 'go' )", mysql.TypeVarString, charset.CharsetUTF8MB4, mysql.NotNullFlag, 48, types.UnspecifiedLength},
{"lpad(c_binary, 12, 'go' )", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 12, types.UnspecifiedLength},
{"lpad(c_char, c_int_d, c_binary)", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"lpad(c_char, c_int_d, c_char )", mysql.TypeLongBlob, charset.CharsetUTF8MB4, 0, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"rpad('TiDB', 12, 'go' )", mysql.TypeVarString, charset.CharsetUTF8MB4, mysql.NotNullFlag, 48, types.UnspecifiedLength},
{"rpad(c_binary, 12, 'go' )", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 12, types.UnspecifiedLength},
{"rpad(c_char, c_int_d, c_binary)", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"rpad(c_char, c_int_d, c_char )", mysql.TypeLongBlob, charset.CharsetUTF8MB4, 0, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"from_base64(c_int_d )", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"from_base64(c_bigint_d )", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"from_base64(c_float_d )", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"from_base64(c_double_d )", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"from_base64(c_decimal )", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"from_base64(c_datetime )", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"from_base64(c_time_d )", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"from_base64(c_timestamp_d)", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"from_base64(c_char )", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"from_base64(c_varchar )", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"from_base64(c_text_d )", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"from_base64(c_binary )", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"from_base64(c_varbinary )", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"from_base64(c_blob_d )", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"from_base64(c_set )", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"from_base64(c_enum )", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"bin(c_int_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"bin(c_bigint_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"bin(c_float_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"bin(c_double_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"bin(c_decimal )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"bin(c_datetime )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"bin(c_time_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"bin(c_timestamp_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"bin(c_char )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"bin(c_varchar )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"bin(c_text_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"bin(c_binary )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"bin(c_varbinary )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"bin(c_blob_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"bin(c_set )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"bin(c_enum )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"char_length(c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"char_length(c_float_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"char_length(c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"char_length(c_decimal)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"char_length(c_datetime)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"char_length(c_time_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"char_length(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"char_length(c_char)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"char_length(c_varchar)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"char_length(c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"char_length(c_binary)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"char_length(c_varbinary)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"char_length(c_blob_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"char_length(c_set)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"char_length(c_enum)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"character_length(c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"character_length(c_float_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"character_length(c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"character_length(c_decimal)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"character_length(c_datetime)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"character_length(c_time_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"character_length(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"character_length(c_char)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"character_length(c_varchar)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"character_length(c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"character_length(c_binary)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"character_length(c_varbinary)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"character_length(c_blob_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"character_length(c_set)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"character_length(c_enum)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"char(c_int_d )", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 4, types.UnspecifiedLength},
{"char(c_bigint_d )", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 4, types.UnspecifiedLength},
{"char(c_float_d )", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 4, types.UnspecifiedLength},
{"char(c_double_d )", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 4, types.UnspecifiedLength},
{"char(c_decimal )", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 4, types.UnspecifiedLength},
{"char(c_datetime )", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 4, types.UnspecifiedLength},
{"char(c_time_d )", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 4, types.UnspecifiedLength},
{"char(c_timestamp_d)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 4, types.UnspecifiedLength},
{"char(c_char )", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 4, types.UnspecifiedLength},
{"char(c_varchar )", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 4, types.UnspecifiedLength},
{"char(c_text_d )", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 4, types.UnspecifiedLength},
{"char(c_binary )", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 4, types.UnspecifiedLength},
{"char(c_varbinary )", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 4, types.UnspecifiedLength},
{"char(c_blob_d )", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 4, types.UnspecifiedLength},
{"char(c_set )", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 4, types.UnspecifiedLength},
{"char(c_enum )", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 4, types.UnspecifiedLength},
{"char(c_int_d , c_int_d using utf8)", mysql.TypeVarString, charset.CharsetUTF8, 0, 8, types.UnspecifiedLength},
{"char(c_bigint_d , c_bigint_d using utf8)", mysql.TypeVarString, charset.CharsetUTF8, 0, 8, types.UnspecifiedLength},
{"char(c_float_d , c_float_d using utf8)", mysql.TypeVarString, charset.CharsetUTF8, 0, 8, types.UnspecifiedLength},
{"char(c_double_d , c_double_d using utf8)", mysql.TypeVarString, charset.CharsetUTF8, 0, 8, types.UnspecifiedLength},
{"char(c_decimal , c_decimal using utf8)", mysql.TypeVarString, charset.CharsetUTF8, 0, 8, types.UnspecifiedLength},
{"char(c_datetime , c_datetime using utf8)", mysql.TypeVarString, charset.CharsetUTF8, 0, 8, types.UnspecifiedLength},
{"char(c_time_d , c_time_d using utf8)", mysql.TypeVarString, charset.CharsetUTF8, 0, 8, types.UnspecifiedLength},
{"char(c_timestamp_d, c_timestamp_d using utf8)", mysql.TypeVarString, charset.CharsetUTF8, 0, 8, types.UnspecifiedLength},
{"char(c_char , c_char using utf8)", mysql.TypeVarString, charset.CharsetUTF8, 0, 8, types.UnspecifiedLength},
{"char(c_varchar , c_varchar using utf8)", mysql.TypeVarString, charset.CharsetUTF8, 0, 8, types.UnspecifiedLength},
{"char(c_text_d , c_text_d using utf8)", mysql.TypeVarString, charset.CharsetUTF8, 0, 8, types.UnspecifiedLength},
{"char(c_binary , c_binary using utf8)", mysql.TypeVarString, charset.CharsetUTF8, 0, 8, types.UnspecifiedLength},
{"char(c_varbinary , c_varbinary using utf8)", mysql.TypeVarString, charset.CharsetUTF8, 0, 8, types.UnspecifiedLength},
{"char(c_blob_d , c_blob_d using utf8)", mysql.TypeVarString, charset.CharsetUTF8, 0, 8, types.UnspecifiedLength},
{"char(c_set , c_set using utf8)", mysql.TypeVarString, charset.CharsetUTF8, 0, 8, types.UnspecifiedLength},
{"char(c_enum , c_enum using utf8)", mysql.TypeVarString, charset.CharsetUTF8, 0, 8, types.UnspecifiedLength},
{"instr(c_char, c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"instr(c_char, c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"instr(c_char, c_time_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"reverse(c_int_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"reverse(c_bigint_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"reverse(c_float_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, -1, types.UnspecifiedLength},
{"reverse(c_double_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, -1, types.UnspecifiedLength},
{"reverse(c_decimal )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 8, types.UnspecifiedLength},
{"reverse(c_char )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"reverse(c_varchar )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"reverse(c_text_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 65535, types.UnspecifiedLength},
{"reverse(c_binary )", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"reverse(c_varbinary )", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"reverse(c_blob_d )", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 65535, types.UnspecifiedLength},
{"reverse(c_set )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 5, types.UnspecifiedLength},
{"reverse(c_enum )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 1, types.UnspecifiedLength},
{"oct(c_int_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"oct(c_bigint_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"oct(c_float_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"oct(c_double_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"oct(c_decimal )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"oct(c_datetime )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"oct(c_time_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"oct(c_timestamp_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"oct(c_char )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"oct(c_varchar )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"oct(c_text_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"oct(c_binary )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"oct(c_varbinary )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"oct(c_blob_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"oct(c_set )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"oct(c_enum )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"find_in_set(c_int_d , c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"find_in_set(c_bigint_d , c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"find_in_set(c_float_d , c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"find_in_set(c_double_d , c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"find_in_set(c_decimal , c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"find_in_set(c_datetime , c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"find_in_set(c_time_d , c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"find_in_set(c_timestamp_d, c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"find_in_set(c_char , c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"find_in_set(c_varchar , c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"find_in_set(c_text_d , c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"find_in_set(c_binary , c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"find_in_set(c_varbinary , c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"find_in_set(c_blob_d , c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"find_in_set(c_set , c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"find_in_set(c_enum , c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"make_set(c_int_d , c_text_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 65535, types.UnspecifiedLength},
{"make_set(c_bigint_d , c_text_d, c_binary)", mysql.TypeMediumBlob, charset.CharsetBin, mysql.BinaryFlag, 65556, types.UnspecifiedLength},
{"make_set(1 , c_text_d, 0x40)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 65535, types.UnspecifiedLength},
{"quote(c_int_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 42, types.UnspecifiedLength},
{"quote(c_bigint_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 42, types.UnspecifiedLength},
{"quote(c_float_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 0, types.UnspecifiedLength},
{"quote(c_double_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 0, types.UnspecifiedLength},
{"convert(c_double_d using utf8mb4)", mysql.TypeLongBlob, charset.CharsetUTF8MB4, 0, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"convert(c_binary using utf8mb4)", mysql.TypeLongBlob, charset.CharsetUTF8MB4, 0, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"convert(c_binary using 'binary')", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"convert(c_text_d using 'binary')", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"insert(c_varchar, c_int_d, c_int_d, c_varchar)", mysql.TypeLongBlob, charset.CharsetUTF8MB4, 0, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"insert(c_varchar, c_int_d, c_int_d, 0x40)", mysql.TypeLongBlob, charset.CharsetUTF8MB4, 0, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"insert(c_varchar, c_int_d, c_int_d, c_binary)", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"insert(c_binary, c_int_d, c_int_d, c_varchar)", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"insert(c_binary, c_int_d, c_int_d, c_binary)", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"export_set(c_double_d, c_text_d, c_text_d)", mysql.TypeLongBlob, charset.CharsetUTF8MB4, 0, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"export_set(c_double_d, c_text_d, c_text_d, c_text_d)", mysql.TypeLongBlob, charset.CharsetUTF8MB4, 0, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"export_set(c_double_d, c_text_d, c_text_d, c_text_d, c_int_d)", mysql.TypeLongBlob, charset.CharsetUTF8MB4, 0, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"format(c_double_d, c_double_d)", mysql.TypeLongBlob, charset.CharsetUTF8MB4, 0, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"format(c_double_d, c_double_d, c_binary)", mysql.TypeLongBlob, charset.CharsetUTF8MB4, 0, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"field(c_double_d, c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
}
}
func (s *testInferTypeSuite) createTestCase4MathFuncs() []typeInferTestCase {
return []typeInferTestCase{
{"cos(c_double_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sin(c_double_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"tan(c_double_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"exp(c_int_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"exp(c_float_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"exp(c_double_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"exp(c_decimal)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"exp(c_datetime)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"exp(c_time_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"exp(c_timestamp_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"exp(c_binary)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"pi()", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 8, 6},
{"~c_int_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, mysql.MaxIntWidth, 0},
{"!c_int_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_int_d & c_int_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, mysql.MaxIntWidth, 0},
{"c_int_d | c_int_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, mysql.MaxIntWidth, 0},
{"c_int_d ^ c_int_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, mysql.MaxIntWidth, 0},
{"c_int_d << c_int_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, mysql.MaxIntWidth, 0},
{"c_int_d >> c_int_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, mysql.MaxIntWidth, 0},
{"log2(c_int_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"log10(c_int_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"log(c_int_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"log(2, c_int_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"degrees(c_int_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"atan(c_double_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"atan(c_double_d,c_double_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"asin(c_double_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"acos(c_double_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"cot(c_int_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"cot(c_float_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"cot(c_double_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"cot(c_decimal)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"cot(c_datetime)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"cot(c_time_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"cot(c_timestamp_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"cot(c_binary)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"floor(c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"floor(c_uint_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag, 10, 0},
{"floor(c_bigint_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 20, 0}, // TODO: Flen should be 17
{"floor(c_ubigint_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag, 20, 0}, // TODO: Flen should be 17
{"floor(c_decimal)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"floor(c_udecimal)", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag, 10, 0},
{"floor(c_double_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, 0},
{"floor(c_udouble_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, 0},
{"floor(c_float_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 12, 0},
{"floor(c_ufloat_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 12, 0},
{"floor(c_datetime)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"floor(c_timestamp_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"floor(c_time_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"floor(c_enum)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"floor(c_text_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"floor(18446744073709551615)", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag | mysql.NotNullFlag, 20, 0},
{"floor(18446744073709551615.1)", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 22, 0},
{"ceil(c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"ceil(c_uint_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag, 10, 0},
{"ceil(c_bigint_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 20, 0}, // TODO: Flen should be 17
{"ceil(c_ubigint_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag, 20, 0}, // TODO: Flen should be 17
{"ceil(c_decimal)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"floor(c_udecimal)", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag, 10, 0},
{"ceil(c_double_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, 0},
{"floor(c_udouble_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, 0},
{"ceil(c_float_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 12, 0},
{"floor(c_ufloat_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 12, 0},
{"ceil(c_datetime)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"ceil(c_timestamp_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"ceil(c_time_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"ceil(c_enum)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"ceil(c_text_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"ceil(18446744073709551615)", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag | mysql.NotNullFlag, 20, 0},
{"ceil(18446744073709551615.1)", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 22, 0},
{"ceiling(c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"ceiling(c_decimal)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"ceiling(c_double_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, 0},
{"ceiling(c_float_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 12, 0},
{"ceiling(c_datetime)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"ceiling(c_time_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"ceiling(c_enum)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"ceiling(c_text_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"ceiling(18446744073709551615)", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag | mysql.NotNullFlag, 20, 0},
{"ceiling(18446744073709551615.1)", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 22, 0},
{"conv(c_char, c_int_d, c_int_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"conv(c_int_d, c_int_d, c_int_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"abs(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"abs(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 20, 0},
{"abs(c_float_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"abs(c_double_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"abs(c_decimal )", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 6, 3},
{"abs(c_datetime )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"abs(c_time_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"abs(c_timestamp_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"abs(c_char )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"abs(c_varchar )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"abs(c_text_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"abs(c_binary )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"abs(c_varbinary )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"abs(c_blob_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"abs(c_set )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"abs(c_enum )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"round(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"round(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 20, 0},
{"round(c_float_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 12, 0}, // flen Should be 17.
{"round(c_double_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, 0}, // flen Should be 17.
{"round(c_decimal )", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 6, 0}, // flen Should be 5.
{"round(c_datetime )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"round(c_time_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"round(c_timestamp_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"round(c_char )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"round(c_varchar )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"round(c_text_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"round(c_binary )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"round(c_varbinary )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"round(c_blob_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"round(c_set )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"round(c_enum )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"truncate(c_int_d, 1)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"truncate(c_int_d, -5)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"truncate(c_int_d, 100)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"truncate(c_double_d, 1)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 24, 1},
{"truncate(c_double_d, 5)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 28, 5},
{"truncate(c_double_d, 100)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 53, 30},
{"rand( )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"rand(c_int_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"pow(c_int_d, c_int_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"pow(c_float_d, c_double_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"pow(c_int_d, c_bigint_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sign(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"sign(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"sign(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"sign(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"sign(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"sign(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"sign(c_time_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"sign(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"sign(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"sign(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"sign(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"sign(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"sign(c_varbinary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"sign(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"sign(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"sign(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"sqrt(c_int_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sqrt(c_bigint_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sqrt(c_float_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sqrt(c_double_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sqrt(c_decimal )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sqrt(c_datetime )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sqrt(c_time_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sqrt(c_timestamp_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sqrt(c_char )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sqrt(c_varchar )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sqrt(c_text_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sqrt(c_binary )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sqrt(c_varbinary )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sqrt(c_blob_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sqrt(c_set )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sqrt(c_enum )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"CRC32(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 10, 0},
{"CRC32(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 10, 0},
{"CRC32(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 10, 0},
{"CRC32(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 10, 0},
{"CRC32(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 10, 0},
{"CRC32(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 10, 0},
{"CRC32(c_time_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 10, 0},
{"CRC32(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 10, 0},
{"CRC32(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 10, 0},
{"CRC32(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 10, 0},
{"CRC32(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 10, 0},
{"CRC32(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 10, 0},
{"CRC32(c_varbinary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 10, 0},
{"CRC32(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 10, 0},
{"CRC32(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 10, 0},
{"CRC32(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 10, 0},
{"radians(c_int_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"radians(c_bigint_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"radians(c_float_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength}, // Should be 17.
{"radians(c_double_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength}, // Should be 17.
{"radians(c_decimal )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength}, // Should be 5.
{"radians(c_datetime )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"radians(c_time_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"radians(c_timestamp_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"radians(c_char )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"radians(c_varchar )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"radians(c_text_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"radians(c_binary )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"radians(c_varbinary )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"radians(c_blob_d )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"radians(c_set )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"radians(c_enum )", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
}
}
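
// createTestCase4ArithmeticFuncs returns type inference test cases for the
// arithmetic operators +, -, *, /, DIV and MOD applied to operands of
// various types (integers, decimals, doubles, strings, times and enums).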
func (s *testInferTypeSuite) createTestCase4ArithmeticFuncs() []typeInferTestCase {
return []typeInferTestCase{
{"c_int_d + c_int_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d + c_bigint_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d + c_char", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength},
{"c_int_d + c_time_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d + c_double_d", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength},
{"c_int_d + c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 26, 3},
{"c_datetime + c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 26, 3},
{"c_bigint_d + c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 26, 3},
{"c_double_d + c_decimal", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength},
{"c_double_d + c_char", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength},
{"c_double_d + c_enum", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength},
{"c_int_d - c_int_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d - c_bigint_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d - c_char", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength},
{"c_int_d - c_time_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d - c_double_d", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength},
{"c_int_d - c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 26, 3},
{"c_datetime - c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 26, 3},
{"c_bigint_d - c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 26, 3},
{"c_double_d - c_decimal", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength},
{"c_double_d - c_char", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength},
{"c_double_d - c_enum", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength},
{"c_int_d * c_int_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d * c_bigint_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d * c_char", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength},
{"c_int_d * c_time_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d * c_double_d", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength},
{"c_int_d * c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 29, 3},
{"c_datetime * c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 31, 5},
{"c_bigint_d * c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 29, 3},
{"c_double_d * c_decimal", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength},
{"c_double_d * c_char", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength},
{"c_double_d * c_enum", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, types.UnspecifiedLength, types.UnspecifiedLength},
{"c_int_d / c_int_d", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 15, 4},
{"c_int_d / c_bigint_d", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 15, 4},
{"c_int_d / c_char", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"c_int_d / c_time_d", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 15, 4},
{"c_int_d / c_double_d", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"c_int_d / c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 18, 4},
{"c_datetime / c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 29, 6}, // TODO: Flen should be 25.
{"c_bigint_d / c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 27, 4}, // TODO: Flen should be 28.
{"c_double_d / c_decimal", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"c_double_d / c_char", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"c_double_d / c_enum", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"c_int_d DIV c_int_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_uint_d DIV c_uint_d", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d DIV c_bigint_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_ubigint_d DIV c_ubigint_d", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_ubigint_d DIV c_bigint_d", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d DIV c_uint_d", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_ubigint_d DIV c_int_d", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d DIV c_char", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d DIV c_time_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d DIV c_double_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d DIV c_udouble_d", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d DIV c_decimal", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d DIV c_udecimal", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_decimal DIV c_udecimal", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_datetime DIV c_decimal", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_bigint_d DIV c_decimal", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_double_d DIV c_decimal", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_double_d DIV c_char", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_double_d DIV c_enum", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d MOD c_int_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_uint_d MOD c_uint_d", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d MOD c_bigint_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_ubigint_d MOD c_ubigint_d", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_ubigint_d MOD c_bigint_d", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d MOD c_uint_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_ubigint_d MOD c_int_d", mysql.TypeLonglong, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d MOD c_char", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d MOD c_time_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"c_int_d MOD c_double_d", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"c_int_d MOD c_udouble_d", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"c_int_d MOD c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 11, 3},
{"c_udecimal MOD c_int_d", mysql.TypeNewDecimal, charset.CharsetBin, mysql.UnsignedFlag | mysql.BinaryFlag, 11, 3},
{"c_decimal MOD c_udecimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 10, 3},
{"c_datetime MOD c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 22, 3},
{"c_bigint_d MOD c_decimal", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 20, 3},
{"c_double_d MOD c_decimal", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"c_double_d MOD c_char", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"c_double_d MOD c_enum", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
}
}
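
// createTestCase4LogicalFuncs returns type inference test cases for the
// logical operators AND, XOR, && and ||, all of which infer a
// boolean-flagged BIGINT with display width 1.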
func (s *testInferTypeSuite) createTestCase4LogicalFuncs() []typeInferTestCase {
return []typeInferTestCase{
{"c_int_d and c_int_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_int_d xor c_int_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_int_d && c_int_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_int_d || c_int_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
}
}
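
// createTestCase4ControlFuncs returns type inference test cases for the
// control flow functions IFNULL, IF and CASE WHEN.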
func (s *testInferTypeSuite) createTestCase4ControlFuncs() []typeInferTestCase {
return []typeInferTestCase{
{"ifnull(c_int_d, c_int_d)", mysql.TypeLong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"ifnull(c_int_d, c_decimal)", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 14, 3},
{"ifnull(c_int_d, c_char)", mysql.TypeString, charset.CharsetUTF8MB4, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"ifnull(c_int_d, c_binary)", mysql.TypeString, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"ifnull(c_char, c_binary)", mysql.TypeString, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"ifnull(null, null)", mysql.TypeNull, charset.CharsetBin, mysql.BinaryFlag, 0, 0},
{"ifnull(c_double_d, c_timestamp_d)", mysql.TypeVarchar, charset.CharsetUTF8MB4, 0, 22, types.UnspecifiedLength},
{"ifnull(c_json, c_decimal)", mysql.TypeLongBlob, charset.CharsetUTF8MB4, 0, math.MaxUint32, types.UnspecifiedLength},
{"if(c_int_d, c_decimal, c_int_d)", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 14, 3},
{"if(c_int_d, c_char, c_int_d)", mysql.TypeString, charset.CharsetUTF8MB4, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"if(c_int_d, c_binary, c_int_d)", mysql.TypeString, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"if(c_int_d, c_bchar, c_int_d)", mysql.TypeString, charset.CharsetUTF8MB4, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"if(c_int_d, c_char, c_decimal)", mysql.TypeString, charset.CharsetUTF8MB4, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"if(c_int_d, c_datetime, c_int_d)", mysql.TypeVarchar, charset.CharsetUTF8MB4, 0, 22, types.UnspecifiedLength},
{"if(c_int_d, c_int_d, c_double_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"if(c_int_d, c_time_d, c_datetime)", mysql.TypeDatetime, charset.CharsetUTF8MB4, mysql.BinaryFlag, 22, 2}, // TODO: should not be BinaryFlag
{"if(c_int_d, c_time, c_json)", mysql.TypeLongBlob, charset.CharsetUTF8MB4, 0, math.MaxUint32, types.UnspecifiedLength},
{"if(null, null, null)", mysql.TypeNull, charset.CharsetBin, mysql.BinaryFlag, 0, 0},
{"case when c_int_d then c_char else c_varchar end", mysql.TypeVarchar, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"case when c_int_d > 1 then c_double_d else c_bchar end", mysql.TypeString, charset.CharsetUTF8MB4, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"case when c_int_d > 2 then c_double_d when c_int_d < 1 then c_decimal else c_double_d end", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, 3},
{"case when c_double_d > 2 then c_decimal else 1 end", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 6, 3},
{"case when null then null else null end", mysql.TypeNull, charset.CharsetBin, mysql.BinaryFlag, 0, types.UnspecifiedLength},
}
}
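
// createTestCase4Aggregations returns type inference test cases for the
// aggregate functions SUM, AVG, GROUP_CONCAT and COUNT.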
func (s *testInferTypeSuite) createTestCase4Aggregations() []typeInferTestCase {
return []typeInferTestCase{
{"sum(c_int_d)", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 32, 0},
{"sum(c_float_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sum(c_double_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sum(c_decimal)", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 28, 3},
{"sum(cast(c_decimal as decimal(65,3)))", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 65, 3},
{"sum(1.0)", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 25, 1},
{"sum(1.2e2)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sum(c_char)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"avg(c_int_d)", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 15, 4},
{"avg(c_float_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"avg(c_double_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"avg(c_decimal)", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 10, 7},
{"avg(cast(c_decimal as decimal(65,3)))", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 65, 7},
{"avg(1.0)", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 7, 5},
{"avg(1.2e2)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"avg(c_char)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"group_concat(c_int_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, mysql.MaxBlobWidth, 0},
{"count(c_decimal)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 21, 0},
}
}
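
// createTestCase4InfoFunc returns type inference test cases for information
// functions such as LAST_INSERT_ID, FOUND_ROWS, DATABASE, CURRENT_USER,
// CONNECTION_ID and VERSION.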
func (s *testInferTypeSuite) createTestCase4InfoFunc() []typeInferTestCase {
return []typeInferTestCase{
{"last_insert_id( )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag | mysql.NotNullFlag, mysql.MaxIntWidth, 0},
{"last_insert_id(c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, mysql.MaxIntWidth, 0},
{"found_rows()", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, mysql.MaxIntWidth, 0},
{"database()", mysql.TypeVarString, charset.CharsetUTF8MB4, mysql.NotNullFlag, 64, types.UnspecifiedLength},
{"current_user()", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"current_role()", mysql.TypeVarString, charset.CharsetUTF8MB4, mysql.NotNullFlag, 64, types.UnspecifiedLength},
{"user()", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 64, types.UnspecifiedLength},
{"connection_id()", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag | mysql.NotNullFlag, mysql.MaxIntWidth, 0},
{"version()", mysql.TypeVarString, charset.CharsetUTF8MB4, mysql.NotNullFlag, 64, types.UnspecifiedLength},
}
}
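
// createTestCase4EncryptionFuncs returns type inference test cases for the
// encryption, hashing and compression functions: MD5, SHA, SHA1, SHA2,
// AES_ENCRYPT, AES_DECRYPT, COMPRESS, UNCOMPRESS, UNCOMPRESSED_LENGTH and
// RANDOM_BYTES.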
func (s *testInferTypeSuite) createTestCase4EncryptionFuncs() []typeInferTestCase {
return []typeInferTestCase{
{"md5(c_int_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 32, types.UnspecifiedLength},
{"md5(c_bigint_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 32, types.UnspecifiedLength},
{"md5(c_float_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 32, types.UnspecifiedLength},
{"md5(c_double_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 32, types.UnspecifiedLength},
{"md5(c_decimal )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 32, types.UnspecifiedLength},
{"md5(c_datetime )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 32, types.UnspecifiedLength},
{"md5(c_time_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 32, types.UnspecifiedLength},
{"md5(c_timestamp_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 32, types.UnspecifiedLength},
{"md5(c_char )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 32, types.UnspecifiedLength},
{"md5(c_varchar )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 32, types.UnspecifiedLength},
{"md5(c_text_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 32, types.UnspecifiedLength},
{"md5(c_binary )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 32, types.UnspecifiedLength},
{"md5(c_varbinary )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 32, types.UnspecifiedLength},
{"md5(c_blob_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 32, types.UnspecifiedLength},
{"md5(c_set )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 32, types.UnspecifiedLength},
{"md5(c_enum )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 32, types.UnspecifiedLength},
{"md5('1234' )", mysql.TypeVarString, charset.CharsetUTF8MB4, mysql.NotNullFlag, 32, types.UnspecifiedLength},
{"md5(1234 )", mysql.TypeVarString, charset.CharsetUTF8MB4, mysql.NotNullFlag, 32, types.UnspecifiedLength},
{"sha(c_int_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha(c_bigint_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha(c_float_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha(c_double_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha(c_decimal )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha(c_datetime )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha(c_time_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha(c_timestamp_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha(c_char )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha(c_varchar )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha(c_text_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha(c_binary )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha(c_varbinary )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha(c_blob_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha(c_set )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha(c_enum )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha('1234' )", mysql.TypeVarString, charset.CharsetUTF8MB4, mysql.NotNullFlag, 40, types.UnspecifiedLength},
{"sha(1234 )", mysql.TypeVarString, charset.CharsetUTF8MB4, mysql.NotNullFlag, 40, types.UnspecifiedLength},
{"sha1(c_int_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha1(c_bigint_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha1(c_float_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha1(c_double_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha1(c_decimal )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha1(c_datetime )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha1(c_time_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha1(c_timestamp_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha1(c_char )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha1(c_varchar )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha1(c_text_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha1(c_binary )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha1(c_varbinary )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha1(c_blob_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha1(c_set )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha1(c_enum )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 40, types.UnspecifiedLength},
{"sha1('1234' )", mysql.TypeVarString, charset.CharsetUTF8MB4, mysql.NotNullFlag, 40, types.UnspecifiedLength},
{"sha1(1234 )", mysql.TypeVarString, charset.CharsetUTF8MB4, mysql.NotNullFlag, 40, types.UnspecifiedLength},
{"sha2(c_int_d , 0)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_bigint_d , 0)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_float_d , 0)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_double_d , 0)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_decimal , 0)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_datetime , 0)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_time_d , 0)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_timestamp_d, 0)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_char , 0)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_varchar , 0)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_text_d , 0)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_binary , 0)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_varbinary , 0)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_blob_d , 0)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_set , 0)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_enum , 0)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2('1234' , 0)", mysql.TypeVarString, charset.CharsetUTF8MB4, mysql.NotNullFlag, 128, types.UnspecifiedLength},
{"sha2(1234 , 0)", mysql.TypeVarString, charset.CharsetUTF8MB4, mysql.NotNullFlag, 128, types.UnspecifiedLength},
{"sha2(c_int_d , '256')", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_bigint_d , '256')", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_float_d , '256')", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_double_d , '256')", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_decimal , '256')", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_datetime , '256')", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_time_d , '256')", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_timestamp_d, '256')", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_char , '256')", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_varchar , '256')", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_text_d , '256')", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_binary , '256')", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_varbinary , '256')", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_blob_d , '256')", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_set , '256')", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2(c_enum , '256')", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 128, types.UnspecifiedLength},
{"sha2('1234' , '256')", mysql.TypeVarString, charset.CharsetUTF8MB4, mysql.NotNullFlag, 128, types.UnspecifiedLength},
{"sha2(1234 , '256')", mysql.TypeVarString, charset.CharsetUTF8MB4, mysql.NotNullFlag, 128, types.UnspecifiedLength},
{"AES_ENCRYPT(c_int_d, 'key')", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 32, types.UnspecifiedLength},
{"AES_ENCRYPT(c_char, 'key')", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 32, types.UnspecifiedLength},
{"AES_ENCRYPT(c_varchar, 'key')", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 32, types.UnspecifiedLength},
{"AES_ENCRYPT(c_binary, 'key')", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 32, types.UnspecifiedLength},
{"AES_ENCRYPT(c_varbinary, 'key')", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 32, types.UnspecifiedLength},
{"AES_ENCRYPT('', 'key')", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 16, types.UnspecifiedLength},
{"AES_ENCRYPT('111111', 'key')", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 16, types.UnspecifiedLength},
{"AES_ENCRYPT('111111111111111', 'key')", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 16, types.UnspecifiedLength},
{"AES_ENCRYPT('1111111111111111', 'key')", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 32, types.UnspecifiedLength},
{"AES_ENCRYPT('11111111111111111', 'key')", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 32, types.UnspecifiedLength},
{"AES_DECRYPT('1111111111111111', 'key')", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 16, types.UnspecifiedLength},
{"AES_DECRYPT('11111111111111112222222222222222', 'key')", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 32, types.UnspecifiedLength},
{"COMPRESS(c_int_d)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 33, types.UnspecifiedLength},
{"COMPRESS(c_char)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 33, types.UnspecifiedLength},
{"COMPRESS(c_bchar)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 33, types.UnspecifiedLength},
{"COMPRESS(c_varchar)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 33, types.UnspecifiedLength},
{"COMPRESS(c_binary)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 33, types.UnspecifiedLength},
{"COMPRESS(c_varbinary)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 33, types.UnspecifiedLength},
{"COMPRESS('')", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 13, types.UnspecifiedLength},
{"COMPRESS('abcde')", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 18, types.UnspecifiedLength},
{"UNCOMPRESS(c_int_d)", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"UNCOMPRESS(c_char)", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"UNCOMPRESS(c_bchar)", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"UNCOMPRESS(c_varchar)", mysql.TypeLongBlob, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxBlobWidth, types.UnspecifiedLength},
{"UNCOMPRESSED_LENGTH(c_varchar)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"UNCOMPRESSED_LENGTH(c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"RANDOM_BYTES(5)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 1024, types.UnspecifiedLength},
{"RANDOM_BYTES('123')", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 1024, types.UnspecifiedLength},
{"RANDOM_BYTES('abc')", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 1024, types.UnspecifiedLength},
}
}
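
// createTestCase4CompareFuncs returns type inference test cases for the
// comparison functions COALESCE, ISNULL, NULLIF and INTERVAL.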
func (s *testInferTypeSuite) createTestCase4CompareFuncs() []typeInferTestCase {
return []typeInferTestCase{
{"coalesce(c_int_d, 1)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"coalesce(NULL, c_int_d)", mysql.TypeLong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"coalesce(c_int_d, c_decimal)", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 15, 3},
{"coalesce(c_int_d, c_datetime)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 22, types.UnspecifiedLength},
{"isnull(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"isnull(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"isnull(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"isnull(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"isnull(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"isnull(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"isnull(c_time_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"isnull(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"isnull(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"isnull(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"isnull(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"isnull(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"isnull(c_varbinary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"isnull(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"isnull(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"isnull(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"nullif(c_int_d , 123)", mysql.TypeLong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"nullif(c_bigint_d , 123)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 20, 0},
{"nullif(c_float_d , 123)", mysql.TypeFloat, charset.CharsetBin, mysql.BinaryFlag, 12, types.UnspecifiedLength},
{"nullif(c_double_d , 123)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"nullif(c_decimal , 123)", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 6, 3},
{"nullif(c_datetime , 123)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 22, 2},
{"nullif(c_time_d , 123)", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"nullif(c_timestamp_d, 123)", mysql.TypeTimestamp, charset.CharsetBin, mysql.BinaryFlag, 19, 0},
{"nullif(c_char , 123)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"nullif(c_varchar , 123)", mysql.TypeVarchar, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength}, // TODO: tp should be TypeVarString
{"nullif(c_text_d , 123)", mysql.TypeBlob, charset.CharsetUTF8MB4, 0, 65535, types.UnspecifiedLength}, // TODO: tp should be TypeMediumBlob
{"nullif(c_binary , 123)", mysql.TypeString, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength}, // TODO: tp should be TypeVarString
{"nullif(c_varbinary , 123)", mysql.TypeVarchar, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength}, // TODO: tp should be TypeVarString
{"nullif(c_blob_d , 123)", mysql.TypeBlob, charset.CharsetBin, mysql.BinaryFlag, 65535, types.UnspecifiedLength}, // TODO: tp should be TypeVarString
{"interval(c_int_d, c_int_d, c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"interval(c_int_d, c_float_d, c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
}
}
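
// createTestCase4Miscellaneous returns type inference test cases for
// miscellaneous functions: SLEEP, the INET_ATON/INET_NTOA and
// INET6_ATON/INET6_NTOA conversions, the IS_IPV4*/IS_IPV6 predicates and
// ANY_VALUE.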
func (s *testInferTypeSuite) createTestCase4Miscellaneous() []typeInferTestCase {
return []typeInferTestCase{
{"sleep(c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 21, 0},
{"sleep(c_float_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 21, 0},
{"sleep(c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 21, 0},
{"sleep(c_decimal)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 21, 0},
{"sleep(c_datetime)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 21, 0},
{"sleep(c_time_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 21, 0},
{"sleep(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 21, 0},
{"sleep(c_binary)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 21, 0},
{"inet_aton(c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 21, 0},
{"inet_aton(c_float_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 21, 0},
{"inet_aton(c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 21, 0},
{"inet_aton(c_decimal)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 21, 0},
{"inet_aton(c_datetime)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 21, 0},
{"inet_aton(c_time_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 21, 0},
{"inet_aton(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 21, 0},
{"inet_aton(c_binary)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag, 21, 0},
{"inet_ntoa(c_int_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 93, 0},
{"inet_ntoa(c_float_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 93, 0},
{"inet_ntoa(c_double_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 93, 0},
{"inet_ntoa(c_decimal)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 93, 0},
{"inet_ntoa(c_datetime)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 93, 0},
{"inet_ntoa(c_time_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 93, 0},
{"inet_ntoa(c_timestamp_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 93, 0},
{"inet_ntoa(c_binary)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 93, 0},
{"inet6_aton(c_int_d)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 16, 0},
{"inet6_aton(c_float_d)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 16, 0},
{"inet6_aton(c_double_d)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 16, 0},
{"inet6_aton(c_decimal)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 16, 0},
{"inet6_aton(c_datetime)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 16, 0},
{"inet6_aton(c_time_d)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 16, 0},
{"inet6_aton(c_timestamp_d)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 16, 0},
{"inet6_aton(c_binary)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 16, 0},
{"inet6_ntoa(c_int_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 117, 0},
{"inet6_ntoa(c_float_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 117, 0},
{"inet6_ntoa(c_double_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 117, 0},
{"inet6_ntoa(c_decimal)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 117, 0},
{"inet6_ntoa(c_datetime)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 117, 0},
{"inet6_ntoa(c_time_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 117, 0},
{"inet6_ntoa(c_timestamp_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 117, 0},
{"inet6_ntoa(c_binary)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 117, 0},
{"is_ipv4(c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4(c_float_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4(c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4(c_decimal)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4(c_datetime)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4(c_time_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4(c_binary)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4_compat(c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4_compat(c_float_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4_compat(c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4_compat(c_decimal)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4_compat(c_datetime)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4_compat(c_time_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4_compat(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4_compat(c_binary)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4_mapped(c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4_mapped(c_float_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4_mapped(c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4_mapped(c_decimal)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4_mapped(c_datetime)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4_mapped(c_time_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4_mapped(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv4_mapped(c_binary)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv6(c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv6(c_float_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv6(c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv6(c_decimal)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv6(c_datetime)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv6(c_time_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv6(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"is_ipv6(c_binary)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"any_value(c_int_d)", mysql.TypeLong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"any_value(c_bigint_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 20, 0},
{"any_value(c_float_d)", mysql.TypeFloat, charset.CharsetBin, mysql.BinaryFlag, 12, types.UnspecifiedLength},
{"any_value(c_double_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"any_value(c_decimal)", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 6, 3}, // TODO: Flen should be 8.
{"any_value(c_datetime)", mysql.TypeDatetime, charset.CharsetUTF8MB4, 0, 22, 2},
{"any_value(c_time_d)", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"any_value(c_timestamp_d)", mysql.TypeTimestamp, charset.CharsetUTF8MB4, 0, 19, 0},
{"any_value(c_char)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"any_value(c_bchar)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"any_value(c_varchar)", mysql.TypeVarchar, charset.CharsetUTF8MB4, 0, 20, types.UnspecifiedLength},
{"any_value(c_text_d)", mysql.TypeBlob, charset.CharsetUTF8MB4, 0, 65535, types.UnspecifiedLength},
{"any_value(c_binary)", mysql.TypeString, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"any_value(c_varbinary)", mysql.TypeVarchar, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength},
{"any_value(c_blob_d)", mysql.TypeBlob, charset.CharsetBin, mysql.BinaryFlag, 65535, types.UnspecifiedLength},
{"any_value(c_set)", mysql.TypeSet, charset.CharsetUTF8MB4, 0, 5, types.UnspecifiedLength},
{"any_value(c_enum)", mysql.TypeEnum, charset.CharsetUTF8MB4, 0, 1, types.UnspecifiedLength},
}
}
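
// createTestCase4OpFuncs returns type inference test cases for the
// IS TRUE and IS FALSE operators.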
func (s *testInferTypeSuite) createTestCase4OpFuncs() []typeInferTestCase {
return []typeInferTestCase{
{"c_int_d is true", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_decimal is true", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_double_d is true", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_float_d is true", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_datetime is true", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_time_d is true", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_enum is true", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_text_d is true", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"18446 is true", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag | mysql.NotNullFlag, 1, 0},
{"1844674.1 is true", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag | mysql.NotNullFlag, 1, 0},
{"c_int_d is false", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_decimal is false", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_double_d is false", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_float_d is false", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_datetime is false", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_time_d is false", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_enum is false", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_text_d is false", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"18446 is false", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag | mysql.NotNullFlag, 1, 0},
{"1844674.1 is false", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag | mysql.NotNullFlag, 1, 0},
}
}
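
// createTestCase4OtherFuncs returns type inference test cases for the IN
// predicate, BIT_COUNT and user variable references.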
func (s *testInferTypeSuite) createTestCase4OtherFuncs() []typeInferTestCase {
return []typeInferTestCase{
{"1 in (c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"1 in (c_decimal)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"1 in (c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"1 in (c_float_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"1 in (c_datetime)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"1 in (c_time_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"1 in (c_enum)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"1 in (c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"bit_count(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"bit_count(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"bit_count(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"bit_count(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"bit_count(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"bit_count(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"bit_count(c_time_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"bit_count(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"bit_count(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"bit_count(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"bit_count(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"bit_count(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"bit_count(c_varbinary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"bit_count(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"bit_count(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"bit_count(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{`@varname`, mysql.TypeVarString, charset.CharsetUTF8MB4, 0, mysql.MaxFieldVarCharLength, int(types.UnspecifiedFsp)},
}
}
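
// createTestCase4TimeFuncs returns type inference test cases for date and
// time functions, including TIME_FORMAT, DATE_FORMAT, TIMESTAMPADD,
// TO_SECONDS, TO_DAYS, UNIX_TIMESTAMP, TIMESTAMPDIFF, ADDTIME, SUBTIME and
// TIMESTAMP.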
func (s *testInferTypeSuite) createTestCase4TimeFuncs() []typeInferTestCase {
return []typeInferTestCase{
{`time_format('150:02:28', '%r%r%r%r')`, mysql.TypeVarString, charset.CharsetUTF8MB4, 0 | mysql.NotNullFlag, 44, types.UnspecifiedLength},
{`time_format(123456, '%r%r%r%r')`, mysql.TypeVarString, charset.CharsetUTF8MB4, 0 | mysql.NotNullFlag, 44, types.UnspecifiedLength},
{`time_format('bad string', '%r%r%r%r')`, mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 44, types.UnspecifiedLength},
{`time_format(null, '%r%r%r%r')`, mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 44, types.UnspecifiedLength},
{`date_format(null, '%r%r%r%r')`, mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 44, types.UnspecifiedLength},
{`date_format('2017-06-15', '%r%r%r%r')`, mysql.TypeVarString, charset.CharsetUTF8MB4, 0 | mysql.NotNullFlag, 44, types.UnspecifiedLength},
{`date_format(151113102019.12, '%r%r%r%r')`, mysql.TypeVarString, charset.CharsetUTF8MB4, 0 | mysql.NotNullFlag, 44, types.UnspecifiedLength},
{"timestampadd(HOUR, c_int_d, c_timestamp_d)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 19, types.UnspecifiedLength},
{"timestampadd(minute, c_double_d, c_timestamp_d)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 19, types.UnspecifiedLength},
{"timestampadd(SeconD, c_int_d, c_char)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 19, types.UnspecifiedLength},
{"timestampadd(SeconD, c_varchar, c_time_d)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 19, types.UnspecifiedLength},
{"timestampadd(SeconD, c_int_d, c_datetime)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 19, types.UnspecifiedLength},
{"timestampadd(SeconD, c_double_d, c_bchar)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 19, types.UnspecifiedLength},
{"timestampadd(SeconD, c_int_d, c_blob_d)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 19, types.UnspecifiedLength},
{"to_seconds(c_char)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 20, 0},
{"to_days(c_char)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 20, 0},
{"unix_timestamp(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"unix_timestamp(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"unix_timestamp(c_float_d )", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 18, 6},
{"unix_timestamp(c_double_d )", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 18, 6},
{"unix_timestamp(c_decimal )", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 15, 3},
{"unix_timestamp(c_decimal_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"unix_timestamp(c_datetime )", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 14, 2},
{"unix_timestamp(c_datetime_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"unix_timestamp(c_time )", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 15, 3},
{"unix_timestamp(c_time_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"unix_timestamp(c_timestamp )", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 16, 4},
{"unix_timestamp(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"unix_timestamp(c_char )", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 18, 6},
{"unix_timestamp(c_varchar )", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 18, 6},
{"unix_timestamp(c_text_d )", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 18, 6},
{"unix_timestamp(c_binary )", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 18, 6},
{"unix_timestamp(c_varbinary )", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 18, 6},
{"unix_timestamp(c_blob_d )", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 18, 6},
{"unix_timestamp(c_set )", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 18, 6},
{"unix_timestamp(c_enum )", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 18, 6},
{"unix_timestamp(null )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 11, 0},
{"unix_timestamp('12:12:12.123')", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 15, 3},
{"unix_timestamp('12:12:12.1234')", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, 16, 4},
// TODO: Add string literal tests for UNIX_TIMESTAMP. UNIX_TIMESTAMP respects the fsp in string literals.
{"timestampdiff(MONTH, c_datetime, c_datetime)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 20, 0},
{"timestampdiff(QuarteR, c_char, c_varchar)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 20, 0},
{"timestampdiff(second, c_int_d, c_bchar)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 20, 0},
{"timestampdiff(YEAR, c_blob_d, c_bigint_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 20, 0},
{"addtime(c_int_d, c_time_d)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"addtime(c_datetime_d, c_time_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, 0},
{"addtime(c_datetime, c_time_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, 2},
{"addtime(c_timestamp, c_time_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, 4},
{"addtime(c_timestamp_d, c_time_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, 0},
{"addtime(c_time, c_time)", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 15, 3},
{"addtime(c_time_d, c_time)", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 15, 3},
{"addtime(c_char, c_time_d)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"addtime(c_char, c_datetime)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"addtime(c_char, c_int_d)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"addtime(c_date, c_datetime)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"addtime(c_date, c_timestamp)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"addtime(c_date, c_time)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"subtime(c_int_d, c_time_d)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"subtime(c_datetime_d, c_time_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, 0},
{"subtime(c_datetime, c_time_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, 2},
{"subtime(c_timestamp, c_time_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, 4},
{"subtime(c_timestamp_d, c_time_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, 0},
{"subtime(c_time, c_time)", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 15, 3},
{"subtime(c_time_d, c_time)", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 15, 3},
{"subtime(c_char, c_time_d)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"subtime(c_char, c_datetime)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"subtime(c_char, c_int_d)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"subtime(c_date, c_datetime)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"subtime(c_date, c_timestamp)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"subtime(c_date, c_time)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"timestamp(c_int_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 19, types.UnspecifiedLength},
{"timestamp(c_float_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, types.UnspecifiedLength},
{"timestamp(c_double_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, types.UnspecifiedLength},
{"timestamp(c_decimal)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 23, types.UnspecifiedLength},
{"timestamp(c_udecimal)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 23, types.UnspecifiedLength},
{"timestamp(c_decimal_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 19, types.UnspecifiedLength},
{"timestamp(c_udecimal_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 19, types.UnspecifiedLength},
{"timestamp(c_datetime)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"timestamp(c_datetime_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 19, types.UnspecifiedLength},
{"timestamp(c_timestamp)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 24, types.UnspecifiedLength},
{"timestamp(c_time)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 23, types.UnspecifiedLength},
{"timestamp(c_time_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 19, types.UnspecifiedLength},
{"timestamp(c_bchar)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, types.UnspecifiedLength},
{"timestamp(c_char)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, types.UnspecifiedLength},
{"timestamp(c_varchar)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, types.UnspecifiedLength},
{"timestamp(c_text_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, types.UnspecifiedLength},
{"timestamp(c_btext_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, types.UnspecifiedLength},
{"timestamp(c_blob_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, types.UnspecifiedLength},
{"timestamp(c_set)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, types.UnspecifiedLength},
{"timestamp(c_enum)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, types.UnspecifiedLength},
{"timestamp(c_int_d, c_float_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, types.UnspecifiedLength},
{"timestamp(c_datetime, c_timestamp)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 24, types.UnspecifiedLength},
{"timestamp(c_timestamp, c_char)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, types.UnspecifiedLength},
{"timestamp(c_int_d, c_datetime)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 22, types.UnspecifiedLength},
{"addtime(c_int_d, c_time_d)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"addtime(c_datetime_d, c_time_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, 0},
{"addtime(c_datetime, c_time_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, 2},
{"addtime(c_timestamp, c_time_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, 4},
{"addtime(c_timestamp_d, c_time_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, 0},
{"addtime(c_time, c_time)", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 15, 3},
{"addtime(c_time_d, c_time)", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 15, 3},
{"addtime(c_char, c_time_d)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"addtime(c_char, c_datetime)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"addtime(c_char, c_int_d)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"addtime(c_date, c_datetime)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"addtime(c_date, c_timestamp)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"addtime(c_date, c_time)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"subtime(c_int_d, c_time_d)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"subtime(c_datetime_d, c_time_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, 0},
{"subtime(c_datetime, c_time_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, 2},
{"subtime(c_timestamp, c_time_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, 4},
{"subtime(c_timestamp_d, c_time_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, 0},
{"subtime(c_time, c_time)", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 15, 3},
{"subtime(c_time_d, c_time)", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 15, 3},
{"subtime(c_char, c_time_d)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"subtime(c_char, c_datetime)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"subtime(c_char, c_int_d)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"subtime(c_date, c_datetime)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"subtime(c_date, c_timestamp)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"subtime(c_date, c_time)", mysql.TypeString, charset.CharsetUTF8MB4, 0, 26, types.UnspecifiedLength},
{"hour(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_time )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_timestamp)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_varbinary)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"minute(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_time )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_timestamp)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_varbinary)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_time )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_timestamp)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_varbinary)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"microsecond(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_time )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_timestamp)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_varbinary)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"datediff(c_char, c_datetime)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 20, 0},
{"datediff(c_int_d, c_timestamp)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 20, 0},
{"datediff(c_double_d, c_timestamp)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 20, 0},
{"datediff(c_bchar, c_decimal)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 20, 0},
{"datediff(c_varchar, c_varbinary)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 20, 0},
{"datediff(c_float_d, c_time)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 20, 0},
{"dayofmonth(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"dayofmonth(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"dayofmonth(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"dayofmonth(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"dayofmonth(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"dayofmonth(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"dayofmonth(c_time )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"dayofmonth(c_timestamp)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"dayofmonth(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"dayofmonth(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"dayofmonth(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"dayofmonth(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"dayofmonth(c_varbinary)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"dayofmonth(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"dayofmonth(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"dayofmonth(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"dayofyear(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"dayofyear(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"dayofyear(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"dayofyear(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"dayofyear(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"dayofyear(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"dayofyear(c_time )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"dayofyear(c_timestamp)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"dayofyear(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"dayofyear(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"dayofyear(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"dayofyear(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"dayofyear(c_varbinary)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"dayofyear(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"dayofyear(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"dayofyear(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"dayofweek(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"dayofweek(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"dayofweek(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"dayofweek(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"dayofweek(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"dayofweek(c_time )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"dayofweek(c_timestamp)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"dayofweek(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"dayofweek(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"dayofweek(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"dayofweek(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"dayofweek(c_varbinary)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"dayofweek(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"dayofweek(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"dayofweek(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"hour(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_time_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_varbinary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"hour(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 3, 0},
{"minute(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_time_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_varbinary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"minute(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_time_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_varbinary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"second(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"microsecond(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_time_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_varbinary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"microsecond(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"last_day(c_datetime)", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxDateWidth, 0},
{"last_day(c_datetime_d)", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxDateWidth, 0},
{"last_day(c_timestamp)", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxDateWidth, 0},
{"last_day(c_timestamp_d)", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxDateWidth, 0},
{"last_day(c_char)", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxDateWidth, 0},
{"last_day(c_varchar)", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxDateWidth, 0},
{"last_day(c_varchar)", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxDateWidth, 0},
{"last_day(c_text_d)", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxDateWidth, 0},
{"last_day(c_blob_d)", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxDateWidth, 0},
{"week(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_time_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_varbinary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_int_d , c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_bigint_d , c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_float_d , c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_double_d , c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_decimal , c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_datetime , c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_time_d , c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_timestamp_d, c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_char , c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_varchar , c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_text_d , c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_binary , c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_varbinary , c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_blob_d , c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_set , c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"week(c_enum , c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"weekofyear(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"weekofyear(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"weekofyear(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"weekofyear(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"weekofyear(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"weekofyear(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"weekofyear(c_time_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"weekofyear(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"weekofyear(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"weekofyear(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"weekofyear(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"weekofyear(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"weekofyear(c_varbinary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"weekofyear(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"weekofyear(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"weekofyear(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"yearweek(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"yearweek(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"yearweek(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"yearweek(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"yearweek(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"yearweek(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"yearweek(c_time_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"yearweek(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"yearweek(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"yearweek(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"yearweek(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"yearweek(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"yearweek(c_varbinary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"yearweek(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"yearweek(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"yearweek(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"year(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 4, 0},
{"year(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 4, 0},
{"year(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 4, 0},
{"year(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 4, 0},
{"year(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 4, 0},
{"year(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 4, 0},
{"year(c_time_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 4, 0},
{"year(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 4, 0},
{"year(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 4, 0},
{"year(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 4, 0},
{"year(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 4, 0},
{"year(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 4, 0},
{"year(c_varbinary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 4, 0},
{"year(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 4, 0},
{"year(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 4, 0},
{"year(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 4, 0},
{"month(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"month(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"month(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"month(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"month(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"month(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"month(c_time_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"month(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"month(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"month(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"month(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"month(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"month(c_varbinary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"month(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"month(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"month(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 2, 0},
{"monthName(c_int_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"monthName(c_bigint_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"monthName(c_float_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"monthName(c_double_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"monthName(c_decimal )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"monthName(c_datetime )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"monthName(c_time_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"monthName(c_timestamp_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"monthName(c_char )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"monthName(c_varchar )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"monthName(c_text_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"monthName(c_binary )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"monthName(c_varbinary )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"monthName(c_blob_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"monthName(c_set )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"monthName(c_enum )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"dayName(c_int_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"dayName(c_bigint_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"dayName(c_float_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"dayName(c_double_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"dayName(c_decimal )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"dayName(c_datetime )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"dayName(c_time_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"dayName(c_timestamp_d)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"dayName(c_char )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"dayName(c_varchar )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"dayName(c_text_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"dayName(c_binary )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"dayName(c_varbinary )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"dayName(c_blob_d )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"dayName(c_set )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"dayName(c_enum )", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 10, types.UnspecifiedLength},
{"now() ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 19, 0},
{"now(0) ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 19, 0},
{"now(1) ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 21, 1},
{"now(2) ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 22, 2},
{"now(3) ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 23, 3},
{"now(4) ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 24, 4},
{"now(5) ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 25, 5},
{"now(6) ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 26, 6},
{"now(7) ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, 6},
{"utc_timestamp() ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 19, 0},
{"utc_timestamp(0) ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 19, 0},
{"utc_timestamp(1) ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 21, 1},
{"utc_timestamp(2) ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 22, 2},
{"utc_timestamp(3) ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 23, 3},
{"utc_timestamp(4) ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 24, 4},
{"utc_timestamp(5) ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 25, 5},
{"utc_timestamp(6) ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 26, 6},
{"utc_timestamp(7) ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 26, 6},
{"utc_time() ", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 8, 0},
{"utc_time(0) ", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 8, 0},
{"utc_time(1) ", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 10, 1},
{"utc_time(2) ", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 11, 2},
{"utc_time(3) ", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 12, 3},
{"utc_time(4) ", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 13, 4},
{"utc_time(5) ", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 14, 5},
{"utc_time(6) ", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 15, 6},
{"utc_time(7) ", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 15, 6},
{"utc_date() ", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 10, 0},
{"curdate()", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 10, 0},
{"sysdate(4)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 19, 0},
{"date(c_int_d )", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"date(c_bigint_d )", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"date(c_float_d )", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"date(c_double_d )", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"date(c_decimal )", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"date(c_datetime )", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"date(c_time_d )", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"date(c_timestamp_d)", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"date(c_char )", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"date(c_varchar )", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"date(c_text_d )", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"date(c_binary )", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"date(c_varbinary )", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"date(c_blob_d )", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"date(c_set )", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"date(c_enum )", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"from_days(c_int_d )", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"from_days(c_bigint_d )", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"from_days(c_float_d )", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"from_days(c_double_d )", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"from_days(c_decimal )", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"from_days(c_datetime )", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"from_days(c_time_d )", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"from_days(c_timestamp_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"from_days(c_char )", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"from_days(c_varchar )", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"from_days(c_text_d )", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"from_days(c_binary )", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"from_days(c_varbinary )", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"from_days(c_blob_d )", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"from_days(c_set )", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"from_days(c_enum )", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"weekday(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"weekday(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"weekday(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"weekday(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"weekday(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"weekday(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"weekday(c_time_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"weekday(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"weekday(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"weekday(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"weekday(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"weekday(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"weekday(c_varbinary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"weekday(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"weekday(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"weekday(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"quarter(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"quarter(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"quarter(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"quarter(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"quarter(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"quarter(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"quarter(c_time_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"quarter(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"quarter(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"quarter(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"quarter(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"quarter(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"quarter(c_varbinary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"quarter(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"quarter(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"quarter(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"current_time()", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, mysql.MaxDurationWidthNoFsp, int(types.MinFsp)},
{"current_time(0)", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, mysql.MaxDurationWidthWithFsp, int(types.MinFsp)},
{"current_time(6)", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, mysql.MaxDurationWidthWithFsp, int(types.MaxFsp)},
{"sec_to_time(c_int_d )", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"sec_to_time(c_bigint_d )", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"sec_to_time(c_float_d )", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 17, 6},
{"sec_to_time(c_double_d )", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 17, 6},
{"sec_to_time(c_decimal )", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 14, 3},
{"sec_to_time(c_decimal_d )", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"sec_to_time(c_datetime )", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 13, 2},
{"sec_to_time(c_time )", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 14, 3},
{"sec_to_time(c_time_d )", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"sec_to_time(c_timestamp )", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 15, 4},
{"sec_to_time(c_timestamp_d)", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"sec_to_time(c_char )", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 17, 6},
{"sec_to_time(c_varchar )", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 17, 6},
{"sec_to_time(c_text_d )", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 17, 6},
{"sec_to_time(c_binary )", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 17, 6},
{"sec_to_time(c_varbinary )", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 17, 6},
{"sec_to_time(c_blob_d )", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 17, 6},
{"sec_to_time(c_set )", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 17, 6},
{"sec_to_time(c_enum )", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 17, 6},
{"time_to_sec(c_int_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"time_to_sec(c_bigint_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"time_to_sec(c_float_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"time_to_sec(c_double_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"time_to_sec(c_decimal )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"time_to_sec(c_decimal_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"time_to_sec(c_datetime )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"time_to_sec(c_time )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"time_to_sec(c_time_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"time_to_sec(c_timestamp )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"time_to_sec(c_timestamp_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"time_to_sec(c_char )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"time_to_sec(c_varchar )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"time_to_sec(c_text_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"time_to_sec(c_binary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"time_to_sec(c_varbinary )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"time_to_sec(c_blob_d )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"time_to_sec(c_set )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"time_to_sec(c_enum )", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"str_to_date(c_varchar, '%Y:%m:%d')", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxDateWidth, int(types.MinFsp)},
{"str_to_date(c_varchar, '%Y:%m:%d %H:%i:%s')", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxDatetimeWidthNoFsp, int(types.MinFsp)},
{"str_to_date(c_varchar, '%Y:%m:%d %H:%i:%s.%f')", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxDatetimeWidthWithFsp, int(types.MaxFsp)},
{"str_to_date(c_varchar, '%H:%i:%s')", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxDurationWidthNoFsp, int(types.MinFsp)},
{"str_to_date(c_varchar, '%H:%i:%s.%f')", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxDurationWidthWithFsp, int(types.MaxFsp)},
{"period_add(c_int_d , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_add(c_bigint_d , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_add(c_float_d , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_add(c_double_d , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_add(c_decimal , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_add(c_datetime , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_add(c_time_d , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_add(c_timestamp_d, c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_add(c_char , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_add(c_varchar , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_add(c_text_d , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_add(c_binary , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_add(c_varbinary , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_add(c_blob_d , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_add(c_set , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_add(c_enum , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_diff(c_int_d , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_diff(c_bigint_d , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_diff(c_float_d , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_diff(c_double_d , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_diff(c_decimal , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_diff(c_datetime , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_diff(c_time_d , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_diff(c_timestamp_d, c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_diff(c_char , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_diff(c_varchar , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_diff(c_text_d , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_diff(c_binary , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_diff(c_varbinary , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_diff(c_blob_d , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_diff(c_set , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"period_diff(c_enum , c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 6, 0},
{"maketime(c_int_d, c_int_d, c_double_d)", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 17, 6},
{"maketime(c_int_d, c_int_d, c_decimal)", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 14, 3},
{"maketime(c_int_d, c_int_d, c_decimal_d)", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 10, 0},
{"maketime(c_int_d, c_int_d, c_char)", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 17, 6},
{"maketime(c_int_d, c_int_d, c_varchar)", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 17, 6},
{"maketime(c_int_d, c_int_d, 1.2345)", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag, 15, 4},
{"get_format(DATE, 'USA')", mysql.TypeVarString, charset.CharsetUTF8MB4, mysql.NotNullFlag, 17, types.UnspecifiedLength},
{"convert_tz(c_time_d, c_text_d, c_text_d)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxDatetimeWidthWithFsp, int(types.MaxFsp)},
{"from_unixtime(20170101.999)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, mysql.MaxDatetimeWidthWithFsp, 3},
{"from_unixtime(20170101.1234567)", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, mysql.MaxDatetimeWidthWithFsp, int(types.MaxFsp)},
{"from_unixtime('20170101.999')", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, mysql.MaxDatetimeWidthWithFsp, int(types.MaxFsp)},
{"from_unixtime(20170101.123, '%H')", mysql.TypeVarString, charset.CharsetUTF8MB4, 0 | mysql.NotNullFlag, -1, types.UnspecifiedLength},
{"extract(day from c_char)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
{"extract(hour from c_char)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxIntWidth, 0},
}
}
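
// createTestCase4LikeFuncs returns type-inference cases for the RLIKE and
// REGEXP operators, which always produce a boolean-flagged BIGINT of width 1.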
func (s *testInferTypeSuite) createTestCase4LikeFuncs() []typeInferTestCase {
return []typeInferTestCase{
{"c_int_d rlike c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_bigint_d rlike c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_float_d rlike c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_double_d rlike c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_decimal rlike c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_datetime rlike c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_time_d rlike c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_timestamp_d rlike c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_char rlike c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_varchar rlike c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_text_d rlike c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_binary rlike c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_varbinary rlike c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_blob_d rlike c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_set rlike c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_enum rlike c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_int_d regexp c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_bigint_d regexp c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_float_d regexp c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_double_d regexp c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_decimal regexp c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_datetime regexp c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_time_d regexp c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_timestamp_d regexp c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_char regexp c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_varchar regexp c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_text_d regexp c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_binary regexp c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_varbinary regexp c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_blob_d regexp c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_set regexp c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
{"c_enum regexp c_text_d", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag | mysql.IsBooleanFlag, 1, 0},
}
}
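
// createTestCase4Literals returns type-inference cases for TIME, TIMESTAMP,
// and DATE literals; flen and decimal follow the fsp written in the literal.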
func (s *testInferTypeSuite) createTestCase4Literals() []typeInferTestCase {
return []typeInferTestCase{
{"time '00:00:00'", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 10, 0},
{"time '00'", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 10, 0},
{"time '3 00:00:00'", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 10, 0},
{"time '3 00:00:00.1234'", mysql.TypeDuration, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 15, 4},
{"timestamp '2017-01-01 01:01:01'", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, mysql.MaxDatetimeWidthNoFsp, 0},
{"timestamp '2017-01-00000000001 01:01:01.001'", mysql.TypeDatetime, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 23, 3},
{"date '2017-01-01'", mysql.TypeDate, charset.CharsetBin, mysql.BinaryFlag | mysql.NotNullFlag, 10, 0},
}
}
func (s *testInferTypeSuite) createTestCase4JSONFuncs() []typeInferTestCase {
return []typeInferTestCase{
{"json_type(c_json)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, 51, types.UnspecifiedLength},
// TODO: Flen of json_unquote doesn't follow MySQL now.
{"json_unquote(c_json)", mysql.TypeVarString, charset.CharsetUTF8MB4, 0, mysql.MaxFieldVarCharLength, types.UnspecifiedLength},
{"json_extract(c_json, '')", mysql.TypeJSON, charset.CharsetUTF8MB4, mysql.BinaryFlag, mysql.MaxBlobWidth, 0},
{"json_set(c_json, '', 0)", mysql.TypeJSON, charset.CharsetUTF8MB4, mysql.BinaryFlag, mysql.MaxBlobWidth, 0},
{"json_insert(c_json, '', 0)", mysql.TypeJSON, charset.CharsetUTF8MB4, mysql.BinaryFlag, mysql.MaxBlobWidth, 0},
{"json_replace(c_json, '', 0)", mysql.TypeJSON, charset.CharsetUTF8MB4, mysql.BinaryFlag, mysql.MaxBlobWidth, 0},
{"json_remove(c_json, '')", mysql.TypeJSON, charset.CharsetUTF8MB4, mysql.BinaryFlag, mysql.MaxBlobWidth, 0},
{"json_merge(c_json, c_json)", mysql.TypeJSON, charset.CharsetUTF8MB4, mysql.BinaryFlag, mysql.MaxBlobWidth, 0},
{"json_object('k', 'v')", mysql.TypeJSON, charset.CharsetUTF8MB4, mysql.BinaryFlag, mysql.MaxBlobWidth, 0},
{"json_array('k', 'v')", mysql.TypeJSON, charset.CharsetUTF8MB4, mysql.BinaryFlag, mysql.MaxBlobWidth, 0},
}
}
func (s *testInferTypeSuite) createTestCase4MiscellaneousFunc() []typeInferTestCase {
return []typeInferTestCase{
{"get_lock(c_char, c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"get_lock(c_char, c_bigint_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"get_lock(c_char, c_float_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"get_lock(c_char, c_double_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"get_lock(c_char, c_decimal)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"get_lock(c_varchar, c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"get_lock(c_text_d, c_int_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"release_lock(c_char)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"release_lock(c_char)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"release_lock(c_char)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"release_lock(c_char)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"release_lock(c_char)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"release_lock(c_varchar)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
{"release_lock(c_text_d)", mysql.TypeLonglong, charset.CharsetBin, mysql.BinaryFlag, 1, 0},
}
}
| expression/typeinfer_test.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.0009992802515625954,
0.0001832127891248092,
0.0001647954195505008,
0.00016897049499675632,
0.00007356721471296623
] |
{
"id": 3,
"code_window": [
"}\n",
"\n",
"func (c *twoPhaseCommitter) needLinearizability() bool {\n",
"\tGuaranteeLinearizabilityOption := c.txn.us.GetOption(kv.GuaranteeLinearizability)\n",
"\t// by default, guarantee\n",
"\treturn GuaranteeLinearizabilityOption == nil || GuaranteeLinearizabilityOption.(bool)\n",
"}\n",
"\n",
"func (c *twoPhaseCommitter) isAsyncCommit() bool {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn !c.txn.causalConsistency\n"
],
"file_path": "store/tikv/2pc.go",
"type": "replace",
"edit_start_line_idx": 856
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"bytes"
"context"
"encoding/json"
"fmt"
"math/rand"
"runtime/trace"
"sort"
"sync"
"sync/atomic"
"time"
"github.com/dgryski/go-farm"
"github.com/opentracing/opentracing-go"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/tidb/kv"
tikverr "github.com/pingcap/tidb/store/tikv/error"
tikv "github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/logutil"
"github.com/pingcap/tidb/store/tikv/metrics"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/store/tikv/unionstore"
"github.com/pingcap/tidb/store/tikv/util"
"go.uber.org/zap"
)
// MaxTxnTimeUse is the max time a Txn may use (in ms) from its begin to commit.
// We use it to abort the transaction to guarantee the GC worker will not influence it.
const MaxTxnTimeUse = 24 * 60 * 60 * 1000
// SchemaAmender is used by pessimistic transactions to amend commit mutations for schema change during 2pc.
type SchemaAmender interface {
	// AmendTxn is the amend entry point; new mutations will be generated based on the input mutations using schema change info.
	// The returned results are mutations that need to be prewritten and mutations that need to be cleaned up.
AmendTxn(ctx context.Context, startInfoSchema SchemaVer, change *RelatedSchemaChange, mutations CommitterMutations) (CommitterMutations, error)
}
// KVTxn contains methods to interact with a TiKV transaction.
type KVTxn struct {
snapshot *KVSnapshot
us *unionstore.KVUnionStore
store *KVStore // for connection to region.
startTS uint64
	startTime time.Time // Monotonic timestamp for recording txn duration.
commitTS uint64
mu sync.Mutex // For thread-safe LockKeys function.
setCnt int64
vars *tikv.Variables
committer *twoPhaseCommitter
lockedCnt int
valid bool
// schemaVer is the infoSchema fetched at startTS.
schemaVer SchemaVer
	// SchemaAmender is used to amend pessimistic txn commit mutations for schema changes
schemaAmender SchemaAmender
	// commitCallback is called after the current transaction is committed
commitCallback func(info string, err error)
binlog BinlogExecutor
schemaLeaseChecker SchemaLeaseChecker
syncLog bool
priority Priority
isPessimistic bool
enableAsyncCommit bool
enable1PC bool
scope string
kvFilter KVFilter
}
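// extractStartTs determines the start timestamp from the transaction options:
// an explicit StartTS wins, then the staleness-based options (PrevSec, MinStartTS,
// MaxPrevSec), and finally a fresh timestamp is fetched from the TSO.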
func extractStartTs(store *KVStore, options kv.TransactionOption) (uint64, error) {
var startTs uint64
var err error
if options.StartTS != nil {
startTs = *options.StartTS
} else if options.PrevSec != nil {
bo := NewBackofferWithVars(context.Background(), tsoMaxBackoff, nil)
startTs, err = store.getStalenessTimestamp(bo, options.TxnScope, *options.PrevSec)
} else if options.MinStartTS != nil {
stores := make([]*Store, 0)
allStores := store.regionCache.getStoresByType(tikvrpc.TiKV)
if options.TxnScope != oracle.GlobalTxnScope {
for _, store := range allStores {
if store.IsLabelsMatch([]*metapb.StoreLabel{
{
Key: DCLabelKey,
Value: options.TxnScope,
},
}) {
stores = append(stores, store)
}
}
} else {
stores = allStores
}
safeTS := store.getMinSafeTSByStores(stores)
startTs = *options.MinStartTS
// If the safeTS is larger than the minStartTS, we will use safeTS as StartTS, otherwise we will use
// minStartTS directly.
if oracle.CompareTS(startTs, safeTS) < 0 {
startTs = safeTS
}
} else if options.MaxPrevSec != nil {
bo := NewBackofferWithVars(context.Background(), tsoMaxBackoff, nil)
minStartTS, err := store.getStalenessTimestamp(bo, options.TxnScope, *options.MaxPrevSec)
if err != nil {
return 0, errors.Trace(err)
}
options.MinStartTS = &minStartTS
return extractStartTs(store, options)
} else {
bo := NewBackofferWithVars(context.Background(), tsoMaxBackoff, nil)
startTs, err = store.getTimestampWithRetry(bo, options.TxnScope)
}
return startTs, err
}
func newTiKVTxnWithOptions(store *KVStore, options kv.TransactionOption) (*KVTxn, error) {
if options.TxnScope == "" {
options.TxnScope = oracle.GlobalTxnScope
}
startTs, err := extractStartTs(store, options)
if err != nil {
return nil, errors.Trace(err)
}
snapshot := newTiKVSnapshot(store, startTs, store.nextReplicaReadSeed())
newTiKVTxn := &KVTxn{
snapshot: snapshot,
us: unionstore.NewUnionStore(snapshot),
store: store,
startTS: startTs,
startTime: time.Now(),
valid: true,
vars: tikv.DefaultVars,
scope: options.TxnScope,
}
return newTiKVTxn, nil
}
// SetSuccess is used to probe if kv variables are set or not. It is ONLY used in test cases.
var SetSuccess = false
// SetVars sets variables to the transaction.
func (txn *KVTxn) SetVars(vars *tikv.Variables) {
txn.vars = vars
txn.snapshot.vars = vars
failpoint.Inject("probeSetVars", func(val failpoint.Value) {
if val.(bool) {
SetSuccess = true
}
})
}
// GetVars gets variables from the transaction.
func (txn *KVTxn) GetVars() *tikv.Variables {
return txn.vars
}
// Get implements transaction interface.
func (txn *KVTxn) Get(ctx context.Context, k []byte) ([]byte, error) {
ret, err := txn.us.Get(ctx, k)
if tikverr.IsErrNotFound(err) {
return nil, err
}
if err != nil {
return nil, errors.Trace(err)
}
return ret, nil
}
// Set sets the value for key k as v into kv store.
// v must NOT be nil or empty, otherwise it returns ErrCannotSetNilValue.
func (txn *KVTxn) Set(k []byte, v []byte) error {
txn.setCnt++
return txn.us.GetMemBuffer().Set(k, v)
}
// String implements fmt.Stringer interface.
func (txn *KVTxn) String() string {
return fmt.Sprintf("%d", txn.StartTS())
}
// Iter creates an Iterator positioned on the first entry whose key is >= k.
// If such an entry is not found, it returns an invalid Iterator with no error.
// It yields only keys that are < upperBound. If upperBound is nil, it means the upperBound is unbounded.
// The Iterator must be Closed after use.
func (txn *KVTxn) Iter(k []byte, upperBound []byte) (unionstore.Iterator, error) {
return txn.us.Iter(k, upperBound)
}
// IterReverse creates a reversed Iterator positioned on the first entry whose key is less than k.
func (txn *KVTxn) IterReverse(k []byte) (unionstore.Iterator, error) {
return txn.us.IterReverse(k)
}
// Delete removes the entry for key k from kv store.
func (txn *KVTxn) Delete(k []byte) error {
return txn.us.GetMemBuffer().Delete(k)
}
// SetOption sets an option with a value. When val is nil, the default
// value of this option is used.
func (txn *KVTxn) SetOption(opt int, val interface{}) {
txn.us.SetOption(opt, val)
txn.snapshot.SetOption(opt, val)
}
// GetOption returns the option
func (txn *KVTxn) GetOption(opt int) interface{} {
return txn.us.GetOption(opt)
}
// DelOption deletes an option.
func (txn *KVTxn) DelOption(opt int) {
txn.us.DelOption(opt)
}
// SetSchemaLeaseChecker sets a hook to check schema version.
func (txn *KVTxn) SetSchemaLeaseChecker(checker SchemaLeaseChecker) {
txn.schemaLeaseChecker = checker
}
// EnableForceSyncLog tells TiKV to always sync the log for the transaction.
func (txn *KVTxn) EnableForceSyncLog() {
txn.syncLog = true
}
// SetPessimistic indicates if the transaction should use pessimistic locking.
func (txn *KVTxn) SetPessimistic(b bool) {
txn.isPessimistic = b
}
// SetSchemaVer updates the schema version used to validate the transaction.
func (txn *KVTxn) SetSchemaVer(schemaVer SchemaVer) {
txn.schemaVer = schemaVer
}
// SetPriority sets the priority for both write and read.
func (txn *KVTxn) SetPriority(pri Priority) {
txn.priority = pri
txn.GetSnapshot().SetPriority(pri)
}
// SetSchemaAmender sets an amender to update mutations after schema change.
func (txn *KVTxn) SetSchemaAmender(sa SchemaAmender) {
txn.schemaAmender = sa
}
// SetCommitCallback sets up a function that will be called when the transaction
// is finished.
func (txn *KVTxn) SetCommitCallback(f func(string, error)) {
txn.commitCallback = f
}
// SetEnableAsyncCommit indicates if the transaction will try to use async commit.
func (txn *KVTxn) SetEnableAsyncCommit(b bool) {
txn.enableAsyncCommit = b
}
// SetEnable1PC indicates if the transaction will try to use 1 phase commit.
func (txn *KVTxn) SetEnable1PC(b bool) {
txn.enable1PC = b
}
// SetScope sets the geographical scope of the transaction.
func (txn *KVTxn) SetScope(scope string) {
txn.scope = scope
}
// SetKVFilter sets the filter to ignore key-values in the memory buffer.
func (txn *KVTxn) SetKVFilter(filter KVFilter) {
txn.kvFilter = filter
}
// IsPessimistic returns true if it is pessimistic.
func (txn *KVTxn) IsPessimistic() bool {
return txn.isPessimistic
}
// GetScope returns the geographical scope of the transaction.
func (txn *KVTxn) GetScope() string {
return txn.scope
}
// Commit commits the transaction operations to KV store.
func (txn *KVTxn) Commit(ctx context.Context) error {
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span1 := span.Tracer().StartSpan("tikvTxn.Commit", opentracing.ChildOf(span.Context()))
defer span1.Finish()
ctx = opentracing.ContextWithSpan(ctx, span1)
}
defer trace.StartRegion(ctx, "CommitTxn").End()
if !txn.valid {
return tikverr.ErrInvalidTxn
}
defer txn.close()
failpoint.Inject("mockCommitError", func(val failpoint.Value) {
if val.(bool) && IsMockCommitErrorEnable() {
MockCommitErrorDisable()
failpoint.Return(errors.New("mock commit error"))
}
})
start := time.Now()
defer func() { metrics.TxnCmdHistogramWithCommit.Observe(time.Since(start).Seconds()) }()
// sessionID is used for log.
var sessionID uint64
val := ctx.Value(util.SessionID)
if val != nil {
sessionID = val.(uint64)
}
var err error
// If the txn use pessimistic lock, committer is initialized.
committer := txn.committer
if committer == nil {
committer, err = newTwoPhaseCommitter(txn, sessionID)
if err != nil {
return errors.Trace(err)
}
txn.committer = committer
}
defer committer.ttlManager.close()
initRegion := trace.StartRegion(ctx, "InitKeys")
err = committer.initKeysAndMutations()
initRegion.End()
if err != nil {
return errors.Trace(err)
}
if committer.mutations.Len() == 0 {
return nil
}
defer func() {
ctxValue := ctx.Value(util.CommitDetailCtxKey)
if ctxValue != nil {
commitDetail := ctxValue.(**util.CommitDetails)
if *commitDetail != nil {
(*commitDetail).TxnRetry++
} else {
*commitDetail = committer.getDetail()
}
}
}()
// latches disabled
	// pessimistic transactions should also bypass latches.
if txn.store.txnLatches == nil || txn.IsPessimistic() {
err = committer.execute(ctx)
if val == nil || sessionID > 0 {
txn.onCommitted(err)
}
logutil.Logger(ctx).Debug("[kv] txnLatches disabled, 2pc directly", zap.Error(err))
return errors.Trace(err)
}
// latches enabled
// for transactions which need to acquire latches
start = time.Now()
lock := txn.store.txnLatches.Lock(committer.startTS, committer.mutations.GetKeys())
commitDetail := committer.getDetail()
commitDetail.LocalLatchTime = time.Since(start)
if commitDetail.LocalLatchTime > 0 {
metrics.TiKVLocalLatchWaitTimeHistogram.Observe(commitDetail.LocalLatchTime.Seconds())
}
defer txn.store.txnLatches.UnLock(lock)
if lock.IsStale() {
return &tikverr.ErrWriteConflictInLatch{StartTS: txn.startTS}
}
err = committer.execute(ctx)
if val == nil || sessionID > 0 {
txn.onCommitted(err)
}
if err == nil {
lock.SetCommitTS(committer.commitTS)
}
logutil.Logger(ctx).Debug("[kv] txnLatches enabled while txn retryable", zap.Error(err))
return errors.Trace(err)
}
func (txn *KVTxn) close() {
txn.valid = false
}
// Rollback undoes the transaction operations to KV store.
func (txn *KVTxn) Rollback() error {
if !txn.valid {
return tikverr.ErrInvalidTxn
}
start := time.Now()
// Clean up pessimistic lock.
if txn.IsPessimistic() && txn.committer != nil {
err := txn.rollbackPessimisticLocks()
txn.committer.ttlManager.close()
if err != nil {
logutil.BgLogger().Error(err.Error())
}
}
txn.close()
logutil.BgLogger().Debug("[kv] rollback txn", zap.Uint64("txnStartTS", txn.StartTS()))
metrics.TxnCmdHistogramWithRollback.Observe(time.Since(start).Seconds())
return nil
}
func (txn *KVTxn) rollbackPessimisticLocks() error {
if txn.lockedCnt == 0 {
return nil
}
bo := NewBackofferWithVars(context.Background(), cleanupMaxBackoff, txn.vars)
keys := txn.collectLockedKeys()
return txn.committer.pessimisticRollbackMutations(bo, &PlainMutations{keys: keys})
}
func (txn *KVTxn) collectLockedKeys() [][]byte {
keys := make([][]byte, 0, txn.lockedCnt)
buf := txn.GetMemBuffer()
var err error
for it := buf.IterWithFlags(nil, nil); it.Valid(); err = it.Next() {
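		// Note: the error returned by it.Next() is intentionally discarded here.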
_ = err
if it.Flags().HasLocked() {
keys = append(keys, it.Key())
}
}
return keys
}
// TxnInfo is used to keep track of the info of a committed transaction (mainly for diagnosis and testing)
type TxnInfo struct {
TxnScope string `json:"txn_scope"`
StartTS uint64 `json:"start_ts"`
CommitTS uint64 `json:"commit_ts"`
TxnCommitMode string `json:"txn_commit_mode"`
AsyncCommitFallback bool `json:"async_commit_fallback"`
OnePCFallback bool `json:"one_pc_fallback"`
ErrMsg string `json:"error,omitempty"`
}
func (txn *KVTxn) onCommitted(err error) {
if txn.commitCallback != nil {
isAsyncCommit := txn.committer.isAsyncCommit()
isOnePC := txn.committer.isOnePC()
commitMode := "2pc"
if isOnePC {
commitMode = "1pc"
} else if isAsyncCommit {
commitMode = "async_commit"
}
info := TxnInfo{
TxnScope: txn.GetScope(),
StartTS: txn.startTS,
CommitTS: txn.commitTS,
TxnCommitMode: commitMode,
AsyncCommitFallback: txn.committer.hasTriedAsyncCommit && !isAsyncCommit,
OnePCFallback: txn.committer.hasTriedOnePC && !isOnePC,
}
if err != nil {
info.ErrMsg = err.Error()
}
infoStr, err2 := json.Marshal(info)
_ = err2
txn.commitCallback(string(infoStr), err)
}
}
// LockKeys tries to lock the entries with the keys in KV store.
// lockWaitTime is in ms; kv.LockAlwaysWait(0) means always wait for the lock, and kv.LockNowait(-1) means never wait for the lock
func (txn *KVTxn) LockKeys(ctx context.Context, lockCtx *tikv.LockCtx, keysInput ...[]byte) error {
// Exclude keys that are already locked.
var err error
keys := make([][]byte, 0, len(keysInput))
startTime := time.Now()
txn.mu.Lock()
defer txn.mu.Unlock()
defer func() {
metrics.TxnCmdHistogramWithLockKeys.Observe(time.Since(startTime).Seconds())
if err == nil {
if lockCtx.PessimisticLockWaited != nil {
if atomic.LoadInt32(lockCtx.PessimisticLockWaited) > 0 {
timeWaited := time.Since(lockCtx.WaitStartTime)
atomic.StoreInt64(lockCtx.LockKeysDuration, int64(timeWaited))
metrics.TiKVPessimisticLockKeysDuration.Observe(timeWaited.Seconds())
}
}
}
if lockCtx.LockKeysCount != nil {
*lockCtx.LockKeysCount += int32(len(keys))
}
if lockCtx.Stats != nil {
lockCtx.Stats.TotalTime = time.Since(startTime)
ctxValue := ctx.Value(util.LockKeysDetailCtxKey)
if ctxValue != nil {
lockKeysDetail := ctxValue.(**util.LockKeysDetails)
*lockKeysDetail = lockCtx.Stats
}
}
}()
memBuf := txn.us.GetMemBuffer()
for _, key := range keysInput {
// The value of lockedMap is only used by pessimistic transactions.
var valueExist, locked, checkKeyExists bool
if flags, err := memBuf.GetFlags(key); err == nil {
locked = flags.HasLocked()
valueExist = flags.HasLockedValueExists()
checkKeyExists = flags.HasNeedCheckExists()
}
if !locked {
keys = append(keys, key)
} else if txn.IsPessimistic() {
if checkKeyExists && valueExist {
alreadyExist := kvrpcpb.AlreadyExist{Key: key}
e := &tikverr.ErrKeyExist{AlreadyExist: &alreadyExist}
return txn.committer.extractKeyExistsErr(e)
}
}
if lockCtx.ReturnValues && locked {
			// An already locked key cannot return values, so we add an entry to let the caller get the value
// in other ways.
lockCtx.Values[string(key)] = tikv.ReturnedValue{AlreadyLocked: true}
}
}
if len(keys) == 0 {
return nil
}
keys = deduplicateKeys(keys)
if txn.IsPessimistic() && lockCtx.ForUpdateTS > 0 {
if txn.committer == nil {
// sessionID is used for log.
var sessionID uint64
var err error
val := ctx.Value(util.SessionID)
if val != nil {
sessionID = val.(uint64)
}
txn.committer, err = newTwoPhaseCommitter(txn, sessionID)
if err != nil {
return err
}
}
var assignedPrimaryKey bool
if txn.committer.primaryKey == nil {
txn.committer.primaryKey = keys[0]
assignedPrimaryKey = true
}
lockCtx.Stats = &util.LockKeysDetails{
LockKeys: int32(len(keys)),
}
bo := NewBackofferWithVars(ctx, pessimisticLockMaxBackoff, txn.vars)
txn.committer.forUpdateTS = lockCtx.ForUpdateTS
		// If the number of keys is greater than 1, they can be on different regions;
		// executing on multiple regions concurrently may lead to deadlock.
txn.committer.isFirstLock = txn.lockedCnt == 0 && len(keys) == 1
err = txn.committer.pessimisticLockMutations(bo, lockCtx, &PlainMutations{keys: keys})
if bo.totalSleep > 0 {
atomic.AddInt64(&lockCtx.Stats.BackoffTime, int64(bo.totalSleep)*int64(time.Millisecond))
lockCtx.Stats.Mu.Lock()
lockCtx.Stats.Mu.BackoffTypes = append(lockCtx.Stats.Mu.BackoffTypes, bo.types...)
lockCtx.Stats.Mu.Unlock()
}
if lockCtx.Killed != nil {
// If the kill signal is received during waiting for pessimisticLock,
// pessimisticLockKeys would handle the error but it doesn't reset the flag.
// We need to reset the killed flag here.
atomic.CompareAndSwapUint32(lockCtx.Killed, 1, 0)
}
if err != nil {
for _, key := range keys {
if txn.us.HasPresumeKeyNotExists(key) {
txn.us.UnmarkPresumeKeyNotExists(key)
}
}
keyMayBeLocked := !(tikverr.IsErrWriteConflict(err) || tikverr.IsErrKeyExist(err))
// If there is only 1 key and lock fails, no need to do pessimistic rollback.
if len(keys) > 1 || keyMayBeLocked {
wg := txn.asyncPessimisticRollback(ctx, keys)
if dl, ok := errors.Cause(err).(*tikverr.ErrDeadlock); ok && hashInKeys(dl.DeadlockKeyHash, keys) {
dl.IsRetryable = true
// Wait for the pessimistic rollback to finish before we retry the statement.
wg.Wait()
					// Sleep a little, wait for the other transaction that is blocked by this transaction to acquire the lock.
time.Sleep(time.Millisecond * 5)
failpoint.Inject("SingleStmtDeadLockRetrySleep", func() {
time.Sleep(300 * time.Millisecond)
})
}
}
if assignedPrimaryKey {
				// Unset the primary key if we assigned it but failed to lock it.
txn.committer.primaryKey = nil
}
return err
}
if assignedPrimaryKey {
txn.committer.ttlManager.run(txn.committer, lockCtx)
}
}
for _, key := range keys {
valExists := tikv.SetKeyLockedValueExists
		// PointGet and BatchPointGet will return the value in the pessimistic lock response; the value may not exist.
// For other lock modes, the locked key values always exist.
if lockCtx.ReturnValues {
			val := lockCtx.Values[string(key)]
if len(val.Value) == 0 {
valExists = tikv.SetKeyLockedValueNotExists
}
}
memBuf.UpdateFlags(key, tikv.SetKeyLocked, tikv.DelNeedCheckExists, valExists)
}
txn.lockedCnt += len(keys)
return nil
}
// deduplicateKeys deduplicates the keys; it uses sort instead of a map to avoid memory allocation.
func deduplicateKeys(keys [][]byte) [][]byte {
sort.Slice(keys, func(i, j int) bool {
return bytes.Compare(keys[i], keys[j]) < 0
})
deduped := keys[:1]
for i := 1; i < len(keys); i++ {
if !bytes.Equal(deduped[len(deduped)-1], keys[i]) {
deduped = append(deduped, keys[i])
}
}
return deduped
}
func (txn *KVTxn) asyncPessimisticRollback(ctx context.Context, keys [][]byte) *sync.WaitGroup {
	// Clone a new committer to execute in the background.
committer := &twoPhaseCommitter{
store: txn.committer.store,
sessionID: txn.committer.sessionID,
startTS: txn.committer.startTS,
forUpdateTS: txn.committer.forUpdateTS,
primaryKey: txn.committer.primaryKey,
}
wg := new(sync.WaitGroup)
wg.Add(1)
go func() {
failpoint.Inject("beforeAsyncPessimisticRollback", func(val failpoint.Value) {
if s, ok := val.(string); ok {
if s == "skip" {
logutil.Logger(ctx).Info("[failpoint] injected skip async pessimistic rollback",
zap.Uint64("txnStartTS", txn.startTS))
wg.Done()
failpoint.Return()
} else if s == "delay" {
duration := time.Duration(rand.Int63n(int64(time.Second) * 2))
logutil.Logger(ctx).Info("[failpoint] injected delay before async pessimistic rollback",
zap.Uint64("txnStartTS", txn.startTS), zap.Duration("duration", duration))
time.Sleep(duration)
}
}
})
err := committer.pessimisticRollbackMutations(NewBackofferWithVars(ctx, pessimisticRollbackMaxBackoff, txn.vars), &PlainMutations{keys: keys})
if err != nil {
logutil.Logger(ctx).Warn("[kv] pessimisticRollback failed.", zap.Error(err))
}
wg.Done()
}()
return wg
}
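// hashInKeys reports whether the deadlock key hash equals the farm fingerprint of any of the keys.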
func hashInKeys(deadlockKeyHash uint64, keys [][]byte) bool {
for _, key := range keys {
if farm.Fingerprint64(key) == deadlockKeyHash {
return true
}
}
return false
}
// IsReadOnly checks if the transaction has only performed read operations.
func (txn *KVTxn) IsReadOnly() bool {
return !txn.us.GetMemBuffer().Dirty()
}
// StartTS returns the transaction start timestamp.
func (txn *KVTxn) StartTS() uint64 {
return txn.startTS
}
// Valid reports whether the transaction is valid.
// A transaction becomes invalid after commit or rollback.
func (txn *KVTxn) Valid() bool {
return txn.valid
}
// Len returns the number of entries in the DB.
func (txn *KVTxn) Len() int {
return txn.us.GetMemBuffer().Len()
}
// Size returns the sum of key and value lengths.
func (txn *KVTxn) Size() int {
return txn.us.GetMemBuffer().Size()
}
// Reset resets the Transaction to its initial state.
func (txn *KVTxn) Reset() {
txn.us.GetMemBuffer().Reset()
}
// GetUnionStore returns the UnionStore binding to this transaction.
func (txn *KVTxn) GetUnionStore() *unionstore.KVUnionStore {
return txn.us
}
// GetMemBuffer returns the MemBuffer binding to this transaction.
func (txn *KVTxn) GetMemBuffer() *unionstore.MemDB {
return txn.us.GetMemBuffer()
}
// GetSnapshot returns the Snapshot binding to this transaction.
func (txn *KVTxn) GetSnapshot() *KVSnapshot {
return txn.snapshot
}
// SetBinlogExecutor sets the method to perform binlog synchronization.
func (txn *KVTxn) SetBinlogExecutor(binlog BinlogExecutor) {
txn.binlog = binlog
if txn.committer != nil {
txn.committer.binlog = binlog
}
}
// GetClusterID returns the store's cluster ID.
func (txn *KVTxn) GetClusterID() uint64 {
return txn.store.clusterID
}
| store/tikv/txn.go | 1 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.9984217882156372,
0.04015001654624939,
0.00016236535157077014,
0.000320158782415092,
0.19255274534225464
] |
{
"id": 3,
"code_window": [
"}\n",
"\n",
"func (c *twoPhaseCommitter) needLinearizability() bool {\n",
"\tGuaranteeLinearizabilityOption := c.txn.us.GetOption(kv.GuaranteeLinearizability)\n",
"\t// by default, guarantee\n",
"\treturn GuaranteeLinearizabilityOption == nil || GuaranteeLinearizabilityOption.(bool)\n",
"}\n",
"\n",
"func (c *twoPhaseCommitter) isAsyncCommit() bool {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn !c.txn.causalConsistency\n"
],
"file_path": "store/tikv/2pc.go",
"type": "replace",
"edit_start_line_idx": 856
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package chunk
import (
"encoding/binary"
"math"
"unsafe"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/types/json"
"github.com/pingcap/tidb/util/hack"
)
// MutRow represents a mutable Row.
// The underlying columns only contain one row and are not exposed to the user.
type MutRow Row
// ToRow converts the MutRow to Row, so it can be used to read data.
func (mr MutRow) ToRow() Row {
return Row(mr)
}
// Len returns the number of columns.
func (mr MutRow) Len() int {
return len(mr.c.columns)
}
// Clone deep clones a MutRow.
func (mr MutRow) Clone() MutRow {
newChk := mr.c
if mr.c != nil {
newChk = mr.c.CopyConstruct()
}
return MutRow{
c: newChk,
idx: mr.idx,
}
}
// MutRowFromValues creates a MutRow from an interface slice.
func MutRowFromValues(vals ...interface{}) MutRow {
c := &Chunk{columns: make([]*Column, 0, len(vals))}
for _, val := range vals {
col := makeMutRowColumn(val)
c.columns = append(c.columns, col)
}
return MutRow{c: c}
}
// MutRowFromDatums creates a MutRow from a datum slice.
func MutRowFromDatums(datums []types.Datum) MutRow {
c := &Chunk{columns: make([]*Column, 0, len(datums))}
for _, d := range datums {
col := makeMutRowColumn(d.GetValue())
c.columns = append(c.columns, col)
}
return MutRow{c: c, idx: 0}
}
// MutRowFromTypes creates a MutRow from a FieldType slice, each Column is initialized to its zero value.
func MutRowFromTypes(types []*types.FieldType) MutRow {
c := &Chunk{columns: make([]*Column, 0, len(types))}
for _, tp := range types {
col := makeMutRowColumn(zeroValForType(tp))
c.columns = append(c.columns, col)
}
return MutRow{c: c, idx: 0}
}
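// zeroValForType returns the zero value used to initialize a column of the given field type.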
func zeroValForType(tp *types.FieldType) interface{} {
switch tp.Tp {
case mysql.TypeFloat:
return float32(0)
case mysql.TypeDouble:
return float64(0)
case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeYear:
if mysql.HasUnsignedFlag(tp.Flag) {
return uint64(0)
}
return int64(0)
case mysql.TypeString, mysql.TypeVarString, mysql.TypeVarchar:
return ""
case mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
return []byte{}
case mysql.TypeDuration:
return types.ZeroDuration
case mysql.TypeNewDecimal:
return types.NewDecFromInt(0)
case mysql.TypeDate:
return types.ZeroDate
case mysql.TypeDatetime:
return types.ZeroDatetime
case mysql.TypeTimestamp:
return types.ZeroTimestamp
case mysql.TypeBit:
return types.BinaryLiteral{}
case mysql.TypeSet:
return types.Set{}
case mysql.TypeEnum:
return types.Enum{}
case mysql.TypeJSON:
return json.CreateBinary(nil)
default:
return nil
}
}
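// makeMutRowColumn builds a single-row Column holding the given value; a nil value yields a null column.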
func makeMutRowColumn(in interface{}) *Column {
switch x := in.(type) {
case nil:
col := makeMutRowBytesColumn(nil)
col.nullBitmap[0] = 0
return col
case int:
return makeMutRowUint64Column(uint64(x))
case int64:
return makeMutRowUint64Column(uint64(x))
case uint64:
return makeMutRowUint64Column(x)
case float64:
return makeMutRowUint64Column(math.Float64bits(x))
case float32:
col := newMutRowFixedLenColumn(4)
*(*uint32)(unsafe.Pointer(&col.data[0])) = math.Float32bits(x)
return col
case string:
return makeMutRowBytesColumn(hack.Slice(x))
case []byte:
return makeMutRowBytesColumn(x)
case types.BinaryLiteral:
return makeMutRowBytesColumn(x)
case *types.MyDecimal:
col := newMutRowFixedLenColumn(types.MyDecimalStructSize)
*(*types.MyDecimal)(unsafe.Pointer(&col.data[0])) = *x
return col
case types.Time:
col := newMutRowFixedLenColumn(sizeTime)
*(*types.Time)(unsafe.Pointer(&col.data[0])) = x
return col
case json.BinaryJSON:
col := newMutRowVarLenColumn(len(x.Value) + 1)
col.data[0] = x.TypeCode
copy(col.data[1:], x.Value)
return col
case types.Duration:
col := newMutRowFixedLenColumn(8)
*(*int64)(unsafe.Pointer(&col.data[0])) = int64(x.Duration)
return col
case types.Enum:
col := newMutRowVarLenColumn(len(x.Name) + 8)
copy(col.data, (*[8]byte)(unsafe.Pointer(&x.Value))[:])
copy(col.data[8:], x.Name)
return col
case types.Set:
col := newMutRowVarLenColumn(len(x.Name) + 8)
copy(col.data, (*[8]byte)(unsafe.Pointer(&x.Value))[:])
copy(col.data[8:], x.Name)
return col
default:
return nil
}
}
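// newMutRowFixedLenColumn creates a one-row column with elemSize bytes of storage for fixed-length types.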
func newMutRowFixedLenColumn(elemSize int) *Column {
buf := make([]byte, elemSize)
col := &Column{
length: 1,
elemBuf: buf,
data: buf,
nullBitmap: make([]byte, 1),
}
col.nullBitmap[0] = 1
return col
}
func newMutRowVarLenColumn(valSize int) *Column {
buf := make([]byte, valSize+1)
col := &Column{
length: 1,
offsets: []int64{0, int64(valSize)},
data: buf[:valSize],
nullBitmap: buf[valSize:],
}
col.nullBitmap[0] = 1
return col
}
func makeMutRowUint64Column(val uint64) *Column {
col := newMutRowFixedLenColumn(8)
*(*uint64)(unsafe.Pointer(&col.data[0])) = val
return col
}
func makeMutRowBytesColumn(bin []byte) *Column {
col := newMutRowVarLenColumn(len(bin))
copy(col.data, bin)
return col
}
// SetRow sets the MutRow with Row.
func (mr MutRow) SetRow(row Row) {
for colIdx, rCol := range row.c.columns {
mrCol := mr.c.columns[colIdx]
if rCol.IsNull(row.idx) {
mrCol.nullBitmap[0] = 0
continue
}
elemLen := len(rCol.elemBuf)
if elemLen > 0 {
copy(mrCol.data, rCol.data[row.idx*elemLen:(row.idx+1)*elemLen])
} else {
setMutRowBytes(mrCol, rCol.data[rCol.offsets[row.idx]:rCol.offsets[row.idx+1]])
}
mrCol.nullBitmap[0] = 1
}
}
// SetValues sets the MutRow with values.
func (mr MutRow) SetValues(vals ...interface{}) {
for i, v := range vals {
mr.SetValue(i, v)
}
}
// SetValue sets the MutRow with colIdx and value.
func (mr MutRow) SetValue(colIdx int, val interface{}) {
col := mr.c.columns[colIdx]
if val == nil {
col.nullBitmap[0] = 0
return
}
switch x := val.(type) {
case int:
binary.LittleEndian.PutUint64(col.data, uint64(x))
case int64:
binary.LittleEndian.PutUint64(col.data, uint64(x))
case uint64:
binary.LittleEndian.PutUint64(col.data, x)
case float64:
binary.LittleEndian.PutUint64(col.data, math.Float64bits(x))
case float32:
binary.LittleEndian.PutUint32(col.data, math.Float32bits(x))
case string:
setMutRowBytes(col, hack.Slice(x))
case []byte:
setMutRowBytes(col, x)
case types.BinaryLiteral:
setMutRowBytes(col, x)
case types.Duration:
*(*int64)(unsafe.Pointer(&col.data[0])) = int64(x.Duration)
case *types.MyDecimal:
*(*types.MyDecimal)(unsafe.Pointer(&col.data[0])) = *x
case types.Time:
*(*types.Time)(unsafe.Pointer(&col.data[0])) = x
case types.Enum:
setMutRowNameValue(col, x.Name, x.Value)
case types.Set:
setMutRowNameValue(col, x.Name, x.Value)
case json.BinaryJSON:
setMutRowJSON(col, x)
}
col.nullBitmap[0] = 1
}
// SetDatums sets the MutRow with datum slice.
func (mr MutRow) SetDatums(datums ...types.Datum) {
for i, d := range datums {
mr.SetDatum(i, d)
}
}
// SetDatum sets the MutRow with colIdx and datum.
func (mr MutRow) SetDatum(colIdx int, d types.Datum) {
col := mr.c.columns[colIdx]
if d.IsNull() {
col.nullBitmap[0] = 0
return
}
switch d.Kind() {
case types.KindInt64, types.KindUint64, types.KindFloat64:
binary.LittleEndian.PutUint64(mr.c.columns[colIdx].data, d.GetUint64())
case types.KindFloat32:
binary.LittleEndian.PutUint32(mr.c.columns[colIdx].data, math.Float32bits(d.GetFloat32()))
case types.KindString, types.KindBytes, types.KindBinaryLiteral:
setMutRowBytes(col, d.GetBytes())
case types.KindMysqlTime:
*(*types.Time)(unsafe.Pointer(&col.data[0])) = d.GetMysqlTime()
case types.KindMysqlDuration:
*(*int64)(unsafe.Pointer(&col.data[0])) = int64(d.GetMysqlDuration().Duration)
case types.KindMysqlDecimal:
*(*types.MyDecimal)(unsafe.Pointer(&col.data[0])) = *d.GetMysqlDecimal()
case types.KindMysqlJSON:
setMutRowJSON(col, d.GetMysqlJSON())
case types.KindMysqlEnum:
e := d.GetMysqlEnum()
setMutRowNameValue(col, e.Name, e.Value)
case types.KindMysqlSet:
s := d.GetMysqlSet()
setMutRowNameValue(col, s.Name, s.Value)
default:
mr.c.columns[colIdx] = makeMutRowColumn(d.GetValue())
}
col.nullBitmap[0] = 1
}
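// setMutRowBytes writes bin into the column's variable-length storage, reallocating when the existing buffer is too small.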
func setMutRowBytes(col *Column, bin []byte) {
if len(col.data) >= len(bin) {
col.data = col.data[:len(bin)]
} else {
buf := make([]byte, len(bin)+1)
col.data = buf[:len(bin)]
col.nullBitmap = buf[len(bin):]
}
copy(col.data, bin)
col.offsets[1] = int64(len(bin))
}
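// setMutRowNameValue stores an enum/set representation: an 8-byte value followed by the name.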
func setMutRowNameValue(col *Column, name string, val uint64) {
dataLen := len(name) + 8
if len(col.data) >= dataLen {
col.data = col.data[:dataLen]
} else {
buf := make([]byte, dataLen+1)
col.data = buf[:dataLen]
col.nullBitmap = buf[dataLen:]
}
binary.LittleEndian.PutUint64(col.data, val)
copy(col.data[8:], name)
col.offsets[1] = int64(dataLen)
}
func setMutRowJSON(col *Column, j json.BinaryJSON) {
dataLen := len(j.Value) + 1
if len(col.data) >= dataLen {
col.data = col.data[:dataLen]
} else {
		// In MutRow, every Column always holds exactly one datum,
		// so we should allocate one more byte for the null bitmap.
buf := make([]byte, dataLen+1)
col.data = buf[:dataLen]
col.nullBitmap = buf[dataLen:]
}
col.data[0] = j.TypeCode
copy(col.data[1:], j.Value)
col.offsets[1] = int64(dataLen)
}
// ShallowCopyPartialRow shallow copies the data of `row` to MutRow.
func (mr MutRow) ShallowCopyPartialRow(colIdx int, row Row) {
for i, srcCol := range row.c.columns {
dstCol := mr.c.columns[colIdx+i]
if !srcCol.IsNull(row.idx) {
// MutRow only contains one row, so we can directly set the whole byte.
dstCol.nullBitmap[0] = 1
} else {
dstCol.nullBitmap[0] = 0
}
if srcCol.isFixed() {
elemLen := len(srcCol.elemBuf)
offset := row.idx * elemLen
dstCol.data = srcCol.data[offset : offset+elemLen]
} else {
start, end := srcCol.offsets[row.idx], srcCol.offsets[row.idx+1]
dstCol.data = srcCol.data[start:end]
dstCol.offsets[1] = int64(len(dstCol.data))
}
}
}
| util/chunk/mutrow.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.001064728363417089,
0.00021941811428405344,
0.00016724287706892937,
0.00017339180340059102,
0.00014934258069843054
] |
{
"id": 3,
"code_window": [
"}\n",
"\n",
"func (c *twoPhaseCommitter) needLinearizability() bool {\n",
"\tGuaranteeLinearizabilityOption := c.txn.us.GetOption(kv.GuaranteeLinearizability)\n",
"\t// by default, guarantee\n",
"\treturn GuaranteeLinearizabilityOption == nil || GuaranteeLinearizabilityOption.(bool)\n",
"}\n",
"\n",
"func (c *twoPhaseCommitter) isAsyncCommit() bool {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn !c.txn.causalConsistency\n"
],
"file_path": "store/tikv/2pc.go",
"type": "replace",
"edit_start_line_idx": 856
} | # Heap Profile Recorder
- Author(s): [Yisaer](https://github.com/Yisaer) (Song Gao)
- Last updated: 2020-05-11
- Discussion at: https://github.com/pingcap/tidb/pull/16777
## Abstract
"Heap Profiler Recorder" profile the flat heap usage periodically in an extra goroutine to record the global `SimpleLRUCache` memory usage with an similar value.
## Background
Currently, we support memory usage and disk usage trackers for `Executor`. In [#15407](https://github.com/pingcap/tidb/issues/15407), we are going to support the `Global Memory Tracker`.
However, it would be too much work to calculate the memory usage of each implementation of `Plan`, and it might also consume a lot of CPU. To track the memory usage of `SimpleLRUCache`, we instead try to search for the memory usage in `runtime/pprof`.
## Proposal
We will record the whole `SimpleLRUCache` memory usage in the `GlobalLRUMemUsageTracker` memory tracker, whose parent is the `GlobalMemoryUsageTracker` memory tracker, by using an extra goroutine that periodically searches and sums the value from the heap profile.
## Rationale
When a golang application starts, the runtime would [startProfile](https://github.com/golang/go/blob/48a90d639d578d2b33fdc1903f03e028b4d40fa9/src/cmd/oldlink/internal/ld/main.go#L155) by default, including heap usage.
`runtime.MemProfileRate` controls the fraction of memory allocations that are recorded and reported in the memory profile. By default, `runtime.MemProfileRate` is 512 KB, and it can also be configured. When the whole heap usage of `SimpleLRUCache` is larger than `runtime.MemProfileRate`, it would be reflected in the flat value of the pprof heap profile.
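As a minimal sketch of this idea (not the actual TiDB implementation; the helper name `flatHeapUsage` and the substring match below are illustrative assumptions), a recorder goroutine could approximate the flat usage of a function by summing the in-use bytes of the allocation records whose innermost frame matches it:
```go
package main

import (
	"fmt"
	"runtime"
	"strings"
)

// flatHeapUsage sums the in-use bytes of allocation records whose innermost
// (allocating) frame matches funcName, approximating pprof's "flat" value.
func flatHeapUsage(funcName string) int64 {
	// Call with a nil slice first to learn how many records exist, then
	// allocate with headroom, as documented for runtime.MemProfile.
	n, _ := runtime.MemProfile(nil, true)
	records := make([]runtime.MemProfileRecord, n+50)
	n, ok := runtime.MemProfile(records, true)
	if !ok {
		return 0 // the profile grew between the two calls; a real recorder would retry
	}
	var total int64
	for i := range records[:n] {
		stack := records[i].Stack()
		if len(stack) == 0 {
			continue
		}
		// Stack()[0] is the innermost frame, i.e. the function that allocated.
		if f := runtime.FuncForPC(stack[0]); f != nil && strings.Contains(f.Name(), funcName) {
			total += records[i].InUseBytes()
		}
	}
	return total
}

func main() {
	fmt.Println(flatHeapUsage("kvcache.(*SimpleLRUCache).Put"))
}
```
The recorder goroutine would run such a computation periodically and feed the result into `GlobalLRUMemUsageTracker`.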
To verify whether `kvcache.(*SimpleLRUCache).Put` would reflect the real heap usage, I use the following test to ensure it:
1. fill the `SimpleLRUCache` by executing `set @randomString = ?` 20000 times.
2. profile the heap usage of `github.com/pingcap/tidb/util/kvcache.(*SimpleLRUCache).Put`; the result is 2.55 MB.
Let's dig into `Put` to find where the heap is consumed:
```sh
(pprof) list Put
Total: 52.23MB
ROUTINE ======================== github.com/pingcap/tidb/util/kvcache.(*SimpleLRUCache).Put in /Users/yisa/Downloads/Github/GoProject/src/github.com/pingcap/tidb/util/kvcache/simple_lru.go
2.55MB 3.05MB (flat, cum) 5.85% of Total
. . 91: return element.Value.(*cacheEntry).value, true
. . 92:}
. . 93:
. . 94:// Put puts the (key, value) pair into the LRU Cache.
. . 95:func (l *SimpleLRUCache) Put(key Key, value Value) {
1.50MB 1.50MB 96: hash := string(key.Hash())
. . 97: element, exists := l.elements[hash]
. . 98: if exists {
. . 99: l.cache.MoveToFront(element)
. . 100: return
. . 101: }
. . 102:
. . 103: newCacheEntry := &cacheEntry{
. . 104: key: key,
. . 105: value: value,
. . 106: }
. . 107: hashSize := SizeOf(hash)
. . 108: singleSize := SizeOf(newCacheEntry)
. 512.02kB 109: element = l.cache.PushFront(newCacheEntry)
1.05MB 1.05MB 110: l.elements[hash] = element
. . 111: l.size++
. . 112: l.capacity = 200000
. . 113: // Getting used memory is expensive and can be avoided by setting quota to 0.
. . 114: if l.quota == 0 {
```
We can find that the `hash` (the key of the cache) and the `element` (the value of the cache) consume 2.55 MB in total.
3. we use [sizeof](https://github.com/templarbit/sizeof) (the result is approximate and slightly lower than the real usage) to calculate the sizes of each key and element: 80 bytes and 40 bytes respectively.
4. Since 2.28 MB (120 bytes * 20000) is close to the observed 2.55 MB, we can confirm that the heap profile reflects the heap usage of `SimpleLRUCache`.
## Compatibility and Migration Plan
None
| docs/design/2020-05-11-heap-profile-record.md | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.0015114558627828956,
0.0003409252385608852,
0.0001661808491917327,
0.0001728850620565936,
0.00044246413744986057
] |
{
"id": 3,
"code_window": [
"}\n",
"\n",
"func (c *twoPhaseCommitter) needLinearizability() bool {\n",
"\tGuaranteeLinearizabilityOption := c.txn.us.GetOption(kv.GuaranteeLinearizability)\n",
"\t// by default, guarantee\n",
"\treturn GuaranteeLinearizabilityOption == nil || GuaranteeLinearizabilityOption.(bool)\n",
"}\n",
"\n",
"func (c *twoPhaseCommitter) isAsyncCommit() bool {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn !c.txn.causalConsistency\n"
],
"file_path": "store/tikv/2pc.go",
"type": "replace",
"edit_start_line_idx": 856
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"math"
"time"
"github.com/pingcap/tidb/util/collate"
)
// CompareInt64 returns an integer comparing the int64 x to y.
func CompareInt64(x, y int64) int {
if x < y {
return -1
} else if x == y {
return 0
}
return 1
}
// CompareUint64 returns an integer comparing the uint64 x to y.
func CompareUint64(x, y uint64) int {
if x < y {
return -1
} else if x == y {
return 0
}
return 1
}
// VecCompareUU returns []int64 comparing the []uint64 x to []uint64 y
func VecCompareUU(x, y []uint64, res []int64) {
n := len(x)
for i := 0; i < n; i++ {
if x[i] < y[i] {
res[i] = -1
} else if x[i] == y[i] {
res[i] = 0
} else {
res[i] = 1
}
}
}
// VecCompareII returns []int64 comparing the []int64 x to []int64 y
func VecCompareII(x, y, res []int64) {
n := len(x)
for i := 0; i < n; i++ {
if x[i] < y[i] {
res[i] = -1
} else if x[i] == y[i] {
res[i] = 0
} else {
res[i] = 1
}
}
}
// VecCompareUI returns []int64 comparing the []uint64 x to []int64 y
func VecCompareUI(x []uint64, y, res []int64) {
n := len(x)
for i := 0; i < n; i++ {
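		// If y is negative or x exceeds math.MaxInt64, x must compare greater.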
if y[i] < 0 || x[i] > math.MaxInt64 {
res[i] = 1
} else if int64(x[i]) < y[i] {
res[i] = -1
} else if int64(x[i]) == y[i] {
res[i] = 0
} else {
res[i] = 1
}
}
}
// VecCompareIU returns []int64 comparing the []int64 x to []uint64 y
func VecCompareIU(x []int64, y []uint64, res []int64) {
n := len(x)
for i := 0; i < n; i++ {
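		// If x is negative or y exceeds math.MaxInt64, x must compare smaller.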
if x[i] < 0 || y[i] > math.MaxInt64 {
res[i] = -1
} else if x[i] < int64(y[i]) {
res[i] = -1
} else if x[i] == int64(y[i]) {
res[i] = 0
} else {
res[i] = 1
}
}
}
// CompareFloat64 returns an integer comparing the float64 x to y.
func CompareFloat64(x, y float64) int {
if x < y {
return -1
} else if x == y {
return 0
}
return 1
}
// CompareString returns an integer comparing the string x to y with the specified collation.
func CompareString(x, y, collation string) int {
return collate.GetCollator(collation).Compare(x, y)
}
// CompareDuration returns an integer comparing the duration x to y.
func CompareDuration(x, y time.Duration) int {
if x < y {
return -1
} else if x == y {
return 0
}
return 1
}
| types/compare.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.0015114558627828956,
0.00026887052808888257,
0.00016673145000822842,
0.00017121332348324358,
0.0003447018680162728
] |
{
"id": 4,
"code_window": [
"\t\"github.com/pingcap/errors\"\n",
"\t\"github.com/pingcap/kvproto/pkg/kvrpcpb\"\n",
"\t\"github.com/pingcap/tidb/store/mockstore/unistore\"\n",
"\t\"github.com/pingcap/tidb/store/tikv\"\n",
"\ttikverr \"github.com/pingcap/tidb/store/tikv/error\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/kv\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/mockstore/cluster\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/oracle\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/tikvrpc\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/util\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "store/tikv/tests/async_commit_test.go",
"type": "replace",
"edit_start_line_idx": 30
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv_test
import (
"bytes"
"context"
"fmt"
"math"
"sync/atomic"
"testing"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/store/mockstore/unistore"
"github.com/pingcap/tidb/store/tikv"
tikverr "github.com/pingcap/tidb/store/tikv/error"
"github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/store/tikv/util"
)
func TestT(t *testing.T) {
CustomVerboseFlag = true
TestingT(t)
}
// testAsyncCommitCommon holds the common parts that are used by both
// testAsyncCommitSuite and testAsyncCommitFailSuite.
type testAsyncCommitCommon struct {
cluster cluster.Cluster
store *tikv.KVStore
}
func (s *testAsyncCommitCommon) setUpTest(c *C) {
if *WithTiKV {
s.store = NewTestStore(c)
return
}
client, pdClient, cluster, err := unistore.New("")
c.Assert(err, IsNil)
unistore.BootstrapWithSingleStore(cluster)
s.cluster = cluster
store, err := tikv.NewTestTiKVStore(client, pdClient, nil, nil, 0)
c.Assert(err, IsNil)
s.store = store
}
func (s *testAsyncCommitCommon) putAlphabets(c *C, enableAsyncCommit bool) {
for ch := byte('a'); ch <= byte('z'); ch++ {
s.putKV(c, []byte{ch}, []byte{ch}, enableAsyncCommit)
}
}
func (s *testAsyncCommitCommon) putKV(c *C, key, value []byte, enableAsyncCommit bool) (uint64, uint64) {
txn := s.beginAsyncCommit(c)
err := txn.Set(key, value)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
return txn.StartTS(), txn.GetCommitTS()
}
func (s *testAsyncCommitCommon) mustGetFromTxn(c *C, txn tikv.TxnProbe, key, expectedValue []byte) {
v, err := txn.Get(context.Background(), key)
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, expectedValue)
}
func (s *testAsyncCommitCommon) mustGetLock(c *C, key []byte) *tikv.Lock {
ver, err := s.store.CurrentTimestamp(oracle.GlobalTxnScope)
c.Assert(err, IsNil)
req := tikvrpc.NewRequest(tikvrpc.CmdGet, &kvrpcpb.GetRequest{
Key: key,
Version: ver,
})
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, key)
c.Assert(err, IsNil)
resp, err := s.store.SendReq(bo, req, loc.Region, time.Second*10)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
keyErr := resp.Resp.(*kvrpcpb.GetResponse).GetError()
c.Assert(keyErr, NotNil)
var lockutil tikv.LockProbe
lock, err := lockutil.ExtractLockFromKeyErr(keyErr)
c.Assert(err, IsNil)
return lock
}
func (s *testAsyncCommitCommon) mustPointGet(c *C, key, expectedValue []byte) {
snap := s.store.GetSnapshot(math.MaxUint64)
value, err := snap.Get(context.Background(), key)
c.Assert(err, IsNil)
c.Assert(value, BytesEquals, expectedValue)
}
func (s *testAsyncCommitCommon) mustGetFromSnapshot(c *C, version uint64, key, expectedValue []byte) {
snap := s.store.GetSnapshot(version)
value, err := snap.Get(context.Background(), key)
c.Assert(err, IsNil)
c.Assert(value, BytesEquals, expectedValue)
}
func (s *testAsyncCommitCommon) mustGetNoneFromSnapshot(c *C, version uint64, key []byte) {
snap := s.store.GetSnapshot(version)
_, err := snap.Get(context.Background(), key)
c.Assert(errors.Cause(err), Equals, tikverr.ErrNotExist)
}
func (s *testAsyncCommitCommon) beginAsyncCommitWithLinearizability(c *C) tikv.TxnProbe {
txn := s.beginAsyncCommit(c)
txn.SetOption(kv.GuaranteeLinearizability, true)
return txn
}
func (s *testAsyncCommitCommon) beginAsyncCommit(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.SetEnableAsyncCommit(true)
return tikv.TxnProbe{KVTxn: txn}
}
func (s *testAsyncCommitCommon) begin(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
return tikv.TxnProbe{KVTxn: txn}
}
type testAsyncCommitSuite struct {
OneByOneSuite
testAsyncCommitCommon
bo *tikv.Backoffer
}
var _ = SerialSuites(&testAsyncCommitSuite{})
func (s *testAsyncCommitSuite) SetUpTest(c *C) {
s.testAsyncCommitCommon.setUpTest(c)
s.bo = tikv.NewBackofferWithVars(context.Background(), 5000, nil)
}
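// lockKeysWithAsyncCommit prewrites the given keys plus the primary key with async
// commit enabled, and optionally commits the primary key.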
func (s *testAsyncCommitSuite) lockKeysWithAsyncCommit(c *C, keys, values [][]byte, primaryKey, primaryValue []byte, commitPrimary bool) (uint64, uint64) {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.SetEnableAsyncCommit(true)
for i, k := range keys {
if len(values[i]) > 0 {
err = txn.Set(k, values[i])
} else {
err = txn.Delete(k)
}
c.Assert(err, IsNil)
}
if len(primaryValue) > 0 {
err = txn.Set(primaryKey, primaryValue)
} else {
err = txn.Delete(primaryKey)
}
c.Assert(err, IsNil)
txnProbe := tikv.TxnProbe{KVTxn: txn}
tpc, err := txnProbe.NewCommitter(0)
c.Assert(err, IsNil)
tpc.SetPrimaryKey(primaryKey)
ctx := context.Background()
err = tpc.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
if commitPrimary {
commitTS, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
tpc.SetCommitTS(commitTS)
err = tpc.CommitMutations(ctx)
c.Assert(err, IsNil)
}
return txn.StartTS(), tpc.GetCommitTS()
}
func (s *testAsyncCommitSuite) TestCheckSecondaries(c *C) {
// This test doesn't support tikv mode.
if *WithTiKV {
return
}
s.putAlphabets(c, true)
loc, err := s.store.GetRegionCache().LocateKey(s.bo, []byte("a"))
c.Assert(err, IsNil)
newRegionID, peerID := s.cluster.AllocID(), s.cluster.AllocID()
s.cluster.Split(loc.Region.GetID(), newRegionID, []byte("e"), []uint64{peerID}, peerID)
s.store.GetRegionCache().InvalidateCachedRegion(loc.Region)
// No locks to check, only primary key is locked, should be successful.
s.lockKeysWithAsyncCommit(c, [][]byte{}, [][]byte{}, []byte("z"), []byte("z"), false)
lock := s.mustGetLock(c, []byte("z"))
lock.UseAsyncCommit = true
ts, err := s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
var lockutil tikv.LockProbe
status := lockutil.NewLockStatus(nil, true, ts)
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
err = resolver.ResolveLockAsync(s.bo, lock, status)
c.Assert(err, IsNil)
currentTS, err := s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
status, err = resolver.GetTxnStatus(s.bo, lock.TxnID, []byte("z"), currentTS, currentTS, true, false, nil)
c.Assert(err, IsNil)
c.Assert(status.IsCommitted(), IsTrue)
c.Assert(status.CommitTS(), Equals, ts)
// One key is committed (i), one key is locked (a). Should get committed.
ts, err = s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
commitTs := ts + 10
gotCheckA := int64(0)
gotCheckB := int64(0)
gotResolve := int64(0)
gotOther := int64(0)
mock := mockResolveClient{
inner: s.store.GetTiKVClient(),
onCheckSecondaries: func(req *kvrpcpb.CheckSecondaryLocksRequest) (*tikvrpc.Response, error) {
if req.StartVersion != ts {
return nil, errors.Errorf("Bad start version: %d, expected: %d", req.StartVersion, ts)
}
var resp kvrpcpb.CheckSecondaryLocksResponse
for _, k := range req.Keys {
if bytes.Equal(k, []byte("a")) {
atomic.StoreInt64(&gotCheckA, 1)
resp = kvrpcpb.CheckSecondaryLocksResponse{
Locks: []*kvrpcpb.LockInfo{{Key: []byte("a"), PrimaryLock: []byte("z"), LockVersion: ts, UseAsyncCommit: true}},
CommitTs: commitTs,
}
} else if bytes.Equal(k, []byte("i")) {
atomic.StoreInt64(&gotCheckB, 1)
resp = kvrpcpb.CheckSecondaryLocksResponse{
Locks: []*kvrpcpb.LockInfo{},
CommitTs: commitTs,
}
} else {
fmt.Printf("Got other key: %s\n", k)
atomic.StoreInt64(&gotOther, 1)
}
}
return &tikvrpc.Response{Resp: &resp}, nil
},
onResolveLock: func(req *kvrpcpb.ResolveLockRequest) (*tikvrpc.Response, error) {
if req.StartVersion != ts {
return nil, errors.Errorf("Bad start version: %d, expected: %d", req.StartVersion, ts)
}
if req.CommitVersion != commitTs {
return nil, errors.Errorf("Bad commit version: %d, expected: %d", req.CommitVersion, commitTs)
}
for _, k := range req.Keys {
if bytes.Equal(k, []byte("a")) || bytes.Equal(k, []byte("z")) {
atomic.StoreInt64(&gotResolve, 1)
} else {
atomic.StoreInt64(&gotOther, 1)
}
}
resp := kvrpcpb.ResolveLockResponse{}
return &tikvrpc.Response{Resp: &resp}, nil
},
}
s.store.SetTiKVClient(&mock)
status = lockutil.NewLockStatus([][]byte{[]byte("a"), []byte("i")}, true, 0)
lock = &tikv.Lock{
Key: []byte("a"),
Primary: []byte("z"),
TxnID: ts,
LockType: kvrpcpb.Op_Put,
UseAsyncCommit: true,
MinCommitTS: ts + 5,
}
_ = s.beginAsyncCommit(c)
err = resolver.ResolveLockAsync(s.bo, lock, status)
c.Assert(err, IsNil)
c.Assert(gotCheckA, Equals, int64(1))
c.Assert(gotCheckB, Equals, int64(1))
c.Assert(gotOther, Equals, int64(0))
c.Assert(gotResolve, Equals, int64(1))
// One key has been rolled back (b), one is locked (a). Should be rolled back.
ts, err = s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
commitTs = ts + 10
gotCheckA = int64(0)
gotCheckB = int64(0)
gotResolve = int64(0)
gotOther = int64(0)
mock.onResolveLock = func(req *kvrpcpb.ResolveLockRequest) (*tikvrpc.Response, error) {
if req.StartVersion != ts {
return nil, errors.Errorf("Bad start version: %d, expected: %d", req.StartVersion, ts)
}
if req.CommitVersion != commitTs {
return nil, errors.Errorf("Bad commit version: %d, expected: 0", req.CommitVersion)
}
for _, k := range req.Keys {
if bytes.Equal(k, []byte("a")) || bytes.Equal(k, []byte("z")) {
atomic.StoreInt64(&gotResolve, 1)
} else {
atomic.StoreInt64(&gotOther, 1)
}
}
resp := kvrpcpb.ResolveLockResponse{}
return &tikvrpc.Response{Resp: &resp}, nil
}
lock.TxnID = ts
lock.MinCommitTS = ts + 5
err = resolver.ResolveLockAsync(s.bo, lock, status)
c.Assert(err, IsNil)
c.Assert(gotCheckA, Equals, int64(1))
c.Assert(gotCheckB, Equals, int64(1))
c.Assert(gotResolve, Equals, int64(1))
c.Assert(gotOther, Equals, int64(0))
}
func (s *testAsyncCommitSuite) TestRepeatableRead(c *C) {
var sessionID uint64 = 0
test := func(isPessimistic bool) {
s.putKV(c, []byte("k1"), []byte("v1"), true)
sessionID++
ctx := context.WithValue(context.Background(), util.SessionID, sessionID)
txn1 := s.beginAsyncCommit(c)
txn1.SetPessimistic(isPessimistic)
s.mustGetFromTxn(c, txn1, []byte("k1"), []byte("v1"))
txn1.Set([]byte("k1"), []byte("v2"))
for i := 0; i < 20; i++ {
_, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
}
txn2 := s.beginAsyncCommit(c)
s.mustGetFromTxn(c, txn2, []byte("k1"), []byte("v1"))
err := txn1.Commit(ctx)
c.Assert(err, IsNil)
// Check txn1 is committed in async commit.
c.Assert(txn1.IsAsyncCommit(), IsTrue)
s.mustGetFromTxn(c, txn2, []byte("k1"), []byte("v1"))
err = txn2.Rollback()
c.Assert(err, IsNil)
txn3 := s.beginAsyncCommit(c)
s.mustGetFromTxn(c, txn3, []byte("k1"), []byte("v2"))
err = txn3.Rollback()
c.Assert(err, IsNil)
}
test(false)
test(true)
}
// It's just a simple validation of linearizability.
// Extra tests are needed to test this feature with the control of the TiKV cluster.
func (s *testAsyncCommitSuite) TestAsyncCommitLinearizability(c *C) {
t1 := s.beginAsyncCommitWithLinearizability(c)
t2 := s.beginAsyncCommitWithLinearizability(c)
err := t1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = t2.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
ctx := context.WithValue(context.Background(), util.SessionID, uint64(1))
// t2 commits earlier than t1
err = t2.Commit(ctx)
c.Assert(err, IsNil)
err = t1.Commit(ctx)
c.Assert(err, IsNil)
commitTS1 := t1.GetCommitTS()
commitTS2 := t2.GetCommitTS()
c.Assert(commitTS2, Less, commitTS1)
}
// TestAsyncCommitWithMultiDC tests that async commit can only be enabled in global transactions
func (s *testAsyncCommitSuite) TestAsyncCommitWithMultiDC(c *C) {
// It requires setting placement rules to run with TiKV
if *WithTiKV {
return
}
localTxn := s.beginAsyncCommit(c)
err := localTxn.Set([]byte("a"), []byte("a1"))
localTxn.SetScope("bj")
c.Assert(err, IsNil)
ctx := context.WithValue(context.Background(), util.SessionID, uint64(1))
err = localTxn.Commit(ctx)
c.Assert(err, IsNil)
c.Assert(localTxn.IsAsyncCommit(), IsFalse)
globalTxn := s.beginAsyncCommit(c)
err = globalTxn.Set([]byte("b"), []byte("b1"))
globalTxn.SetScope(oracle.GlobalTxnScope)
c.Assert(err, IsNil)
err = globalTxn.Commit(ctx)
c.Assert(err, IsNil)
c.Assert(globalTxn.IsAsyncCommit(), IsTrue)
}
func (s *testAsyncCommitSuite) TestResolveTxnFallbackFromAsyncCommit(c *C) {
keys := [][]byte{[]byte("k0"), []byte("k1")}
values := [][]byte{[]byte("v00"), []byte("v10")}
initTest := func() tikv.CommitterProbe {
t0 := s.begin(c)
err := t0.Set(keys[0], values[0])
c.Assert(err, IsNil)
err = t0.Set(keys[1], values[1])
c.Assert(err, IsNil)
err = t0.Commit(context.Background())
c.Assert(err, IsNil)
t1 := s.beginAsyncCommit(c)
err = t1.Set(keys[0], []byte("v01"))
c.Assert(err, IsNil)
err = t1.Set(keys[1], []byte("v11"))
c.Assert(err, IsNil)
committer, err := t1.NewCommitter(1)
c.Assert(err, IsNil)
committer.SetLockTTL(1)
committer.SetUseAsyncCommit()
return committer
}
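// prewriteKey prewrites a single mutation; when fallback is set, MaxCommitTs is forced to 1 so TiKV falls back from async commit for that key.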
prewriteKey := func(committer tikv.CommitterProbe, idx int, fallback bool) {
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, keys[idx])
c.Assert(err, IsNil)
req := committer.BuildPrewriteRequest(loc.Region.GetID(), loc.Region.GetConfVer(), loc.Region.GetVer(),
committer.GetMutations().Slice(idx, idx+1), 1)
if fallback {
req.Req.(*kvrpcpb.PrewriteRequest).MaxCommitTs = 1
}
resp, err := s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
}
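// readKey reads through a fresh transaction and expects the pre-t1 value, since t1's prewrite is never committed.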
readKey := func(idx int) {
t2 := s.begin(c)
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
val, err := t2.Get(ctx, keys[idx])
c.Assert(err, IsNil)
c.Assert(val, DeepEquals, values[idx])
}
// Case 1: Fallback primary, read primary
committer := initTest()
prewriteKey(committer, 0, true)
prewriteKey(committer, 1, false)
readKey(0)
readKey(1)
// Case 2: Fallback primary, read secondary
committer = initTest()
prewriteKey(committer, 0, true)
prewriteKey(committer, 1, false)
readKey(1)
readKey(0)
// Case 3: Fallback secondary, read primary
committer = initTest()
prewriteKey(committer, 0, false)
prewriteKey(committer, 1, true)
readKey(0)
readKey(1)
// Case 4: Fallback secondary, read secondary
committer = initTest()
prewriteKey(committer, 0, false)
prewriteKey(committer, 1, true)
readKey(1)
readKey(0)
// Case 5: Fallback both, read primary
committer = initTest()
prewriteKey(committer, 0, true)
prewriteKey(committer, 1, true)
readKey(0)
readKey(1)
// Case 6: Fallback both, read secondary
committer = initTest()
prewriteKey(committer, 0, true)
prewriteKey(committer, 1, true)
readKey(1)
readKey(0)
}
type mockResolveClient struct {
inner tikv.Client
onResolveLock func(*kvrpcpb.ResolveLockRequest) (*tikvrpc.Response, error)
onCheckSecondaries func(*kvrpcpb.CheckSecondaryLocksRequest) (*tikvrpc.Response, error)
}
func (m *mockResolveClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
// Intercept check secondary locks and resolve lock messages if the callback is non-nil.
// If the callback returns (nil, nil), forward to the inner client.
if cr, ok := req.Req.(*kvrpcpb.CheckSecondaryLocksRequest); ok && m.onCheckSecondaries != nil {
result, err := m.onCheckSecondaries(cr)
if result != nil || err != nil {
return result, err
}
} else if rr, ok := req.Req.(*kvrpcpb.ResolveLockRequest); ok && m.onResolveLock != nil {
result, err := m.onResolveLock(rr)
if result != nil || err != nil {
return result, err
}
}
return m.inner.SendRequest(ctx, addr, req, timeout)
}
func (m *mockResolveClient) Close() error {
return m.inner.Close()
}
| store/tikv/tests/async_commit_test.go | 1 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.4216547906398773,
0.009789135307073593,
0.00016181851970031857,
0.0001707045448711142,
0.056304652243852615
] |
{
"id": 4,
"code_window": [
"\t\"github.com/pingcap/errors\"\n",
"\t\"github.com/pingcap/kvproto/pkg/kvrpcpb\"\n",
"\t\"github.com/pingcap/tidb/store/mockstore/unistore\"\n",
"\t\"github.com/pingcap/tidb/store/tikv\"\n",
"\ttikverr \"github.com/pingcap/tidb/store/tikv/error\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/kv\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/mockstore/cluster\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/oracle\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/tikvrpc\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/util\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "store/tikv/tests/async_commit_test.go",
"type": "replace",
"edit_start_line_idx": 30
} | ## Maintainers
- [dongxu](https://github.com/c4pt0r)
- [Ewan Chou](https://github.com/coocood)
- [goroutine](https://github.com/ngaut)
- [qiuyesuifeng](https://github.com/qiuyesuifeng)
- [Shen Li](https://github.com/shenli)
- [siddontang](https://github.com/siddontang)
| docs/MAINTAINERS.md | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.00017360008496325463,
0.00017360008496325463,
0.00017360008496325463,
0.00017360008496325463,
0
] |
{
"id": 4,
"code_window": [
"\t\"github.com/pingcap/errors\"\n",
"\t\"github.com/pingcap/kvproto/pkg/kvrpcpb\"\n",
"\t\"github.com/pingcap/tidb/store/mockstore/unistore\"\n",
"\t\"github.com/pingcap/tidb/store/tikv\"\n",
"\ttikverr \"github.com/pingcap/tidb/store/tikv/error\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/kv\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/mockstore/cluster\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/oracle\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/tikvrpc\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/util\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "store/tikv/tests/async_commit_test.go",
"type": "replace",
"edit_start_line_idx": 30
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mocktikv
import (
"context"
"math"
"sync"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/kvproto/pkg/pdpb"
pd "github.com/tikv/pd/client"
)
// Use global variables to prevent pdClients from creating duplicate timestamps.
var tsMu = struct {
sync.Mutex
physicalTS int64
logicalTS int64
}{}
type pdClient struct {
cluster *Cluster
// SafePoint set by `UpdateGCSafePoint`. Not to be confused with SafePointKV.
gcSafePoint uint64
// Represents the current safe point of all services including TiDB, i.e. how much data they want to retain
// in GC.
serviceSafePoints map[string]uint64
gcSafePointMu sync.Mutex
}
// NewPDClient creates a mock pd.Client that uses local timestamp and meta data
// from a Cluster.
func NewPDClient(cluster *Cluster) pd.Client {
return &pdClient{
cluster: cluster,
serviceSafePoints: make(map[string]uint64),
}
}
func (c *pdClient) GetClusterID(ctx context.Context) uint64 {
return 1
}
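// GetTS returns a locally generated TSO: the physical part is the wall clock in milliseconds and the logical part is a counter within the same millisecond.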
func (c *pdClient) GetTS(context.Context) (int64, int64, error) {
tsMu.Lock()
defer tsMu.Unlock()
ts := time.Now().UnixNano() / int64(time.Millisecond)
if tsMu.physicalTS >= ts {
tsMu.logicalTS++
} else {
tsMu.physicalTS = ts
tsMu.logicalTS = 0
}
return tsMu.physicalTS, tsMu.logicalTS, nil
}
func (c *pdClient) GetLocalTS(ctx context.Context, dcLocation string) (int64, int64, error) {
return c.GetTS(ctx)
}
func (c *pdClient) GetTSAsync(ctx context.Context) pd.TSFuture {
return &mockTSFuture{c, ctx, false}
}
func (c *pdClient) GetLocalTSAsync(ctx context.Context, dcLocation string) pd.TSFuture {
return c.GetTSAsync(ctx)
}
type mockTSFuture struct {
pdc *pdClient
ctx context.Context
used bool
}
func (m *mockTSFuture) Wait() (int64, int64, error) {
if m.used {
return 0, 0, errors.New("cannot wait tso twice")
}
m.used = true
return m.pdc.GetTS(m.ctx)
}
func (c *pdClient) GetRegion(ctx context.Context, key []byte) (*pd.Region, error) {
region, peer := c.cluster.GetRegionByKey(key)
return &pd.Region{Meta: region, Leader: peer}, nil
}
func (c *pdClient) GetRegionFromMember(ctx context.Context, key []byte, memberURLs []string) (*pd.Region, error) {
return &pd.Region{}, nil
}
func (c *pdClient) GetPrevRegion(ctx context.Context, key []byte) (*pd.Region, error) {
region, peer := c.cluster.GetPrevRegionByKey(key)
return &pd.Region{Meta: region, Leader: peer}, nil
}
func (c *pdClient) GetRegionByID(ctx context.Context, regionID uint64) (*pd.Region, error) {
region, peer := c.cluster.GetRegionByID(regionID)
return &pd.Region{Meta: region, Leader: peer}, nil
}
func (c *pdClient) ScanRegions(ctx context.Context, startKey []byte, endKey []byte, limit int) ([]*pd.Region, error) {
regions := c.cluster.ScanRegions(startKey, endKey, limit)
return regions, nil
}
func (c *pdClient) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
store := c.cluster.GetStore(storeID)
return store, nil
}
func (c *pdClient) GetAllStores(ctx context.Context, opts ...pd.GetStoreOption) ([]*metapb.Store, error) {
return c.cluster.GetAllStores(), nil
}
func (c *pdClient) UpdateGCSafePoint(ctx context.Context, safePoint uint64) (uint64, error) {
c.gcSafePointMu.Lock()
defer c.gcSafePointMu.Unlock()
if safePoint > c.gcSafePoint {
c.gcSafePoint = safePoint
}
return c.gcSafePoint, nil
}
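// UpdateServiceGCSafePoint updates the safe point of one service; a zero ttl removes it. It returns the minimum safe point across all services.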
func (c *pdClient) UpdateServiceGCSafePoint(ctx context.Context, serviceID string, ttl int64, safePoint uint64) (uint64, error) {
c.gcSafePointMu.Lock()
defer c.gcSafePointMu.Unlock()
if ttl == 0 {
delete(c.serviceSafePoints, serviceID)
} else {
var minSafePoint uint64 = math.MaxUint64
for _, ssp := range c.serviceSafePoints {
if ssp < minSafePoint {
minSafePoint = ssp
}
}
if len(c.serviceSafePoints) == 0 || minSafePoint <= safePoint {
c.serviceSafePoints[serviceID] = safePoint
}
}
// The minSafePoint may have changed. Reload it.
var minSafePoint uint64 = math.MaxUint64
for _, ssp := range c.serviceSafePoints {
if ssp < minSafePoint {
minSafePoint = ssp
}
}
return minSafePoint, nil
}
func (c *pdClient) Close() {
}
func (c *pdClient) ScatterRegion(ctx context.Context, regionID uint64) error {
return nil
}
func (c *pdClient) ScatterRegions(ctx context.Context, regionsID []uint64, opts ...pd.RegionsOption) (*pdpb.ScatterRegionResponse, error) {
return nil, nil
}
func (c *pdClient) SplitRegions(ctx context.Context, splitKeys [][]byte, opts ...pd.RegionsOption) (*pdpb.SplitRegionsResponse, error) {
return nil, nil
}
func (c *pdClient) GetOperator(ctx context.Context, regionID uint64) (*pdpb.GetOperatorResponse, error) {
return &pdpb.GetOperatorResponse{Status: pdpb.OperatorStatus_SUCCESS}, nil
}
func (c *pdClient) GetAllMembers(ctx context.Context) ([]*pdpb.Member, error) {
return nil, nil
}
func (c *pdClient) GetLeaderAddr() string { return "mockpd" }
| store/tikv/mockstore/mocktikv/pd.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.006922960747033358,
0.0006658604834228754,
0.00016345347103197128,
0.00020579362171702087,
0.0014774168375879526
] |
{
"id": 4,
"code_window": [
"\t\"github.com/pingcap/errors\"\n",
"\t\"github.com/pingcap/kvproto/pkg/kvrpcpb\"\n",
"\t\"github.com/pingcap/tidb/store/mockstore/unistore\"\n",
"\t\"github.com/pingcap/tidb/store/tikv\"\n",
"\ttikverr \"github.com/pingcap/tidb/store/tikv/error\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/kv\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/mockstore/cluster\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/oracle\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/tikvrpc\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/util\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "store/tikv/tests/async_commit_test.go",
"type": "replace",
"edit_start_line_idx": 30
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package arena
import (
"testing"
. "github.com/pingcap/check"
)
func TestT(t *testing.T) {
TestingT(t)
}
func TestSimpleArenaAllocator(t *testing.T) {
arena := NewAllocator(1000)
slice := arena.Alloc(10)
if arena.off != 10 {
t.Error("off not match, expect 10 bug got", arena.off)
}
if len(slice) != 0 || cap(slice) != 10 {
t.Error("slice length or cap not match")
}
slice = arena.Alloc(20)
if arena.off != 30 {
t.Error("off not match, expect 30 bug got", arena.off)
}
if len(slice) != 0 || cap(slice) != 20 {
t.Error("slice length or cap not match")
}
slice = arena.Alloc(1024)
if arena.off != 30 {
t.Error("off not match, expect 30 bug got", arena.off)
}
if len(slice) != 0 || cap(slice) != 1024 {
t.Error("slice length or cap not match")
}
slice = arena.AllocWithLen(2, 10)
if arena.off != 40 {
t.Error("off not match, expect 40 bug got", arena.off)
}
if len(slice) != 2 || cap(slice) != 10 {
t.Error("slice length or cap not match")
}
arena.Reset()
if arena.off != 0 || cap(arena.arena) != 1000 {
t.Error("off or cap not match")
}
}
func TestStdAllocator(t *testing.T) {
slice := StdAllocator.Alloc(20)
if len(slice) != 0 {
t.Error("length not match")
}
if cap(slice) != 20 {
t.Error("cap not match")
}
slice = StdAllocator.AllocWithLen(10, 20)
if len(slice) != 10 {
t.Error("length not match")
}
if cap(slice) != 20 {
t.Error("cap not match")
}
}
| util/arena/arena_test.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.0001718520070426166,
0.00016892615531105548,
0.0001658278633840382,
0.00016813796537462622,
0.0000016871736079338007
] |
{
"id": 5,
"code_window": [
"\tc.Assert(errors.Cause(err), Equals, tikverr.ErrNotExist)\n",
"}\n",
"\n",
"func (s *testAsyncCommitCommon) beginAsyncCommitWithLinearizability(c *C) tikv.TxnProbe {\n",
"\ttxn := s.beginAsyncCommit(c)\n",
"\ttxn.SetOption(kv.GuaranteeLinearizability, true)\n",
"\treturn txn\n",
"}\n",
"\n",
"func (s *testAsyncCommitCommon) beginAsyncCommit(c *C) tikv.TxnProbe {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\ttxn.SetCausalConsistency(false)\n"
],
"file_path": "store/tikv/tests/async_commit_test.go",
"type": "replace",
"edit_start_line_idx": 129
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"bytes"
"context"
"encoding/json"
"fmt"
"math/rand"
"runtime/trace"
"sort"
"sync"
"sync/atomic"
"time"
"github.com/dgryski/go-farm"
"github.com/opentracing/opentracing-go"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/tidb/kv"
tikverr "github.com/pingcap/tidb/store/tikv/error"
tikv "github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/logutil"
"github.com/pingcap/tidb/store/tikv/metrics"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/store/tikv/unionstore"
"github.com/pingcap/tidb/store/tikv/util"
"go.uber.org/zap"
)
// MaxTxnTimeUse is the max time a Txn may use (in ms) from its begin to commit.
// We use it to abort the transaction to guarantee GC worker will not influence it.
const MaxTxnTimeUse = 24 * 60 * 60 * 1000
// SchemaAmender is used by pessimistic transactions to amend commit mutations for schema change during 2pc.
type SchemaAmender interface {
// AmendTxn is the amend entry point; new mutations will be generated based on the input mutations using schema change info.
// The returned results are the mutations that need to be prewritten and the mutations that need to be cleaned up.
AmendTxn(ctx context.Context, startInfoSchema SchemaVer, change *RelatedSchemaChange, mutations CommitterMutations) (CommitterMutations, error)
}
// KVTxn contains methods to interact with a TiKV transaction.
type KVTxn struct {
snapshot *KVSnapshot
us *unionstore.KVUnionStore
store *KVStore // for connection to region.
startTS uint64
startTime time.Time // Monotonic timestamp for recording txn time consuming.
commitTS uint64
mu sync.Mutex // For thread-safe LockKeys function.
setCnt int64
vars *tikv.Variables
committer *twoPhaseCommitter
lockedCnt int
valid bool
// schemaVer is the infoSchema fetched at startTS.
schemaVer SchemaVer
// SchemaAmender is used to amend pessimistic txn commit mutations for schema change
schemaAmender SchemaAmender
// commitCallback is called after current transaction gets committed
commitCallback func(info string, err error)
binlog BinlogExecutor
schemaLeaseChecker SchemaLeaseChecker
syncLog bool
priority Priority
isPessimistic bool
enableAsyncCommit bool
enable1PC bool
scope string
kvFilter KVFilter
}
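// extractStartTs resolves the start timestamp from the transaction options: an explicit StartTS, a staleness-derived ts, a minimum ts bounded by the stores' safe ts, or a fresh TSO from PD.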
func extractStartTs(store *KVStore, options kv.TransactionOption) (uint64, error) {
var startTs uint64
var err error
if options.StartTS != nil {
startTs = *options.StartTS
} else if options.PrevSec != nil {
bo := NewBackofferWithVars(context.Background(), tsoMaxBackoff, nil)
startTs, err = store.getStalenessTimestamp(bo, options.TxnScope, *options.PrevSec)
} else if options.MinStartTS != nil {
stores := make([]*Store, 0)
allStores := store.regionCache.getStoresByType(tikvrpc.TiKV)
if options.TxnScope != oracle.GlobalTxnScope {
for _, store := range allStores {
if store.IsLabelsMatch([]*metapb.StoreLabel{
{
Key: DCLabelKey,
Value: options.TxnScope,
},
}) {
stores = append(stores, store)
}
}
} else {
stores = allStores
}
safeTS := store.getMinSafeTSByStores(stores)
startTs = *options.MinStartTS
// If the safeTS is larger than the minStartTS, we will use safeTS as StartTS, otherwise we will use
// minStartTS directly.
if oracle.CompareTS(startTs, safeTS) < 0 {
startTs = safeTS
}
} else if options.MaxPrevSec != nil {
bo := NewBackofferWithVars(context.Background(), tsoMaxBackoff, nil)
minStartTS, err := store.getStalenessTimestamp(bo, options.TxnScope, *options.MaxPrevSec)
if err != nil {
return 0, errors.Trace(err)
}
options.MinStartTS = &minStartTS
return extractStartTs(store, options)
} else {
bo := NewBackofferWithVars(context.Background(), tsoMaxBackoff, nil)
startTs, err = store.getTimestampWithRetry(bo, options.TxnScope)
}
return startTs, err
}
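// newTiKVTxnWithOptions creates a KVTxn whose snapshot is taken at the start timestamp resolved from the options.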
func newTiKVTxnWithOptions(store *KVStore, options kv.TransactionOption) (*KVTxn, error) {
if options.TxnScope == "" {
options.TxnScope = oracle.GlobalTxnScope
}
startTs, err := extractStartTs(store, options)
if err != nil {
return nil, errors.Trace(err)
}
snapshot := newTiKVSnapshot(store, startTs, store.nextReplicaReadSeed())
newTiKVTxn := &KVTxn{
snapshot: snapshot,
us: unionstore.NewUnionStore(snapshot),
store: store,
startTS: startTs,
startTime: time.Now(),
valid: true,
vars: tikv.DefaultVars,
scope: options.TxnScope,
}
return newTiKVTxn, nil
}
// SetSuccess is used to probe if kv variables are set or not. It is ONLY used in test cases.
var SetSuccess = false
// SetVars sets variables to the transaction.
func (txn *KVTxn) SetVars(vars *tikv.Variables) {
txn.vars = vars
txn.snapshot.vars = vars
failpoint.Inject("probeSetVars", func(val failpoint.Value) {
if val.(bool) {
SetSuccess = true
}
})
}
// GetVars gets variables from the transaction.
func (txn *KVTxn) GetVars() *tikv.Variables {
return txn.vars
}
// Get implements transaction interface.
func (txn *KVTxn) Get(ctx context.Context, k []byte) ([]byte, error) {
ret, err := txn.us.Get(ctx, k)
if tikverr.IsErrNotFound(err) {
return nil, err
}
if err != nil {
return nil, errors.Trace(err)
}
return ret, nil
}
// Set sets the value for key k as v into kv store.
// v must NOT be nil or empty, otherwise it returns ErrCannotSetNilValue.
func (txn *KVTxn) Set(k []byte, v []byte) error {
txn.setCnt++
return txn.us.GetMemBuffer().Set(k, v)
}
// String implements fmt.Stringer interface.
func (txn *KVTxn) String() string {
return fmt.Sprintf("%d", txn.StartTS())
}
// Iter creates an Iterator positioned on the first entry whose key is >= k.
// If no such entry is found, it returns an invalid Iterator with no error.
// It yields only keys < upperBound. If upperBound is nil, the upper bound is unbounded.
// The Iterator must be Closed after use.
func (txn *KVTxn) Iter(k []byte, upperBound []byte) (unionstore.Iterator, error) {
return txn.us.Iter(k, upperBound)
}
// IterReverse creates a reversed Iterator positioned on the first entry whose key is less than k.
func (txn *KVTxn) IterReverse(k []byte) (unionstore.Iterator, error) {
return txn.us.IterReverse(k)
}
// Delete removes the entry for key k from kv store.
func (txn *KVTxn) Delete(k []byte) error {
return txn.us.GetMemBuffer().Delete(k)
}
// SetOption sets an option with a value, when val is nil, uses the default
// value of this option.
func (txn *KVTxn) SetOption(opt int, val interface{}) {
txn.us.SetOption(opt, val)
txn.snapshot.SetOption(opt, val)
}
// GetOption returns the option
func (txn *KVTxn) GetOption(opt int) interface{} {
return txn.us.GetOption(opt)
}
// DelOption deletes an option.
func (txn *KVTxn) DelOption(opt int) {
txn.us.DelOption(opt)
}
// SetSchemaLeaseChecker sets a hook to check schema version.
func (txn *KVTxn) SetSchemaLeaseChecker(checker SchemaLeaseChecker) {
txn.schemaLeaseChecker = checker
}
// EnableForceSyncLog indicates tikv to always sync log for the transaction.
func (txn *KVTxn) EnableForceSyncLog() {
txn.syncLog = true
}
// SetPessimistic indicates if the transaction should use pessimistic lock.
func (txn *KVTxn) SetPessimistic(b bool) {
txn.isPessimistic = b
}
// SetSchemaVer updates schema version to validate transaction.
func (txn *KVTxn) SetSchemaVer(schemaVer SchemaVer) {
txn.schemaVer = schemaVer
}
// SetPriority sets the priority for both write and read.
func (txn *KVTxn) SetPriority(pri Priority) {
txn.priority = pri
txn.GetSnapshot().SetPriority(pri)
}
// SetSchemaAmender sets an amender to update mutations after schema change.
func (txn *KVTxn) SetSchemaAmender(sa SchemaAmender) {
txn.schemaAmender = sa
}
// SetCommitCallback sets up a function that will be called when the transaction
// is finished.
func (txn *KVTxn) SetCommitCallback(f func(string, error)) {
txn.commitCallback = f
}
// SetEnableAsyncCommit indicates if the transaction will try to use async commit.
func (txn *KVTxn) SetEnableAsyncCommit(b bool) {
txn.enableAsyncCommit = b
}
// SetEnable1PC indicates if the transaction will try to use 1 phase commit.
func (txn *KVTxn) SetEnable1PC(b bool) {
txn.enable1PC = b
}
// SetScope sets the geographical scope of the transaction.
func (txn *KVTxn) SetScope(scope string) {
txn.scope = scope
}
// SetKVFilter sets the filter to ignore key-values in memory buffer.
func (txn *KVTxn) SetKVFilter(filter KVFilter) {
txn.kvFilter = filter
}
// IsPessimistic returns true if it is pessimistic.
func (txn *KVTxn) IsPessimistic() bool {
return txn.isPessimistic
}
// GetScope returns the geographical scope of the transaction.
func (txn *KVTxn) GetScope() string {
return txn.scope
}
// Commit commits the transaction operations to KV store.
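// A typical write path looks like this (sketch; assumes a *KVStore value named store):
//
//	txn, _ := store.Begin()
//	_ = txn.Set([]byte("k"), []byte("v"))
//	_ = txn.Commit(context.Background())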
func (txn *KVTxn) Commit(ctx context.Context) error {
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span1 := span.Tracer().StartSpan("tikvTxn.Commit", opentracing.ChildOf(span.Context()))
defer span1.Finish()
ctx = opentracing.ContextWithSpan(ctx, span1)
}
defer trace.StartRegion(ctx, "CommitTxn").End()
if !txn.valid {
return tikverr.ErrInvalidTxn
}
defer txn.close()
failpoint.Inject("mockCommitError", func(val failpoint.Value) {
if val.(bool) && IsMockCommitErrorEnable() {
MockCommitErrorDisable()
failpoint.Return(errors.New("mock commit error"))
}
})
start := time.Now()
defer func() { metrics.TxnCmdHistogramWithCommit.Observe(time.Since(start).Seconds()) }()
// sessionID is used for log.
var sessionID uint64
val := ctx.Value(util.SessionID)
if val != nil {
sessionID = val.(uint64)
}
var err error
// If the txn uses pessimistic locks, the committer is already initialized.
committer := txn.committer
if committer == nil {
committer, err = newTwoPhaseCommitter(txn, sessionID)
if err != nil {
return errors.Trace(err)
}
txn.committer = committer
}
defer committer.ttlManager.close()
initRegion := trace.StartRegion(ctx, "InitKeys")
err = committer.initKeysAndMutations()
initRegion.End()
if err != nil {
return errors.Trace(err)
}
if committer.mutations.Len() == 0 {
return nil
}
defer func() {
ctxValue := ctx.Value(util.CommitDetailCtxKey)
if ctxValue != nil {
commitDetail := ctxValue.(**util.CommitDetails)
if *commitDetail != nil {
(*commitDetail).TxnRetry++
} else {
*commitDetail = committer.getDetail()
}
}
}()
// latches disabled
// pessimistic transaction should also bypass latch.
if txn.store.txnLatches == nil || txn.IsPessimistic() {
err = committer.execute(ctx)
if val == nil || sessionID > 0 {
txn.onCommitted(err)
}
logutil.Logger(ctx).Debug("[kv] txnLatches disabled, 2pc directly", zap.Error(err))
return errors.Trace(err)
}
// latches enabled
// for transactions which need to acquire latches
start = time.Now()
lock := txn.store.txnLatches.Lock(committer.startTS, committer.mutations.GetKeys())
commitDetail := committer.getDetail()
commitDetail.LocalLatchTime = time.Since(start)
if commitDetail.LocalLatchTime > 0 {
metrics.TiKVLocalLatchWaitTimeHistogram.Observe(commitDetail.LocalLatchTime.Seconds())
}
defer txn.store.txnLatches.UnLock(lock)
if lock.IsStale() {
return &tikverr.ErrWriteConflictInLatch{StartTS: txn.startTS}
}
err = committer.execute(ctx)
if val == nil || sessionID > 0 {
txn.onCommitted(err)
}
if err == nil {
lock.SetCommitTS(committer.commitTS)
}
logutil.Logger(ctx).Debug("[kv] txnLatches enabled while txn retryable", zap.Error(err))
return errors.Trace(err)
}
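// close marks the transaction as invalid so any further operation on it fails.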
func (txn *KVTxn) close() {
txn.valid = false
}
// Rollback undoes the transaction operations to KV store.
func (txn *KVTxn) Rollback() error {
if !txn.valid {
return tikverr.ErrInvalidTxn
}
start := time.Now()
// Clean up pessimistic lock.
if txn.IsPessimistic() && txn.committer != nil {
err := txn.rollbackPessimisticLocks()
txn.committer.ttlManager.close()
if err != nil {
logutil.BgLogger().Error(err.Error())
}
}
txn.close()
logutil.BgLogger().Debug("[kv] rollback txn", zap.Uint64("txnStartTS", txn.StartTS()))
metrics.TxnCmdHistogramWithRollback.Observe(time.Since(start).Seconds())
return nil
}
func (txn *KVTxn) rollbackPessimisticLocks() error {
if txn.lockedCnt == 0 {
return nil
}
bo := NewBackofferWithVars(context.Background(), cleanupMaxBackoff, txn.vars)
keys := txn.collectLockedKeys()
return txn.committer.pessimisticRollbackMutations(bo, &PlainMutations{keys: keys})
}
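// collectLockedKeys scans the memory buffer and returns every key flagged as pessimistically locked.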
func (txn *KVTxn) collectLockedKeys() [][]byte {
keys := make([][]byte, 0, txn.lockedCnt)
buf := txn.GetMemBuffer()
var err error
for it := buf.IterWithFlags(nil, nil); it.Valid(); err = it.Next() {
_ = err
if it.Flags().HasLocked() {
keys = append(keys, it.Key())
}
}
return keys
}
// TxnInfo is used to keep track of the info of a committed transaction (mainly for diagnosis and testing)
type TxnInfo struct {
TxnScope string `json:"txn_scope"`
StartTS uint64 `json:"start_ts"`
CommitTS uint64 `json:"commit_ts"`
TxnCommitMode string `json:"txn_commit_mode"`
AsyncCommitFallback bool `json:"async_commit_fallback"`
OnePCFallback bool `json:"one_pc_fallback"`
ErrMsg string `json:"error,omitempty"`
}
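// onCommitted reports the commit mode and the commit result to the registered commit callback, if any.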
func (txn *KVTxn) onCommitted(err error) {
if txn.commitCallback != nil {
isAsyncCommit := txn.committer.isAsyncCommit()
isOnePC := txn.committer.isOnePC()
commitMode := "2pc"
if isOnePC {
commitMode = "1pc"
} else if isAsyncCommit {
commitMode = "async_commit"
}
info := TxnInfo{
TxnScope: txn.GetScope(),
StartTS: txn.startTS,
CommitTS: txn.commitTS,
TxnCommitMode: commitMode,
AsyncCommitFallback: txn.committer.hasTriedAsyncCommit && !isAsyncCommit,
OnePCFallback: txn.committer.hasTriedOnePC && !isOnePC,
}
if err != nil {
info.ErrMsg = err.Error()
}
infoStr, err2 := json.Marshal(info)
_ = err2
txn.commitCallback(string(infoStr), err)
}
}
// LockKeys tries to lock the entries with the keys in KV store.
// lockWaitTime is in ms; kv.LockAlwaysWait(0) means wait for the lock indefinitely, and kv.LockNowait(-1) means do not wait.
func (txn *KVTxn) LockKeys(ctx context.Context, lockCtx *tikv.LockCtx, keysInput ...[]byte) error {
// Exclude keys that are already locked.
var err error
keys := make([][]byte, 0, len(keysInput))
startTime := time.Now()
txn.mu.Lock()
defer txn.mu.Unlock()
defer func() {
metrics.TxnCmdHistogramWithLockKeys.Observe(time.Since(startTime).Seconds())
if err == nil {
if lockCtx.PessimisticLockWaited != nil {
if atomic.LoadInt32(lockCtx.PessimisticLockWaited) > 0 {
timeWaited := time.Since(lockCtx.WaitStartTime)
atomic.StoreInt64(lockCtx.LockKeysDuration, int64(timeWaited))
metrics.TiKVPessimisticLockKeysDuration.Observe(timeWaited.Seconds())
}
}
}
if lockCtx.LockKeysCount != nil {
*lockCtx.LockKeysCount += int32(len(keys))
}
if lockCtx.Stats != nil {
lockCtx.Stats.TotalTime = time.Since(startTime)
ctxValue := ctx.Value(util.LockKeysDetailCtxKey)
if ctxValue != nil {
lockKeysDetail := ctxValue.(**util.LockKeysDetails)
*lockKeysDetail = lockCtx.Stats
}
}
}()
memBuf := txn.us.GetMemBuffer()
for _, key := range keysInput {
// The value of lockedMap is only used by pessimistic transactions.
var valueExist, locked, checkKeyExists bool
if flags, err := memBuf.GetFlags(key); err == nil {
locked = flags.HasLocked()
valueExist = flags.HasLockedValueExists()
checkKeyExists = flags.HasNeedCheckExists()
}
if !locked {
keys = append(keys, key)
} else if txn.IsPessimistic() {
if checkKeyExists && valueExist {
alreadyExist := kvrpcpb.AlreadyExist{Key: key}
e := &tikverr.ErrKeyExist{AlreadyExist: &alreadyExist}
return txn.committer.extractKeyExistsErr(e)
}
}
if lockCtx.ReturnValues && locked {
// An already locked key can not return values, we add an entry to let the caller get the value
// in other ways.
lockCtx.Values[string(key)] = tikv.ReturnedValue{AlreadyLocked: true}
}
}
if len(keys) == 0 {
return nil
}
keys = deduplicateKeys(keys)
if txn.IsPessimistic() && lockCtx.ForUpdateTS > 0 {
if txn.committer == nil {
// sessionID is used for log.
var sessionID uint64
var err error
val := ctx.Value(util.SessionID)
if val != nil {
sessionID = val.(uint64)
}
txn.committer, err = newTwoPhaseCommitter(txn, sessionID)
if err != nil {
return err
}
}
var assignedPrimaryKey bool
if txn.committer.primaryKey == nil {
txn.committer.primaryKey = keys[0]
assignedPrimaryKey = true
}
lockCtx.Stats = &util.LockKeysDetails{
LockKeys: int32(len(keys)),
}
bo := NewBackofferWithVars(ctx, pessimisticLockMaxBackoff, txn.vars)
txn.committer.forUpdateTS = lockCtx.ForUpdateTS
// If the number of keys is greater than 1, they can be on different regions,
// and concurrent execution on multiple regions may lead to deadlock.
txn.committer.isFirstLock = txn.lockedCnt == 0 && len(keys) == 1
err = txn.committer.pessimisticLockMutations(bo, lockCtx, &PlainMutations{keys: keys})
if bo.totalSleep > 0 {
atomic.AddInt64(&lockCtx.Stats.BackoffTime, int64(bo.totalSleep)*int64(time.Millisecond))
lockCtx.Stats.Mu.Lock()
lockCtx.Stats.Mu.BackoffTypes = append(lockCtx.Stats.Mu.BackoffTypes, bo.types...)
lockCtx.Stats.Mu.Unlock()
}
if lockCtx.Killed != nil {
// If the kill signal is received during waiting for pessimisticLock,
// pessimisticLockKeys would handle the error but it doesn't reset the flag.
// We need to reset the killed flag here.
atomic.CompareAndSwapUint32(lockCtx.Killed, 1, 0)
}
if err != nil {
for _, key := range keys {
if txn.us.HasPresumeKeyNotExists(key) {
txn.us.UnmarkPresumeKeyNotExists(key)
}
}
keyMayBeLocked := !(tikverr.IsErrWriteConflict(err) || tikverr.IsErrKeyExist(err))
// If there is only 1 key and lock fails, no need to do pessimistic rollback.
if len(keys) > 1 || keyMayBeLocked {
wg := txn.asyncPessimisticRollback(ctx, keys)
if dl, ok := errors.Cause(err).(*tikverr.ErrDeadlock); ok && hashInKeys(dl.DeadlockKeyHash, keys) {
dl.IsRetryable = true
// Wait for the pessimistic rollback to finish before we retry the statement.
wg.Wait()
// Sleep a little, wait for the other transaction that blocked by this transaction to acquire the lock.
time.Sleep(time.Millisecond * 5)
failpoint.Inject("SingleStmtDeadLockRetrySleep", func() {
time.Sleep(300 * time.Millisecond)
})
}
}
if assignedPrimaryKey {
// unset the primary key if we assigned primary key when failed to lock it.
txn.committer.primaryKey = nil
}
return err
}
if assignedPrimaryKey {
txn.committer.ttlManager.run(txn.committer, lockCtx)
}
}
for _, key := range keys {
valExists := tikv.SetKeyLockedValueExists
// PointGet and BatchPointGet will return the value in the pessimistic lock response, and the value may not exist.
// For other lock modes, the locked key values always exist.
if lockCtx.ReturnValues {
val := lockCtx.Values[string(key)]
if len(val.Value) == 0 {
valExists = tikv.SetKeyLockedValueNotExists
}
}
memBuf.UpdateFlags(key, tikv.SetKeyLocked, tikv.DelNeedCheckExists, valExists)
}
txn.lockedCnt += len(keys)
return nil
}
// deduplicateKeys deduplicates the keys; it uses sort instead of a map to avoid memory allocation.
func deduplicateKeys(keys [][]byte) [][]byte {
sort.Slice(keys, func(i, j int) bool {
return bytes.Compare(keys[i], keys[j]) < 0
})
deduped := keys[:1]
for i := 1; i < len(keys); i++ {
if !bytes.Equal(deduped[len(deduped)-1], keys[i]) {
deduped = append(deduped, keys[i])
}
}
return deduped
}
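// asyncPessimisticRollback rolls back the pessimistic locks on keys in the background using a cloned committer; the returned WaitGroup is done once the rollback finishes.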
func (txn *KVTxn) asyncPessimisticRollback(ctx context.Context, keys [][]byte) *sync.WaitGroup {
// Clone a new committer to execute in the background.
committer := &twoPhaseCommitter{
store: txn.committer.store,
sessionID: txn.committer.sessionID,
startTS: txn.committer.startTS,
forUpdateTS: txn.committer.forUpdateTS,
primaryKey: txn.committer.primaryKey,
}
wg := new(sync.WaitGroup)
wg.Add(1)
go func() {
failpoint.Inject("beforeAsyncPessimisticRollback", func(val failpoint.Value) {
if s, ok := val.(string); ok {
if s == "skip" {
logutil.Logger(ctx).Info("[failpoint] injected skip async pessimistic rollback",
zap.Uint64("txnStartTS", txn.startTS))
wg.Done()
failpoint.Return()
} else if s == "delay" {
duration := time.Duration(rand.Int63n(int64(time.Second) * 2))
logutil.Logger(ctx).Info("[failpoint] injected delay before async pessimistic rollback",
zap.Uint64("txnStartTS", txn.startTS), zap.Duration("duration", duration))
time.Sleep(duration)
}
}
})
err := committer.pessimisticRollbackMutations(NewBackofferWithVars(ctx, pessimisticRollbackMaxBackoff, txn.vars), &PlainMutations{keys: keys})
if err != nil {
logutil.Logger(ctx).Warn("[kv] pessimisticRollback failed.", zap.Error(err))
}
wg.Done()
}()
return wg
}
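// hashInKeys reports whether deadlockKeyHash matches the fingerprint of any of the given keys.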
func hashInKeys(deadlockKeyHash uint64, keys [][]byte) bool {
for _, key := range keys {
if farm.Fingerprint64(key) == deadlockKeyHash {
return true
}
}
return false
}
// IsReadOnly checks if the transaction has only performed read operations.
func (txn *KVTxn) IsReadOnly() bool {
return !txn.us.GetMemBuffer().Dirty()
}
// StartTS returns the transaction start timestamp.
func (txn *KVTxn) StartTS() uint64 {
return txn.startTS
}
// Valid returns if the transaction is valid.
// A transaction become invalid after commit or rollback.
func (txn *KVTxn) Valid() bool {
return txn.valid
}
// Len returns the number of entries in the DB.
func (txn *KVTxn) Len() int {
return txn.us.GetMemBuffer().Len()
}
// Size returns sum of keys and values length.
func (txn *KVTxn) Size() int {
return txn.us.GetMemBuffer().Size()
}
// Reset reset the Transaction to initial states.
func (txn *KVTxn) Reset() {
txn.us.GetMemBuffer().Reset()
}
// GetUnionStore returns the UnionStore binding to this transaction.
func (txn *KVTxn) GetUnionStore() *unionstore.KVUnionStore {
return txn.us
}
// GetMemBuffer return the MemBuffer binding to this transaction.
func (txn *KVTxn) GetMemBuffer() *unionstore.MemDB {
return txn.us.GetMemBuffer()
}
// GetSnapshot returns the Snapshot binding to this transaction.
func (txn *KVTxn) GetSnapshot() *KVSnapshot {
return txn.snapshot
}
// SetBinlogExecutor sets the method to perform binlog synchronization.
func (txn *KVTxn) SetBinlogExecutor(binlog BinlogExecutor) {
txn.binlog = binlog
if txn.committer != nil {
txn.committer.binlog = binlog
}
}
// GetClusterID returns store's cluster id.
func (txn *KVTxn) GetClusterID() uint64 {
return txn.store.clusterID
}
| store/tikv/txn.go | 1 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.03319559618830681,
0.00224282150156796,
0.000163796721608378,
0.0008251490071415901,
0.004644479602575302
] |
{
"id": 5,
"code_window": [
"\tc.Assert(errors.Cause(err), Equals, tikverr.ErrNotExist)\n",
"}\n",
"\n",
"func (s *testAsyncCommitCommon) beginAsyncCommitWithLinearizability(c *C) tikv.TxnProbe {\n",
"\ttxn := s.beginAsyncCommit(c)\n",
"\ttxn.SetOption(kv.GuaranteeLinearizability, true)\n",
"\treturn txn\n",
"}\n",
"\n",
"func (s *testAsyncCommitCommon) beginAsyncCommit(c *C) tikv.TxnProbe {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\ttxn.SetCausalConsistency(false)\n"
],
"file_path": "store/tikv/tests/async_commit_test.go",
"type": "replace",
"edit_start_line_idx": 129
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunk
import (
"reflect"
"unsafe"
"github.com/cznic/mathutil"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/types/json"
)
var msgErrSelNotNil = "The selection vector of Chunk is not nil. Please file a bug to the TiDB Team"
// Chunk stores multiple rows of data in Apache Arrow format.
// See https://arrow.apache.org/docs/format/Columnar.html#physical-memory-layout
// Values are appended in compact format and can be directly accessed without decoding.
// When the chunk is done processing, we can reuse the allocated memory by resetting it.
type Chunk struct {
// sel indicates which rows are selected.
// If it is nil, all rows are selected.
sel []int
columns []*Column
// numVirtualRows indicates the number of virtual rows, which have zero Column.
// It is used only when this Chunk doesn't hold any data, i.e. "len(columns)==0".
numVirtualRows int
// capacity indicates the max number of rows this chunk can hold.
// TODO: replace all usages of capacity to requiredRows and remove this field
capacity int
// requiredRows indicates how many rows the parent executor want.
requiredRows int
}
// Capacity constants.
const (
InitialCapacity = 32
ZeroCapacity = 0
)
// NewChunkWithCapacity creates a new chunk with field types and capacity.
func NewChunkWithCapacity(fields []*types.FieldType, cap int) *Chunk {
return New(fields, cap, cap) //FIXME: in following PR.
}
// New creates a new chunk.
// cap: the limit for the max number of rows.
// maxChunkSize: the max limit for the number of rows.
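// A minimal usage sketch (illustrative; assumes TiDB's types and parser/mysql packages):
//
//	fts := []*types.FieldType{types.NewFieldType(mysql.TypeLonglong)}
//	chk := New(fts, 4, 1024)
//	chk.AppendInt64(0, 42)
//	_ = chk.GetRow(0).GetInt64(0) // 42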
func New(fields []*types.FieldType, cap, maxChunkSize int) *Chunk {
chk := &Chunk{
columns: make([]*Column, 0, len(fields)),
capacity: mathutil.Min(cap, maxChunkSize),
// set the default value of requiredRows to maxChunkSize to let chk.IsFull() behave
// like how we judge whether a chunk is full now, then the statement
// "chk.NumRows() < maxChunkSize"
// equals to "!chk.IsFull()".
requiredRows: maxChunkSize,
}
for _, f := range fields {
chk.columns = append(chk.columns, NewColumn(f, chk.capacity))
}
return chk
}
// renewWithCapacity creates a new Chunk based on an existing Chunk with capacity. The newly
// created Chunk has the same data schema with the old Chunk.
func renewWithCapacity(chk *Chunk, cap, maxChunkSize int) *Chunk {
newChk := new(Chunk)
if chk.columns == nil {
return newChk
}
newChk.columns = renewColumns(chk.columns, cap)
newChk.numVirtualRows = 0
newChk.capacity = cap
newChk.requiredRows = maxChunkSize
return newChk
}
// Renew creates a new Chunk based on an existing Chunk. The newly created Chunk
// has the same data schema with the old Chunk. The capacity of the new Chunk
// might be doubled based on the capacity of the old Chunk and the maxChunkSize.
// chk: old chunk (often used in the previous call).
// maxChunkSize: the limit for the max number of rows.
func Renew(chk *Chunk, maxChunkSize int) *Chunk {
newCap := reCalcCapacity(chk, maxChunkSize)
return renewWithCapacity(chk, newCap, maxChunkSize)
}
// renewColumns creates the columns of a Chunk. The capacity of the newly
// created columns is equal to cap.
func renewColumns(oldCol []*Column, cap int) []*Column {
columns := make([]*Column, 0, len(oldCol))
for _, col := range oldCol {
columns = append(columns, newColumn(col.typeSize(), cap))
}
return columns
}
// renewEmpty creates a new Chunk based on an existing Chunk
// but keep columns empty.
func renewEmpty(chk *Chunk) *Chunk {
newChk := &Chunk{
columns: nil,
numVirtualRows: chk.numVirtualRows,
capacity: chk.capacity,
requiredRows: chk.requiredRows,
}
if chk.sel != nil {
newChk.sel = make([]int, len(chk.sel))
copy(newChk.sel, chk.sel)
}
return newChk
}
// MemoryUsage returns the total memory usage of a Chunk in bytes.
// We ignore the size of Column.length and Column.nullCount
// since they have little effect on the total memory usage.
func (c *Chunk) MemoryUsage() (sum int64) {
for _, col := range c.columns {
curColMemUsage := int64(unsafe.Sizeof(*col)) + int64(cap(col.nullBitmap)) + int64(cap(col.offsets)*8) + int64(cap(col.data)) + int64(cap(col.elemBuf))
sum += curColMemUsage
}
return
}
// newFixedLenColumn creates a fixed length Column with elemLen and initial data capacity.
func newFixedLenColumn(elemLen, cap int) *Column {
return &Column{
elemBuf: make([]byte, elemLen),
data: make([]byte, 0, cap*elemLen),
nullBitmap: make([]byte, 0, (cap+7)>>3),
}
}
// newVarLenColumn creates a variable length Column with initial data capacity.
func newVarLenColumn(cap int, old *Column) *Column {
estimatedElemLen := 8
// For varLenColumn (e.g. varchar), the accurate length of an element is unknown.
// Therefore, in the first executor.Next we use an empirical value -- 8 (which may trigger runtime.growslice),
// but in subsequent Next calls we estimate the element length as 1.125 x the average element length of the previous call.
if old != nil && old.length != 0 {
estimatedElemLen = (len(old.data) + len(old.data)/8) / old.length
}
return &Column{
offsets: make([]int64, 1, cap+1),
data: make([]byte, 0, cap*estimatedElemLen),
nullBitmap: make([]byte, 0, (cap+7)>>3),
}
}
// RequiredRows returns the number of rows at which this chunk is considered full.
func (c *Chunk) RequiredRows() int {
return c.requiredRows
}
// SetRequiredRows sets the number of required rows.
func (c *Chunk) SetRequiredRows(requiredRows, maxChunkSize int) *Chunk {
if requiredRows <= 0 || requiredRows > maxChunkSize {
requiredRows = maxChunkSize
}
c.requiredRows = requiredRows
return c
}
// IsFull returns if this chunk is considered full.
func (c *Chunk) IsFull() bool {
return c.NumRows() >= c.requiredRows
}
// Prune creates a new Chunk according to `c` and prunes the columns
// whose index is not in `usedColIdxs`
func (c *Chunk) Prune(usedColIdxs []int) *Chunk {
chk := renewEmpty(c)
chk.columns = make([]*Column, len(usedColIdxs))
for i, idx := range usedColIdxs {
chk.columns[i] = c.columns[idx]
}
return chk
}
// MakeRef makes Column in "dstColIdx" reference to Column in "srcColIdx".
func (c *Chunk) MakeRef(srcColIdx, dstColIdx int) {
c.columns[dstColIdx] = c.columns[srcColIdx]
}
// MakeRefTo copies columns `src.columns[srcColIdx]` to `c.columns[dstColIdx]`.
func (c *Chunk) MakeRefTo(dstColIdx int, src *Chunk, srcColIdx int) error {
if c.sel != nil || src.sel != nil {
return errors.New(msgErrSelNotNil)
}
c.columns[dstColIdx] = src.columns[srcColIdx]
return nil
}
// SwapColumn swaps Column "c.columns[colIdx]" with Column
// "other.columns[otherIdx]". If there exists columns refer to the Column to be
// swapped, we need to re-build the reference.
func (c *Chunk) SwapColumn(colIdx int, other *Chunk, otherIdx int) error {
if c.sel != nil || other.sel != nil {
return errors.New(msgErrSelNotNil)
}
// Find the leftmost Column of the reference which is the actual Column to
// be swapped.
for i := 0; i < colIdx; i++ {
if c.columns[i] == c.columns[colIdx] {
colIdx = i
}
}
for i := 0; i < otherIdx; i++ {
if other.columns[i] == other.columns[otherIdx] {
otherIdx = i
}
}
// Find the columns which refer to the actual Column to be swapped.
refColsIdx := make([]int, 0, len(c.columns)-colIdx)
for i := colIdx; i < len(c.columns); i++ {
if c.columns[i] == c.columns[colIdx] {
refColsIdx = append(refColsIdx, i)
}
}
refColsIdx4Other := make([]int, 0, len(other.columns)-otherIdx)
for i := otherIdx; i < len(other.columns); i++ {
if other.columns[i] == other.columns[otherIdx] {
refColsIdx4Other = append(refColsIdx4Other, i)
}
}
// Swap columns from two chunks.
c.columns[colIdx], other.columns[otherIdx] = other.columns[otherIdx], c.columns[colIdx]
// Rebuild the reference.
for _, i := range refColsIdx {
c.MakeRef(colIdx, i)
}
for _, i := range refColsIdx4Other {
other.MakeRef(otherIdx, i)
}
return nil
}
// SwapColumns swaps columns with another Chunk.
func (c *Chunk) SwapColumns(other *Chunk) {
c.sel, other.sel = other.sel, c.sel
c.columns, other.columns = other.columns, c.columns
c.numVirtualRows, other.numVirtualRows = other.numVirtualRows, c.numVirtualRows
}
// SetNumVirtualRows sets the virtual row number for a Chunk.
// It should only be used when there exists no Column in the Chunk.
func (c *Chunk) SetNumVirtualRows(numVirtualRows int) {
c.numVirtualRows = numVirtualRows
}
// Reset resets the chunk, so the memory it allocated can be reused.
// Make sure all the data in the chunk is not used anymore before you reuse this chunk.
func (c *Chunk) Reset() {
c.sel = nil
if c.columns == nil {
return
}
for _, col := range c.columns {
col.reset()
}
c.numVirtualRows = 0
}
// CopyConstruct creates a new chunk and copies this chunk's data into it.
func (c *Chunk) CopyConstruct() *Chunk {
newChk := renewEmpty(c)
newChk.columns = make([]*Column, len(c.columns))
for i := range c.columns {
newChk.columns[i] = c.columns[i].CopyConstruct(nil)
}
return newChk
}
// GrowAndReset resets the Chunk and doubles the capacity of the Chunk.
// The doubled capacity should not be larger than maxChunkSize.
// TODO: this method will be used in following PR.
func (c *Chunk) GrowAndReset(maxChunkSize int) {
c.sel = nil
if c.columns == nil {
return
}
newCap := reCalcCapacity(c, maxChunkSize)
if newCap <= c.capacity {
c.Reset()
return
}
c.capacity = newCap
c.columns = renewColumns(c.columns, newCap)
c.numVirtualRows = 0
c.requiredRows = maxChunkSize
}
// reCalcCapacity calculates the capacity for another Chunk based on the current
// Chunk. The new capacity is doubled only when the current Chunk is full.
func reCalcCapacity(c *Chunk, maxChunkSize int) int {
if c.NumRows() < c.capacity {
return c.capacity
}
return mathutil.Min(c.capacity*2, maxChunkSize)
}
// Capacity returns the capacity of the Chunk.
func (c *Chunk) Capacity() int {
return c.capacity
}
// NumCols returns the number of columns in the chunk.
func (c *Chunk) NumCols() int {
return len(c.columns)
}
// NumRows returns the number of rows in the chunk.
func (c *Chunk) NumRows() int {
if c.sel != nil {
return len(c.sel)
}
if c.NumCols() == 0 {
return c.numVirtualRows
}
return c.columns[0].length
}
// GetRow gets the Row in the chunk with the row index.
func (c *Chunk) GetRow(idx int) Row {
if c.sel != nil {
// mapping the logical RowIdx to the actual physical RowIdx;
// for example, if the Sel is [1, 5, 6], then
// logical 0 -> physical 1,
// logical 1 -> physical 5,
// logical 2 -> physical 6.
// Then when we iterate this Chunk according to Row, only selected rows will be
// accessed while all filtered rows will be ignored.
return Row{c: c, idx: c.sel[idx]}
}
return Row{c: c, idx: idx}
}
// AppendRow appends a row to the chunk.
func (c *Chunk) AppendRow(row Row) {
c.AppendPartialRow(0, row)
c.numVirtualRows++
}
// AppendPartialRow appends a row to the chunk.
func (c *Chunk) AppendPartialRow(colOff int, row Row) {
c.appendSel(colOff)
for i, rowCol := range row.c.columns {
chkCol := c.columns[colOff+i]
appendCellByCell(chkCol, rowCol, row.idx)
}
}
// AppendRowByColIdxs appends a row by its colIdxs to the chunk.
// 1. Every column is used if colIdxs is nil.
// 2. No columns are used if colIdxs is not nil but its size is 0.
func (c *Chunk) AppendRowByColIdxs(row Row, colIdxs []int) (wide int) {
wide = c.AppendPartialRowByColIdxs(0, row, colIdxs)
c.numVirtualRows++
return
}
// AppendPartialRowByColIdxs appends a row by its colIdxs to the chunk.
// 1. Every column is used if colIdxs is nil.
// 2. No columns are used if colIdxs is not nil but its size is 0.
func (c *Chunk) AppendPartialRowByColIdxs(colOff int, row Row, colIdxs []int) (wide int) {
if colIdxs == nil {
c.AppendPartialRow(colOff, row)
return row.Len()
}
c.appendSel(colOff)
for i, colIdx := range colIdxs {
rowCol := row.c.columns[colIdx]
chkCol := c.columns[colOff+i]
appendCellByCell(chkCol, rowCol, row.idx)
}
return len(colIdxs)
}
// appendCellByCell appends the cell at rowIdx of src into dst.
func appendCellByCell(dst *Column, src *Column, rowIdx int) {
dst.appendNullBitmap(!src.IsNull(rowIdx))
if src.isFixed() {
elemLen := len(src.elemBuf)
offset := rowIdx * elemLen
dst.data = append(dst.data, src.data[offset:offset+elemLen]...)
} else {
start, end := src.offsets[rowIdx], src.offsets[rowIdx+1]
dst.data = append(dst.data, src.data[start:end]...)
dst.offsets = append(dst.offsets, int64(len(dst.data)))
}
dst.length++
}
// preAlloc pre-allocates the memory space in a Chunk to store the Row.
// NOTE: only used in test.
// 1. The Chunk must be empty or holds no useful data.
// 2. The schema of the Row must be the same with the Chunk.
// 3. This API is paired with the `Insert()` function, which inserts all the
// rows data into the Chunk after the pre-allocation.
// 4. We set the null bitmap here instead of in the Insert() function because
// when the Insert() function is called in parallel, the data race on a byte
// cannot be avoided although the manipulated bits are different inside a
// byte.
func (c *Chunk) preAlloc(row Row) (rowIdx uint32) {
rowIdx = uint32(c.NumRows())
for i, srcCol := range row.c.columns {
dstCol := c.columns[i]
dstCol.appendNullBitmap(!srcCol.IsNull(row.idx))
elemLen := len(srcCol.elemBuf)
if !srcCol.isFixed() {
elemLen = int(srcCol.offsets[row.idx+1] - srcCol.offsets[row.idx])
dstCol.offsets = append(dstCol.offsets, int64(len(dstCol.data)+elemLen))
}
dstCol.length++
needCap := len(dstCol.data) + elemLen
if needCap <= cap(dstCol.data) {
(*reflect.SliceHeader)(unsafe.Pointer(&dstCol.data)).Len = len(dstCol.data) + elemLen
continue
}
// Grow the capacity according to golang.growslice.
// Implementation differences with golang:
// 1. We double the capacity when `dstCol.data < 1024*elemLen bytes` but
// not `1024 bytes`.
// 2. We expand the capacity to 1.5*originCap rather than 1.25*originCap
// during the slow-increasing phase.
newCap := cap(dstCol.data)
doubleCap := newCap << 1
if needCap > doubleCap {
newCap = needCap
} else {
avgElemLen := elemLen
if !srcCol.isFixed() {
avgElemLen = len(dstCol.data) / len(dstCol.offsets)
}
// slowIncThreshold indicates the threshold exceeding which the
// dstCol.data capacity increase fold decreases from 2 to 1.5.
slowIncThreshold := 1024 * avgElemLen
if len(dstCol.data) < slowIncThreshold {
newCap = doubleCap
} else {
for 0 < newCap && newCap < needCap {
newCap += newCap / 2
}
if newCap <= 0 {
newCap = needCap
}
}
}
dstCol.data = make([]byte, len(dstCol.data)+elemLen, newCap)
}
return
}
// insert inserts `row` on the position specified by `rowIdx`.
// NOTE: only used in test.
// Note: Insert will cover the origin data, it should be called after
// PreAlloc.
func (c *Chunk) insert(rowIdx int, row Row) {
for i, srcCol := range row.c.columns {
if row.IsNull(i) {
continue
}
dstCol := c.columns[i]
var srcStart, srcEnd, destStart, destEnd int
if srcCol.isFixed() {
srcElemLen, destElemLen := len(srcCol.elemBuf), len(dstCol.elemBuf)
srcStart, destStart = row.idx*srcElemLen, rowIdx*destElemLen
srcEnd, destEnd = srcStart+srcElemLen, destStart+destElemLen
} else {
srcStart, srcEnd = int(srcCol.offsets[row.idx]), int(srcCol.offsets[row.idx+1])
destStart, destEnd = int(dstCol.offsets[rowIdx]), int(dstCol.offsets[rowIdx+1])
}
copy(dstCol.data[destStart:destEnd], srcCol.data[srcStart:srcEnd])
}
}
// Append appends rows in [begin, end) in another Chunk to a Chunk.
func (c *Chunk) Append(other *Chunk, begin, end int) {
for colID, src := range other.columns {
dst := c.columns[colID]
if src.isFixed() {
elemLen := len(src.elemBuf)
dst.data = append(dst.data, src.data[begin*elemLen:end*elemLen]...)
} else {
beginOffset, endOffset := src.offsets[begin], src.offsets[end]
dst.data = append(dst.data, src.data[beginOffset:endOffset]...)
lastOffset := dst.offsets[len(dst.offsets)-1]
for i := begin; i < end; i++ {
lastOffset += src.offsets[i+1] - src.offsets[i]
dst.offsets = append(dst.offsets, lastOffset)
}
}
for i := begin; i < end; i++ {
c.appendSel(colID)
dst.appendNullBitmap(!src.IsNull(i))
dst.length++
}
}
c.numVirtualRows += end - begin
}
// TruncateTo truncates rows from tail to head in a Chunk to "numRows" rows.
func (c *Chunk) TruncateTo(numRows int) {
c.Reconstruct()
for _, col := range c.columns {
if col.isFixed() {
elemLen := len(col.elemBuf)
col.data = col.data[:numRows*elemLen]
} else {
col.data = col.data[:col.offsets[numRows]]
col.offsets = col.offsets[:numRows+1]
}
col.length = numRows
bitmapLen := (col.length + 7) / 8
col.nullBitmap = col.nullBitmap[:bitmapLen]
if col.length%8 != 0 {
// When we append null, we simply increment the nullCount,
// so we need to clear the unused bits in the last bitmap byte.
lastByte := col.nullBitmap[bitmapLen-1]
unusedBitsLen := 8 - uint(col.length%8)
lastByte <<= unusedBitsLen
lastByte >>= unusedBitsLen
col.nullBitmap[bitmapLen-1] = lastByte
}
}
c.numVirtualRows = numRows
}
// AppendNull appends a null value to the chunk.
func (c *Chunk) AppendNull(colIdx int) {
c.appendSel(colIdx)
c.columns[colIdx].AppendNull()
}
// AppendInt64 appends an int64 value to the chunk.
func (c *Chunk) AppendInt64(colIdx int, i int64) {
c.appendSel(colIdx)
c.columns[colIdx].AppendInt64(i)
}
// AppendUint64 appends a uint64 value to the chunk.
func (c *Chunk) AppendUint64(colIdx int, u uint64) {
c.appendSel(colIdx)
c.columns[colIdx].AppendUint64(u)
}
// AppendFloat32 appends a float32 value to the chunk.
func (c *Chunk) AppendFloat32(colIdx int, f float32) {
c.appendSel(colIdx)
c.columns[colIdx].AppendFloat32(f)
}
// AppendFloat64 appends a float64 value to the chunk.
func (c *Chunk) AppendFloat64(colIdx int, f float64) {
c.appendSel(colIdx)
c.columns[colIdx].AppendFloat64(f)
}
// AppendString appends a string value to the chunk.
func (c *Chunk) AppendString(colIdx int, str string) {
c.appendSel(colIdx)
c.columns[colIdx].AppendString(str)
}
// AppendBytes appends a bytes value to the chunk.
func (c *Chunk) AppendBytes(colIdx int, b []byte) {
c.appendSel(colIdx)
c.columns[colIdx].AppendBytes(b)
}
// AppendTime appends a Time value to the chunk.
func (c *Chunk) AppendTime(colIdx int, t types.Time) {
c.appendSel(colIdx)
c.columns[colIdx].AppendTime(t)
}
// AppendDuration appends a Duration value to the chunk.
func (c *Chunk) AppendDuration(colIdx int, dur types.Duration) {
c.appendSel(colIdx)
c.columns[colIdx].AppendDuration(dur)
}
// AppendMyDecimal appends a MyDecimal value to the chunk.
func (c *Chunk) AppendMyDecimal(colIdx int, dec *types.MyDecimal) {
c.appendSel(colIdx)
c.columns[colIdx].AppendMyDecimal(dec)
}
// AppendEnum appends an Enum value to the chunk.
func (c *Chunk) AppendEnum(colIdx int, enum types.Enum) {
c.appendSel(colIdx)
c.columns[colIdx].appendNameValue(enum.Name, enum.Value)
}
// AppendSet appends a Set value to the chunk.
func (c *Chunk) AppendSet(colIdx int, set types.Set) {
c.appendSel(colIdx)
c.columns[colIdx].appendNameValue(set.Name, set.Value)
}
// AppendJSON appends a JSON value to the chunk.
func (c *Chunk) AppendJSON(colIdx int, j json.BinaryJSON) {
c.appendSel(colIdx)
c.columns[colIdx].AppendJSON(j)
}
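// appendSel appends the physical index of the newly added row to the selection
// vector when one is present. Column 0 is used as the standard because all
// columns share the same length.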
func (c *Chunk) appendSel(colIdx int) {
if colIdx == 0 && c.sel != nil { // use column 0 as standard
c.sel = append(c.sel, c.columns[0].length)
}
}
// AppendDatum appends a datum into the chunk.
func (c *Chunk) AppendDatum(colIdx int, d *types.Datum) {
switch d.Kind() {
case types.KindNull:
c.AppendNull(colIdx)
case types.KindInt64:
c.AppendInt64(colIdx, d.GetInt64())
case types.KindUint64:
c.AppendUint64(colIdx, d.GetUint64())
case types.KindFloat32:
c.AppendFloat32(colIdx, d.GetFloat32())
case types.KindFloat64:
c.AppendFloat64(colIdx, d.GetFloat64())
case types.KindString, types.KindBytes, types.KindBinaryLiteral, types.KindRaw, types.KindMysqlBit:
c.AppendBytes(colIdx, d.GetBytes())
case types.KindMysqlDecimal:
c.AppendMyDecimal(colIdx, d.GetMysqlDecimal())
case types.KindMysqlDuration:
c.AppendDuration(colIdx, d.GetMysqlDuration())
case types.KindMysqlEnum:
c.AppendEnum(colIdx, d.GetMysqlEnum())
case types.KindMysqlSet:
c.AppendSet(colIdx, d.GetMysqlSet())
case types.KindMysqlTime:
c.AppendTime(colIdx, d.GetMysqlTime())
case types.KindMysqlJSON:
c.AppendJSON(colIdx, d.GetMysqlJSON())
}
}
// Column returns the specific column.
func (c *Chunk) Column(colIdx int) *Column {
return c.columns[colIdx]
}
// SetCol sets the colIdx Column to col and returns the old Column.
func (c *Chunk) SetCol(colIdx int, col *Column) *Column {
if col == c.columns[colIdx] {
return nil
}
old := c.columns[colIdx]
c.columns[colIdx] = col
return old
}
// Sel returns Sel of this Chunk.
func (c *Chunk) Sel() []int {
return c.sel
}
// SetSel sets a Sel for this Chunk.
func (c *Chunk) SetSel(sel []int) {
c.sel = sel
}
// Reconstruct removes all filtered rows in this Chunk.
func (c *Chunk) Reconstruct() {
if c.sel == nil {
return
}
for _, col := range c.columns {
col.reconstruct(c.sel)
}
c.numVirtualRows = len(c.sel)
c.sel = nil
}
// ToString returns all the values in a chunk.
func (c *Chunk) ToString(ft []*types.FieldType) string {
var buf []byte
for rowIdx := 0; rowIdx < c.NumRows(); rowIdx++ {
row := c.GetRow(rowIdx)
buf = append(buf, row.ToString(ft)...)
buf = append(buf, '\n')
}
return string(buf)
}
// AppendRows appends multiple rows to the chunk.
func (c *Chunk) AppendRows(rows []Row) {
c.AppendPartialRows(0, rows)
c.numVirtualRows += len(rows)
}
// AppendPartialRows appends multiple rows to the chunk, writing to the columns starting at colOff.
func (c *Chunk) AppendPartialRows(colOff int, rows []Row) {
columns := c.columns[colOff:]
for i, dstCol := range columns {
for _, srcRow := range rows {
if i == 0 {
c.appendSel(colOff)
}
appendCellByCell(dstCol, srcRow.c.columns[i], srcRow.idx)
}
}
}
| util/chunk/chunk.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.0005334965535439551,
0.0001928324782056734,
0.0001602148695383221,
0.00017055973876267672,
0.0000602302243351005
] |
{
"id": 5,
"code_window": [
"\tc.Assert(errors.Cause(err), Equals, tikverr.ErrNotExist)\n",
"}\n",
"\n",
"func (s *testAsyncCommitCommon) beginAsyncCommitWithLinearizability(c *C) tikv.TxnProbe {\n",
"\ttxn := s.beginAsyncCommit(c)\n",
"\ttxn.SetOption(kv.GuaranteeLinearizability, true)\n",
"\treturn txn\n",
"}\n",
"\n",
"func (s *testAsyncCommitCommon) beginAsyncCommit(c *C) tikv.TxnProbe {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\ttxn.SetCausalConsistency(false)\n"
],
"file_path": "store/tikv/tests/async_commit_test.go",
"type": "replace",
"edit_start_line_idx": 129
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package txn
import (
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/tikv"
)
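// tikvScanner wraps tikv.Scanner so that the returned errors and keys are
// converted to the TiDB kv types.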
type tikvScanner struct {
*tikv.Scanner
}
// Next advances the scanner to the next element.
func (s *tikvScanner) Next() error {
err := s.Scanner.Next()
return extractKeyErr(err)
}
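// Key returns the key of the current entry as a kv.Key.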
func (s *tikvScanner) Key() kv.Key {
return kv.Key(s.Scanner.Key())
}
| store/driver/txn/scanner.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.0020915460772812366,
0.0008753899019211531,
0.00017396760813426226,
0.0006180229247547686,
0.0007890076376497746
] |
{
"id": 5,
"code_window": [
"\tc.Assert(errors.Cause(err), Equals, tikverr.ErrNotExist)\n",
"}\n",
"\n",
"func (s *testAsyncCommitCommon) beginAsyncCommitWithLinearizability(c *C) tikv.TxnProbe {\n",
"\ttxn := s.beginAsyncCommit(c)\n",
"\ttxn.SetOption(kv.GuaranteeLinearizability, true)\n",
"\treturn txn\n",
"}\n",
"\n",
"func (s *testAsyncCommitCommon) beginAsyncCommit(c *C) tikv.TxnProbe {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\ttxn.SetCausalConsistency(false)\n"
],
"file_path": "store/tikv/tests/async_commit_test.go",
"type": "replace",
"edit_start_line_idx": 129
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"context"
"math"
"sort"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/log"
"github.com/pingcap/parser"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/planner"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/types"
driver "github.com/pingcap/tidb/types/parser_driver"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/hint"
"github.com/pingcap/tidb/util/sqlexec"
"go.uber.org/zap"
)
var (
_ Executor = &DeallocateExec{}
_ Executor = &ExecuteExec{}
_ Executor = &PrepareExec{}
)
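// paramMarkerSorter implements sort.Interface and orders parameter markers by
// their offsets in the query text.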
type paramMarkerSorter struct {
markers []ast.ParamMarkerExpr
}
func (p *paramMarkerSorter) Len() int {
return len(p.markers)
}
func (p *paramMarkerSorter) Less(i, j int) bool {
return p.markers[i].(*driver.ParamMarkerExpr).Offset < p.markers[j].(*driver.ParamMarkerExpr).Offset
}
func (p *paramMarkerSorter) Swap(i, j int) {
p.markers[i], p.markers[j] = p.markers[j], p.markers[i]
}
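// paramMarkerExtractor implements ast.Visitor and collects all parameter
// markers that appear in a statement.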
type paramMarkerExtractor struct {
markers []ast.ParamMarkerExpr
}
func (e *paramMarkerExtractor) Enter(in ast.Node) (ast.Node, bool) {
return in, false
}
func (e *paramMarkerExtractor) Leave(in ast.Node) (ast.Node, bool) {
if x, ok := in.(*driver.ParamMarkerExpr); ok {
e.markers = append(e.markers, x)
}
return in, true
}
// PrepareExec represents a PREPARE executor.
type PrepareExec struct {
baseExecutor
is infoschema.InfoSchema
name string
sqlText string
ID uint32
ParamCount int
Fields []*ast.ResultField
}
// NewPrepareExec creates a new PrepareExec.
func NewPrepareExec(ctx sessionctx.Context, is infoschema.InfoSchema, sqlTxt string) *PrepareExec {
base := newBaseExecutor(ctx, nil, 0)
base.initCap = chunk.ZeroCapacity
return &PrepareExec{
baseExecutor: base,
is: is,
sqlText: sqlTxt,
}
}
// Next implements the Executor Next interface.
func (e *PrepareExec) Next(ctx context.Context, req *chunk.Chunk) error {
vars := e.ctx.GetSessionVars()
if e.ID != 0 {
// Must be the case when we retry a prepare.
// Make sure it is idempotent.
_, ok := vars.PreparedStmts[e.ID]
if ok {
return nil
}
}
charset, collation := vars.GetCharsetInfo()
var (
stmts []ast.StmtNode
err error
)
if sqlParser, ok := e.ctx.(sqlexec.SQLParser); ok {
// FIXME: ok... yet another parse API; the API surface may need some cleanup.
stmts, err = sqlParser.ParseSQL(e.sqlText, charset, collation)
} else {
p := parser.New()
p.SetParserConfig(vars.BuildParserConfig())
var warns []error
stmts, warns, err = p.Parse(e.sqlText, charset, collation)
for _, warn := range warns {
e.ctx.GetSessionVars().StmtCtx.AppendWarning(util.SyntaxWarn(warn))
}
}
if err != nil {
return util.SyntaxError(err)
}
if len(stmts) != 1 {
return ErrPrepareMulti
}
stmt := stmts[0]
err = ResetContextOfStmt(e.ctx, stmt)
if err != nil {
return err
}
var extractor paramMarkerExtractor
stmt.Accept(&extractor)
// DDL statements cannot accept parameters
if _, ok := stmt.(ast.DDLNode); ok && len(extractor.markers) > 0 {
return ErrPrepareDDL
}
switch stmt.(type) {
case *ast.LoadDataStmt, *ast.PrepareStmt, *ast.ExecuteStmt, *ast.DeallocateStmt:
return ErrUnsupportedPs
}
// The number of prepare parameters must NOT exceed 2 bytes (MaxUint16), see
// https://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html#packet-COM_STMT_PREPARE_OK.
if len(extractor.markers) > math.MaxUint16 {
return ErrPsManyParam
}
err = plannercore.Preprocess(e.ctx, stmt, e.is, plannercore.InPrepare)
if err != nil {
return err
}
// The parameter markers are appended in visiting order, which may not
// be the same as the position order in the query string. We need to
// sort it by position.
sorter := ¶mMarkerSorter{markers: extractor.markers}
sort.Sort(sorter)
e.ParamCount = len(sorter.markers)
for i := 0; i < e.ParamCount; i++ {
sorter.markers[i].SetOrder(i)
}
prepared := &ast.Prepared{
Stmt: stmt,
StmtType: GetStmtLabel(stmt),
Params: sorter.markers,
SchemaVersion: e.is.SchemaMetaVersion(),
}
if !plannercore.PreparedPlanCacheEnabled() {
prepared.UseCache = false
} else {
if !e.ctx.GetSessionVars().UseDynamicPartitionPrune() {
prepared.UseCache = plannercore.Cacheable(stmt, e.is)
} else {
prepared.UseCache = plannercore.Cacheable(stmt, nil)
}
}
// We try to build the real statement of preparedStmt.
for i := range prepared.Params {
param := prepared.Params[i].(*driver.ParamMarkerExpr)
param.Datum.SetNull()
param.InExecute = false
}
var p plannercore.Plan
e.ctx.GetSessionVars().PlanID = 0
e.ctx.GetSessionVars().PlanColumnID = 0
destBuilder, _ := plannercore.NewPlanBuilder(e.ctx, e.is, &hint.BlockHintProcessor{})
p, err = destBuilder.Build(ctx, stmt)
if err != nil {
return err
}
if _, ok := stmt.(*ast.SelectStmt); ok {
e.Fields = colNames2ResultFields(p.Schema(), p.OutputNames(), vars.CurrentDB)
}
if e.ID == 0 {
e.ID = vars.GetNextPreparedStmtID()
}
if e.name != "" {
vars.PreparedStmtNameToID[e.name] = e.ID
}
normalized, digest := parser.NormalizeDigest(prepared.Stmt.Text())
preparedObj := &plannercore.CachedPrepareStmt{
PreparedAst: prepared,
VisitInfos: destBuilder.GetVisitInfo(),
NormalizedSQL: normalized,
SQLDigest: digest,
ForUpdateRead: destBuilder.GetIsForUpdateRead(),
}
return vars.AddPreparedStmt(e.ID, preparedObj)
}
// ExecuteExec represents an EXECUTE executor.
// It cannot be executed by itself; all it needs to do is build
// another Executor from a prepared statement.
type ExecuteExec struct {
baseExecutor
is infoschema.InfoSchema
name string
usingVars []expression.Expression
stmtExec Executor
stmt ast.StmtNode
plan plannercore.Plan
id uint32
lowerPriority bool
outputNames []*types.FieldName
}
// Next implements the Executor Next interface.
func (e *ExecuteExec) Next(ctx context.Context, req *chunk.Chunk) error {
return nil
}
// Build builds a prepared statement into an executor.
// After Build, e.StmtExec will be used to do the real execution.
func (e *ExecuteExec) Build(b *executorBuilder) error {
if snapshotTS := e.ctx.GetSessionVars().SnapshotTS; snapshotTS != 0 {
if err := e.ctx.InitTxnWithStartTS(snapshotTS); err != nil {
return err
}
} else {
ok, err := plannercore.IsPointGetWithPKOrUniqueKeyByAutoCommit(e.ctx, e.plan)
if err != nil {
return err
}
if ok {
err = e.ctx.InitTxnWithStartTS(math.MaxUint64)
if err != nil {
return err
}
}
}
stmtExec := b.build(e.plan)
if b.err != nil {
log.Warn("rebuild plan in EXECUTE statement failed", zap.String("labelName of PREPARE statement", e.name))
return errors.Trace(b.err)
}
e.stmtExec = stmtExec
if e.ctx.GetSessionVars().StmtCtx.Priority == mysql.NoPriority {
e.lowerPriority = needLowerPriority(e.plan)
}
return nil
}
// DeallocateExec represents a DEALLOCATE executor.
type DeallocateExec struct {
baseExecutor
Name string
}
// Next implements the Executor Next interface.
func (e *DeallocateExec) Next(ctx context.Context, req *chunk.Chunk) error {
vars := e.ctx.GetSessionVars()
id, ok := vars.PreparedStmtNameToID[e.Name]
if !ok {
return errors.Trace(plannercore.ErrStmtNotFound)
}
preparedPointer := vars.PreparedStmts[id]
preparedObj, ok := preparedPointer.(*plannercore.CachedPrepareStmt)
if !ok {
return errors.Errorf("invalid CachedPrepareStmt type")
}
prepared := preparedObj.PreparedAst
delete(vars.PreparedStmtNameToID, e.Name)
if plannercore.PreparedPlanCacheEnabled() {
e.ctx.PreparedPlanCache().Delete(plannercore.NewPSTMTPlanCacheKey(
vars, id, prepared.SchemaVersion,
))
}
vars.RemovePreparedStmt(id)
return nil
}
// CompileExecutePreparedStmt compiles a session Execute command to a stmt.Statement.
func CompileExecutePreparedStmt(ctx context.Context, sctx sessionctx.Context,
ID uint32, args []types.Datum) (sqlexec.Statement, bool, bool, error) {
startTime := time.Now()
defer func() {
sctx.GetSessionVars().DurationCompile = time.Since(startTime)
}()
execStmt := &ast.ExecuteStmt{ExecID: ID}
if err := ResetContextOfStmt(sctx, execStmt); err != nil {
return nil, false, false, err
}
execStmt.BinaryArgs = args
is := sctx.GetSessionVars().GetInfoSchema().(infoschema.InfoSchema)
execPlan, names, err := planner.Optimize(ctx, sctx, execStmt, is)
if err != nil {
return nil, false, false, err
}
stmt := &ExecStmt{
GoCtx: ctx,
InfoSchema: is,
Plan: execPlan,
StmtNode: execStmt,
Ctx: sctx,
OutputNames: names,
}
if preparedPointer, ok := sctx.GetSessionVars().PreparedStmts[ID]; ok {
preparedObj, ok := preparedPointer.(*plannercore.CachedPrepareStmt)
if !ok {
return nil, false, false, errors.Errorf("invalid CachedPrepareStmt type")
}
stmtCtx := sctx.GetSessionVars().StmtCtx
stmt.Text = preparedObj.PreparedAst.Stmt.Text()
stmtCtx.OriginalSQL = stmt.Text
stmtCtx.InitSQLDigest(preparedObj.NormalizedSQL, preparedObj.SQLDigest)
}
tiFlashPushDown, tiFlashExchangePushDown := plannercore.IsTiFlashContained(stmt.Plan)
return stmt, tiFlashPushDown, tiFlashExchangePushDown, nil
}
| executor/prepared.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.0021758561488240957,
0.0002497019886504859,
0.00016154903278220445,
0.00017184905300382525,
0.00033838438685052097
] |
{
"id": 6,
"code_window": [
"\tc.Assert(err, IsNil)\n",
"\ttxn.SetEnableAsyncCommit(false)\n",
"\ttxn.SetEnable1PC(false)\n",
"\ttxn.SetOption(kv.GuaranteeLinearizability, false)\n",
"\n",
"\t// Prewrite the lock without committing it\n",
"\tc.Assert(failpoint.Enable(\"github.com/pingcap/tidb/store/tikv/beforeCommit\", `pause`), IsNil)\n",
"\tch := make(chan struct{})\n",
"\tcommitter, err := txn.NewCommitter(1)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\ttxn.SetCausalConsistency(true)\n"
],
"file_path": "store/tikv/tests/snapshot_fail_test.go",
"type": "replace",
"edit_start_line_idx": 215
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv_test
import (
"bytes"
"context"
"fmt"
"math"
"sync/atomic"
"testing"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/store/mockstore/unistore"
"github.com/pingcap/tidb/store/tikv"
tikverr "github.com/pingcap/tidb/store/tikv/error"
"github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/store/tikv/util"
)
func TestT(t *testing.T) {
CustomVerboseFlag = true
TestingT(t)
}
// testAsyncCommitCommon holds the common parts that are used by both
// testAsyncCommitSuite and testAsyncCommitFailSuite.
type testAsyncCommitCommon struct {
cluster cluster.Cluster
store *tikv.KVStore
}
func (s *testAsyncCommitCommon) setUpTest(c *C) {
if *WithTiKV {
s.store = NewTestStore(c)
return
}
client, pdClient, cluster, err := unistore.New("")
c.Assert(err, IsNil)
unistore.BootstrapWithSingleStore(cluster)
s.cluster = cluster
store, err := tikv.NewTestTiKVStore(client, pdClient, nil, nil, 0)
c.Assert(err, IsNil)
s.store = store
}
func (s *testAsyncCommitCommon) putAlphabets(c *C, enableAsyncCommit bool) {
for ch := byte('a'); ch <= byte('z'); ch++ {
s.putKV(c, []byte{ch}, []byte{ch}, enableAsyncCommit)
}
}
func (s *testAsyncCommitCommon) putKV(c *C, key, value []byte, enableAsyncCommit bool) (uint64, uint64) {
txn := s.beginAsyncCommit(c)
err := txn.Set(key, value)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
return txn.StartTS(), txn.GetCommitTS()
}
func (s *testAsyncCommitCommon) mustGetFromTxn(c *C, txn tikv.TxnProbe, key, expectedValue []byte) {
v, err := txn.Get(context.Background(), key)
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, expectedValue)
}
func (s *testAsyncCommitCommon) mustGetLock(c *C, key []byte) *tikv.Lock {
ver, err := s.store.CurrentTimestamp(oracle.GlobalTxnScope)
c.Assert(err, IsNil)
req := tikvrpc.NewRequest(tikvrpc.CmdGet, &kvrpcpb.GetRequest{
Key: key,
Version: ver,
})
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, key)
c.Assert(err, IsNil)
resp, err := s.store.SendReq(bo, req, loc.Region, time.Second*10)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
keyErr := resp.Resp.(*kvrpcpb.GetResponse).GetError()
c.Assert(keyErr, NotNil)
var lockutil tikv.LockProbe
lock, err := lockutil.ExtractLockFromKeyErr(keyErr)
c.Assert(err, IsNil)
return lock
}
func (s *testAsyncCommitCommon) mustPointGet(c *C, key, expectedValue []byte) {
snap := s.store.GetSnapshot(math.MaxUint64)
value, err := snap.Get(context.Background(), key)
c.Assert(err, IsNil)
c.Assert(value, BytesEquals, expectedValue)
}
func (s *testAsyncCommitCommon) mustGetFromSnapshot(c *C, version uint64, key, expectedValue []byte) {
snap := s.store.GetSnapshot(version)
value, err := snap.Get(context.Background(), key)
c.Assert(err, IsNil)
c.Assert(value, BytesEquals, expectedValue)
}
func (s *testAsyncCommitCommon) mustGetNoneFromSnapshot(c *C, version uint64, key []byte) {
snap := s.store.GetSnapshot(version)
_, err := snap.Get(context.Background(), key)
c.Assert(errors.Cause(err), Equals, tikverr.ErrNotExist)
}
func (s *testAsyncCommitCommon) beginAsyncCommitWithLinearizability(c *C) tikv.TxnProbe {
txn := s.beginAsyncCommit(c)
txn.SetOption(kv.GuaranteeLinearizability, true)
return txn
}
func (s *testAsyncCommitCommon) beginAsyncCommit(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.SetEnableAsyncCommit(true)
return tikv.TxnProbe{KVTxn: txn}
}
func (s *testAsyncCommitCommon) begin(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
return tikv.TxnProbe{KVTxn: txn}
}
type testAsyncCommitSuite struct {
OneByOneSuite
testAsyncCommitCommon
bo *tikv.Backoffer
}
var _ = SerialSuites(&testAsyncCommitSuite{})
func (s *testAsyncCommitSuite) SetUpTest(c *C) {
s.testAsyncCommitCommon.setUpTest(c)
s.bo = tikv.NewBackofferWithVars(context.Background(), 5000, nil)
}
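// lockKeysWithAsyncCommit prewrites the given keys and the primary key with
// async commit enabled and, if commitPrimary is set, commits the mutations.
// It returns the start ts and the commit ts of the transaction.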
func (s *testAsyncCommitSuite) lockKeysWithAsyncCommit(c *C, keys, values [][]byte, primaryKey, primaryValue []byte, commitPrimary bool) (uint64, uint64) {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.SetEnableAsyncCommit(true)
for i, k := range keys {
if len(values[i]) > 0 {
err = txn.Set(k, values[i])
} else {
err = txn.Delete(k)
}
c.Assert(err, IsNil)
}
if len(primaryValue) > 0 {
err = txn.Set(primaryKey, primaryValue)
} else {
err = txn.Delete(primaryKey)
}
c.Assert(err, IsNil)
txnProbe := tikv.TxnProbe{KVTxn: txn}
tpc, err := txnProbe.NewCommitter(0)
c.Assert(err, IsNil)
tpc.SetPrimaryKey(primaryKey)
ctx := context.Background()
err = tpc.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
if commitPrimary {
commitTS, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
tpc.SetCommitTS(commitTS)
err = tpc.CommitMutations(ctx)
c.Assert(err, IsNil)
}
return txn.StartTS(), tpc.GetCommitTS()
}
func (s *testAsyncCommitSuite) TestCheckSecondaries(c *C) {
// This test doesn't support tikv mode.
if *WithTiKV {
return
}
s.putAlphabets(c, true)
loc, err := s.store.GetRegionCache().LocateKey(s.bo, []byte("a"))
c.Assert(err, IsNil)
newRegionID, peerID := s.cluster.AllocID(), s.cluster.AllocID()
s.cluster.Split(loc.Region.GetID(), newRegionID, []byte("e"), []uint64{peerID}, peerID)
s.store.GetRegionCache().InvalidateCachedRegion(loc.Region)
// No locks to check, only primary key is locked, should be successful.
s.lockKeysWithAsyncCommit(c, [][]byte{}, [][]byte{}, []byte("z"), []byte("z"), false)
lock := s.mustGetLock(c, []byte("z"))
lock.UseAsyncCommit = true
ts, err := s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
var lockutil tikv.LockProbe
status := lockutil.NewLockStatus(nil, true, ts)
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
err = resolver.ResolveLockAsync(s.bo, lock, status)
c.Assert(err, IsNil)
currentTS, err := s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
status, err = resolver.GetTxnStatus(s.bo, lock.TxnID, []byte("z"), currentTS, currentTS, true, false, nil)
c.Assert(err, IsNil)
c.Assert(status.IsCommitted(), IsTrue)
c.Assert(status.CommitTS(), Equals, ts)
// One key is committed (i), one key is locked (a). Should get committed.
ts, err = s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
commitTs := ts + 10
gotCheckA := int64(0)
gotCheckB := int64(0)
gotResolve := int64(0)
gotOther := int64(0)
mock := mockResolveClient{
inner: s.store.GetTiKVClient(),
onCheckSecondaries: func(req *kvrpcpb.CheckSecondaryLocksRequest) (*tikvrpc.Response, error) {
if req.StartVersion != ts {
return nil, errors.Errorf("Bad start version: %d, expected: %d", req.StartVersion, ts)
}
var resp kvrpcpb.CheckSecondaryLocksResponse
for _, k := range req.Keys {
if bytes.Equal(k, []byte("a")) {
atomic.StoreInt64(&gotCheckA, 1)
resp = kvrpcpb.CheckSecondaryLocksResponse{
Locks: []*kvrpcpb.LockInfo{{Key: []byte("a"), PrimaryLock: []byte("z"), LockVersion: ts, UseAsyncCommit: true}},
CommitTs: commitTs,
}
} else if bytes.Equal(k, []byte("i")) {
atomic.StoreInt64(&gotCheckB, 1)
resp = kvrpcpb.CheckSecondaryLocksResponse{
Locks: []*kvrpcpb.LockInfo{},
CommitTs: commitTs,
}
} else {
fmt.Printf("Got other key: %s\n", k)
atomic.StoreInt64(&gotOther, 1)
}
}
return &tikvrpc.Response{Resp: &resp}, nil
},
onResolveLock: func(req *kvrpcpb.ResolveLockRequest) (*tikvrpc.Response, error) {
if req.StartVersion != ts {
return nil, errors.Errorf("Bad start version: %d, expected: %d", req.StartVersion, ts)
}
if req.CommitVersion != commitTs {
return nil, errors.Errorf("Bad commit version: %d, expected: %d", req.CommitVersion, commitTs)
}
for _, k := range req.Keys {
if bytes.Equal(k, []byte("a")) || bytes.Equal(k, []byte("z")) {
atomic.StoreInt64(&gotResolve, 1)
} else {
atomic.StoreInt64(&gotOther, 1)
}
}
resp := kvrpcpb.ResolveLockResponse{}
return &tikvrpc.Response{Resp: &resp}, nil
},
}
s.store.SetTiKVClient(&mock)
status = lockutil.NewLockStatus([][]byte{[]byte("a"), []byte("i")}, true, 0)
lock = &tikv.Lock{
Key: []byte("a"),
Primary: []byte("z"),
TxnID: ts,
LockType: kvrpcpb.Op_Put,
UseAsyncCommit: true,
MinCommitTS: ts + 5,
}
_ = s.beginAsyncCommit(c)
err = resolver.ResolveLockAsync(s.bo, lock, status)
c.Assert(err, IsNil)
c.Assert(gotCheckA, Equals, int64(1))
c.Assert(gotCheckB, Equals, int64(1))
c.Assert(gotOther, Equals, int64(0))
c.Assert(gotResolve, Equals, int64(1))
// One key has been rolled back (b), one is locked (a). Should be rolled back.
ts, err = s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
commitTs = ts + 10
gotCheckA = int64(0)
gotCheckB = int64(0)
gotResolve = int64(0)
gotOther = int64(0)
mock.onResolveLock = func(req *kvrpcpb.ResolveLockRequest) (*tikvrpc.Response, error) {
if req.StartVersion != ts {
return nil, errors.Errorf("Bad start version: %d, expected: %d", req.StartVersion, ts)
}
if req.CommitVersion != commitTs {
return nil, errors.Errorf("Bad commit version: %d, expected: 0", req.CommitVersion)
}
for _, k := range req.Keys {
if bytes.Equal(k, []byte("a")) || bytes.Equal(k, []byte("z")) {
atomic.StoreInt64(&gotResolve, 1)
} else {
atomic.StoreInt64(&gotOther, 1)
}
}
resp := kvrpcpb.ResolveLockResponse{}
return &tikvrpc.Response{Resp: &resp}, nil
}
lock.TxnID = ts
lock.MinCommitTS = ts + 5
err = resolver.ResolveLockAsync(s.bo, lock, status)
c.Assert(err, IsNil)
c.Assert(gotCheckA, Equals, int64(1))
c.Assert(gotCheckB, Equals, int64(1))
c.Assert(gotResolve, Equals, int64(1))
c.Assert(gotOther, Equals, int64(0))
}
func (s *testAsyncCommitSuite) TestRepeatableRead(c *C) {
var sessionID uint64 = 0
test := func(isPessimistic bool) {
s.putKV(c, []byte("k1"), []byte("v1"), true)
sessionID++
ctx := context.WithValue(context.Background(), util.SessionID, sessionID)
txn1 := s.beginAsyncCommit(c)
txn1.SetPessimistic(isPessimistic)
s.mustGetFromTxn(c, txn1, []byte("k1"), []byte("v1"))
txn1.Set([]byte("k1"), []byte("v2"))
for i := 0; i < 20; i++ {
_, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
}
txn2 := s.beginAsyncCommit(c)
s.mustGetFromTxn(c, txn2, []byte("k1"), []byte("v1"))
err := txn1.Commit(ctx)
c.Assert(err, IsNil)
// Check txn1 is committed in async commit.
c.Assert(txn1.IsAsyncCommit(), IsTrue)
s.mustGetFromTxn(c, txn2, []byte("k1"), []byte("v1"))
err = txn2.Rollback()
c.Assert(err, IsNil)
txn3 := s.beginAsyncCommit(c)
s.mustGetFromTxn(c, txn3, []byte("k1"), []byte("v2"))
err = txn3.Rollback()
c.Assert(err, IsNil)
}
test(false)
test(true)
}
// It's just a simple validation of linearizability.
// Extra tests are needed to test this feature with the control of the TiKV cluster.
func (s *testAsyncCommitSuite) TestAsyncCommitLinearizability(c *C) {
t1 := s.beginAsyncCommitWithLinearizability(c)
t2 := s.beginAsyncCommitWithLinearizability(c)
err := t1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = t2.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
ctx := context.WithValue(context.Background(), util.SessionID, uint64(1))
// t2 commits earlier than t1
err = t2.Commit(ctx)
c.Assert(err, IsNil)
err = t1.Commit(ctx)
c.Assert(err, IsNil)
commitTS1 := t1.GetCommitTS()
commitTS2 := t2.GetCommitTS()
c.Assert(commitTS2, Less, commitTS1)
}
// TestAsyncCommitWithMultiDC tests that async commit can only be enabled in global transactions
func (s *testAsyncCommitSuite) TestAsyncCommitWithMultiDC(c *C) {
// It requires setting placement rules to run with TiKV
if *WithTiKV {
return
}
localTxn := s.beginAsyncCommit(c)
err := localTxn.Set([]byte("a"), []byte("a1"))
localTxn.SetScope("bj")
c.Assert(err, IsNil)
ctx := context.WithValue(context.Background(), util.SessionID, uint64(1))
err = localTxn.Commit(ctx)
c.Assert(err, IsNil)
c.Assert(localTxn.IsAsyncCommit(), IsFalse)
globalTxn := s.beginAsyncCommit(c)
err = globalTxn.Set([]byte("b"), []byte("b1"))
globalTxn.SetScope(oracle.GlobalTxnScope)
c.Assert(err, IsNil)
err = globalTxn.Commit(ctx)
c.Assert(err, IsNil)
c.Assert(globalTxn.IsAsyncCommit(), IsTrue)
}
func (s *testAsyncCommitSuite) TestResolveTxnFallbackFromAsyncCommit(c *C) {
keys := [][]byte{[]byte("k0"), []byte("k1")}
values := [][]byte{[]byte("v00"), []byte("v10")}
initTest := func() tikv.CommitterProbe {
t0 := s.begin(c)
err := t0.Set(keys[0], values[0])
c.Assert(err, IsNil)
err = t0.Set(keys[1], values[1])
c.Assert(err, IsNil)
err = t0.Commit(context.Background())
c.Assert(err, IsNil)
t1 := s.beginAsyncCommit(c)
err = t1.Set(keys[0], []byte("v01"))
c.Assert(err, IsNil)
err = t1.Set(keys[1], []byte("v11"))
c.Assert(err, IsNil)
committer, err := t1.NewCommitter(1)
c.Assert(err, IsNil)
committer.SetLockTTL(1)
committer.SetUseAsyncCommit()
return committer
}
prewriteKey := func(committer tikv.CommitterProbe, idx int, fallback bool) {
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, keys[idx])
c.Assert(err, IsNil)
req := committer.BuildPrewriteRequest(loc.Region.GetID(), loc.Region.GetConfVer(), loc.Region.GetVer(),
committer.GetMutations().Slice(idx, idx+1), 1)
if fallback {
req.Req.(*kvrpcpb.PrewriteRequest).MaxCommitTs = 1
}
resp, err := s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
}
readKey := func(idx int) {
t2 := s.begin(c)
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
val, err := t2.Get(ctx, keys[idx])
c.Assert(err, IsNil)
c.Assert(val, DeepEquals, values[idx])
}
// Case 1: Fallback primary, read primary
committer := initTest()
prewriteKey(committer, 0, true)
prewriteKey(committer, 1, false)
readKey(0)
readKey(1)
// Case 2: Fallback primary, read secondary
committer = initTest()
prewriteKey(committer, 0, true)
prewriteKey(committer, 1, false)
readKey(1)
readKey(0)
// Case 3: Fallback secondary, read primary
committer = initTest()
prewriteKey(committer, 0, false)
prewriteKey(committer, 1, true)
readKey(0)
readKey(1)
// Case 4: Fallback secondary, read secondary
committer = initTest()
prewriteKey(committer, 0, false)
prewriteKey(committer, 1, true)
readKey(1)
readKey(0)
// Case 5: Fallback both, read primary
committer = initTest()
prewriteKey(committer, 0, true)
prewriteKey(committer, 1, true)
readKey(0)
readKey(1)
// Case 6: Fallback both, read secondary
committer = initTest()
prewriteKey(committer, 0, true)
prewriteKey(committer, 1, true)
readKey(1)
readKey(0)
}
type mockResolveClient struct {
inner tikv.Client
onResolveLock func(*kvrpcpb.ResolveLockRequest) (*tikvrpc.Response, error)
onCheckSecondaries func(*kvrpcpb.CheckSecondaryLocksRequest) (*tikvrpc.Response, error)
}
func (m *mockResolveClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
// Intercept check secondary locks and resolve lock messages if the callback is non-nil.
// If the callback returns (nil, nil), forward to the inner client.
if cr, ok := req.Req.(*kvrpcpb.CheckSecondaryLocksRequest); ok && m.onCheckSecondaries != nil {
result, err := m.onCheckSecondaries(cr)
if result != nil || err != nil {
return result, err
}
} else if rr, ok := req.Req.(*kvrpcpb.ResolveLockRequest); ok && m.onResolveLock != nil {
result, err := m.onResolveLock(rr)
if result != nil || err != nil {
return result, err
}
}
return m.inner.SendRequest(ctx, addr, req, timeout)
}
func (m *mockResolveClient) Close() error {
return m.inner.Close()
}
| store/tikv/tests/async_commit_test.go | 1 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.9958561062812805,
0.07376092672348022,
0.00016128875722642988,
0.0017952669877558947,
0.24831753969192505
] |
{
"id": 6,
"code_window": [
"\tc.Assert(err, IsNil)\n",
"\ttxn.SetEnableAsyncCommit(false)\n",
"\ttxn.SetEnable1PC(false)\n",
"\ttxn.SetOption(kv.GuaranteeLinearizability, false)\n",
"\n",
"\t// Prewrite the lock without committing it\n",
"\tc.Assert(failpoint.Enable(\"github.com/pingcap/tidb/store/tikv/beforeCommit\", `pause`), IsNil)\n",
"\tch := make(chan struct{})\n",
"\tcommitter, err := txn.NewCommitter(1)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\ttxn.SetCausalConsistency(true)\n"
],
"file_path": "store/tikv/tests/snapshot_fail_test.go",
"type": "replace",
"edit_start_line_idx": 215
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package autoid
import (
mysql "github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/util/dbterror"
)
// Error instances.
var (
errInvalidTableID = dbterror.ClassAutoid.NewStd(mysql.ErrInvalidTableID)
errInvalidIncrementAndOffset = dbterror.ClassAutoid.NewStd(mysql.ErrInvalidIncrementAndOffset)
errNotImplemented = dbterror.ClassAutoid.NewStd(mysql.ErrNotImplemented)
ErrAutoincReadFailed = dbterror.ClassAutoid.NewStd(mysql.ErrAutoincReadFailed)
ErrWrongAutoKey = dbterror.ClassAutoid.NewStd(mysql.ErrWrongAutoKey)
ErrInvalidAllocatorType = dbterror.ClassAutoid.NewStd(mysql.ErrUnknownAllocatorType)
ErrAutoRandReadFailed = dbterror.ClassAutoid.NewStd(mysql.ErrAutoRandReadFailed)
)
const (
// AutoRandomPKisNotHandleErrMsg indicates the auto_random column attribute is defined on a non-primary key column, or the primary key is nonclustered.
AutoRandomPKisNotHandleErrMsg = "column %s is not the integer primary key, or the primary key is nonclustered"
// AutoRandomIncompatibleWithAutoIncErrMsg is reported when auto_random and auto_increment are specified on the same column.
AutoRandomIncompatibleWithAutoIncErrMsg = "auto_random is incompatible with auto_increment"
// AutoRandomIncompatibleWithDefaultValueErrMsg is reported when auto_random and default are specified on the same column.
AutoRandomIncompatibleWithDefaultValueErrMsg = "auto_random is incompatible with default"
// AutoRandomOverflowErrMsg is reported when auto_random is greater than max length of a MySQL data type.
AutoRandomOverflowErrMsg = "max allowed auto_random shard bits is %d, but got %d on column `%s`"
// AutoRandomModifyColTypeErrMsg is reported when a user is trying to modify the type of a column specified with auto_random.
AutoRandomModifyColTypeErrMsg = "modifying the auto_random column type is not supported"
// AutoRandomAlterErrMsg is reported when a user is trying to add/drop/modify the value of auto_random attribute.
AutoRandomAlterErrMsg = "adding/dropping/modifying auto_random is not supported"
// AutoRandomDecreaseBitErrMsg is reported when the auto_random shard bits are decreased.
AutoRandomDecreaseBitErrMsg = "decreasing auto_random shard bits is not supported"
// AutoRandomNonPositive is reported when a user specifies a non-positive value for auto_random.
AutoRandomNonPositive = "the value of auto_random should be positive"
// AutoRandomAvailableAllocTimesNote is reported when a table containing auto_random is created.
AutoRandomAvailableAllocTimesNote = "Available implicit allocation times: %d"
// AutoRandomExplicitInsertDisabledErrMsg is reported when auto_random column value is explicitly specified, but the session var 'allow_auto_random_explicit_insert' is false.
AutoRandomExplicitInsertDisabledErrMsg = "Explicit insertion on auto_random column is disabled. Try to set @@allow_auto_random_explicit_insert = true."
// AutoRandomOnNonBigIntColumn is reported when auto_random is defined on a non-bigint column
AutoRandomOnNonBigIntColumn = "auto_random option must be defined on `bigint` column, but not on `%s` column"
// AutoRandomRebaseNotApplicable is reported when alter auto_random base on a non auto_random table.
AutoRandomRebaseNotApplicable = "alter auto_random_base of a non auto_random table"
// AutoRandomRebaseOverflow is reported when alter auto_random_base to a value that overflows the incremental bits.
AutoRandomRebaseOverflow = "alter auto_random_base to %d overflows the incremental bits, max allowed base is %d"
// AutoRandomAlterAddColumn is reported when adding an auto_random column.
AutoRandomAlterAddColumn = "unsupported add column '%s' constraint AUTO_RANDOM when altering '%s.%s'"
// AutoRandomAlterChangeFromAutoInc is reported when the column is changing from a non-auto_increment or a non-primary key.
AutoRandomAlterChangeFromAutoInc = "auto_random can only be converted from auto_increment clustered primary key"
// AutoRandomAllocatorNotFound is reported when auto_random ID allocator not found during changing from auto_inc to auto_random.
AutoRandomAllocatorNotFound = "auto_random ID allocator not found in table '%s.%s'"
)
| meta/autoid/errors.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.00017587716865818948,
0.00016959030472207814,
0.0001641560229472816,
0.00016922717622946948,
0.0000035420273434283445
] |
{
"id": 6,
"code_window": [
"\tc.Assert(err, IsNil)\n",
"\ttxn.SetEnableAsyncCommit(false)\n",
"\ttxn.SetEnable1PC(false)\n",
"\ttxn.SetOption(kv.GuaranteeLinearizability, false)\n",
"\n",
"\t// Prewrite the lock without committing it\n",
"\tc.Assert(failpoint.Enable(\"github.com/pingcap/tidb/store/tikv/beforeCommit\", `pause`), IsNil)\n",
"\tch := make(chan struct{})\n",
"\tcommitter, err := txn.NewCommitter(1)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\ttxn.SetCausalConsistency(true)\n"
],
"file_path": "store/tikv/tests/snapshot_fail_test.go",
"type": "replace",
"edit_start_line_idx": 215
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"context"
"sort"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/types"
)
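// inspectionRuleRetriever retrieves the rows of the inspection rules table,
// covering both the inspection and the summary rule types.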
type inspectionRuleRetriever struct {
dummyCloser
retrieved bool
extractor *plannercore.InspectionRuleTableExtractor
}
const (
inspectionRuleTypeInspection string = "inspection"
inspectionRuleTypeSummary string = "summary"
)
func (e *inspectionRuleRetriever) retrieve(ctx context.Context, sctx sessionctx.Context) ([][]types.Datum, error) {
if e.retrieved || e.extractor.SkipRequest {
return nil, nil
}
e.retrieved = true
tps := inspectionFilter{set: e.extractor.Types}
var finalRows [][]types.Datum
// Select inspection rules
if tps.enable(inspectionRuleTypeInspection) {
for _, r := range inspectionRules {
finalRows = append(finalRows, types.MakeDatums(
r.name(),
inspectionRuleTypeInspection,
// TODO: add rule explanation
"",
))
}
}
// Select summary rules
if tps.enable(inspectionRuleTypeSummary) {
// Get the ordered keys of the inspectionSummaryRules map
summaryRules := make([]string, 0)
for rule := range inspectionSummaryRules {
summaryRules = append(summaryRules, rule)
}
sort.Strings(summaryRules)
for _, rule := range summaryRules {
finalRows = append(finalRows, types.MakeDatums(
rule,
inspectionRuleTypeSummary,
// TODO: add rule explanation
"",
))
}
}
return finalRows, nil
}
| executor/inspection_common.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.000332129915477708,
0.00018953060498461127,
0.00015999519382603467,
0.00016903915093280375,
0.00005407467688200995
] |
{
"id": 6,
"code_window": [
"\tc.Assert(err, IsNil)\n",
"\ttxn.SetEnableAsyncCommit(false)\n",
"\ttxn.SetEnable1PC(false)\n",
"\ttxn.SetOption(kv.GuaranteeLinearizability, false)\n",
"\n",
"\t// Prewrite the lock without committing it\n",
"\tc.Assert(failpoint.Enable(\"github.com/pingcap/tidb/store/tikv/beforeCommit\", `pause`), IsNil)\n",
"\tch := make(chan struct{})\n",
"\tcommitter, err := txn.NewCommitter(1)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\ttxn.SetCausalConsistency(true)\n"
],
"file_path": "store/tikv/tests/snapshot_fail_test.go",
"type": "replace",
"edit_start_line_idx": 215
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"context"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/tidb/store/tikv/util/codec"
pd "github.com/tikv/pd/client"
)
// CodecPDClient wraps a PD Client to decode the encoded keys in region meta.
type CodecPDClient struct {
pd.Client
}
// GetRegion encodes the key before sending the request to pd-server and decodes the
// returned StartKey and EndKey.
func (c *CodecPDClient) GetRegion(ctx context.Context, key []byte) (*pd.Region, error) {
encodedKey := codec.EncodeBytes([]byte(nil), key)
region, err := c.Client.GetRegion(ctx, encodedKey)
return processRegionResult(region, err)
}
// GetPrevRegion encodes the key before sending the request to pd-server and decodes the
// returned StartKey and EndKey.
func (c *CodecPDClient) GetPrevRegion(ctx context.Context, key []byte) (*pd.Region, error) {
encodedKey := codec.EncodeBytes([]byte(nil), key)
region, err := c.Client.GetPrevRegion(ctx, encodedKey)
return processRegionResult(region, err)
}
// GetRegionByID gets a region by its ID from pd-server and decodes the
// returned StartKey and EndKey.
func (c *CodecPDClient) GetRegionByID(ctx context.Context, regionID uint64) (*pd.Region, error) {
region, err := c.Client.GetRegionByID(ctx, regionID)
return processRegionResult(region, err)
}
// ScanRegions encodes the keys before sending the request to pd-server and decodes the
// returned StartKey and EndKey of each region.
func (c *CodecPDClient) ScanRegions(ctx context.Context, startKey []byte, endKey []byte, limit int) ([]*pd.Region, error) {
startKey = codec.EncodeBytes([]byte(nil), startKey)
if len(endKey) > 0 {
endKey = codec.EncodeBytes([]byte(nil), endKey)
}
regions, err := c.Client.ScanRegions(ctx, startKey, endKey, limit)
if err != nil {
return nil, errors.Trace(err)
}
for _, region := range regions {
if region != nil {
err = decodeRegionMetaKeyInPlace(region.Meta)
if err != nil {
return nil, errors.Trace(err)
}
}
}
return regions, nil
}
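// processRegionResult decodes the region meta keys in place, normalizing nil
// regions and wrapping errors with a stack trace.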
func processRegionResult(region *pd.Region, err error) (*pd.Region, error) {
if err != nil {
return nil, errors.Trace(err)
}
if region == nil || region.Meta == nil {
return nil, nil
}
err = decodeRegionMetaKeyInPlace(region.Meta)
if err != nil {
return nil, errors.Trace(err)
}
return region, nil
}
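// decodeRegionMetaKeyInPlace decodes the region's StartKey and EndKey in place.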
func decodeRegionMetaKeyInPlace(r *metapb.Region) error {
if len(r.StartKey) != 0 {
_, decoded, err := codec.DecodeBytes(r.StartKey, nil)
if err != nil {
return errors.Trace(err)
}
r.StartKey = decoded
}
if len(r.EndKey) != 0 {
_, decoded, err := codec.DecodeBytes(r.EndKey, nil)
if err != nil {
return errors.Trace(err)
}
r.EndKey = decoded
}
return nil
}
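// decodeRegionMetaKeyWithShallowCopy returns a shallow copy of the region with
// its StartKey and EndKey decoded, leaving the original region untouched.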
func decodeRegionMetaKeyWithShallowCopy(r *metapb.Region) (*metapb.Region, error) {
nr := *r
if len(r.StartKey) != 0 {
_, decoded, err := codec.DecodeBytes(r.StartKey, nil)
if err != nil {
return nil, errors.Trace(err)
}
nr.StartKey = decoded
}
if len(r.EndKey) != 0 {
_, decoded, err := codec.DecodeBytes(r.EndKey, nil)
if err != nil {
return nil, errors.Trace(err)
}
nr.EndKey = decoded
}
return &nr, nil
}
| store/tikv/pd_codec.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.0023175848182290792,
0.00033296996844001114,
0.00016126017726492137,
0.00016843578487168998,
0.0005729246768169105
] |
{
"id": 7,
"code_window": [
"\tpriority Priority\n",
"\tisPessimistic bool\n",
"\tenableAsyncCommit bool\n",
"\tenable1PC bool\n",
"\tscope string\n",
"\tkvFilter KVFilter\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcausalConsistency bool\n"
],
"file_path": "store/tikv/txn.go",
"type": "add",
"edit_start_line_idx": 86
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package txn
import (
"context"
"sync/atomic"
"github.com/opentracing/opentracing-go"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/parser/model"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx/binloginfo"
derr "github.com/pingcap/tidb/store/driver/error"
"github.com/pingcap/tidb/store/tikv"
tikverr "github.com/pingcap/tidb/store/tikv/error"
tikvstore "github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/tablecodec"
)
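// tikvTxn wraps tikv.KVTxn to implement TiDB's kv.Transaction interface.
// idxNameCache caches table info so that key-exists errors can be decoded
// into readable messages.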
type tikvTxn struct {
*tikv.KVTxn
idxNameCache map[int64]*model.TableInfo
}
// NewTiKVTxn returns a new Transaction.
func NewTiKVTxn(txn *tikv.KVTxn) kv.Transaction {
txn.SetKVFilter(TiDBKVFilter{})
entryLimit := atomic.LoadUint64(&kv.TxnEntrySizeLimit)
totalLimit := atomic.LoadUint64(&kv.TxnTotalSizeLimit)
txn.GetUnionStore().SetEntrySizeLimit(entryLimit, totalLimit)
return &tikvTxn{txn, make(map[int64]*model.TableInfo)}
}
func (txn *tikvTxn) GetTableInfo(id int64) *model.TableInfo {
return txn.idxNameCache[id]
}
func (txn *tikvTxn) CacheTableInfo(id int64, info *model.TableInfo) {
txn.idxNameCache[id] = info
}
// lockWaitTime is in ms; kv.LockAlwaysWait(0) means always wait for the lock, and kv.LockNowait(-1) means never wait.
func (txn *tikvTxn) LockKeys(ctx context.Context, lockCtx *kv.LockCtx, keysInput ...kv.Key) error {
keys := toTiKVKeys(keysInput)
err := txn.KVTxn.LockKeys(ctx, lockCtx, keys...)
return txn.extractKeyErr(err)
}
func (txn *tikvTxn) Commit(ctx context.Context) error {
err := txn.KVTxn.Commit(ctx)
return txn.extractKeyErr(err)
}
// GetSnapshot returns the Snapshot binding to this transaction.
func (txn *tikvTxn) GetSnapshot() kv.Snapshot {
return &tikvSnapshot{txn.KVTxn.GetSnapshot()}
}
// Iter creates an Iterator positioned on the first entry whose key is >= k.
// If no such entry is found, it returns an invalid Iterator with no error.
// It yields only keys < upperBound. If upperBound is nil, the upper bound is unbounded.
// The Iterator must be Closed after use.
func (txn *tikvTxn) Iter(k kv.Key, upperBound kv.Key) (kv.Iterator, error) {
it, err := txn.KVTxn.Iter(k, upperBound)
return newKVIterator(it), derr.ToTiDBErr(err)
}
// IterReverse creates a reversed Iterator positioned on the first entry which key is less than k.
// The returned iterator will iterate from greater key to smaller key.
// If k is nil, the returned iterator will be positioned at the last key.
// TODO: Add lower bound limit
func (txn *tikvTxn) IterReverse(k kv.Key) (kv.Iterator, error) {
it, err := txn.KVTxn.IterReverse(k)
return newKVIterator(it), derr.ToTiDBErr(err)
}
// BatchGet gets key-value pairs from the memory buffers of the statement and transaction, and then from the kv storage.
// Do not use len(value) == 0 or value == nil to represent non-existence.
// If a key doesn't exist, there shouldn't be any corresponding entry in the result map.
func (txn *tikvTxn) BatchGet(ctx context.Context, keys []kv.Key) (map[string][]byte, error) {
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span1 := span.Tracer().StartSpan("tikvTxn.BatchGet", opentracing.ChildOf(span.Context()))
defer span1.Finish()
ctx = opentracing.ContextWithSpan(ctx, span1)
}
return NewBufferBatchGetter(txn.GetMemBuffer(), nil, txn.GetSnapshot()).BatchGet(ctx, keys)
}
func (txn *tikvTxn) Delete(k kv.Key) error {
err := txn.KVTxn.Delete(k)
return derr.ToTiDBErr(err)
}
func (txn *tikvTxn) Get(ctx context.Context, k kv.Key) ([]byte, error) {
data, err := txn.KVTxn.Get(ctx, k)
return data, derr.ToTiDBErr(err)
}
func (txn *tikvTxn) Set(k kv.Key, v []byte) error {
err := txn.KVTxn.Set(k, v)
return derr.ToTiDBErr(err)
}
func (txn *tikvTxn) GetMemBuffer() kv.MemBuffer {
return newMemBuffer(txn.KVTxn.GetMemBuffer())
}
func (txn *tikvTxn) GetUnionStore() kv.UnionStore {
return &tikvUnionStore{txn.KVTxn.GetUnionStore()}
}
func (txn *tikvTxn) SetOption(opt int, val interface{}) {
switch opt {
case tikvstore.BinlogInfo:
txn.SetBinlogExecutor(&binlogExecutor{
txn: txn.KVTxn,
binInfo: val.(*binloginfo.BinlogInfo), // val cannot be other type.
})
case tikvstore.SchemaChecker:
txn.SetSchemaLeaseChecker(val.(tikv.SchemaLeaseChecker))
case tikvstore.IsolationLevel:
level := getTiKVIsolationLevel(val.(kv.IsoLevel))
txn.KVTxn.GetSnapshot().SetIsolationLevel(level)
case tikvstore.Priority:
txn.KVTxn.SetPriority(getTiKVPriority(val.(int)))
case tikvstore.NotFillCache:
txn.KVTxn.GetSnapshot().SetNotFillCache(val.(bool))
case tikvstore.SyncLog:
txn.EnableForceSyncLog()
case tikvstore.Pessimistic:
txn.SetPessimistic(val.(bool))
case tikvstore.SnapshotTS:
txn.KVTxn.GetSnapshot().SetSnapshotTS(val.(uint64))
case tikvstore.ReplicaRead:
txn.KVTxn.GetSnapshot().SetReplicaRead(val.(tikvstore.ReplicaReadType))
case tikvstore.TaskID:
txn.KVTxn.GetSnapshot().SetTaskID(val.(uint64))
case tikvstore.InfoSchema:
txn.SetSchemaVer(val.(tikv.SchemaVer))
case tikvstore.SchemaAmender:
txn.SetSchemaAmender(val.(tikv.SchemaAmender))
case tikvstore.SampleStep:
txn.KVTxn.GetSnapshot().SetSampleStep(val.(uint32))
case tikvstore.CommitHook:
txn.SetCommitCallback(val.(func(string, error)))
case tikvstore.EnableAsyncCommit:
txn.SetEnableAsyncCommit(val.(bool))
case tikvstore.Enable1PC:
txn.SetEnable1PC(val.(bool))
case tikvstore.TxnScope:
txn.SetScope(val.(string))
case tikvstore.IsStalenessReadOnly:
txn.KVTxn.GetSnapshot().SetIsStatenessReadOnly(val.(bool))
case tikvstore.MatchStoreLabels:
txn.KVTxn.GetSnapshot().SetMatchStoreLabels(val.([]*metapb.StoreLabel))
default:
txn.KVTxn.SetOption(opt, val)
}
}
func (txn *tikvTxn) GetOption(opt int) interface{} {
switch opt {
case tikvstore.TxnScope:
return txn.KVTxn.GetScope()
default:
return txn.KVTxn.GetOption(opt)
}
}
// SetVars sets variables to the transaction.
func (txn *tikvTxn) SetVars(vars interface{}) {
if vs, ok := vars.(*tikv.Variables); ok {
txn.KVTxn.SetVars(vs)
}
}
func (txn *tikvTxn) GetVars() interface{} {
return txn.KVTxn.GetVars()
}
func (txn *tikvTxn) extractKeyErr(err error) error {
if e, ok := errors.Cause(err).(*tikverr.ErrKeyExist); ok {
return txn.extractKeyExistsErr(e.GetKey())
}
return extractKeyErr(err)
}
func (txn *tikvTxn) extractKeyExistsErr(key kv.Key) error {
tableID, indexID, isRecord, err := tablecodec.DecodeKeyHead(key)
if err != nil {
return genKeyExistsError("UNKNOWN", key.String(), err)
}
tblInfo := txn.GetTableInfo(tableID)
if tblInfo == nil {
return genKeyExistsError("UNKNOWN", key.String(), errors.New("cannot find table info"))
}
value, err := txn.KVTxn.GetUnionStore().GetMemBuffer().SelectValueHistory(key, func(value []byte) bool { return len(value) != 0 })
if err != nil {
return genKeyExistsError("UNKNOWN", key.String(), err)
}
if isRecord {
return extractKeyExistsErrFromHandle(key, value, tblInfo)
}
return extractKeyExistsErrFromIndex(key, value, tblInfo, indexID)
}
// TiDBKVFilter is the filter specific to TiDB to filter out KV pairs that needn't be committed.
type TiDBKVFilter struct{}
// IsUnnecessaryKeyValue defines which kinds of KV pairs from TiDB needn't be committed.
func (f TiDBKVFilter) IsUnnecessaryKeyValue(key, value []byte, flags tikvstore.KeyFlags) bool {
return tablecodec.IsUntouchedIndexKValue(key, value)
}
| store/driver/txn/txn_driver.go | 1 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.02992088906466961,
0.0018972638063132763,
0.00016502170183230191,
0.0002483304706402123,
0.005938828457146883
] |
{
"id": 7,
"code_window": [
"\tpriority Priority\n",
"\tisPessimistic bool\n",
"\tenableAsyncCommit bool\n",
"\tenable1PC bool\n",
"\tscope string\n",
"\tkvFilter KVFilter\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcausalConsistency bool\n"
],
"file_path": "store/tikv/txn.go",
"type": "add",
"edit_start_line_idx": 86
} | lease = "0"
mem-quota-query = 34359738368
nested-loop-join-cache-capacity = 20971520
host = "127.0.0.1"
[status]
status-host = "127.0.0.1"
[performance]
stats-lease = "0"
[experimental]
allow-expression-index = true
| cmd/explaintest/config.toml | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.00017656288400758058,
0.00017283446504734457,
0.00016910604608710855,
0.00017283446504734457,
0.000003728418960236013
] |
{
"id": 7,
"code_window": [
"\tpriority Priority\n",
"\tisPessimistic bool\n",
"\tenableAsyncCommit bool\n",
"\tenable1PC bool\n",
"\tscope string\n",
"\tkvFilter KVFilter\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcausalConsistency bool\n"
],
"file_path": "store/tikv/txn.go",
"type": "add",
"edit_start_line_idx": 86
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package profile
import (
"fmt"
"math"
"sort"
"github.com/google/pprof/profile"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/texttree"
)
type flamegraphNode struct {
cumValue int64
children map[uint64]*flamegraphNode
name string
}
func newFlamegraphNode() *flamegraphNode {
return &flamegraphNode{
cumValue: 0,
children: make(map[uint64]*flamegraphNode),
name: "",
}
}
// add the value from a sample into the flamegraph DAG.
// This method should only be called on the root node.
func (n *flamegraphNode) add(sample *profile.Sample) {
// FIXME: we take the last sample value by default, but some profiles have multiple samples.
// - allocs: alloc_objects, alloc_space, inuse_objects, inuse_space
// - block: contentions, delay
// - cpu: samples, cpu
// - heap: alloc_objects, alloc_space, inuse_objects, inuse_space
// - mutex: contentions, delay
value := sample.Value[len(sample.Value)-1]
if value == 0 {
return
}
locs := sample.Location
for {
n.cumValue += value
if len(locs) == 0 {
return
}
		// The previous implementation in TiDB identified nodes using location ID,
		// but `go tool pprof` identifies nodes using function ID. Should we follow?
loc := locs[len(locs)-1]
locID := loc.ID
child, ok := n.children[locID]
if !ok {
child = newFlamegraphNode()
n.children[locID] = child
if len(loc.Line) > 0 && loc.Line[0].Function != nil {
child.name = locs[len(locs)-1].Line[0].Function.Name
}
}
locs = locs[:len(locs)-1]
n = child
}
}
// collectFuncUsage collects the cumulative value for the given function name
func (n *flamegraphNode) collectFuncUsage(name string) int64 {
if n.name == name {
return n.cumValue
}
if len(n.children) == 0 {
return 0
}
var usage int64 = 0
for _, child := range n.children {
usage = child.collectFuncUsage(name) + usage
}
return usage
}
type flamegraphNodeWithLocation struct {
*flamegraphNode
locID uint64
}
// sortedChildren returns a list of children of this node, sorted by each
// child's cumulative value.
func (n *flamegraphNode) sortedChildren() []flamegraphNodeWithLocation {
children := make([]flamegraphNodeWithLocation, 0, len(n.children))
for locID, child := range n.children {
children = append(children, flamegraphNodeWithLocation{
flamegraphNode: child,
locID: locID,
})
}
sort.Slice(children, func(i, j int) bool {
a, b := children[i], children[j]
if a.cumValue != b.cumValue {
return a.cumValue > b.cumValue
}
return a.locID < b.locID
})
return children
}
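// flamegraphCollector flattens the flamegraph tree into datum rows, recording each node's share of
// the total and of its parent.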
type flamegraphCollector struct {
rows [][]types.Datum
locations map[uint64]*profile.Location
total int64
rootChild int
}
func newFlamegraphCollector(p *profile.Profile) *flamegraphCollector {
locations := make(map[uint64]*profile.Location, len(p.Location))
for _, loc := range p.Location {
locations[loc.ID] = loc
}
return &flamegraphCollector{locations: locations}
}
func (c *flamegraphCollector) locationName(locID uint64) (funcName, fileLine string) {
loc := c.locations[locID]
if len(loc.Line) == 0 {
return "<unknown>", "<unknown>"
}
line := loc.Line[0]
funcName = line.Function.Name
fileLine = fmt.Sprintf("%s:%d", line.Function.Filename, line.Line)
return
}
func (c *flamegraphCollector) collectChild(
node flamegraphNodeWithLocation,
depth int64,
indent string,
parentCumValue int64,
isLastChild bool,
) {
funcName, fileLine := c.locationName(node.locID)
c.rows = append(c.rows, types.MakeDatums(
texttree.PrettyIdentifier(funcName, indent, isLastChild),
percentage(node.cumValue, c.total),
percentage(node.cumValue, parentCumValue),
c.rootChild,
depth,
fileLine,
))
if len(node.children) == 0 {
return
}
indent4Child := texttree.Indent4Child(indent, isLastChild)
children := node.sortedChildren()
for i, child := range children {
c.collectChild(child, depth+1, indent4Child, node.cumValue, i == len(children)-1)
}
}
func (c *flamegraphCollector) collect(root *flamegraphNode) {
c.rows = append(c.rows, types.MakeDatums("root", "100%", "100%", 0, 0, "root"))
if len(root.children) == 0 {
return
}
c.total = root.cumValue
indent4Child := texttree.Indent4Child("", false)
children := root.sortedChildren()
for i, child := range children {
c.rootChild = i + 1
c.collectChild(child, 1, indent4Child, root.cumValue, i == len(children)-1)
}
}
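// percentage renders value/total as a percent string. Ratios within ±0.05 of 100 print as "100%";
// for example, percentage(1, 3) == "33.33%" and percentage(1, 1000) == "0.1%".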
func percentage(value, total int64) string {
var ratio float64
if total != 0 {
ratio = math.Abs(float64(value)/float64(total)) * 100
}
switch {
case ratio >= 99.95 && ratio <= 100.05:
return "100%"
case ratio >= 1.0:
return fmt.Sprintf("%.2f%%", ratio)
default:
return fmt.Sprintf("%.2g%%", ratio)
}
}
| util/profile/flamegraph.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.00025226586149074137,
0.000178916088771075,
0.00016765449254307896,
0.00017433593166060746,
0.000017779620975488797
] |
{
"id": 7,
"code_window": [
"\tpriority Priority\n",
"\tisPessimistic bool\n",
"\tenableAsyncCommit bool\n",
"\tenable1PC bool\n",
"\tscope string\n",
"\tkvFilter KVFilter\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcausalConsistency bool\n"
],
"file_path": "store/tikv/txn.go",
"type": "add",
"edit_start_line_idx": 86
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"io"
"testing"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/parser/charset"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/util/testleak"
)
func TestT(t *testing.T) {
TestingT(t)
}
var _ = Suite(&testTypeEtcSuite{})
type testTypeEtcSuite struct {
}
func testIsTypeBlob(c *C, tp byte, expect bool) {
v := IsTypeBlob(tp)
c.Assert(v, Equals, expect)
}
func testIsTypeChar(c *C, tp byte, expect bool) {
v := IsTypeChar(tp)
c.Assert(v, Equals, expect)
}
func (s *testTypeEtcSuite) TestIsType(c *C) {
defer testleak.AfterTest(c)()
testIsTypeBlob(c, mysql.TypeTinyBlob, true)
testIsTypeBlob(c, mysql.TypeMediumBlob, true)
testIsTypeBlob(c, mysql.TypeBlob, true)
testIsTypeBlob(c, mysql.TypeLongBlob, true)
testIsTypeBlob(c, mysql.TypeInt24, false)
testIsTypeChar(c, mysql.TypeString, true)
testIsTypeChar(c, mysql.TypeVarchar, true)
testIsTypeChar(c, mysql.TypeLong, false)
}
func testTypeStr(c *C, tp byte, expect string) {
v := TypeStr(tp)
c.Assert(v, Equals, expect)
}
func testTypeToStr(c *C, tp byte, charset string, expect string) {
v := TypeToStr(tp, charset)
c.Assert(v, Equals, expect)
}
func (s *testTypeEtcSuite) TestTypeToStr(c *C) {
defer testleak.AfterTest(c)()
testTypeStr(c, mysql.TypeYear, "year")
testTypeStr(c, 0xdd, "")
testTypeToStr(c, mysql.TypeBlob, "utf8", "text")
testTypeToStr(c, mysql.TypeLongBlob, "utf8", "longtext")
testTypeToStr(c, mysql.TypeTinyBlob, "utf8", "tinytext")
testTypeToStr(c, mysql.TypeMediumBlob, "utf8", "mediumtext")
testTypeToStr(c, mysql.TypeVarchar, "binary", "varbinary")
testTypeToStr(c, mysql.TypeString, "binary", "binary")
testTypeToStr(c, mysql.TypeTiny, "binary", "tinyint")
testTypeToStr(c, mysql.TypeBlob, "binary", "blob")
testTypeToStr(c, mysql.TypeLongBlob, "binary", "longblob")
testTypeToStr(c, mysql.TypeTinyBlob, "binary", "tinyblob")
testTypeToStr(c, mysql.TypeMediumBlob, "binary", "mediumblob")
testTypeToStr(c, mysql.TypeVarchar, "utf8", "varchar")
testTypeToStr(c, mysql.TypeString, "utf8", "char")
testTypeToStr(c, mysql.TypeShort, "binary", "smallint")
testTypeToStr(c, mysql.TypeInt24, "binary", "mediumint")
testTypeToStr(c, mysql.TypeLong, "binary", "int")
testTypeToStr(c, mysql.TypeLonglong, "binary", "bigint")
testTypeToStr(c, mysql.TypeFloat, "binary", "float")
testTypeToStr(c, mysql.TypeDouble, "binary", "double")
testTypeToStr(c, mysql.TypeYear, "binary", "year")
testTypeToStr(c, mysql.TypeDuration, "binary", "time")
testTypeToStr(c, mysql.TypeDatetime, "binary", "datetime")
testTypeToStr(c, mysql.TypeDate, "binary", "date")
testTypeToStr(c, mysql.TypeTimestamp, "binary", "timestamp")
testTypeToStr(c, mysql.TypeNewDecimal, "binary", "decimal")
testTypeToStr(c, mysql.TypeUnspecified, "binary", "unspecified")
testTypeToStr(c, 0xdd, "binary", "")
testTypeToStr(c, mysql.TypeBit, "binary", "bit")
testTypeToStr(c, mysql.TypeEnum, "binary", "enum")
testTypeToStr(c, mysql.TypeSet, "binary", "set")
}
func (s *testTypeEtcSuite) TestEOFAsNil(c *C) {
defer testleak.AfterTest(c)()
err := EOFAsNil(io.EOF)
c.Assert(err, IsNil)
err = EOFAsNil(errors.New("test"))
c.Assert(err, ErrorMatches, "test")
}
func (s *testTypeEtcSuite) TestMaxFloat(c *C) {
defer testleak.AfterTest(c)()
tbl := []struct {
Flen int
Decimal int
Expect float64
}{
{3, 2, 9.99},
{5, 2, 999.99},
{10, 1, 999999999.9},
{5, 5, 0.99999},
}
for _, t := range tbl {
f := GetMaxFloat(t.Flen, t.Decimal)
c.Assert(f, Equals, t.Expect)
}
}
func (s *testTypeEtcSuite) TestRoundFloat(c *C) {
defer testleak.AfterTest(c)()
tbl := []struct {
Input float64
Expect float64
}{
{2.5, 2},
{1.5, 2},
{0.5, 0},
{0.49999999999999997, 0},
{0, 0},
{-0.49999999999999997, 0},
{-0.5, 0},
{-2.5, -2},
{-1.5, -2},
}
for _, t := range tbl {
f := RoundFloat(t.Input)
c.Assert(f, Equals, t.Expect)
}
}
func (s *testTypeEtcSuite) TestRound(c *C) {
defer testleak.AfterTest(c)()
tbl := []struct {
Input float64
Dec int
Expect float64
}{
{-1.23, 0, -1},
{-1.58, 0, -2},
{1.58, 0, 2},
{1.298, 1, 1.3},
{1.298, 0, 1},
{23.298, -1, 20},
}
for _, t := range tbl {
f := Round(t.Input, t.Dec)
c.Assert(f, Equals, t.Expect)
}
}
func (s *testTypeEtcSuite) TestTruncate(c *C) {
defer testleak.AfterTest(c)()
tbl := []struct {
Input float64
Flen int
Decimal int
Expect float64
Err error
}{
{100.114, 10, 2, 100.11, nil},
{100.115, 10, 2, 100.12, nil},
{100.1156, 10, 3, 100.116, nil},
{100.1156, 3, 1, 99.9, ErrOverflow},
{1.36, 10, 2, 1.36, nil},
}
for _, t := range tbl {
f, err := TruncateFloat(t.Input, t.Flen, t.Decimal)
c.Assert(f, Equals, t.Expect)
c.Assert(terror.ErrorEqual(err, t.Err), IsTrue, Commentf("err %v", err))
}
}
func (s *testTypeEtcSuite) TestIsTypeTemporal(c *C) {
defer testleak.AfterTest(c)()
res := IsTypeTemporal(mysql.TypeDuration)
c.Assert(res, Equals, true)
res = IsTypeTemporal(mysql.TypeDatetime)
c.Assert(res, Equals, true)
res = IsTypeTemporal(mysql.TypeTimestamp)
c.Assert(res, Equals, true)
res = IsTypeTemporal(mysql.TypeDate)
c.Assert(res, Equals, true)
res = IsTypeTemporal(mysql.TypeNewDate)
c.Assert(res, Equals, true)
res = IsTypeTemporal('t')
c.Assert(res, Equals, false)
}
func (s *testTypeEtcSuite) TestIsBinaryStr(c *C) {
defer testleak.AfterTest(c)()
in := FieldType{
Tp: mysql.TypeBit,
Flag: mysql.UnsignedFlag,
Flen: 1,
Decimal: 0,
Charset: charset.CharsetUTF8,
Collate: charset.CollationUTF8,
}
in.Collate = charset.CollationUTF8
res := IsBinaryStr(&in)
c.Assert(res, Equals, false)
in.Collate = charset.CollationBin
res = IsBinaryStr(&in)
c.Assert(res, Equals, false)
in.Tp = mysql.TypeBlob
res = IsBinaryStr(&in)
c.Assert(res, Equals, true)
}
func (s *testTypeEtcSuite) TestIsNonBinaryStr(c *C) {
defer testleak.AfterTest(c)()
in := FieldType{
Tp: mysql.TypeBit,
Flag: mysql.UnsignedFlag,
Flen: 1,
Decimal: 0,
Charset: charset.CharsetUTF8,
Collate: charset.CollationUTF8,
}
in.Collate = charset.CollationBin
res := IsBinaryStr(&in)
c.Assert(res, Equals, false)
in.Collate = charset.CollationUTF8
res = IsBinaryStr(&in)
c.Assert(res, Equals, false)
in.Tp = mysql.TypeBlob
res = IsBinaryStr(&in)
c.Assert(res, Equals, false)
}
func (s *testTypeEtcSuite) TestIsTemporalWithDate(c *C) {
defer testleak.AfterTest(c)()
res := IsTemporalWithDate(mysql.TypeDatetime)
c.Assert(res, Equals, true)
res = IsTemporalWithDate(mysql.TypeDate)
c.Assert(res, Equals, true)
res = IsTemporalWithDate(mysql.TypeTimestamp)
c.Assert(res, Equals, true)
res = IsTemporalWithDate('t')
c.Assert(res, Equals, false)
}
func (s *testTypeEtcSuite) TestIsTypePrefixable(c *C) {
defer testleak.AfterTest(c)()
res := IsTypePrefixable('t')
c.Assert(res, Equals, false)
res = IsTypePrefixable(mysql.TypeBlob)
c.Assert(res, Equals, true)
}
func (s *testTypeEtcSuite) TestIsTypeFractionable(c *C) {
defer testleak.AfterTest(c)()
res := IsTypeFractionable(mysql.TypeDatetime)
c.Assert(res, Equals, true)
res = IsTypeFractionable(mysql.TypeDuration)
c.Assert(res, Equals, true)
res = IsTypeFractionable(mysql.TypeTimestamp)
c.Assert(res, Equals, true)
res = IsTypeFractionable('t')
c.Assert(res, Equals, false)
}
func (s *testTypeEtcSuite) TestIsTypeNumeric(c *C) {
defer testleak.AfterTest(c)()
res := IsTypeNumeric(mysql.TypeBit)
c.Assert(res, Equals, true)
res = IsTypeNumeric(mysql.TypeTiny)
c.Assert(res, Equals, true)
res = IsTypeNumeric(mysql.TypeInt24)
c.Assert(res, Equals, true)
res = IsTypeNumeric(mysql.TypeLong)
c.Assert(res, Equals, true)
res = IsTypeNumeric(mysql.TypeLonglong)
c.Assert(res, Equals, true)
res = IsTypeNumeric(mysql.TypeNewDecimal)
c.Assert(res, Equals, true)
res = IsTypeNumeric(mysql.TypeUnspecified)
c.Assert(res, Equals, false)
res = IsTypeNumeric(mysql.TypeFloat)
c.Assert(res, Equals, true)
res = IsTypeNumeric(mysql.TypeDouble)
c.Assert(res, Equals, true)
res = IsTypeNumeric(mysql.TypeShort)
c.Assert(res, Equals, true)
res = IsTypeNumeric('t')
c.Assert(res, Equals, false)
}
| types/etc_test.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.00017874260083772242,
0.0001733948156470433,
0.0001656289241509512,
0.0001740069274092093,
0.0000032993320928653702
] |
{
"id": 8,
"code_window": [
"// SetEnable1PC indicates if the transaction will try to use 1 phase commit.\n",
"func (txn *KVTxn) SetEnable1PC(b bool) {\n",
"\ttxn.enable1PC = b\n",
"}\n",
"\n",
"// SetScope sets the geographical scope of the transaction.\n",
"func (txn *KVTxn) SetScope(scope string) {\n",
"\ttxn.scope = scope\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// SetCausalConsistency indicates if the transaction does not need to\n",
"// guarantee linearizability. Default value is false which means\n",
"// linearizability is guaranteed.\n",
"func (txn *KVTxn) SetCausalConsistency(b bool) {\n",
"\ttxn.causalConsistency = b\n",
"}\n",
"\n"
],
"file_path": "store/tikv/txn.go",
"type": "add",
"edit_start_line_idx": 285
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv_test
import (
"bytes"
"context"
"fmt"
"math"
"sync/atomic"
"testing"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/store/mockstore/unistore"
"github.com/pingcap/tidb/store/tikv"
tikverr "github.com/pingcap/tidb/store/tikv/error"
"github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/store/tikv/util"
)
func TestT(t *testing.T) {
CustomVerboseFlag = true
TestingT(t)
}
// testAsyncCommitCommon holds the common parts used by both
// testAsyncCommitSuite and testAsyncCommitFailSuite.
type testAsyncCommitCommon struct {
cluster cluster.Cluster
store *tikv.KVStore
}
func (s *testAsyncCommitCommon) setUpTest(c *C) {
if *WithTiKV {
s.store = NewTestStore(c)
return
}
client, pdClient, cluster, err := unistore.New("")
c.Assert(err, IsNil)
unistore.BootstrapWithSingleStore(cluster)
s.cluster = cluster
store, err := tikv.NewTestTiKVStore(client, pdClient, nil, nil, 0)
c.Assert(err, IsNil)
s.store = store
}
func (s *testAsyncCommitCommon) putAlphabets(c *C, enableAsyncCommit bool) {
for ch := byte('a'); ch <= byte('z'); ch++ {
s.putKV(c, []byte{ch}, []byte{ch}, enableAsyncCommit)
}
}
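// putKV writes a single key-value pair in its own async-commit transaction and returns the
// transaction's start and commit timestamps.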
func (s *testAsyncCommitCommon) putKV(c *C, key, value []byte, enableAsyncCommit bool) (uint64, uint64) {
txn := s.beginAsyncCommit(c)
err := txn.Set(key, value)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
return txn.StartTS(), txn.GetCommitTS()
}
func (s *testAsyncCommitCommon) mustGetFromTxn(c *C, txn tikv.TxnProbe, key, expectedValue []byte) {
v, err := txn.Get(context.Background(), key)
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, expectedValue)
}
func (s *testAsyncCommitCommon) mustGetLock(c *C, key []byte) *tikv.Lock {
ver, err := s.store.CurrentTimestamp(oracle.GlobalTxnScope)
c.Assert(err, IsNil)
req := tikvrpc.NewRequest(tikvrpc.CmdGet, &kvrpcpb.GetRequest{
Key: key,
Version: ver,
})
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, key)
c.Assert(err, IsNil)
resp, err := s.store.SendReq(bo, req, loc.Region, time.Second*10)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
keyErr := resp.Resp.(*kvrpcpb.GetResponse).GetError()
c.Assert(keyErr, NotNil)
var lockutil tikv.LockProbe
lock, err := lockutil.ExtractLockFromKeyErr(keyErr)
c.Assert(err, IsNil)
return lock
}
func (s *testAsyncCommitCommon) mustPointGet(c *C, key, expectedValue []byte) {
snap := s.store.GetSnapshot(math.MaxUint64)
value, err := snap.Get(context.Background(), key)
c.Assert(err, IsNil)
c.Assert(value, BytesEquals, expectedValue)
}
func (s *testAsyncCommitCommon) mustGetFromSnapshot(c *C, version uint64, key, expectedValue []byte) {
snap := s.store.GetSnapshot(version)
value, err := snap.Get(context.Background(), key)
c.Assert(err, IsNil)
c.Assert(value, BytesEquals, expectedValue)
}
func (s *testAsyncCommitCommon) mustGetNoneFromSnapshot(c *C, version uint64, key []byte) {
snap := s.store.GetSnapshot(version)
_, err := snap.Get(context.Background(), key)
c.Assert(errors.Cause(err), Equals, tikverr.ErrNotExist)
}
func (s *testAsyncCommitCommon) beginAsyncCommitWithLinearizability(c *C) tikv.TxnProbe {
txn := s.beginAsyncCommit(c)
txn.SetOption(kv.GuaranteeLinearizability, true)
return txn
}
func (s *testAsyncCommitCommon) beginAsyncCommit(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.SetEnableAsyncCommit(true)
return tikv.TxnProbe{KVTxn: txn}
}
func (s *testAsyncCommitCommon) begin(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
return tikv.TxnProbe{KVTxn: txn}
}
type testAsyncCommitSuite struct {
OneByOneSuite
testAsyncCommitCommon
bo *tikv.Backoffer
}
var _ = SerialSuites(&testAsyncCommitSuite{})
func (s *testAsyncCommitSuite) SetUpTest(c *C) {
s.testAsyncCommitCommon.setUpTest(c)
s.bo = tikv.NewBackofferWithVars(context.Background(), 5000, nil)
}
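// lockKeysWithAsyncCommit prewrites the given keys plus the primary key in async-commit mode,
// optionally commits the primary, and returns the transaction's start and commit timestamps.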
func (s *testAsyncCommitSuite) lockKeysWithAsyncCommit(c *C, keys, values [][]byte, primaryKey, primaryValue []byte, commitPrimary bool) (uint64, uint64) {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.SetEnableAsyncCommit(true)
for i, k := range keys {
if len(values[i]) > 0 {
err = txn.Set(k, values[i])
} else {
err = txn.Delete(k)
}
c.Assert(err, IsNil)
}
if len(primaryValue) > 0 {
err = txn.Set(primaryKey, primaryValue)
} else {
err = txn.Delete(primaryKey)
}
c.Assert(err, IsNil)
txnProbe := tikv.TxnProbe{KVTxn: txn}
tpc, err := txnProbe.NewCommitter(0)
c.Assert(err, IsNil)
tpc.SetPrimaryKey(primaryKey)
ctx := context.Background()
err = tpc.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
if commitPrimary {
commitTS, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
tpc.SetCommitTS(commitTS)
err = tpc.CommitMutations(ctx)
c.Assert(err, IsNil)
}
return txn.StartTS(), tpc.GetCommitTS()
}
func (s *testAsyncCommitSuite) TestCheckSecondaries(c *C) {
// This test doesn't support tikv mode.
if *WithTiKV {
return
}
s.putAlphabets(c, true)
loc, err := s.store.GetRegionCache().LocateKey(s.bo, []byte("a"))
c.Assert(err, IsNil)
newRegionID, peerID := s.cluster.AllocID(), s.cluster.AllocID()
s.cluster.Split(loc.Region.GetID(), newRegionID, []byte("e"), []uint64{peerID}, peerID)
s.store.GetRegionCache().InvalidateCachedRegion(loc.Region)
	// No locks to check: only the primary key is locked, so resolving should succeed.
s.lockKeysWithAsyncCommit(c, [][]byte{}, [][]byte{}, []byte("z"), []byte("z"), false)
lock := s.mustGetLock(c, []byte("z"))
lock.UseAsyncCommit = true
ts, err := s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
var lockutil tikv.LockProbe
status := lockutil.NewLockStatus(nil, true, ts)
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
err = resolver.ResolveLockAsync(s.bo, lock, status)
c.Assert(err, IsNil)
currentTS, err := s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
status, err = resolver.GetTxnStatus(s.bo, lock.TxnID, []byte("z"), currentTS, currentTS, true, false, nil)
c.Assert(err, IsNil)
c.Assert(status.IsCommitted(), IsTrue)
c.Assert(status.CommitTS(), Equals, ts)
// One key is committed (i), one key is locked (a). Should get committed.
ts, err = s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
commitTs := ts + 10
gotCheckA := int64(0)
gotCheckB := int64(0)
gotResolve := int64(0)
gotOther := int64(0)
mock := mockResolveClient{
inner: s.store.GetTiKVClient(),
onCheckSecondaries: func(req *kvrpcpb.CheckSecondaryLocksRequest) (*tikvrpc.Response, error) {
if req.StartVersion != ts {
return nil, errors.Errorf("Bad start version: %d, expected: %d", req.StartVersion, ts)
}
var resp kvrpcpb.CheckSecondaryLocksResponse
for _, k := range req.Keys {
if bytes.Equal(k, []byte("a")) {
atomic.StoreInt64(&gotCheckA, 1)
resp = kvrpcpb.CheckSecondaryLocksResponse{
Locks: []*kvrpcpb.LockInfo{{Key: []byte("a"), PrimaryLock: []byte("z"), LockVersion: ts, UseAsyncCommit: true}},
CommitTs: commitTs,
}
} else if bytes.Equal(k, []byte("i")) {
atomic.StoreInt64(&gotCheckB, 1)
resp = kvrpcpb.CheckSecondaryLocksResponse{
Locks: []*kvrpcpb.LockInfo{},
CommitTs: commitTs,
}
} else {
fmt.Printf("Got other key: %s\n", k)
atomic.StoreInt64(&gotOther, 1)
}
}
return &tikvrpc.Response{Resp: &resp}, nil
},
onResolveLock: func(req *kvrpcpb.ResolveLockRequest) (*tikvrpc.Response, error) {
if req.StartVersion != ts {
return nil, errors.Errorf("Bad start version: %d, expected: %d", req.StartVersion, ts)
}
if req.CommitVersion != commitTs {
return nil, errors.Errorf("Bad commit version: %d, expected: %d", req.CommitVersion, commitTs)
}
for _, k := range req.Keys {
if bytes.Equal(k, []byte("a")) || bytes.Equal(k, []byte("z")) {
atomic.StoreInt64(&gotResolve, 1)
} else {
atomic.StoreInt64(&gotOther, 1)
}
}
resp := kvrpcpb.ResolveLockResponse{}
return &tikvrpc.Response{Resp: &resp}, nil
},
}
s.store.SetTiKVClient(&mock)
status = lockutil.NewLockStatus([][]byte{[]byte("a"), []byte("i")}, true, 0)
lock = &tikv.Lock{
Key: []byte("a"),
Primary: []byte("z"),
TxnID: ts,
LockType: kvrpcpb.Op_Put,
UseAsyncCommit: true,
MinCommitTS: ts + 5,
}
_ = s.beginAsyncCommit(c)
err = resolver.ResolveLockAsync(s.bo, lock, status)
c.Assert(err, IsNil)
c.Assert(gotCheckA, Equals, int64(1))
c.Assert(gotCheckB, Equals, int64(1))
c.Assert(gotOther, Equals, int64(0))
c.Assert(gotResolve, Equals, int64(1))
// One key has been rolled back (b), one is locked (a). Should be rolled back.
ts, err = s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
commitTs = ts + 10
gotCheckA = int64(0)
gotCheckB = int64(0)
gotResolve = int64(0)
gotOther = int64(0)
mock.onResolveLock = func(req *kvrpcpb.ResolveLockRequest) (*tikvrpc.Response, error) {
if req.StartVersion != ts {
return nil, errors.Errorf("Bad start version: %d, expected: %d", req.StartVersion, ts)
}
if req.CommitVersion != commitTs {
return nil, errors.Errorf("Bad commit version: %d, expected: 0", req.CommitVersion)
}
for _, k := range req.Keys {
if bytes.Equal(k, []byte("a")) || bytes.Equal(k, []byte("z")) {
atomic.StoreInt64(&gotResolve, 1)
} else {
atomic.StoreInt64(&gotOther, 1)
}
}
resp := kvrpcpb.ResolveLockResponse{}
return &tikvrpc.Response{Resp: &resp}, nil
}
lock.TxnID = ts
lock.MinCommitTS = ts + 5
err = resolver.ResolveLockAsync(s.bo, lock, status)
c.Assert(err, IsNil)
c.Assert(gotCheckA, Equals, int64(1))
c.Assert(gotCheckB, Equals, int64(1))
c.Assert(gotResolve, Equals, int64(1))
c.Assert(gotOther, Equals, int64(0))
}
func (s *testAsyncCommitSuite) TestRepeatableRead(c *C) {
var sessionID uint64 = 0
test := func(isPessimistic bool) {
s.putKV(c, []byte("k1"), []byte("v1"), true)
sessionID++
ctx := context.WithValue(context.Background(), util.SessionID, sessionID)
txn1 := s.beginAsyncCommit(c)
txn1.SetPessimistic(isPessimistic)
s.mustGetFromTxn(c, txn1, []byte("k1"), []byte("v1"))
txn1.Set([]byte("k1"), []byte("v2"))
for i := 0; i < 20; i++ {
_, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
}
txn2 := s.beginAsyncCommit(c)
s.mustGetFromTxn(c, txn2, []byte("k1"), []byte("v1"))
err := txn1.Commit(ctx)
c.Assert(err, IsNil)
		// Check that txn1 committed using async commit.
c.Assert(txn1.IsAsyncCommit(), IsTrue)
s.mustGetFromTxn(c, txn2, []byte("k1"), []byte("v1"))
err = txn2.Rollback()
c.Assert(err, IsNil)
txn3 := s.beginAsyncCommit(c)
s.mustGetFromTxn(c, txn3, []byte("k1"), []byte("v2"))
err = txn3.Rollback()
c.Assert(err, IsNil)
}
test(false)
test(true)
}
// It's just a simple validation of linearizability.
// Extra tests are needed to exercise this feature with control over the TiKV cluster.
func (s *testAsyncCommitSuite) TestAsyncCommitLinearizability(c *C) {
t1 := s.beginAsyncCommitWithLinearizability(c)
t2 := s.beginAsyncCommitWithLinearizability(c)
err := t1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = t2.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
ctx := context.WithValue(context.Background(), util.SessionID, uint64(1))
// t2 commits earlier than t1
err = t2.Commit(ctx)
c.Assert(err, IsNil)
err = t1.Commit(ctx)
c.Assert(err, IsNil)
commitTS1 := t1.GetCommitTS()
commitTS2 := t2.GetCommitTS()
c.Assert(commitTS2, Less, commitTS1)
}
// TestAsyncCommitWithMultiDC tests that async commit can only be enabled in global transactions
func (s *testAsyncCommitSuite) TestAsyncCommitWithMultiDC(c *C) {
// It requires setting placement rules to run with TiKV
if *WithTiKV {
return
}
localTxn := s.beginAsyncCommit(c)
err := localTxn.Set([]byte("a"), []byte("a1"))
localTxn.SetScope("bj")
c.Assert(err, IsNil)
ctx := context.WithValue(context.Background(), util.SessionID, uint64(1))
err = localTxn.Commit(ctx)
c.Assert(err, IsNil)
c.Assert(localTxn.IsAsyncCommit(), IsFalse)
globalTxn := s.beginAsyncCommit(c)
err = globalTxn.Set([]byte("b"), []byte("b1"))
globalTxn.SetScope(oracle.GlobalTxnScope)
c.Assert(err, IsNil)
err = globalTxn.Commit(ctx)
c.Assert(err, IsNil)
c.Assert(globalTxn.IsAsyncCommit(), IsTrue)
}
func (s *testAsyncCommitSuite) TestResolveTxnFallbackFromAsyncCommit(c *C) {
keys := [][]byte{[]byte("k0"), []byte("k1")}
values := [][]byte{[]byte("v00"), []byte("v10")}
initTest := func() tikv.CommitterProbe {
t0 := s.begin(c)
err := t0.Set(keys[0], values[0])
c.Assert(err, IsNil)
err = t0.Set(keys[1], values[1])
c.Assert(err, IsNil)
err = t0.Commit(context.Background())
c.Assert(err, IsNil)
t1 := s.beginAsyncCommit(c)
err = t1.Set(keys[0], []byte("v01"))
c.Assert(err, IsNil)
err = t1.Set(keys[1], []byte("v11"))
c.Assert(err, IsNil)
committer, err := t1.NewCommitter(1)
c.Assert(err, IsNil)
committer.SetLockTTL(1)
committer.SetUseAsyncCommit()
return committer
}
prewriteKey := func(committer tikv.CommitterProbe, idx int, fallback bool) {
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, keys[idx])
c.Assert(err, IsNil)
req := committer.BuildPrewriteRequest(loc.Region.GetID(), loc.Region.GetConfVer(), loc.Region.GetVer(),
committer.GetMutations().Slice(idx, idx+1), 1)
if fallback {
req.Req.(*kvrpcpb.PrewriteRequest).MaxCommitTs = 1
}
resp, err := s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
}
readKey := func(idx int) {
t2 := s.begin(c)
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
val, err := t2.Get(ctx, keys[idx])
c.Assert(err, IsNil)
c.Assert(val, DeepEquals, values[idx])
}
// Case 1: Fallback primary, read primary
committer := initTest()
prewriteKey(committer, 0, true)
prewriteKey(committer, 1, false)
readKey(0)
readKey(1)
// Case 2: Fallback primary, read secondary
committer = initTest()
prewriteKey(committer, 0, true)
prewriteKey(committer, 1, false)
readKey(1)
readKey(0)
// Case 3: Fallback secondary, read primary
committer = initTest()
prewriteKey(committer, 0, false)
prewriteKey(committer, 1, true)
readKey(0)
readKey(1)
// Case 4: Fallback secondary, read secondary
committer = initTest()
prewriteKey(committer, 0, false)
prewriteKey(committer, 1, true)
readKey(1)
readKey(0)
// Case 5: Fallback both, read primary
committer = initTest()
prewriteKey(committer, 0, true)
prewriteKey(committer, 1, true)
readKey(0)
readKey(1)
// Case 6: Fallback both, read secondary
committer = initTest()
prewriteKey(committer, 0, true)
prewriteKey(committer, 1, true)
readKey(1)
readKey(0)
}
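// mockResolveClient wraps a tikv.Client so tests can intercept CheckSecondaryLocks and ResolveLock requests.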
type mockResolveClient struct {
inner tikv.Client
onResolveLock func(*kvrpcpb.ResolveLockRequest) (*tikvrpc.Response, error)
onCheckSecondaries func(*kvrpcpb.CheckSecondaryLocksRequest) (*tikvrpc.Response, error)
}
func (m *mockResolveClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
// Intercept check secondary locks and resolve lock messages if the callback is non-nil.
// If the callback returns (nil, nil), forward to the inner client.
if cr, ok := req.Req.(*kvrpcpb.CheckSecondaryLocksRequest); ok && m.onCheckSecondaries != nil {
result, err := m.onCheckSecondaries(cr)
if result != nil || err != nil {
return result, err
}
} else if rr, ok := req.Req.(*kvrpcpb.ResolveLockRequest); ok && m.onResolveLock != nil {
result, err := m.onResolveLock(rr)
if result != nil || err != nil {
return result, err
}
}
return m.inner.SendRequest(ctx, addr, req, timeout)
}
func (m *mockResolveClient) Close() error {
return m.inner.Close()
}
| store/tikv/tests/async_commit_test.go | 1 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.9938148260116577,
0.0371333509683609,
0.00016290968051180243,
0.000176332556293346,
0.1619819700717926
] |
{
"id": 8,
"code_window": [
"// SetEnable1PC indicates if the transaction will try to use 1 phase commit.\n",
"func (txn *KVTxn) SetEnable1PC(b bool) {\n",
"\ttxn.enable1PC = b\n",
"}\n",
"\n",
"// SetScope sets the geographical scope of the transaction.\n",
"func (txn *KVTxn) SetScope(scope string) {\n",
"\ttxn.scope = scope\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// SetCausalConsistency indicates if the transaction does not need to\n",
"// guarantee linearizability. Default value is false which means\n",
"// linearizability is guaranteed.\n",
"func (txn *KVTxn) SetCausalConsistency(b bool) {\n",
"\ttxn.causalConsistency = b\n",
"}\n",
"\n"
],
"file_path": "store/tikv/txn.go",
"type": "add",
"edit_start_line_idx": 285
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv_test
import (
"bytes"
"context"
"sort"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/store/tikv"
tikverr "github.com/pingcap/tidb/store/tikv/error"
"github.com/pingcap/tidb/store/tikv/util"
)
type testAsyncCommitFailSuite struct {
OneByOneSuite
testAsyncCommitCommon
}
var _ = SerialSuites(&testAsyncCommitFailSuite{})
func (s *testAsyncCommitFailSuite) SetUpTest(c *C) {
s.testAsyncCommitCommon.setUpTest(c)
}
// TestFailAsyncCommitPrewriteRpcErrors tests that rpc errors are handled properly when
// prewriting in async-commit mode.
func (s *testAsyncCommitFailSuite) TestFailAsyncCommitPrewriteRpcErrors(c *C) {
// This test doesn't support tikv mode because it needs setting failpoint in unistore.
if *WithTiKV {
return
}
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/noRetryOnRpcError", "return(true)"), IsNil)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/rpcPrewriteTimeout", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/mockstore/unistore/rpcPrewriteTimeout"), IsNil)
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/noRetryOnRpcError"), IsNil)
}()
// The rpc error will be wrapped to ErrResultUndetermined.
t1 := s.beginAsyncCommit(c)
err := t1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
ctx := context.WithValue(context.Background(), util.SessionID, uint64(1))
err = t1.Commit(ctx)
c.Assert(err, NotNil)
c.Assert(terror.ErrorEqual(err, terror.ErrResultUndetermined), IsTrue, Commentf("%s", errors.ErrorStack(err)))
// We don't need to call "Rollback" after "Commit" fails.
err = t1.Rollback()
c.Assert(err, Equals, tikverr.ErrInvalidTxn)
	// Create a new transaction to check. The previous transaction should actually have committed.
t2 := s.beginAsyncCommit(c)
res, err := t2.Get(context.Background(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(bytes.Equal(res, []byte("a1")), IsTrue)
}
func (s *testAsyncCommitFailSuite) TestAsyncCommitPrewriteCancelled(c *C) {
// This test doesn't support tikv mode because it needs setting failpoint in unistore.
if *WithTiKV {
return
}
// Split into two regions.
splitKey := "s"
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, []byte(splitKey))
c.Assert(err, IsNil)
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(loc.Region.GetID(), newRegionID, []byte(splitKey), []uint64{newPeerID}, newPeerID)
s.store.GetRegionCache().InvalidateCachedRegion(loc.Region)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/rpcPrewriteResult", `1*return("writeConflict")->sleep(50)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/mockstore/unistore/rpcPrewriteResult"), IsNil)
}()
t1 := s.beginAsyncCommit(c)
err = t1.Set([]byte("a"), []byte("a"))
c.Assert(err, IsNil)
err = t1.Set([]byte("z"), []byte("z"))
c.Assert(err, IsNil)
ctx := context.WithValue(context.Background(), util.SessionID, uint64(1))
err = t1.Commit(ctx)
c.Assert(err, NotNil)
_, ok := errors.Cause(err).(*tikverr.ErrWriteConflict)
c.Assert(ok, IsTrue, Commentf("%s", errors.ErrorStack(err)))
}
func (s *testAsyncCommitFailSuite) TestPointGetWithAsyncCommit(c *C) {
s.putAlphabets(c, true)
txn := s.beginAsyncCommit(c)
txn.Set([]byte("a"), []byte("v1"))
txn.Set([]byte("b"), []byte("v2"))
s.mustPointGet(c, []byte("a"), []byte("a"))
s.mustPointGet(c, []byte("b"), []byte("b"))
// PointGet cannot ignore async commit transactions' locks.
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/asyncCommitDoNothing", "return"), IsNil)
ctx := context.WithValue(context.Background(), util.SessionID, uint64(1))
err := txn.Commit(ctx)
c.Assert(err, IsNil)
c.Assert(txn.GetCommitter().IsAsyncCommit(), IsTrue)
s.mustPointGet(c, []byte("a"), []byte("v1"))
s.mustPointGet(c, []byte("b"), []byte("v2"))
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/asyncCommitDoNothing"), IsNil)
	// PointGet will not push the `max_ts` up to its ts, which is MaxUint64.
txn2 := s.beginAsyncCommit(c)
s.mustGetFromTxn(c, txn2, []byte("a"), []byte("v1"))
s.mustGetFromTxn(c, txn2, []byte("b"), []byte("v2"))
err = txn2.Rollback()
c.Assert(err, IsNil)
}
func (s *testAsyncCommitFailSuite) TestSecondaryListInPrimaryLock(c *C) {
// This test doesn't support tikv mode.
if *WithTiKV {
return
}
s.putAlphabets(c, true)
// Split into several regions.
for _, splitKey := range []string{"h", "o", "u"} {
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, []byte(splitKey))
c.Assert(err, IsNil)
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(loc.Region.GetID(), newRegionID, []byte(splitKey), []uint64{newPeerID}, newPeerID)
s.store.GetRegionCache().InvalidateCachedRegion(loc.Region)
}
// Ensure the region has been split
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, []byte("i"))
c.Assert(err, IsNil)
c.Assert(loc.StartKey, BytesEquals, []byte("h"))
c.Assert(loc.EndKey, BytesEquals, []byte("o"))
loc, err = s.store.GetRegionCache().LocateKey(bo, []byte("p"))
c.Assert(err, IsNil)
c.Assert(loc.StartKey, BytesEquals, []byte("o"))
c.Assert(loc.EndKey, BytesEquals, []byte("u"))
var sessionID uint64 = 0
test := func(keys []string, values []string) {
sessionID++
ctx := context.WithValue(context.Background(), util.SessionID, sessionID)
txn := s.beginAsyncCommit(c)
for i := range keys {
txn.Set([]byte(keys[i]), []byte(values[i]))
}
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/asyncCommitDoNothing", "return"), IsNil)
err = txn.Commit(ctx)
c.Assert(err, IsNil)
primary := txn.GetCommitter().GetPrimaryKey()
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
lockResolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
txnStatus, err := lockResolver.GetTxnStatus(bo, txn.StartTS(), primary, 0, 0, false, false, nil)
c.Assert(err, IsNil)
c.Assert(txnStatus.IsCommitted(), IsFalse)
c.Assert(txnStatus.Action(), Equals, kvrpcpb.Action_NoAction)
		// Currently, when the transaction has no secondaries, the `secondaries` field of the txnStatus
		// is set to nil, so initialize `expectedSecondaries` as nil here too.
var expectedSecondaries [][]byte
for _, k := range keys {
if !bytes.Equal([]byte(k), primary) {
expectedSecondaries = append(expectedSecondaries, []byte(k))
}
}
sort.Slice(expectedSecondaries, func(i, j int) bool {
return bytes.Compare(expectedSecondaries[i], expectedSecondaries[j]) < 0
})
gotSecondaries := lockResolver.GetSecondariesFromTxnStatus(txnStatus)
sort.Slice(gotSecondaries, func(i, j int) bool {
return bytes.Compare(gotSecondaries[i], gotSecondaries[j]) < 0
})
c.Assert(gotSecondaries, DeepEquals, expectedSecondaries)
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/asyncCommitDoNothing"), IsNil)
txn.GetCommitter().Cleanup(context.Background())
}
test([]string{"a"}, []string{"a1"})
test([]string{"a", "b"}, []string{"a2", "b2"})
test([]string{"a", "b", "d"}, []string{"a3", "b3", "d3"})
test([]string{"a", "b", "h", "i", "u"}, []string{"a4", "b4", "h4", "i4", "u4"})
test([]string{"i", "a", "z", "u", "b"}, []string{"i5", "a5", "z5", "u5", "b5"})
}
func (s *testAsyncCommitFailSuite) TestAsyncCommitContextCancelCausingUndetermined(c *C) {
// For an async commit transaction, if RPC returns context.Canceled error when prewriting, the
// transaction should go to undetermined state.
txn := s.beginAsyncCommit(c)
err := txn.Set([]byte("a"), []byte("va"))
c.Assert(err, IsNil)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/rpcContextCancelErr", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/rpcContextCancelErr"), IsNil)
}()
ctx := context.WithValue(context.Background(), util.SessionID, uint64(1))
err = txn.Commit(ctx)
c.Assert(err, NotNil)
c.Assert(txn.GetCommitter().GetUndeterminedErr(), NotNil)
}
| store/tikv/tests/async_commit_fail_test.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.01267069298774004,
0.0013121264055371284,
0.00016692648932803422,
0.00022509775590151548,
0.0030078210402280092
] |
{
"id": 8,
"code_window": [
"// SetEnable1PC indicates if the transaction will try to use 1 phase commit.\n",
"func (txn *KVTxn) SetEnable1PC(b bool) {\n",
"\ttxn.enable1PC = b\n",
"}\n",
"\n",
"// SetScope sets the geographical scope of the transaction.\n",
"func (txn *KVTxn) SetScope(scope string) {\n",
"\ttxn.scope = scope\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// SetCausalConsistency indicates if the transaction does not need to\n",
"// guarantee linearizability. Default value is false which means\n",
"// linearizability is guaranteed.\n",
"func (txn *KVTxn) SetCausalConsistency(b bool) {\n",
"\ttxn.causalConsistency = b\n",
"}\n",
"\n"
],
"file_path": "store/tikv/txn.go",
"type": "add",
"edit_start_line_idx": 285
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package handle_test
import (
"encoding/json"
"fmt"
"sync"
. "github.com/pingcap/check"
"github.com/pingcap/parser/model"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/statistics/handle"
"github.com/pingcap/tidb/util/testkit"
)
func (s *testStatsSuite) TestConversion(c *C) {
defer cleanEnv(c, s.store, s.do)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (a int, b int)")
tk.MustExec("create index c on t(a,b)")
tk.MustExec("insert into t(a,b) values (3, 1),(2, 1),(1, 10)")
tk.MustExec("analyze table t")
tk.MustExec("insert into t(a,b) values (1, 1),(3, 1),(5, 10)")
is := s.do.InfoSchema()
h := s.do.StatsHandle()
c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil)
c.Assert(h.Update(is), IsNil)
tableInfo, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
jsonTbl, err := h.DumpStatsToJSON("test", tableInfo.Meta(), nil)
c.Assert(err, IsNil)
loadTbl, err := handle.TableStatsFromJSON(tableInfo.Meta(), tableInfo.Meta().ID, jsonTbl)
c.Assert(err, IsNil)
tbl := h.GetTableStats(tableInfo.Meta())
assertTableEqual(c, loadTbl, tbl)
cleanEnv(c, s.store, s.do)
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
c.Assert(h.Update(is), IsNil)
wg.Done()
}()
err = h.LoadStatsFromJSON(is, jsonTbl)
wg.Wait()
c.Assert(err, IsNil)
loadTblInStorage := h.GetTableStats(tableInfo.Meta())
assertTableEqual(c, loadTblInStorage, tbl)
}
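// getStatsJSON dumps the current statistics of db.tableName into a JSONTable for comparison or reload.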
func (s *testStatsSuite) getStatsJSON(c *C, db, tableName string) *handle.JSONTable {
is := s.do.InfoSchema()
h := s.do.StatsHandle()
c.Assert(h.Update(is), IsNil)
table, err := is.TableByName(model.NewCIStr(db), model.NewCIStr(tableName))
c.Assert(err, IsNil)
tableInfo := table.Meta()
jsonTbl, err := h.DumpStatsToJSON("test", tableInfo, nil)
c.Assert(err, IsNil)
return jsonTbl
}
func (s *testStatsSuite) TestDumpGlobalStats(c *C) {
defer cleanEnv(c, s.store, s.do)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("set @@tidb_analyze_version = 2")
tk.MustExec("set @@tidb_partition_prune_mode = 'static'")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, key(a)) partition by hash(a) partitions 2")
tk.MustExec("insert into t values (1), (2)")
tk.MustExec("analyze table t")
	// global stats do not exist yet
stats := s.getStatsJSON(c, "test", "t")
c.Assert(stats.Partitions["p0"], NotNil)
c.Assert(stats.Partitions["p1"], NotNil)
c.Assert(stats.Partitions["global"], IsNil)
	// global stats exist now
tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'")
tk.MustExec("analyze table t")
stats = s.getStatsJSON(c, "test", "t")
c.Assert(stats.Partitions["p0"], NotNil)
c.Assert(stats.Partitions["p1"], NotNil)
c.Assert(stats.Partitions["global"], NotNil)
}
func (s *testStatsSuite) TestLoadGlobalStats(c *C) {
defer cleanEnv(c, s.store, s.do)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("set @@tidb_analyze_version = 2")
tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, key(a)) partition by hash(a) partitions 2")
tk.MustExec("insert into t values (1), (2)")
tk.MustExec("analyze table t")
globalStats := s.getStatsJSON(c, "test", "t")
// remove all statistics
tk.MustExec("delete from mysql.stats_meta")
tk.MustExec("delete from mysql.stats_histograms")
tk.MustExec("delete from mysql.stats_buckets")
s.do.StatsHandle().Clear()
clearedStats := s.getStatsJSON(c, "test", "t")
c.Assert(len(clearedStats.Partitions), Equals, 0)
// load global-stats back
c.Assert(s.do.StatsHandle().LoadStatsFromJSON(s.do.InfoSchema(), globalStats), IsNil)
loadedStats := s.getStatsJSON(c, "test", "t")
c.Assert(len(loadedStats.Partitions), Equals, 3) // p0, p1, global
}
func (s *testStatsSuite) TestDumpPartitions(c *C) {
defer cleanEnv(c, s.store, s.do)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
createTable := `CREATE TABLE t (a int, b int, primary key(a), index idx(b))
PARTITION BY RANGE ( a ) (
PARTITION p0 VALUES LESS THAN (6),
PARTITION p1 VALUES LESS THAN (11),
PARTITION p2 VALUES LESS THAN (16),
PARTITION p3 VALUES LESS THAN (21)
)`
tk.MustExec(createTable)
for i := 1; i < 21; i++ {
tk.MustExec(fmt.Sprintf(`insert into t values (%d, %d)`, i, i))
}
tk.MustExec("analyze table t")
is := s.do.InfoSchema()
h := s.do.StatsHandle()
c.Assert(h.Update(is), IsNil)
table, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
tableInfo := table.Meta()
jsonTbl, err := h.DumpStatsToJSON("test", tableInfo, nil)
c.Assert(err, IsNil)
pi := tableInfo.GetPartitionInfo()
originTables := make([]*statistics.Table, 0, len(pi.Definitions))
for _, def := range pi.Definitions {
originTables = append(originTables, h.GetPartitionStats(tableInfo, def.ID))
}
tk.MustExec("delete from mysql.stats_meta")
tk.MustExec("delete from mysql.stats_histograms")
tk.MustExec("delete from mysql.stats_buckets")
h.Clear()
err = h.LoadStatsFromJSON(s.do.InfoSchema(), jsonTbl)
c.Assert(err, IsNil)
for i, def := range pi.Definitions {
t := h.GetPartitionStats(tableInfo, def.ID)
assertTableEqual(c, originTables[i], t)
}
}
func (s *testStatsSuite) TestDumpAlteredTable(c *C) {
defer cleanEnv(c, s.store, s.do)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
h := s.do.StatsHandle()
oriLease := h.Lease()
h.SetLease(1)
defer func() { h.SetLease(oriLease) }()
tk.MustExec("create table t(a int, b int)")
tk.MustExec("analyze table t")
tk.MustExec("alter table t drop column a")
table, err := s.do.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
_, err = h.DumpStatsToJSON("test", table.Meta(), nil)
c.Assert(err, IsNil)
}
func (s *testStatsSuite) TestDumpCMSketchWithTopN(c *C) {
	// Just test if we can store and recover the Top N elements stored in the database.
defer cleanEnv(c, s.store, s.do)
testKit := testkit.NewTestKit(c, s.store)
testKit.MustExec("use test")
testKit.MustExec("create table t(a int)")
testKit.MustExec("insert into t values (1),(3),(4),(2),(5)")
testKit.MustExec("analyze table t")
is := s.do.InfoSchema()
tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
tableInfo := tbl.Meta()
h := s.do.StatsHandle()
c.Assert(h.Update(is), IsNil)
	// Construct 30 fake values to build the CMSketch with top-n.
fakeData := make([][]byte, 0, 30)
for i := 0; i < 30; i++ {
fakeData = append(fakeData, []byte(fmt.Sprintf("%01024d", i)))
}
cms, _, _, _ := statistics.NewCMSketchAndTopN(5, 2048, fakeData, 20, 100)
stat := h.GetTableStats(tableInfo)
err = h.SaveStatsToStorage(tableInfo.ID, 1, 0, &stat.Columns[tableInfo.Columns[0].ID].Histogram, cms, nil, nil, statistics.Version2, 1, false)
c.Assert(err, IsNil)
c.Assert(h.Update(is), IsNil)
stat = h.GetTableStats(tableInfo)
cmsFromStore := stat.Columns[tableInfo.Columns[0].ID].CMSketch
c.Assert(cmsFromStore, NotNil)
c.Check(cms.Equal(cmsFromStore), IsTrue)
jsonTable, err := h.DumpStatsToJSON("test", tableInfo, nil)
c.Check(err, IsNil)
err = h.LoadStatsFromJSON(is, jsonTable)
c.Check(err, IsNil)
stat = h.GetTableStats(tableInfo)
cmsFromJSON := stat.Columns[tableInfo.Columns[0].ID].CMSketch.Copy()
c.Check(cms.Equal(cmsFromJSON), IsTrue)
}
func (s *testStatsSuite) TestDumpPseudoColumns(c *C) {
defer cleanEnv(c, s.store, s.do)
testKit := testkit.NewTestKit(c, s.store)
testKit.MustExec("use test")
testKit.MustExec("create table t(a int, b int, index idx(a))")
	// Force adding a pseudo table into the stats cache.
testKit.MustQuery("select * from t")
testKit.MustExec("analyze table t index idx")
is := s.do.InfoSchema()
tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
h := s.do.StatsHandle()
_, err = h.DumpStatsToJSON("test", tbl.Meta(), nil)
c.Assert(err, IsNil)
}
func (s *testStatsSuite) TestDumpExtendedStats(c *C) {
defer cleanEnv(c, s.store, s.do)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("set session tidb_enable_extended_stats = on")
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int)")
tk.MustExec("insert into t values(1,5),(2,4),(3,3),(4,2),(5,1)")
h := s.do.StatsHandle()
c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil)
tk.MustExec("alter table t add stats_extended s1 correlation(a,b)")
tk.MustExec("analyze table t")
is := s.do.InfoSchema()
tableInfo, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
tbl := h.GetTableStats(tableInfo.Meta())
jsonTbl, err := h.DumpStatsToJSON("test", tableInfo.Meta(), nil)
c.Assert(err, IsNil)
loadTbl, err := handle.TableStatsFromJSON(tableInfo.Meta(), tableInfo.Meta().ID, jsonTbl)
c.Assert(err, IsNil)
assertTableEqual(c, loadTbl, tbl)
cleanEnv(c, s.store, s.do)
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
c.Assert(h.Update(is), IsNil)
wg.Done()
}()
err = h.LoadStatsFromJSON(is, jsonTbl)
wg.Wait()
c.Assert(err, IsNil)
loadTblInStorage := h.GetTableStats(tableInfo.Meta())
assertTableEqual(c, loadTblInStorage, tbl)
}
func (s *testStatsSuite) TestDumpVer2Stats(c *C) {
defer cleanEnv(c, s.store, s.do)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("set @@tidb_analyze_version = 2")
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b varchar(10))")
tk.MustExec("insert into t value(1, 'aaa'), (3, 'aab'), (5, 'bba'), (2, 'bbb'), (4, 'cca'), (6, 'ccc')")
// mark column stats as needed
tk.MustExec("select * from t where a = 3")
tk.MustExec("select * from t where b = 'bbb'")
tk.MustExec("alter table t add index single(a)")
tk.MustExec("alter table t add index multi(a, b)")
tk.MustExec("analyze table t with 2 topn")
h := s.do.StatsHandle()
is := s.do.InfoSchema()
tableInfo, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
storageTbl, err := h.TableStatsFromStorage(tableInfo.Meta(), tableInfo.Meta().ID, false, 0)
c.Assert(err, IsNil)
dumpJSONTable, err := h.DumpStatsToJSON("test", tableInfo.Meta(), nil)
c.Assert(err, IsNil)
jsonBytes, err := json.MarshalIndent(dumpJSONTable, "", " ")
c.Assert(err, IsNil)
loadJSONTable := &handle.JSONTable{}
err = json.Unmarshal(jsonBytes, loadJSONTable)
c.Assert(err, IsNil)
loadTbl, err := handle.TableStatsFromJSON(tableInfo.Meta(), tableInfo.Meta().ID, loadJSONTable)
c.Assert(err, IsNil)
	// assert that a statistics.Table from storage, dumped into JSON text and then unmarshalled back into a statistics.Table, remains unchanged
assertTableEqual(c, loadTbl, storageTbl)
// assert that this statistics.Table is the same as the one in stats cache
statsCacheTbl := h.GetTableStats(tableInfo.Meta())
assertTableEqual(c, loadTbl, statsCacheTbl)
err = h.LoadStatsFromJSON(is, loadJSONTable)
c.Assert(err, IsNil)
c.Assert(h.Update(is), IsNil)
statsCacheTbl = h.GetTableStats(tableInfo.Meta())
	// assert that after the JSONTable above is loaded into storage and then refreshed into the stats cache,
// the statistics.Table in the stats cache is the same as the unmarshalled statistics.Table
assertTableEqual(c, statsCacheTbl, loadTbl)
}
| statistics/handle/dump_test.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.00017982229474000633,
0.0001742609601933509,
0.00016359880100935698,
0.0001747573260217905,
0.0000043155364437552635
] |
{
"id": 8,
"code_window": [
"// SetEnable1PC indicates if the transaction will try to use 1 phase commit.\n",
"func (txn *KVTxn) SetEnable1PC(b bool) {\n",
"\ttxn.enable1PC = b\n",
"}\n",
"\n",
"// SetScope sets the geographical scope of the transaction.\n",
"func (txn *KVTxn) SetScope(scope string) {\n",
"\ttxn.scope = scope\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// SetCausalConsistency indicates if the transaction does not need to\n",
"// guarantee linearizability. Default value is false which means\n",
"// linearizability is guaranteed.\n",
"func (txn *KVTxn) SetCausalConsistency(b bool) {\n",
"\ttxn.causalConsistency = b\n",
"}\n",
"\n"
],
"file_path": "store/tikv/txn.go",
"type": "add",
"edit_start_line_idx": 285
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collate
import (
"github.com/pingcap/tidb/util/stringutil"
)
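// generalCICollator implements the utf8_general_ci collation: comparisons are case-insensitive and
// ignore trailing spaces, so Compare("abc ", "ABC") == 0.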
type generalCICollator struct {
}
// Compare implements Collator interface.
func (gc *generalCICollator) Compare(a, b string) int {
a = truncateTailingSpace(a)
b = truncateTailingSpace(b)
r1, r2 := rune(0), rune(0)
ai, bi := 0, 0
for ai < len(a) && bi < len(b) {
r1, ai = decodeRune(a, ai)
r2, bi = decodeRune(b, bi)
cmp := int(convertRuneGeneralCI(r1)) - int(convertRuneGeneralCI(r2))
if cmp != 0 {
return sign(cmp)
}
}
return sign((len(a) - ai) - (len(b) - bi))
}
// Key implements Collator interface.
func (gc *generalCICollator) Key(str string) []byte {
str = truncateTailingSpace(str)
buf := make([]byte, 0, len(str))
i := 0
r := rune(0)
for i < len(str) {
r, i = decodeRune(str, i)
u16 := convertRuneGeneralCI(r)
buf = append(buf, byte(u16>>8), byte(u16))
}
return buf
}
// Pattern implements Collator interface.
func (gc *generalCICollator) Pattern() WildcardPattern {
return &ciPattern{}
}
type ciPattern struct {
patChars []rune
patTypes []byte
}
// Compile implements WildcardPattern interface.
func (p *ciPattern) Compile(patternStr string, escape byte) {
p.patChars, p.patTypes = stringutil.CompilePatternInner(patternStr, escape)
}
// Compile implements WildcardPattern interface.
func (p *ciPattern) DoMatch(str string) bool {
return stringutil.DoMatchInner(str, p.patChars, p.patTypes, func(a, b rune) bool {
return convertRuneGeneralCI(a) == convertRuneGeneralCI(b)
})
}
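// convertRuneGeneralCI maps a rune to its utf8_general_ci weight: runes beyond the BMP collapse to
// 0xFFFD, runes whose plane has no table map to themselves, and cased letters share one weight
// (e.g. both 'a' and 'A' map to 0x0041 via plane00).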
func convertRuneGeneralCI(r rune) uint16 {
if r > 0xFFFF {
return 0xFFFD
}
plane := planeTable[r>>8]
if plane == nil {
return uint16(r)
}
return plane[r&0xFF]
}
var (
plane00 = []uint16{
0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F,
0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F,
0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F,
0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F,
0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004A, 0x004B, 0x004C, 0x004D, 0x004E, 0x004F,
0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F,
0x0060, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004A, 0x004B, 0x004C, 0x004D, 0x004E, 0x004F,
0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005A, 0x007B, 0x007C, 0x007D, 0x007E, 0x007F,
0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087, 0x0088, 0x0089, 0x008A, 0x008B, 0x008C, 0x008D, 0x008E, 0x008F,
0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097, 0x0098, 0x0099, 0x009A, 0x009B, 0x009C, 0x009D, 0x009E, 0x009F,
0x00A0, 0x00A1, 0x00A2, 0x00A3, 0x00A4, 0x00A5, 0x00A6, 0x00A7, 0x00A8, 0x00A9, 0x00AA, 0x00AB, 0x00AC, 0x00AD, 0x00AE, 0x00AF,
0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x039C, 0x00B6, 0x00B7, 0x00B8, 0x00B9, 0x00BA, 0x00BB, 0x00BC, 0x00BD, 0x00BE, 0x00BF,
0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x00C6, 0x0043, 0x0045, 0x0045, 0x0045, 0x0045, 0x0049, 0x0049, 0x0049, 0x0049,
0x00D0, 0x004E, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x00D7, 0x00D8, 0x0055, 0x0055, 0x0055, 0x0055, 0x0059, 0x00DE, 0x0053,
0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x00C6, 0x0043, 0x0045, 0x0045, 0x0045, 0x0045, 0x0049, 0x0049, 0x0049, 0x0049,
0x00D0, 0x004E, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x00F7, 0x00D8, 0x0055, 0x0055, 0x0055, 0x0055, 0x0059, 0x00DE, 0x0059}
plane01 = []uint16{
0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0043, 0x0043, 0x0043, 0x0043, 0x0043, 0x0043, 0x0043, 0x0043, 0x0044, 0x0044,
0x0110, 0x0110, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0047, 0x0047, 0x0047, 0x0047,
0x0047, 0x0047, 0x0047, 0x0047, 0x0048, 0x0048, 0x0126, 0x0126, 0x0049, 0x0049, 0x0049, 0x0049, 0x0049, 0x0049, 0x0049, 0x0049,
0x0049, 0x0049, 0x0132, 0x0132, 0x004A, 0x004A, 0x004B, 0x004B, 0x0138, 0x004C, 0x004C, 0x004C, 0x004C, 0x004C, 0x004C, 0x013F,
0x013F, 0x0141, 0x0141, 0x004E, 0x004E, 0x004E, 0x004E, 0x004E, 0x004E, 0x0149, 0x014A, 0x014A, 0x004F, 0x004F, 0x004F, 0x004F,
0x004F, 0x004F, 0x0152, 0x0152, 0x0052, 0x0052, 0x0052, 0x0052, 0x0052, 0x0052, 0x0053, 0x0053, 0x0053, 0x0053, 0x0053, 0x0053,
0x0053, 0x0053, 0x0054, 0x0054, 0x0054, 0x0054, 0x0166, 0x0166, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055,
0x0055, 0x0055, 0x0055, 0x0055, 0x0057, 0x0057, 0x0059, 0x0059, 0x0059, 0x005A, 0x005A, 0x005A, 0x005A, 0x005A, 0x005A, 0x0053,
0x0180, 0x0181, 0x0182, 0x0182, 0x0184, 0x0184, 0x0186, 0x0187, 0x0187, 0x0189, 0x018A, 0x018B, 0x018B, 0x018D, 0x018E, 0x018F,
0x0190, 0x0191, 0x0191, 0x0193, 0x0194, 0x01F6, 0x0196, 0x0197, 0x0198, 0x0198, 0x019A, 0x019B, 0x019C, 0x019D, 0x019E, 0x019F,
0x004F, 0x004F, 0x01A2, 0x01A2, 0x01A4, 0x01A4, 0x01A6, 0x01A7, 0x01A7, 0x01A9, 0x01AA, 0x01AB, 0x01AC, 0x01AC, 0x01AE, 0x0055,
0x0055, 0x01B1, 0x01B2, 0x01B3, 0x01B3, 0x01B5, 0x01B5, 0x01B7, 0x01B8, 0x01B8, 0x01BA, 0x01BB, 0x01BC, 0x01BC, 0x01BE, 0x01F7,
0x01C0, 0x01C1, 0x01C2, 0x01C3, 0x01C4, 0x01C4, 0x01C4, 0x01C7, 0x01C7, 0x01C7, 0x01CA, 0x01CA, 0x01CA, 0x0041, 0x0041, 0x0049,
0x0049, 0x004F, 0x004F, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x018E, 0x0041, 0x0041,
0x0041, 0x0041, 0x00C6, 0x00C6, 0x01E4, 0x01E4, 0x0047, 0x0047, 0x004B, 0x004B, 0x004F, 0x004F, 0x004F, 0x004F, 0x01B7, 0x01B7,
0x004A, 0x01F1, 0x01F1, 0x01F1, 0x0047, 0x0047, 0x01F6, 0x01F7, 0x004E, 0x004E, 0x0041, 0x0041, 0x00C6, 0x00C6, 0x00D8, 0x00D8}
plane02 = []uint16{
0x0041, 0x0041, 0x0041, 0x0041, 0x0045, 0x0045, 0x0045, 0x0045, 0x0049, 0x0049, 0x0049, 0x0049, 0x004F, 0x004F, 0x004F, 0x004F,
0x0052, 0x0052, 0x0052, 0x0052, 0x0055, 0x0055, 0x0055, 0x0055, 0x0053, 0x0053, 0x0054, 0x0054, 0x021C, 0x021C, 0x0048, 0x0048,
0x0220, 0x0221, 0x0222, 0x0222, 0x0224, 0x0224, 0x0041, 0x0041, 0x0045, 0x0045, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F,
0x004F, 0x004F, 0x0059, 0x0059, 0x0234, 0x0235, 0x0236, 0x0237, 0x0238, 0x0239, 0x023A, 0x023B, 0x023C, 0x023D, 0x023E, 0x023F,
0x0240, 0x0241, 0x0242, 0x0243, 0x0244, 0x0245, 0x0246, 0x0247, 0x0248, 0x0249, 0x024A, 0x024B, 0x024C, 0x024D, 0x024E, 0x024F,
0x0250, 0x0251, 0x0252, 0x0181, 0x0186, 0x0255, 0x0189, 0x018A, 0x0258, 0x018F, 0x025A, 0x0190, 0x025C, 0x025D, 0x025E, 0x025F,
0x0193, 0x0261, 0x0262, 0x0194, 0x0264, 0x0265, 0x0266, 0x0267, 0x0197, 0x0196, 0x026A, 0x026B, 0x026C, 0x026D, 0x026E, 0x019C,
0x0270, 0x0271, 0x019D, 0x0273, 0x0274, 0x019F, 0x0276, 0x0277, 0x0278, 0x0279, 0x027A, 0x027B, 0x027C, 0x027D, 0x027E, 0x027F,
0x01A6, 0x0281, 0x0282, 0x01A9, 0x0284, 0x0285, 0x0286, 0x0287, 0x01AE, 0x0289, 0x01B1, 0x01B2, 0x028C, 0x028D, 0x028E, 0x028F,
0x0290, 0x0291, 0x01B7, 0x0293, 0x0294, 0x0295, 0x0296, 0x0297, 0x0298, 0x0299, 0x029A, 0x029B, 0x029C, 0x029D, 0x029E, 0x029F,
0x02A0, 0x02A1, 0x02A2, 0x02A3, 0x02A4, 0x02A5, 0x02A6, 0x02A7, 0x02A8, 0x02A9, 0x02AA, 0x02AB, 0x02AC, 0x02AD, 0x02AE, 0x02AF,
0x02B0, 0x02B1, 0x02B2, 0x02B3, 0x02B4, 0x02B5, 0x02B6, 0x02B7, 0x02B8, 0x02B9, 0x02BA, 0x02BB, 0x02BC, 0x02BD, 0x02BE, 0x02BF,
0x02C0, 0x02C1, 0x02C2, 0x02C3, 0x02C4, 0x02C5, 0x02C6, 0x02C7, 0x02C8, 0x02C9, 0x02CA, 0x02CB, 0x02CC, 0x02CD, 0x02CE, 0x02CF,
0x02D0, 0x02D1, 0x02D2, 0x02D3, 0x02D4, 0x02D5, 0x02D6, 0x02D7, 0x02D8, 0x02D9, 0x02DA, 0x02DB, 0x02DC, 0x02DD, 0x02DE, 0x02DF,
0x02E0, 0x02E1, 0x02E2, 0x02E3, 0x02E4, 0x02E5, 0x02E6, 0x02E7, 0x02E8, 0x02E9, 0x02EA, 0x02EB, 0x02EC, 0x02ED, 0x02EE, 0x02EF,
0x02F0, 0x02F1, 0x02F2, 0x02F3, 0x02F4, 0x02F5, 0x02F6, 0x02F7, 0x02F8, 0x02F9, 0x02FA, 0x02FB, 0x02FC, 0x02FD, 0x02FE, 0x02FF}
plane03 = []uint16{
0x0300, 0x0301, 0x0302, 0x0303, 0x0304, 0x0305, 0x0306, 0x0307, 0x0308, 0x0309, 0x030A, 0x030B, 0x030C, 0x030D, 0x030E, 0x030F,
0x0310, 0x0311, 0x0312, 0x0313, 0x0314, 0x0315, 0x0316, 0x0317, 0x0318, 0x0319, 0x031A, 0x031B, 0x031C, 0x031D, 0x031E, 0x031F,
0x0320, 0x0321, 0x0322, 0x0323, 0x0324, 0x0325, 0x0326, 0x0327, 0x0328, 0x0329, 0x032A, 0x032B, 0x032C, 0x032D, 0x032E, 0x032F,
0x0330, 0x0331, 0x0332, 0x0333, 0x0334, 0x0335, 0x0336, 0x0337, 0x0338, 0x0339, 0x033A, 0x033B, 0x033C, 0x033D, 0x033E, 0x033F,
0x0340, 0x0341, 0x0342, 0x0343, 0x0344, 0x0399, 0x0346, 0x0347, 0x0348, 0x0349, 0x034A, 0x034B, 0x034C, 0x034D, 0x034E, 0x034F,
0x0350, 0x0351, 0x0352, 0x0353, 0x0354, 0x0355, 0x0356, 0x0357, 0x0358, 0x0359, 0x035A, 0x035B, 0x035C, 0x035D, 0x035E, 0x035F,
0x0360, 0x0361, 0x0362, 0x0363, 0x0364, 0x0365, 0x0366, 0x0367, 0x0368, 0x0369, 0x036A, 0x036B, 0x036C, 0x036D, 0x036E, 0x036F,
0x0370, 0x0371, 0x0372, 0x0373, 0x0374, 0x0375, 0x0376, 0x0377, 0x0378, 0x0379, 0x037A, 0x037B, 0x037C, 0x037D, 0x037E, 0x037F,
0x0380, 0x0381, 0x0382, 0x0383, 0x0384, 0x0385, 0x0391, 0x0387, 0x0395, 0x0397, 0x0399, 0x038B, 0x039F, 0x038D, 0x03A5, 0x03A9,
0x0399, 0x0391, 0x0392, 0x0393, 0x0394, 0x0395, 0x0396, 0x0397, 0x0398, 0x0399, 0x039A, 0x039B, 0x039C, 0x039D, 0x039E, 0x039F,
0x03A0, 0x03A1, 0x03A2, 0x03A3, 0x03A4, 0x03A5, 0x03A6, 0x03A7, 0x03A8, 0x03A9, 0x0399, 0x03A5, 0x0391, 0x0395, 0x0397, 0x0399,
0x03A5, 0x0391, 0x0392, 0x0393, 0x0394, 0x0395, 0x0396, 0x0397, 0x0398, 0x0399, 0x039A, 0x039B, 0x039C, 0x039D, 0x039E, 0x039F,
0x03A0, 0x03A1, 0x03A3, 0x03A3, 0x03A4, 0x03A5, 0x03A6, 0x03A7, 0x03A8, 0x03A9, 0x0399, 0x03A5, 0x039F, 0x03A5, 0x03A9, 0x03CF,
0x0392, 0x0398, 0x03D2, 0x03D2, 0x03D2, 0x03A6, 0x03A0, 0x03D7, 0x03D8, 0x03D9, 0x03DA, 0x03DA, 0x03DC, 0x03DC, 0x03DE, 0x03DE,
0x03E0, 0x03E0, 0x03E2, 0x03E2, 0x03E4, 0x03E4, 0x03E6, 0x03E6, 0x03E8, 0x03E8, 0x03EA, 0x03EA, 0x03EC, 0x03EC, 0x03EE, 0x03EE,
0x039A, 0x03A1, 0x03A3, 0x03F3, 0x03F4, 0x03F5, 0x03F6, 0x03F7, 0x03F8, 0x03F9, 0x03FA, 0x03FB, 0x03FC, 0x03FD, 0x03FE, 0x03FF}
plane04 = []uint16{
0x0415, 0x0415, 0x0402, 0x0413, 0x0404, 0x0405, 0x0406, 0x0406, 0x0408, 0x0409, 0x040A, 0x040B, 0x041A, 0x0418, 0x0423, 0x040F,
0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 0x0417, 0x0418, 0x0419, 0x041A, 0x041B, 0x041C, 0x041D, 0x041E, 0x041F,
0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427, 0x0428, 0x0429, 0x042A, 0x042B, 0x042C, 0x042D, 0x042E, 0x042F,
0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 0x0417, 0x0418, 0x0419, 0x041A, 0x041B, 0x041C, 0x041D, 0x041E, 0x041F,
0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427, 0x0428, 0x0429, 0x042A, 0x042B, 0x042C, 0x042D, 0x042E, 0x042F,
0x0415, 0x0415, 0x0402, 0x0413, 0x0404, 0x0405, 0x0406, 0x0406, 0x0408, 0x0409, 0x040A, 0x040B, 0x041A, 0x0418, 0x0423, 0x040F,
0x0460, 0x0460, 0x0462, 0x0462, 0x0464, 0x0464, 0x0466, 0x0466, 0x0468, 0x0468, 0x046A, 0x046A, 0x046C, 0x046C, 0x046E, 0x046E,
0x0470, 0x0470, 0x0472, 0x0472, 0x0474, 0x0474, 0x0474, 0x0474, 0x0478, 0x0478, 0x047A, 0x047A, 0x047C, 0x047C, 0x047E, 0x047E,
0x0480, 0x0480, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048A, 0x048B, 0x048C, 0x048C, 0x048E, 0x048E,
0x0490, 0x0490, 0x0492, 0x0492, 0x0494, 0x0494, 0x0496, 0x0496, 0x0498, 0x0498, 0x049A, 0x049A, 0x049C, 0x049C, 0x049E, 0x049E,
0x04A0, 0x04A0, 0x04A2, 0x04A2, 0x04A4, 0x04A4, 0x04A6, 0x04A6, 0x04A8, 0x04A8, 0x04AA, 0x04AA, 0x04AC, 0x04AC, 0x04AE, 0x04AE,
0x04B0, 0x04B0, 0x04B2, 0x04B2, 0x04B4, 0x04B4, 0x04B6, 0x04B6, 0x04B8, 0x04B8, 0x04BA, 0x04BA, 0x04BC, 0x04BC, 0x04BE, 0x04BE,
0x04C0, 0x0416, 0x0416, 0x04C3, 0x04C3, 0x04C5, 0x04C6, 0x04C7, 0x04C7, 0x04C9, 0x04CA, 0x04CB, 0x04CB, 0x04CD, 0x04CE, 0x04CF,
0x0410, 0x0410, 0x0410, 0x0410, 0x04D4, 0x04D4, 0x0415, 0x0415, 0x04D8, 0x04D8, 0x04D8, 0x04D8, 0x0416, 0x0416, 0x0417, 0x0417,
0x04E0, 0x04E0, 0x0418, 0x0418, 0x0418, 0x0418, 0x041E, 0x041E, 0x04E8, 0x04E8, 0x04E8, 0x04E8, 0x042D, 0x042D, 0x0423, 0x0423,
0x0423, 0x0423, 0x0423, 0x0423, 0x0427, 0x0427, 0x04F6, 0x04F7, 0x042B, 0x042B, 0x04FA, 0x04FB, 0x04FC, 0x04FD, 0x04FE, 0x04FF}
plane05 = []uint16{
0x0500, 0x0501, 0x0502, 0x0503, 0x0504, 0x0505, 0x0506, 0x0507, 0x0508, 0x0509, 0x050A, 0x050B, 0x050C, 0x050D, 0x050E, 0x050F,
0x0510, 0x0511, 0x0512, 0x0513, 0x0514, 0x0515, 0x0516, 0x0517, 0x0518, 0x0519, 0x051A, 0x051B, 0x051C, 0x051D, 0x051E, 0x051F,
0x0520, 0x0521, 0x0522, 0x0523, 0x0524, 0x0525, 0x0526, 0x0527, 0x0528, 0x0529, 0x052A, 0x052B, 0x052C, 0x052D, 0x052E, 0x052F,
0x0530, 0x0531, 0x0532, 0x0533, 0x0534, 0x0535, 0x0536, 0x0537, 0x0538, 0x0539, 0x053A, 0x053B, 0x053C, 0x053D, 0x053E, 0x053F,
0x0540, 0x0541, 0x0542, 0x0543, 0x0544, 0x0545, 0x0546, 0x0547, 0x0548, 0x0549, 0x054A, 0x054B, 0x054C, 0x054D, 0x054E, 0x054F,
0x0550, 0x0551, 0x0552, 0x0553, 0x0554, 0x0555, 0x0556, 0x0557, 0x0558, 0x0559, 0x055A, 0x055B, 0x055C, 0x055D, 0x055E, 0x055F,
0x0560, 0x0531, 0x0532, 0x0533, 0x0534, 0x0535, 0x0536, 0x0537, 0x0538, 0x0539, 0x053A, 0x053B, 0x053C, 0x053D, 0x053E, 0x053F,
0x0540, 0x0541, 0x0542, 0x0543, 0x0544, 0x0545, 0x0546, 0x0547, 0x0548, 0x0549, 0x054A, 0x054B, 0x054C, 0x054D, 0x054E, 0x054F,
0x0550, 0x0551, 0x0552, 0x0553, 0x0554, 0x0555, 0x0556, 0x0587, 0x0588, 0x0589, 0x058A, 0x058B, 0x058C, 0x058D, 0x058E, 0x058F,
0x0590, 0x0591, 0x0592, 0x0593, 0x0594, 0x0595, 0x0596, 0x0597, 0x0598, 0x0599, 0x059A, 0x059B, 0x059C, 0x059D, 0x059E, 0x059F,
0x05A0, 0x05A1, 0x05A2, 0x05A3, 0x05A4, 0x05A5, 0x05A6, 0x05A7, 0x05A8, 0x05A9, 0x05AA, 0x05AB, 0x05AC, 0x05AD, 0x05AE, 0x05AF,
0x05B0, 0x05B1, 0x05B2, 0x05B3, 0x05B4, 0x05B5, 0x05B6, 0x05B7, 0x05B8, 0x05B9, 0x05BA, 0x05BB, 0x05BC, 0x05BD, 0x05BE, 0x05BF,
0x05C0, 0x05C1, 0x05C2, 0x05C3, 0x05C4, 0x05C5, 0x05C6, 0x05C7, 0x05C8, 0x05C9, 0x05CA, 0x05CB, 0x05CC, 0x05CD, 0x05CE, 0x05CF,
0x05D0, 0x05D1, 0x05D2, 0x05D3, 0x05D4, 0x05D5, 0x05D6, 0x05D7, 0x05D8, 0x05D9, 0x05DA, 0x05DB, 0x05DC, 0x05DD, 0x05DE, 0x05DF,
0x05E0, 0x05E1, 0x05E2, 0x05E3, 0x05E4, 0x05E5, 0x05E6, 0x05E7, 0x05E8, 0x05E9, 0x05EA, 0x05EB, 0x05EC, 0x05ED, 0x05EE, 0x05EF,
0x05F0, 0x05F1, 0x05F2, 0x05F3, 0x05F4, 0x05F5, 0x05F6, 0x05F7, 0x05F8, 0x05F9, 0x05FA, 0x05FB, 0x05FC, 0x05FD, 0x05FE, 0x05FF}
plane1E = []uint16{
0x0041, 0x0041, 0x0042, 0x0042, 0x0042, 0x0042, 0x0042, 0x0042, 0x0043, 0x0043, 0x0044, 0x0044, 0x0044, 0x0044, 0x0044, 0x0044,
0x0044, 0x0044, 0x0044, 0x0044, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0046, 0x0046,
0x0047, 0x0047, 0x0048, 0x0048, 0x0048, 0x0048, 0x0048, 0x0048, 0x0048, 0x0048, 0x0048, 0x0048, 0x0049, 0x0049, 0x0049, 0x0049,
0x004B, 0x004B, 0x004B, 0x004B, 0x004B, 0x004B, 0x004C, 0x004C, 0x004C, 0x004C, 0x004C, 0x004C, 0x004C, 0x004C, 0x004D, 0x004D,
0x004D, 0x004D, 0x004D, 0x004D, 0x004E, 0x004E, 0x004E, 0x004E, 0x004E, 0x004E, 0x004E, 0x004E, 0x004F, 0x004F, 0x004F, 0x004F,
0x004F, 0x004F, 0x004F, 0x004F, 0x0050, 0x0050, 0x0050, 0x0050, 0x0052, 0x0052, 0x0052, 0x0052, 0x0052, 0x0052, 0x0052, 0x0052,
0x0053, 0x0053, 0x0053, 0x0053, 0x0053, 0x0053, 0x0053, 0x0053, 0x0053, 0x0053, 0x0054, 0x0054, 0x0054, 0x0054, 0x0054, 0x0054,
0x0054, 0x0054, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0056, 0x0056, 0x0056, 0x0056,
0x0057, 0x0057, 0x0057, 0x0057, 0x0057, 0x0057, 0x0057, 0x0057, 0x0057, 0x0057, 0x0058, 0x0058, 0x0058, 0x0058, 0x0059, 0x0059,
0x005A, 0x005A, 0x005A, 0x005A, 0x005A, 0x005A, 0x0048, 0x0054, 0x0057, 0x0059, 0x1E9A, 0x0053, 0x1E9C, 0x1E9D, 0x1E9E, 0x1E9F,
0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041,
0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045,
0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0049, 0x0049, 0x0049, 0x0049, 0x004F, 0x004F, 0x004F, 0x004F,
0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F,
0x004F, 0x004F, 0x004F, 0x004F, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055,
0x0055, 0x0055, 0x0059, 0x0059, 0x0059, 0x0059, 0x0059, 0x0059, 0x0059, 0x0059, 0x1EFA, 0x1EFB, 0x1EFC, 0x1EFD, 0x1EFE, 0x1EFF}
plane1F = []uint16{
0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391,
0x0395, 0x0395, 0x0395, 0x0395, 0x0395, 0x0395, 0x1F16, 0x1F17, 0x0395, 0x0395, 0x0395, 0x0395, 0x0395, 0x0395, 0x1F1E, 0x1F1F,
0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397,
0x0399, 0x0399, 0x0399, 0x0399, 0x0399, 0x0399, 0x0399, 0x0399, 0x0399, 0x0399, 0x0399, 0x0399, 0x0399, 0x0399, 0x0399, 0x0399,
0x039F, 0x039F, 0x039F, 0x039F, 0x039F, 0x039F, 0x1F46, 0x1F47, 0x039F, 0x039F, 0x039F, 0x039F, 0x039F, 0x039F, 0x1F4E, 0x1F4F,
0x03A5, 0x03A5, 0x03A5, 0x03A5, 0x03A5, 0x03A5, 0x03A5, 0x03A5, 0x1F58, 0x03A5, 0x1F5A, 0x03A5, 0x1F5C, 0x03A5, 0x1F5E, 0x03A5,
0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9,
0x0391, 0x1FBB, 0x0395, 0x1FC9, 0x0397, 0x1FCB, 0x0399, 0x1FDB, 0x039F, 0x1FF9, 0x03A5, 0x1FEB, 0x03A9, 0x1FFB, 0x1F7E, 0x1F7F,
0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391,
0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397,
0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9,
0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x1FB5, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x1FBB, 0x0391, 0x1FBD, 0x0399, 0x1FBF,
0x1FC0, 0x1FC1, 0x0397, 0x0397, 0x0397, 0x1FC5, 0x0397, 0x0397, 0x0395, 0x1FC9, 0x0397, 0x1FCB, 0x0397, 0x1FCD, 0x1FCE, 0x1FCF,
0x0399, 0x0399, 0x0399, 0x1FD3, 0x1FD4, 0x1FD5, 0x0399, 0x0399, 0x0399, 0x0399, 0x0399, 0x1FDB, 0x1FDC, 0x1FDD, 0x1FDE, 0x1FDF,
0x03A5, 0x03A5, 0x03A5, 0x1FE3, 0x03A1, 0x03A1, 0x03A5, 0x03A5, 0x03A5, 0x03A5, 0x03A5, 0x1FEB, 0x03A1, 0x1FED, 0x1FEE, 0x1FEF,
0x1FF0, 0x1FF1, 0x03A9, 0x03A9, 0x03A9, 0x1FF5, 0x03A9, 0x03A9, 0x039F, 0x1FF9, 0x03A9, 0x1FFB, 0x03A9, 0x1FFD, 0x1FFE, 0x1FFF}
plane21 = []uint16{
0x2100, 0x2101, 0x2102, 0x2103, 0x2104, 0x2105, 0x2106, 0x2107, 0x2108, 0x2109, 0x210A, 0x210B, 0x210C, 0x210D, 0x210E, 0x210F,
0x2110, 0x2111, 0x2112, 0x2113, 0x2114, 0x2115, 0x2116, 0x2117, 0x2118, 0x2119, 0x211A, 0x211B, 0x211C, 0x211D, 0x211E, 0x211F,
0x2120, 0x2121, 0x2122, 0x2123, 0x2124, 0x2125, 0x2126, 0x2127, 0x2128, 0x2129, 0x212A, 0x212B, 0x212C, 0x212D, 0x212E, 0x212F,
0x2130, 0x2131, 0x2132, 0x2133, 0x2134, 0x2135, 0x2136, 0x2137, 0x2138, 0x2139, 0x213A, 0x213B, 0x213C, 0x213D, 0x213E, 0x213F,
0x2140, 0x2141, 0x2142, 0x2143, 0x2144, 0x2145, 0x2146, 0x2147, 0x2148, 0x2149, 0x214A, 0x214B, 0x214C, 0x214D, 0x214E, 0x214F,
0x2150, 0x2151, 0x2152, 0x2153, 0x2154, 0x2155, 0x2156, 0x2157, 0x2158, 0x2159, 0x215A, 0x215B, 0x215C, 0x215D, 0x215E, 0x215F,
0x2160, 0x2161, 0x2162, 0x2163, 0x2164, 0x2165, 0x2166, 0x2167, 0x2168, 0x2169, 0x216A, 0x216B, 0x216C, 0x216D, 0x216E, 0x216F,
0x2160, 0x2161, 0x2162, 0x2163, 0x2164, 0x2165, 0x2166, 0x2167, 0x2168, 0x2169, 0x216A, 0x216B, 0x216C, 0x216D, 0x216E, 0x216F,
0x2180, 0x2181, 0x2182, 0x2183, 0x2184, 0x2185, 0x2186, 0x2187, 0x2188, 0x2189, 0x218A, 0x218B, 0x218C, 0x218D, 0x218E, 0x218F,
0x2190, 0x2191, 0x2192, 0x2193, 0x2194, 0x2195, 0x2196, 0x2197, 0x2198, 0x2199, 0x219A, 0x219B, 0x219C, 0x219D, 0x219E, 0x219F,
0x21A0, 0x21A1, 0x21A2, 0x21A3, 0x21A4, 0x21A5, 0x21A6, 0x21A7, 0x21A8, 0x21A9, 0x21AA, 0x21AB, 0x21AC, 0x21AD, 0x21AE, 0x21AF,
0x21B0, 0x21B1, 0x21B2, 0x21B3, 0x21B4, 0x21B5, 0x21B6, 0x21B7, 0x21B8, 0x21B9, 0x21BA, 0x21BB, 0x21BC, 0x21BD, 0x21BE, 0x21BF,
0x21C0, 0x21C1, 0x21C2, 0x21C3, 0x21C4, 0x21C5, 0x21C6, 0x21C7, 0x21C8, 0x21C9, 0x21CA, 0x21CB, 0x21CC, 0x21CD, 0x21CE, 0x21CF,
0x21D0, 0x21D1, 0x21D2, 0x21D3, 0x21D4, 0x21D5, 0x21D6, 0x21D7, 0x21D8, 0x21D9, 0x21DA, 0x21DB, 0x21DC, 0x21DD, 0x21DE, 0x21DF,
0x21E0, 0x21E1, 0x21E2, 0x21E3, 0x21E4, 0x21E5, 0x21E6, 0x21E7, 0x21E8, 0x21E9, 0x21EA, 0x21EB, 0x21EC, 0x21ED, 0x21EE, 0x21EF,
0x21F0, 0x21F1, 0x21F2, 0x21F3, 0x21F4, 0x21F5, 0x21F6, 0x21F7, 0x21F8, 0x21F9, 0x21FA, 0x21FB, 0x21FC, 0x21FD, 0x21FE, 0x21FF}
plane24 = []uint16{
0x2400, 0x2401, 0x2402, 0x2403, 0x2404, 0x2405, 0x2406, 0x2407, 0x2408, 0x2409, 0x240A, 0x240B, 0x240C, 0x240D, 0x240E, 0x240F,
0x2410, 0x2411, 0x2412, 0x2413, 0x2414, 0x2415, 0x2416, 0x2417, 0x2418, 0x2419, 0x241A, 0x241B, 0x241C, 0x241D, 0x241E, 0x241F,
0x2420, 0x2421, 0x2422, 0x2423, 0x2424, 0x2425, 0x2426, 0x2427, 0x2428, 0x2429, 0x242A, 0x242B, 0x242C, 0x242D, 0x242E, 0x242F,
0x2430, 0x2431, 0x2432, 0x2433, 0x2434, 0x2435, 0x2436, 0x2437, 0x2438, 0x2439, 0x243A, 0x243B, 0x243C, 0x243D, 0x243E, 0x243F,
0x2440, 0x2441, 0x2442, 0x2443, 0x2444, 0x2445, 0x2446, 0x2447, 0x2448, 0x2449, 0x244A, 0x244B, 0x244C, 0x244D, 0x244E, 0x244F,
0x2450, 0x2451, 0x2452, 0x2453, 0x2454, 0x2455, 0x2456, 0x2457, 0x2458, 0x2459, 0x245A, 0x245B, 0x245C, 0x245D, 0x245E, 0x245F,
0x2460, 0x2461, 0x2462, 0x2463, 0x2464, 0x2465, 0x2466, 0x2467, 0x2468, 0x2469, 0x246A, 0x246B, 0x246C, 0x246D, 0x246E, 0x246F,
0x2470, 0x2471, 0x2472, 0x2473, 0x2474, 0x2475, 0x2476, 0x2477, 0x2478, 0x2479, 0x247A, 0x247B, 0x247C, 0x247D, 0x247E, 0x247F,
0x2480, 0x2481, 0x2482, 0x2483, 0x2484, 0x2485, 0x2486, 0x2487, 0x2488, 0x2489, 0x248A, 0x248B, 0x248C, 0x248D, 0x248E, 0x248F,
0x2490, 0x2491, 0x2492, 0x2493, 0x2494, 0x2495, 0x2496, 0x2497, 0x2498, 0x2499, 0x249A, 0x249B, 0x249C, 0x249D, 0x249E, 0x249F,
0x24A0, 0x24A1, 0x24A2, 0x24A3, 0x24A4, 0x24A5, 0x24A6, 0x24A7, 0x24A8, 0x24A9, 0x24AA, 0x24AB, 0x24AC, 0x24AD, 0x24AE, 0x24AF,
0x24B0, 0x24B1, 0x24B2, 0x24B3, 0x24B4, 0x24B5, 0x24B6, 0x24B7, 0x24B8, 0x24B9, 0x24BA, 0x24BB, 0x24BC, 0x24BD, 0x24BE, 0x24BF,
0x24C0, 0x24C1, 0x24C2, 0x24C3, 0x24C4, 0x24C5, 0x24C6, 0x24C7, 0x24C8, 0x24C9, 0x24CA, 0x24CB, 0x24CC, 0x24CD, 0x24CE, 0x24CF,
0x24B6, 0x24B7, 0x24B8, 0x24B9, 0x24BA, 0x24BB, 0x24BC, 0x24BD, 0x24BE, 0x24BF, 0x24C0, 0x24C1, 0x24C2, 0x24C3, 0x24C4, 0x24C5,
0x24C6, 0x24C7, 0x24C8, 0x24C9, 0x24CA, 0x24CB, 0x24CC, 0x24CD, 0x24CE, 0x24CF, 0x24EA, 0x24EB, 0x24EC, 0x24ED, 0x24EE, 0x24EF,
0x24F0, 0x24F1, 0x24F2, 0x24F3, 0x24F4, 0x24F5, 0x24F6, 0x24F7, 0x24F8, 0x24F9, 0x24FA, 0x24FB, 0x24FC, 0x24FD, 0x24FE, 0x24FF}
planeFF = []uint16{
0xFF00, 0xFF01, 0xFF02, 0xFF03, 0xFF04, 0xFF05, 0xFF06, 0xFF07, 0xFF08, 0xFF09, 0xFF0A, 0xFF0B, 0xFF0C, 0xFF0D, 0xFF0E, 0xFF0F,
0xFF10, 0xFF11, 0xFF12, 0xFF13, 0xFF14, 0xFF15, 0xFF16, 0xFF17, 0xFF18, 0xFF19, 0xFF1A, 0xFF1B, 0xFF1C, 0xFF1D, 0xFF1E, 0xFF1F,
0xFF20, 0xFF21, 0xFF22, 0xFF23, 0xFF24, 0xFF25, 0xFF26, 0xFF27, 0xFF28, 0xFF29, 0xFF2A, 0xFF2B, 0xFF2C, 0xFF2D, 0xFF2E, 0xFF2F,
0xFF30, 0xFF31, 0xFF32, 0xFF33, 0xFF34, 0xFF35, 0xFF36, 0xFF37, 0xFF38, 0xFF39, 0xFF3A, 0xFF3B, 0xFF3C, 0xFF3D, 0xFF3E, 0xFF3F,
0xFF40, 0xFF21, 0xFF22, 0xFF23, 0xFF24, 0xFF25, 0xFF26, 0xFF27, 0xFF28, 0xFF29, 0xFF2A, 0xFF2B, 0xFF2C, 0xFF2D, 0xFF2E, 0xFF2F,
0xFF30, 0xFF31, 0xFF32, 0xFF33, 0xFF34, 0xFF35, 0xFF36, 0xFF37, 0xFF38, 0xFF39, 0xFF3A, 0xFF5B, 0xFF5C, 0xFF5D, 0xFF5E, 0xFF5F,
0xFF60, 0xFF61, 0xFF62, 0xFF63, 0xFF64, 0xFF65, 0xFF66, 0xFF67, 0xFF68, 0xFF69, 0xFF6A, 0xFF6B, 0xFF6C, 0xFF6D, 0xFF6E, 0xFF6F,
0xFF70, 0xFF71, 0xFF72, 0xFF73, 0xFF74, 0xFF75, 0xFF76, 0xFF77, 0xFF78, 0xFF79, 0xFF7A, 0xFF7B, 0xFF7C, 0xFF7D, 0xFF7E, 0xFF7F,
0xFF80, 0xFF81, 0xFF82, 0xFF83, 0xFF84, 0xFF85, 0xFF86, 0xFF87, 0xFF88, 0xFF89, 0xFF8A, 0xFF8B, 0xFF8C, 0xFF8D, 0xFF8E, 0xFF8F,
0xFF90, 0xFF91, 0xFF92, 0xFF93, 0xFF94, 0xFF95, 0xFF96, 0xFF97, 0xFF98, 0xFF99, 0xFF9A, 0xFF9B, 0xFF9C, 0xFF9D, 0xFF9E, 0xFF9F,
0xFFA0, 0xFFA1, 0xFFA2, 0xFFA3, 0xFFA4, 0xFFA5, 0xFFA6, 0xFFA7, 0xFFA8, 0xFFA9, 0xFFAA, 0xFFAB, 0xFFAC, 0xFFAD, 0xFFAE, 0xFFAF,
0xFFB0, 0xFFB1, 0xFFB2, 0xFFB3, 0xFFB4, 0xFFB5, 0xFFB6, 0xFFB7, 0xFFB8, 0xFFB9, 0xFFBA, 0xFFBB, 0xFFBC, 0xFFBD, 0xFFBE, 0xFFBF,
0xFFC0, 0xFFC1, 0xFFC2, 0xFFC3, 0xFFC4, 0xFFC5, 0xFFC6, 0xFFC7, 0xFFC8, 0xFFC9, 0xFFCA, 0xFFCB, 0xFFCC, 0xFFCD, 0xFFCE, 0xFFCF,
0xFFD0, 0xFFD1, 0xFFD2, 0xFFD3, 0xFFD4, 0xFFD5, 0xFFD6, 0xFFD7, 0xFFD8, 0xFFD9, 0xFFDA, 0xFFDB, 0xFFDC, 0xFFDD, 0xFFDE, 0xFFDF,
0xFFE0, 0xFFE1, 0xFFE2, 0xFFE3, 0xFFE4, 0xFFE5, 0xFFE6, 0xFFE7, 0xFFE8, 0xFFE9, 0xFFEA, 0xFFEB, 0xFFEC, 0xFFED, 0xFFEE, 0xFFEF,
0xFFF0, 0xFFF1, 0xFFF2, 0xFFF3, 0xFFF4, 0xFFF5, 0xFFF6, 0xFFF7, 0xFFF8, 0xFFF9, 0xFFFA, 0xFFFB, 0xFFFC, 0xFFFD, 0xFFFE, 0xFFFF}
planeTable = [][]uint16{
plane00, plane01, plane02, plane03, plane04, plane05, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, plane1E, plane1F, nil, plane21, nil, nil,
plane24, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, planeFF}
)
| util/collate/general_ci.go | 0 | https://github.com/pingcap/tidb/commit/cc83cc524f8d3fd661f6e62d129ba043cc74501e | [
0.0004541362286545336,
0.00018358857778366655,
0.0001650772464927286,
0.00016879310715012252,
0.000055629268899792805
] |
{
"id": 0,
"code_window": [
"\ttab.mutex.Lock()\n",
"\tdefer tab.mutex.Unlock()\n",
"\n",
"\t// Find all non-empty buckets and get a fresh slice of their entries.\n",
"\tvar buckets [][]*Node\n",
"\tfor _, b := range tab.buckets {\n",
"\t\tif len(b.entries) > 0 {\n",
"\t\t\tbuckets = append(buckets, b.entries[:])\n",
"\t\t}\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfor _, b := range &tab.buckets {\n"
],
"file_path": "p2p/discover/table.go",
"type": "replace",
"edit_start_line_idx": 162
} | // Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package discover implements the Node Discovery Protocol.
//
// The Node Discovery protocol provides a way to find RLPx nodes that
// can be connected to. It uses a Kademlia-like protocol to maintain a
// distributed database of the IDs and endpoints of all listening
// nodes.
package discover
import (
crand "crypto/rand"
"encoding/binary"
"fmt"
mrand "math/rand"
"net"
"sort"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/netutil"
)
const (
alpha = 3 // Kademlia concurrency factor
bucketSize = 16 // Kademlia bucket size
maxReplacements = 10 // Size of per-bucket replacement list
// We keep buckets for the upper 1/15 of distances because
// it's very unlikely we'll ever encounter a node that's closer.
hashBits = len(common.Hash{}) * 8
nBuckets = hashBits / 15 // Number of buckets
bucketMinDistance = hashBits - nBuckets // Log distance of closest bucket
// IP address limits.
bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24
tableIPLimit, tableSubnet = 10, 24
maxFindnodeFailures = 5 // Nodes exceeding this limit are dropped
refreshInterval = 30 * time.Minute
revalidateInterval = 10 * time.Second
copyNodesInterval = 30 * time.Second
seedMinTableTime = 5 * time.Minute
seedCount = 30
seedMaxAge = 5 * 24 * time.Hour
)
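// Editor's note (illustrative, assuming common.Hash is 32 bytes as in
// go-ethereum): hashBits evaluates to 256, nBuckets to 256/15 == 17 and
// bucketMinDistance to 256-17 == 239, so only the distance range (240, 256]
// gets a bucket of its own; see Table.bucket below.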
type Table struct {
mutex sync.Mutex // protects buckets, bucket content, nursery, rand
buckets [nBuckets]*bucket // index of known nodes by distance
nursery []*Node // bootstrap nodes
rand *mrand.Rand // source of randomness, periodically reseeded
ips netutil.DistinctNetSet
db *nodeDB // database of known nodes
refreshReq chan chan struct{}
initDone chan struct{}
closeReq chan struct{}
closed chan struct{}
nodeAddedHook func(*Node) // for testing
net transport
self *Node // metadata of the local node
}
// transport is implemented by the UDP transport.
// It is an interface so we can test without opening lots of UDP
// sockets and without generating a private key.
type transport interface {
ping(NodeID, *net.UDPAddr) error
findnode(toid NodeID, addr *net.UDPAddr, target NodeID) ([]*Node, error)
close()
}
// bucket contains nodes, ordered by their last activity. The entry
// that was most recently active is the first element in entries.
type bucket struct {
entries []*Node // live entries, sorted by time of last contact
replacements []*Node // recently seen nodes to be used if revalidation fails
ips netutil.DistinctNetSet
}
func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string, bootnodes []*Node) (*Table, error) {
// If no node database was given, use an in-memory one
db, err := newNodeDB(nodeDBPath, nodeDBVersion, ourID)
if err != nil {
return nil, err
}
tab := &Table{
net: t,
db: db,
self: NewNode(ourID, ourAddr.IP, uint16(ourAddr.Port), uint16(ourAddr.Port)),
refreshReq: make(chan chan struct{}),
initDone: make(chan struct{}),
closeReq: make(chan struct{}),
closed: make(chan struct{}),
rand: mrand.New(mrand.NewSource(0)),
ips: netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit},
}
if err := tab.setFallbackNodes(bootnodes); err != nil {
return nil, err
}
for i := range tab.buckets {
tab.buckets[i] = &bucket{
ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit},
}
}
tab.seedRand()
tab.loadSeedNodes()
// Start the background expiration goroutine after loading seeds so that the search for
// seed nodes also considers older nodes that would otherwise be removed by the
// expiration.
tab.db.ensureExpirer()
go tab.loop()
return tab, nil
}
func (tab *Table) seedRand() {
var b [8]byte
crand.Read(b[:])
tab.mutex.Lock()
tab.rand.Seed(int64(binary.BigEndian.Uint64(b[:])))
tab.mutex.Unlock()
}
// Self returns the local node.
// The returned node should not be modified by the caller.
func (tab *Table) Self() *Node {
return tab.self
}
// ReadRandomNodes fills the given slice with random nodes from the
// table. It will not write the same node more than once. The nodes in
// the slice are copies and can be modified by the caller.
func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
if !tab.isInitDone() {
return 0
}
tab.mutex.Lock()
defer tab.mutex.Unlock()
// Find all non-empty buckets and get a fresh slice of their entries.
var buckets [][]*Node
for _, b := range tab.buckets {
if len(b.entries) > 0 {
buckets = append(buckets, b.entries[:])
}
}
if len(buckets) == 0 {
return 0
}
// Shuffle the buckets.
for i := len(buckets) - 1; i > 0; i-- {
j := tab.rand.Intn(len(buckets))
buckets[i], buckets[j] = buckets[j], buckets[i]
}
// Move head of each bucket into buf, removing buckets that become empty.
var i, j int
for ; i < len(buf); i, j = i+1, (j+1)%len(buckets) {
b := buckets[j]
buf[i] = &(*b[0])
buckets[j] = b[1:]
if len(b) == 1 {
buckets = append(buckets[:j], buckets[j+1:]...)
}
if len(buckets) == 0 {
break
}
}
return i + 1
}
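// Illustrative usage (editor's sketch, not part of the original file):
//
//	buf := make([]*Node, 3)
//	n := tab.ReadRandomNodes(buf)
//	for _, node := range buf[:n] {
//		fmt.Println(node.ID)
//	}
//
// n may be smaller than len(buf) when the table holds fewer nodes, and the
// call returns 0 until the initial seeding has finished.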
// Close terminates the network listener and flushes the node database.
func (tab *Table) Close() {
select {
case <-tab.closed:
// already closed.
case tab.closeReq <- struct{}{}:
<-tab.closed // wait for refreshLoop to end.
}
}
// setFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
func (tab *Table) setFallbackNodes(nodes []*Node) error {
for _, n := range nodes {
if err := n.validateComplete(); err != nil {
return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
}
}
tab.nursery = make([]*Node, 0, len(nodes))
for _, n := range nodes {
cpy := *n
// Recompute cpy.sha because the node might not have been
// created by NewNode or ParseNode.
cpy.sha = crypto.Keccak256Hash(n.ID[:])
tab.nursery = append(tab.nursery, &cpy)
}
return nil
}
// isInitDone returns whether the table's initial seeding procedure has completed.
func (tab *Table) isInitDone() bool {
select {
case <-tab.initDone:
return true
default:
return false
}
}
// Resolve searches for a specific node with the given ID.
// It returns nil if the node could not be found.
func (tab *Table) Resolve(targetID NodeID) *Node {
// If the node is present in the local table, no
// network interaction is required.
hash := crypto.Keccak256Hash(targetID[:])
tab.mutex.Lock()
cl := tab.closest(hash, 1)
tab.mutex.Unlock()
if len(cl.entries) > 0 && cl.entries[0].ID == targetID {
return cl.entries[0]
}
// Otherwise, do a network lookup.
result := tab.Lookup(targetID)
for _, n := range result {
if n.ID == targetID {
return n
}
}
return nil
}
// Lookup performs a network search for nodes close
// to the given target. It approaches the target by querying
// nodes that are closer to it on each iteration.
// The given target does not need to be an actual node
// identifier.
func (tab *Table) Lookup(targetID NodeID) []*Node {
return tab.lookup(targetID, true)
}
func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
var (
target = crypto.Keccak256Hash(targetID[:])
asked = make(map[NodeID]bool)
seen = make(map[NodeID]bool)
reply = make(chan []*Node, alpha)
pendingQueries = 0
result *nodesByDistance
)
// don't query further if we hit ourself.
// unlikely to happen often in practice.
asked[tab.self.ID] = true
for {
tab.mutex.Lock()
// generate initial result set
result = tab.closest(target, bucketSize)
tab.mutex.Unlock()
if len(result.entries) > 0 || !refreshIfEmpty {
break
}
// The result set is empty, all nodes were dropped, refresh.
// We actually wait for the refresh to complete here. The very
// first query will hit this case and run the bootstrapping
// logic.
<-tab.refresh()
refreshIfEmpty = false
}
for {
// ask the alpha closest nodes that we haven't asked yet
for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
n := result.entries[i]
if !asked[n.ID] {
asked[n.ID] = true
pendingQueries++
go tab.findnode(n, targetID, reply)
}
}
if pendingQueries == 0 {
// we have asked all closest nodes, stop the search
break
}
// wait for the next reply
for _, n := range <-reply {
if n != nil && !seen[n.ID] {
seen[n.ID] = true
result.push(n, bucketSize)
}
}
pendingQueries--
}
return result.entries
}
func (tab *Table) findnode(n *Node, targetID NodeID, reply chan<- []*Node) {
fails := tab.db.findFails(n.ID)
r, err := tab.net.findnode(n.ID, n.addr(), targetID)
if err != nil || len(r) == 0 {
fails++
tab.db.updateFindFails(n.ID, fails)
log.Trace("Findnode failed", "id", n.ID, "failcount", fails, "err", err)
if fails >= maxFindnodeFailures {
log.Trace("Too many findnode failures, dropping", "id", n.ID, "failcount", fails)
tab.delete(n)
}
} else if fails > 0 {
tab.db.updateFindFails(n.ID, fails-1)
}
// Grab as many nodes as possible. Some of them might not be alive anymore, but we'll
// just remove those again during revalidation.
for _, n := range r {
tab.add(n)
}
reply <- r
}
func (tab *Table) refresh() <-chan struct{} {
done := make(chan struct{})
select {
case tab.refreshReq <- done:
case <-tab.closed:
close(done)
}
return done
}
// loop schedules refresh and revalidate runs, and coordinates shutdown.
func (tab *Table) loop() {
var (
revalidate = time.NewTimer(tab.nextRevalidateTime())
refresh = time.NewTicker(refreshInterval)
copyNodes = time.NewTicker(copyNodesInterval)
revalidateDone = make(chan struct{})
refreshDone = make(chan struct{}) // where doRefresh reports completion
waiting = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs
)
defer refresh.Stop()
defer revalidate.Stop()
defer copyNodes.Stop()
// Start initial refresh.
go tab.doRefresh(refreshDone)
loop:
for {
select {
case <-refresh.C:
tab.seedRand()
if refreshDone == nil {
refreshDone = make(chan struct{})
go tab.doRefresh(refreshDone)
}
case req := <-tab.refreshReq:
waiting = append(waiting, req)
if refreshDone == nil {
refreshDone = make(chan struct{})
go tab.doRefresh(refreshDone)
}
case <-refreshDone:
for _, ch := range waiting {
close(ch)
}
waiting, refreshDone = nil, nil
case <-revalidate.C:
go tab.doRevalidate(revalidateDone)
case <-revalidateDone:
revalidate.Reset(tab.nextRevalidateTime())
case <-copyNodes.C:
go tab.copyLiveNodes()
case <-tab.closeReq:
break loop
}
}
if tab.net != nil {
tab.net.close()
}
if refreshDone != nil {
<-refreshDone
}
for _, ch := range waiting {
close(ch)
}
tab.db.close()
close(tab.closed)
}
// doRefresh performs a lookup for a random target to keep buckets
// full. Seed nodes are inserted if the table is empty (initial
// bootstrap or discarded faulty peers).
func (tab *Table) doRefresh(done chan struct{}) {
defer close(done)
// Load nodes from the database and insert
// them. This should yield a few previously seen nodes that are
// (hopefully) still alive.
tab.loadSeedNodes()
// Run self lookup to discover new neighbor nodes.
tab.lookup(tab.self.ID, false)
// The Kademlia paper specifies that the bucket refresh should
// perform a lookup in the least recently used bucket. We cannot
// adhere to this because the findnode target is a 512-bit value
// (not hash-sized) and it is not easily possible to generate a
// sha3 preimage that falls into a chosen bucket.
// We perform a few lookups with a random target instead.
for i := 0; i < 3; i++ {
var target NodeID
crand.Read(target[:])
tab.lookup(target, false)
}
}
func (tab *Table) loadSeedNodes() {
seeds := tab.db.querySeeds(seedCount, seedMaxAge)
seeds = append(seeds, tab.nursery...)
for i := range seeds {
seed := seeds[i]
age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.lastPongReceived(seed.ID)) }}
log.Debug("Found seed node in database", "id", seed.ID, "addr", seed.addr(), "age", age)
tab.add(seed)
}
}
// doRevalidate checks that the last node in a random bucket is still live
// and replaces or deletes the node if it isn't.
func (tab *Table) doRevalidate(done chan<- struct{}) {
defer func() { done <- struct{}{} }()
last, bi := tab.nodeToRevalidate()
if last == nil {
// No non-empty bucket found.
return
}
// Ping the selected node and wait for a pong.
err := tab.net.ping(last.ID, last.addr())
tab.mutex.Lock()
defer tab.mutex.Unlock()
b := tab.buckets[bi]
if err == nil {
// The node responded, move it to the front.
log.Trace("Revalidated node", "b", bi, "id", last.ID)
b.bump(last)
return
}
// No reply received, pick a replacement or delete the node if there aren't
// any replacements.
if r := tab.replace(b, last); r != nil {
log.Trace("Replaced dead node", "b", bi, "id", last.ID, "ip", last.IP, "r", r.ID, "rip", r.IP)
} else {
log.Trace("Removed dead node", "b", bi, "id", last.ID, "ip", last.IP)
}
}
// nodeToRevalidate returns the last node in a random, non-empty bucket.
func (tab *Table) nodeToRevalidate() (n *Node, bi int) {
tab.mutex.Lock()
defer tab.mutex.Unlock()
for _, bi = range tab.rand.Perm(len(tab.buckets)) {
b := tab.buckets[bi]
if len(b.entries) > 0 {
last := b.entries[len(b.entries)-1]
return last, bi
}
}
return nil, 0
}
func (tab *Table) nextRevalidateTime() time.Duration {
tab.mutex.Lock()
defer tab.mutex.Unlock()
return time.Duration(tab.rand.Int63n(int64(revalidateInterval)))
}
// copyLiveNodes adds nodes from the table to the database if they have been in the table
// longer than seedMinTableTime.
func (tab *Table) copyLiveNodes() {
tab.mutex.Lock()
defer tab.mutex.Unlock()
now := time.Now()
for _, b := range tab.buckets {
for _, n := range b.entries {
if now.Sub(n.addedAt) >= seedMinTableTime {
tab.db.updateNode(n)
}
}
}
}
// closest returns the n nodes in the table that are closest to the
// given id. The caller must hold tab.mutex.
func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance {
// This is a very wasteful way to find the closest nodes but
// obviously correct. I believe that tree-based buckets would make
// this easier to implement efficiently.
close := &nodesByDistance{target: target}
for _, b := range tab.buckets {
for _, n := range b.entries {
close.push(n, nresults)
}
}
return close
}
func (tab *Table) len() (n int) {
for _, b := range tab.buckets {
n += len(b.entries)
}
return n
}
// bucket returns the bucket for the given node ID hash.
func (tab *Table) bucket(sha common.Hash) *bucket {
d := logdist(tab.self.sha, sha)
if d <= bucketMinDistance {
return tab.buckets[0]
}
return tab.buckets[d-bucketMinDistance-1]
}
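// Worked example (editor's addition): with nBuckets == 17 and
// bucketMinDistance == 239 (see the constants above), the mapping is
//
//	logdist == 200 -> buckets[0]          (d <= bucketMinDistance)
//	logdist == 240 -> buckets[240-239-1] == buckets[0]
//	logdist == 241 -> buckets[241-239-1] == buckets[1]
//	logdist == 256 -> buckets[256-239-1] == buckets[16]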
// add attempts to add the given node to its corresponding bucket. If the bucket has space
// available, adding the node succeeds immediately. Otherwise, the node is added if the
// least recently active node in the bucket does not respond to a ping packet.
//
// The caller must not hold tab.mutex.
func (tab *Table) add(n *Node) {
tab.mutex.Lock()
defer tab.mutex.Unlock()
b := tab.bucket(n.sha)
if !tab.bumpOrAdd(b, n) {
// Node is not in table. Add it to the replacement list.
tab.addReplacement(b, n)
}
}
// addThroughPing adds the given node to the table. Compared to plain
// 'add' there is an additional safety measure: if the table is still
// initializing the node is not added. This prevents an attack where the
// table could be filled by just sending ping repeatedly.
//
// The caller must not hold tab.mutex.
func (tab *Table) addThroughPing(n *Node) {
if !tab.isInitDone() {
return
}
tab.add(n)
}
// stuff adds the given nodes to the end of their corresponding bucket
// if the bucket is not full. The caller must not hold tab.mutex.
func (tab *Table) stuff(nodes []*Node) {
tab.mutex.Lock()
defer tab.mutex.Unlock()
for _, n := range nodes {
if n.ID == tab.self.ID {
continue // don't add self
}
b := tab.bucket(n.sha)
if len(b.entries) < bucketSize {
tab.bumpOrAdd(b, n)
}
}
}
// delete removes an entry from the node table. It is used to evacuate dead nodes.
func (tab *Table) delete(node *Node) {
tab.mutex.Lock()
defer tab.mutex.Unlock()
tab.deleteInBucket(tab.bucket(node.sha), node)
}
func (tab *Table) addIP(b *bucket, ip net.IP) bool {
if netutil.IsLAN(ip) {
return true
}
if !tab.ips.Add(ip) {
log.Debug("IP exceeds table limit", "ip", ip)
return false
}
if !b.ips.Add(ip) {
log.Debug("IP exceeds bucket limit", "ip", ip)
tab.ips.Remove(ip)
return false
}
return true
}
func (tab *Table) removeIP(b *bucket, ip net.IP) {
if netutil.IsLAN(ip) {
return
}
tab.ips.Remove(ip)
b.ips.Remove(ip)
}
func (tab *Table) addReplacement(b *bucket, n *Node) {
for _, e := range b.replacements {
if e.ID == n.ID {
return // already in list
}
}
if !tab.addIP(b, n.IP) {
return
}
var removed *Node
b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
if removed != nil {
tab.removeIP(b, removed.IP)
}
}
// replace removes n from the replacement list and replaces 'last' with it if it is the
// last entry in the bucket. If 'last' isn't the last entry, it has either been replaced
// with someone else or became active.
func (tab *Table) replace(b *bucket, last *Node) *Node {
if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID != last.ID {
// Entry has moved, don't replace it.
return nil
}
// Still the last entry.
if len(b.replacements) == 0 {
tab.deleteInBucket(b, last)
return nil
}
r := b.replacements[tab.rand.Intn(len(b.replacements))]
b.replacements = deleteNode(b.replacements, r)
b.entries[len(b.entries)-1] = r
tab.removeIP(b, last.IP)
return r
}
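// Illustrative case analysis (editor's addition): given a bucket with
// entries [.. x] and replacements [r1 r2],
//
//	replace(b, x) // x still last and dead: a random ri takes x's slot, returns ri
//	replace(b, x) // with empty replacements: x is deleted, returns nil
//	replace(b, y) // y no longer the last entry: no-op, returns nil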
// bump moves the given node to the front of the bucket entry list
// if it is contained in that list.
func (b *bucket) bump(n *Node) bool {
for i := range b.entries {
if b.entries[i].ID == n.ID {
// move it to the front
copy(b.entries[1:], b.entries[:i])
b.entries[0] = n
return true
}
}
return false
}
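// Worked example (editor's addition): bump shifts the prefix right by one
// slot instead of swapping. With entries [a b c d] and n == c (i == 2):
//
//	copy(b.entries[1:], b.entries[:2]) // [a a b d]
//	b.entries[0] = n                   // [c a b d]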
// bumpOrAdd moves n to the front of the bucket entry list or adds it if the list isn't
// full. The return value is true if n is in the bucket.
func (tab *Table) bumpOrAdd(b *bucket, n *Node) bool {
if b.bump(n) {
return true
}
if len(b.entries) >= bucketSize || !tab.addIP(b, n.IP) {
return false
}
b.entries, _ = pushNode(b.entries, n, bucketSize)
b.replacements = deleteNode(b.replacements, n)
n.addedAt = time.Now()
if tab.nodeAddedHook != nil {
tab.nodeAddedHook(n)
}
return true
}
func (tab *Table) deleteInBucket(b *bucket, n *Node) {
b.entries = deleteNode(b.entries, n)
tab.removeIP(b, n.IP)
}
// pushNode adds n to the front of list, keeping at most max items.
func pushNode(list []*Node, n *Node, max int) ([]*Node, *Node) {
if len(list) < max {
list = append(list, nil)
}
removed := list[len(list)-1]
copy(list[1:], list)
list[0] = n
return list, removed
}
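// Illustrative behaviour (editor's addition):
//
//	pushNode([x y], n, 3)   // -> [n x y], removed == nil (the appended slot)
//	pushNode([x y z], n, 3) // -> [n x y], removed == z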
// deleteNode removes n from list.
func deleteNode(list []*Node, n *Node) []*Node {
for i := range list {
if list[i].ID == n.ID {
return append(list[:i], list[i+1:]...)
}
}
return list
}
// nodesByDistance is a list of nodes, ordered by
// distance to target.
type nodesByDistance struct {
entries []*Node
target common.Hash
}
// push adds the given node to the list, keeping the total size below maxElems.
func (h *nodesByDistance) push(n *Node, maxElems int) {
ix := sort.Search(len(h.entries), func(i int) bool {
return distcmp(h.target, h.entries[i].sha, n.sha) > 0
})
if len(h.entries) < maxElems {
h.entries = append(h.entries, n)
}
if ix == len(h.entries) {
// farther away than all nodes we already have.
// if there was room for it, the node is now the last element.
} else {
// slide existing entries down to make room
// this will overwrite the entry we just appended.
copy(h.entries[ix+1:], h.entries[ix:])
h.entries[ix] = n
}
}
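// Illustrative behaviour (editor's addition): push keeps entries sorted from
// nearest to farthest and caps the length at maxElems. With a full list
// [a b d] (near to far) and n between b and d:
//
//	ix == 2; the list stays length 3 and d is overwritten: [a b n]
//
// If n is farther than everything in a full list, ix == len(entries) and n
// is silently dropped.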
| p2p/discover/table.go | 1 | https://github.com/ethereum/go-ethereum/commit/cf05ef9106779da0df62c0c03312fc489171aaa5 | [
0.9990425705909729,
0.42987295985221863,
0.0001665018789935857,
0.02118196338415146,
0.47639408707618713
] |
{
"id": 0,
"code_window": [
"\ttab.mutex.Lock()\n",
"\tdefer tab.mutex.Unlock()\n",
"\n",
"\t// Find all non-empty buckets and get a fresh slice of their entries.\n",
"\tvar buckets [][]*Node\n",
"\tfor _, b := range tab.buckets {\n",
"\t\tif len(b.entries) > 0 {\n",
"\t\t\tbuckets = append(buckets, b.entries[:])\n",
"\t\t}\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfor _, b := range &tab.buckets {\n"
],
"file_path": "p2p/discover/table.go",
"type": "replace",
"edit_start_line_idx": 162
} | // mkerrors.sh -m64
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build arm64,darwin
// Created by cgo -godefs - DO NOT EDIT
// cgo -godefs -- -m64 _const.go
package unix
import "syscall"
const (
AF_APPLETALK = 0x10
AF_CCITT = 0xa
AF_CHAOS = 0x5
AF_CNT = 0x15
AF_COIP = 0x14
AF_DATAKIT = 0x9
AF_DECnet = 0xc
AF_DLI = 0xd
AF_E164 = 0x1c
AF_ECMA = 0x8
AF_HYLINK = 0xf
AF_IEEE80211 = 0x25
AF_IMPLINK = 0x3
AF_INET = 0x2
AF_INET6 = 0x1e
AF_IPX = 0x17
AF_ISDN = 0x1c
AF_ISO = 0x7
AF_LAT = 0xe
AF_LINK = 0x12
AF_LOCAL = 0x1
AF_MAX = 0x28
AF_NATM = 0x1f
AF_NDRV = 0x1b
AF_NETBIOS = 0x21
AF_NS = 0x6
AF_OSI = 0x7
AF_PPP = 0x22
AF_PUP = 0x4
AF_RESERVED_36 = 0x24
AF_ROUTE = 0x11
AF_SIP = 0x18
AF_SNA = 0xb
AF_SYSTEM = 0x20
AF_UNIX = 0x1
AF_UNSPEC = 0x0
AF_UTUN = 0x26
ALTWERASE = 0x200
ATTR_BIT_MAP_COUNT = 0x5
ATTR_CMN_ACCESSMASK = 0x20000
ATTR_CMN_ACCTIME = 0x1000
ATTR_CMN_ADDEDTIME = 0x10000000
ATTR_CMN_BKUPTIME = 0x2000
ATTR_CMN_CHGTIME = 0x800
ATTR_CMN_CRTIME = 0x200
ATTR_CMN_DATA_PROTECT_FLAGS = 0x40000000
ATTR_CMN_DEVID = 0x2
ATTR_CMN_DOCUMENT_ID = 0x100000
ATTR_CMN_ERROR = 0x20000000
ATTR_CMN_EXTENDED_SECURITY = 0x400000
ATTR_CMN_FILEID = 0x2000000
ATTR_CMN_FLAGS = 0x40000
ATTR_CMN_FNDRINFO = 0x4000
ATTR_CMN_FSID = 0x4
ATTR_CMN_FULLPATH = 0x8000000
ATTR_CMN_GEN_COUNT = 0x80000
ATTR_CMN_GRPID = 0x10000
ATTR_CMN_GRPUUID = 0x1000000
ATTR_CMN_MODTIME = 0x400
ATTR_CMN_NAME = 0x1
ATTR_CMN_NAMEDATTRCOUNT = 0x80000
ATTR_CMN_NAMEDATTRLIST = 0x100000
ATTR_CMN_OBJID = 0x20
ATTR_CMN_OBJPERMANENTID = 0x40
ATTR_CMN_OBJTAG = 0x10
ATTR_CMN_OBJTYPE = 0x8
ATTR_CMN_OWNERID = 0x8000
ATTR_CMN_PARENTID = 0x4000000
ATTR_CMN_PAROBJID = 0x80
ATTR_CMN_RETURNED_ATTRS = 0x80000000
ATTR_CMN_SCRIPT = 0x100
ATTR_CMN_SETMASK = 0x41c7ff00
ATTR_CMN_USERACCESS = 0x200000
ATTR_CMN_UUID = 0x800000
ATTR_CMN_VALIDMASK = 0xffffffff
ATTR_CMN_VOLSETMASK = 0x6700
ATTR_FILE_ALLOCSIZE = 0x4
ATTR_FILE_CLUMPSIZE = 0x10
ATTR_FILE_DATAALLOCSIZE = 0x400
ATTR_FILE_DATAEXTENTS = 0x800
ATTR_FILE_DATALENGTH = 0x200
ATTR_FILE_DEVTYPE = 0x20
ATTR_FILE_FILETYPE = 0x40
ATTR_FILE_FORKCOUNT = 0x80
ATTR_FILE_FORKLIST = 0x100
ATTR_FILE_IOBLOCKSIZE = 0x8
ATTR_FILE_LINKCOUNT = 0x1
ATTR_FILE_RSRCALLOCSIZE = 0x2000
ATTR_FILE_RSRCEXTENTS = 0x4000
ATTR_FILE_RSRCLENGTH = 0x1000
ATTR_FILE_SETMASK = 0x20
ATTR_FILE_TOTALSIZE = 0x2
ATTR_FILE_VALIDMASK = 0x37ff
ATTR_VOL_ALLOCATIONCLUMP = 0x40
ATTR_VOL_ATTRIBUTES = 0x40000000
ATTR_VOL_CAPABILITIES = 0x20000
ATTR_VOL_DIRCOUNT = 0x400
ATTR_VOL_ENCODINGSUSED = 0x10000
ATTR_VOL_FILECOUNT = 0x200
ATTR_VOL_FSTYPE = 0x1
ATTR_VOL_INFO = 0x80000000
ATTR_VOL_IOBLOCKSIZE = 0x80
ATTR_VOL_MAXOBJCOUNT = 0x800
ATTR_VOL_MINALLOCATION = 0x20
ATTR_VOL_MOUNTEDDEVICE = 0x8000
ATTR_VOL_MOUNTFLAGS = 0x4000
ATTR_VOL_MOUNTPOINT = 0x1000
ATTR_VOL_NAME = 0x2000
ATTR_VOL_OBJCOUNT = 0x100
ATTR_VOL_QUOTA_SIZE = 0x10000000
ATTR_VOL_RESERVED_SIZE = 0x20000000
ATTR_VOL_SETMASK = 0x80002000
ATTR_VOL_SIGNATURE = 0x2
ATTR_VOL_SIZE = 0x4
ATTR_VOL_SPACEAVAIL = 0x10
ATTR_VOL_SPACEFREE = 0x8
ATTR_VOL_UUID = 0x40000
ATTR_VOL_VALIDMASK = 0xf007ffff
B0 = 0x0
B110 = 0x6e
B115200 = 0x1c200
B1200 = 0x4b0
B134 = 0x86
B14400 = 0x3840
B150 = 0x96
B1800 = 0x708
B19200 = 0x4b00
B200 = 0xc8
B230400 = 0x38400
B2400 = 0x960
B28800 = 0x7080
B300 = 0x12c
B38400 = 0x9600
B4800 = 0x12c0
B50 = 0x32
B57600 = 0xe100
B600 = 0x258
B7200 = 0x1c20
B75 = 0x4b
B76800 = 0x12c00
B9600 = 0x2580
BIOCFLUSH = 0x20004268
BIOCGBLEN = 0x40044266
BIOCGDLT = 0x4004426a
BIOCGDLTLIST = 0xc00c4279
BIOCGETIF = 0x4020426b
BIOCGHDRCMPLT = 0x40044274
BIOCGRSIG = 0x40044272
BIOCGRTIMEOUT = 0x4010426e
BIOCGSEESENT = 0x40044276
BIOCGSTATS = 0x4008426f
BIOCIMMEDIATE = 0x80044270
BIOCPROMISC = 0x20004269
BIOCSBLEN = 0xc0044266
BIOCSDLT = 0x80044278
BIOCSETF = 0x80104267
BIOCSETFNR = 0x8010427e
BIOCSETIF = 0x8020426c
BIOCSHDRCMPLT = 0x80044275
BIOCSRSIG = 0x80044273
BIOCSRTIMEOUT = 0x8010426d
BIOCSSEESENT = 0x80044277
BIOCVERSION = 0x40044271
BPF_A = 0x10
BPF_ABS = 0x20
BPF_ADD = 0x0
BPF_ALIGNMENT = 0x4
BPF_ALU = 0x4
BPF_AND = 0x50
BPF_B = 0x10
BPF_DIV = 0x30
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
BPF_JA = 0x0
BPF_JEQ = 0x10
BPF_JGE = 0x30
BPF_JGT = 0x20
BPF_JMP = 0x5
BPF_JSET = 0x40
BPF_K = 0x0
BPF_LD = 0x0
BPF_LDX = 0x1
BPF_LEN = 0x80
BPF_LSH = 0x60
BPF_MAJOR_VERSION = 0x1
BPF_MAXBUFSIZE = 0x80000
BPF_MAXINSNS = 0x200
BPF_MEM = 0x60
BPF_MEMWORDS = 0x10
BPF_MINBUFSIZE = 0x20
BPF_MINOR_VERSION = 0x1
BPF_MISC = 0x7
BPF_MSH = 0xa0
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_OR = 0x40
BPF_RELEASE = 0x30bb6
BPF_RET = 0x6
BPF_RSH = 0x70
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
BPF_TAX = 0x0
BPF_TXA = 0x80
BPF_W = 0x0
BPF_X = 0x8
BRKINT = 0x2
BS0 = 0x0
BS1 = 0x8000
BSDLY = 0x8000
CFLUSH = 0xf
CLOCAL = 0x8000
CLOCK_MONOTONIC = 0x6
CLOCK_MONOTONIC_RAW = 0x4
CLOCK_MONOTONIC_RAW_APPROX = 0x5
CLOCK_PROCESS_CPUTIME_ID = 0xc
CLOCK_REALTIME = 0x0
CLOCK_THREAD_CPUTIME_ID = 0x10
CLOCK_UPTIME_RAW = 0x8
CLOCK_UPTIME_RAW_APPROX = 0x9
CR0 = 0x0
CR1 = 0x1000
CR2 = 0x2000
CR3 = 0x3000
CRDLY = 0x3000
CREAD = 0x800
CRTSCTS = 0x30000
CS5 = 0x0
CS6 = 0x100
CS7 = 0x200
CS8 = 0x300
CSIZE = 0x300
CSTART = 0x11
CSTATUS = 0x14
CSTOP = 0x13
CSTOPB = 0x400
CSUSP = 0x1a
CTL_MAXNAME = 0xc
CTL_NET = 0x4
DLT_A429 = 0xb8
DLT_A653_ICM = 0xb9
DLT_AIRONET_HEADER = 0x78
DLT_AOS = 0xde
DLT_APPLE_IP_OVER_IEEE1394 = 0x8a
DLT_ARCNET = 0x7
DLT_ARCNET_LINUX = 0x81
DLT_ATM_CLIP = 0x13
DLT_ATM_RFC1483 = 0xb
DLT_AURORA = 0x7e
DLT_AX25 = 0x3
DLT_AX25_KISS = 0xca
DLT_BACNET_MS_TP = 0xa5
DLT_BLUETOOTH_HCI_H4 = 0xbb
DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9
DLT_CAN20B = 0xbe
DLT_CAN_SOCKETCAN = 0xe3
DLT_CHAOS = 0x5
DLT_CHDLC = 0x68
DLT_CISCO_IOS = 0x76
DLT_C_HDLC = 0x68
DLT_C_HDLC_WITH_DIR = 0xcd
DLT_DBUS = 0xe7
DLT_DECT = 0xdd
DLT_DOCSIS = 0x8f
DLT_DVB_CI = 0xeb
DLT_ECONET = 0x73
DLT_EN10MB = 0x1
DLT_EN3MB = 0x2
DLT_ENC = 0x6d
DLT_ERF = 0xc5
DLT_ERF_ETH = 0xaf
DLT_ERF_POS = 0xb0
DLT_FC_2 = 0xe0
DLT_FC_2_WITH_FRAME_DELIMS = 0xe1
DLT_FDDI = 0xa
DLT_FLEXRAY = 0xd2
DLT_FRELAY = 0x6b
DLT_FRELAY_WITH_DIR = 0xce
DLT_GCOM_SERIAL = 0xad
DLT_GCOM_T1E1 = 0xac
DLT_GPF_F = 0xab
DLT_GPF_T = 0xaa
DLT_GPRS_LLC = 0xa9
DLT_GSMTAP_ABIS = 0xda
DLT_GSMTAP_UM = 0xd9
DLT_HHDLC = 0x79
DLT_IBM_SN = 0x92
DLT_IBM_SP = 0x91
DLT_IEEE802 = 0x6
DLT_IEEE802_11 = 0x69
DLT_IEEE802_11_RADIO = 0x7f
DLT_IEEE802_11_RADIO_AVS = 0xa3
DLT_IEEE802_15_4 = 0xc3
DLT_IEEE802_15_4_LINUX = 0xbf
DLT_IEEE802_15_4_NOFCS = 0xe6
DLT_IEEE802_15_4_NONASK_PHY = 0xd7
DLT_IEEE802_16_MAC_CPS = 0xbc
DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1
DLT_IPFILTER = 0x74
DLT_IPMB = 0xc7
DLT_IPMB_LINUX = 0xd1
DLT_IPNET = 0xe2
DLT_IPOIB = 0xf2
DLT_IPV4 = 0xe4
DLT_IPV6 = 0xe5
DLT_IP_OVER_FC = 0x7a
DLT_JUNIPER_ATM1 = 0x89
DLT_JUNIPER_ATM2 = 0x87
DLT_JUNIPER_ATM_CEMIC = 0xee
DLT_JUNIPER_CHDLC = 0xb5
DLT_JUNIPER_ES = 0x84
DLT_JUNIPER_ETHER = 0xb2
DLT_JUNIPER_FIBRECHANNEL = 0xea
DLT_JUNIPER_FRELAY = 0xb4
DLT_JUNIPER_GGSN = 0x85
DLT_JUNIPER_ISM = 0xc2
DLT_JUNIPER_MFR = 0x86
DLT_JUNIPER_MLFR = 0x83
DLT_JUNIPER_MLPPP = 0x82
DLT_JUNIPER_MONITOR = 0xa4
DLT_JUNIPER_PIC_PEER = 0xae
DLT_JUNIPER_PPP = 0xb3
DLT_JUNIPER_PPPOE = 0xa7
DLT_JUNIPER_PPPOE_ATM = 0xa8
DLT_JUNIPER_SERVICES = 0x88
DLT_JUNIPER_SRX_E2E = 0xe9
DLT_JUNIPER_ST = 0xc8
DLT_JUNIPER_VP = 0xb7
DLT_JUNIPER_VS = 0xe8
DLT_LAPB_WITH_DIR = 0xcf
DLT_LAPD = 0xcb
DLT_LIN = 0xd4
DLT_LINUX_EVDEV = 0xd8
DLT_LINUX_IRDA = 0x90
DLT_LINUX_LAPD = 0xb1
DLT_LINUX_PPP_WITHDIRECTION = 0xa6
DLT_LINUX_SLL = 0x71
DLT_LOOP = 0x6c
DLT_LTALK = 0x72
DLT_MATCHING_MAX = 0xf5
DLT_MATCHING_MIN = 0x68
DLT_MFR = 0xb6
DLT_MOST = 0xd3
DLT_MPEG_2_TS = 0xf3
DLT_MPLS = 0xdb
DLT_MTP2 = 0x8c
DLT_MTP2_WITH_PHDR = 0x8b
DLT_MTP3 = 0x8d
DLT_MUX27010 = 0xec
DLT_NETANALYZER = 0xf0
DLT_NETANALYZER_TRANSPARENT = 0xf1
DLT_NFC_LLCP = 0xf5
DLT_NFLOG = 0xef
DLT_NG40 = 0xf4
DLT_NULL = 0x0
DLT_PCI_EXP = 0x7d
DLT_PFLOG = 0x75
DLT_PFSYNC = 0x12
DLT_PPI = 0xc0
DLT_PPP = 0x9
DLT_PPP_BSDOS = 0x10
DLT_PPP_ETHER = 0x33
DLT_PPP_PPPD = 0xa6
DLT_PPP_SERIAL = 0x32
DLT_PPP_WITH_DIR = 0xcc
DLT_PPP_WITH_DIRECTION = 0xa6
DLT_PRISM_HEADER = 0x77
DLT_PRONET = 0x4
DLT_RAIF1 = 0xc6
DLT_RAW = 0xc
DLT_RIO = 0x7c
DLT_SCCP = 0x8e
DLT_SITA = 0xc4
DLT_SLIP = 0x8
DLT_SLIP_BSDOS = 0xf
DLT_STANAG_5066_D_PDU = 0xed
DLT_SUNATM = 0x7b
DLT_SYMANTEC_FIREWALL = 0x63
DLT_TZSP = 0x80
DLT_USB = 0xba
DLT_USB_LINUX = 0xbd
DLT_USB_LINUX_MMAPPED = 0xdc
DLT_USER0 = 0x93
DLT_USER1 = 0x94
DLT_USER10 = 0x9d
DLT_USER11 = 0x9e
DLT_USER12 = 0x9f
DLT_USER13 = 0xa0
DLT_USER14 = 0xa1
DLT_USER15 = 0xa2
DLT_USER2 = 0x95
DLT_USER3 = 0x96
DLT_USER4 = 0x97
DLT_USER5 = 0x98
DLT_USER6 = 0x99
DLT_USER7 = 0x9a
DLT_USER8 = 0x9b
DLT_USER9 = 0x9c
DLT_WIHART = 0xdf
DLT_X2E_SERIAL = 0xd5
DLT_X2E_XORAYA = 0xd6
DT_BLK = 0x6
DT_CHR = 0x2
DT_DIR = 0x4
DT_FIFO = 0x1
DT_LNK = 0xa
DT_REG = 0x8
DT_SOCK = 0xc
DT_UNKNOWN = 0x0
DT_WHT = 0xe
ECHO = 0x8
ECHOCTL = 0x40
ECHOE = 0x2
ECHOK = 0x4
ECHOKE = 0x1
ECHONL = 0x10
ECHOPRT = 0x20
EVFILT_AIO = -0x3
EVFILT_EXCEPT = -0xf
EVFILT_FS = -0x9
EVFILT_MACHPORT = -0x8
EVFILT_PROC = -0x5
EVFILT_READ = -0x1
EVFILT_SIGNAL = -0x6
EVFILT_SYSCOUNT = 0xf
EVFILT_THREADMARKER = 0xf
EVFILT_TIMER = -0x7
EVFILT_USER = -0xa
EVFILT_VM = -0xc
EVFILT_VNODE = -0x4
EVFILT_WRITE = -0x2
EV_ADD = 0x1
EV_CLEAR = 0x20
EV_DELETE = 0x2
EV_DISABLE = 0x8
EV_DISPATCH = 0x80
EV_DISPATCH2 = 0x180
EV_ENABLE = 0x4
EV_EOF = 0x8000
EV_ERROR = 0x4000
EV_FLAG0 = 0x1000
EV_FLAG1 = 0x2000
EV_ONESHOT = 0x10
EV_OOBAND = 0x2000
EV_POLL = 0x1000
EV_RECEIPT = 0x40
EV_SYSFLAGS = 0xf000
EV_UDATA_SPECIFIC = 0x100
EV_VANISHED = 0x200
EXTA = 0x4b00
EXTB = 0x9600
EXTPROC = 0x800
FD_CLOEXEC = 0x1
FD_SETSIZE = 0x400
FF0 = 0x0
FF1 = 0x4000
FFDLY = 0x4000
FLUSHO = 0x800000
FSOPT_ATTR_CMN_EXTENDED = 0x20
FSOPT_NOFOLLOW = 0x1
FSOPT_NOINMEMUPDATE = 0x2
FSOPT_PACK_INVAL_ATTRS = 0x8
FSOPT_REPORT_FULLSIZE = 0x4
F_ADDFILESIGS = 0x3d
F_ADDFILESIGS_FOR_DYLD_SIM = 0x53
F_ADDFILESIGS_RETURN = 0x61
F_ADDSIGS = 0x3b
F_ALLOCATEALL = 0x4
F_ALLOCATECONTIG = 0x2
F_BARRIERFSYNC = 0x55
F_CHECK_LV = 0x62
F_CHKCLEAN = 0x29
F_DUPFD = 0x0
F_DUPFD_CLOEXEC = 0x43
F_FINDSIGS = 0x4e
F_FLUSH_DATA = 0x28
F_FREEZE_FS = 0x35
F_FULLFSYNC = 0x33
F_GETCODEDIR = 0x48
F_GETFD = 0x1
F_GETFL = 0x3
F_GETLK = 0x7
F_GETLKPID = 0x42
F_GETNOSIGPIPE = 0x4a
F_GETOWN = 0x5
F_GETPATH = 0x32
F_GETPATH_MTMINFO = 0x47
F_GETPROTECTIONCLASS = 0x3f
F_GETPROTECTIONLEVEL = 0x4d
F_GLOBAL_NOCACHE = 0x37
F_LOG2PHYS = 0x31
F_LOG2PHYS_EXT = 0x41
F_NOCACHE = 0x30
F_NODIRECT = 0x3e
F_OK = 0x0
F_PATHPKG_CHECK = 0x34
F_PEOFPOSMODE = 0x3
F_PREALLOCATE = 0x2a
F_PUNCHHOLE = 0x63
F_RDADVISE = 0x2c
F_RDAHEAD = 0x2d
F_RDLCK = 0x1
F_SETBACKINGSTORE = 0x46
F_SETFD = 0x2
F_SETFL = 0x4
F_SETLK = 0x8
F_SETLKW = 0x9
F_SETLKWTIMEOUT = 0xa
F_SETNOSIGPIPE = 0x49
F_SETOWN = 0x6
F_SETPROTECTIONCLASS = 0x40
F_SETSIZE = 0x2b
F_SINGLE_WRITER = 0x4c
F_THAW_FS = 0x36
F_TRANSCODEKEY = 0x4b
F_TRIM_ACTIVE_FILE = 0x64
F_UNLCK = 0x2
F_VOLPOSMODE = 0x4
F_WRLCK = 0x3
HUPCL = 0x4000
ICANON = 0x100
ICMP6_FILTER = 0x12
ICRNL = 0x100
IEXTEN = 0x400
IFF_ALLMULTI = 0x200
IFF_ALTPHYS = 0x4000
IFF_BROADCAST = 0x2
IFF_DEBUG = 0x4
IFF_LINK0 = 0x1000
IFF_LINK1 = 0x2000
IFF_LINK2 = 0x4000
IFF_LOOPBACK = 0x8
IFF_MULTICAST = 0x8000
IFF_NOARP = 0x80
IFF_NOTRAILERS = 0x20
IFF_OACTIVE = 0x400
IFF_POINTOPOINT = 0x10
IFF_PROMISC = 0x100
IFF_RUNNING = 0x40
IFF_SIMPLEX = 0x800
IFF_UP = 0x1
IFNAMSIZ = 0x10
IFT_1822 = 0x2
IFT_AAL5 = 0x31
IFT_ARCNET = 0x23
IFT_ARCNETPLUS = 0x24
IFT_ATM = 0x25
IFT_BRIDGE = 0xd1
IFT_CARP = 0xf8
IFT_CELLULAR = 0xff
IFT_CEPT = 0x13
IFT_DS3 = 0x1e
IFT_ENC = 0xf4
IFT_EON = 0x19
IFT_ETHER = 0x6
IFT_FAITH = 0x38
IFT_FDDI = 0xf
IFT_FRELAY = 0x20
IFT_FRELAYDCE = 0x2c
IFT_GIF = 0x37
IFT_HDH1822 = 0x3
IFT_HIPPI = 0x2f
IFT_HSSI = 0x2e
IFT_HY = 0xe
IFT_IEEE1394 = 0x90
IFT_IEEE8023ADLAG = 0x88
IFT_ISDNBASIC = 0x14
IFT_ISDNPRIMARY = 0x15
IFT_ISO88022LLC = 0x29
IFT_ISO88023 = 0x7
IFT_ISO88024 = 0x8
IFT_ISO88025 = 0x9
IFT_ISO88026 = 0xa
IFT_L2VLAN = 0x87
IFT_LAPB = 0x10
IFT_LOCALTALK = 0x2a
IFT_LOOP = 0x18
IFT_MIOX25 = 0x26
IFT_MODEM = 0x30
IFT_NSIP = 0x1b
IFT_OTHER = 0x1
IFT_P10 = 0xc
IFT_P80 = 0xd
IFT_PARA = 0x22
IFT_PDP = 0xff
IFT_PFLOG = 0xf5
IFT_PFSYNC = 0xf6
IFT_PKTAP = 0xfe
IFT_PPP = 0x17
IFT_PROPMUX = 0x36
IFT_PROPVIRTUAL = 0x35
IFT_PTPSERIAL = 0x16
IFT_RS232 = 0x21
IFT_SDLC = 0x11
IFT_SIP = 0x1f
IFT_SLIP = 0x1c
IFT_SMDSDXI = 0x2b
IFT_SMDSICIP = 0x34
IFT_SONET = 0x27
IFT_SONETPATH = 0x32
IFT_SONETVT = 0x33
IFT_STARLAN = 0xb
IFT_STF = 0x39
IFT_T1 = 0x12
IFT_ULTRA = 0x1d
IFT_V35 = 0x2d
IFT_X25 = 0x5
IFT_X25DDN = 0x4
IFT_X25PLE = 0x28
IFT_XETHER = 0x1a
IGNBRK = 0x1
IGNCR = 0x80
IGNPAR = 0x4
IMAXBEL = 0x2000
INLCR = 0x40
INPCK = 0x10
IN_CLASSA_HOST = 0xffffff
IN_CLASSA_MAX = 0x80
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 0x18
IN_CLASSB_HOST = 0xffff
IN_CLASSB_MAX = 0x10000
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 0x10
IN_CLASSC_HOST = 0xff
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 0x8
IN_CLASSD_HOST = 0xfffffff
IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 0x1c
IN_LINKLOCALNETNUM = 0xa9fe0000
IN_LOOPBACKNET = 0x7f
IPPROTO_3PC = 0x22
IPPROTO_ADFS = 0x44
IPPROTO_AH = 0x33
IPPROTO_AHIP = 0x3d
IPPROTO_APES = 0x63
IPPROTO_ARGUS = 0xd
IPPROTO_AX25 = 0x5d
IPPROTO_BHA = 0x31
IPPROTO_BLT = 0x1e
IPPROTO_BRSATMON = 0x4c
IPPROTO_CFTP = 0x3e
IPPROTO_CHAOS = 0x10
IPPROTO_CMTP = 0x26
IPPROTO_CPHB = 0x49
IPPROTO_CPNX = 0x48
IPPROTO_DDP = 0x25
IPPROTO_DGP = 0x56
IPPROTO_DIVERT = 0xfe
IPPROTO_DONE = 0x101
IPPROTO_DSTOPTS = 0x3c
IPPROTO_EGP = 0x8
IPPROTO_EMCON = 0xe
IPPROTO_ENCAP = 0x62
IPPROTO_EON = 0x50
IPPROTO_ESP = 0x32
IPPROTO_ETHERIP = 0x61
IPPROTO_FRAGMENT = 0x2c
IPPROTO_GGP = 0x3
IPPROTO_GMTP = 0x64
IPPROTO_GRE = 0x2f
IPPROTO_HELLO = 0x3f
IPPROTO_HMP = 0x14
IPPROTO_HOPOPTS = 0x0
IPPROTO_ICMP = 0x1
IPPROTO_ICMPV6 = 0x3a
IPPROTO_IDP = 0x16
IPPROTO_IDPR = 0x23
IPPROTO_IDRP = 0x2d
IPPROTO_IGMP = 0x2
IPPROTO_IGP = 0x55
IPPROTO_IGRP = 0x58
IPPROTO_IL = 0x28
IPPROTO_INLSP = 0x34
IPPROTO_INP = 0x20
IPPROTO_IP = 0x0
IPPROTO_IPCOMP = 0x6c
IPPROTO_IPCV = 0x47
IPPROTO_IPEIP = 0x5e
IPPROTO_IPIP = 0x4
IPPROTO_IPPC = 0x43
IPPROTO_IPV4 = 0x4
IPPROTO_IPV6 = 0x29
IPPROTO_IRTP = 0x1c
IPPROTO_KRYPTOLAN = 0x41
IPPROTO_LARP = 0x5b
IPPROTO_LEAF1 = 0x19
IPPROTO_LEAF2 = 0x1a
IPPROTO_MAX = 0x100
IPPROTO_MAXID = 0x34
IPPROTO_MEAS = 0x13
IPPROTO_MHRP = 0x30
IPPROTO_MICP = 0x5f
IPPROTO_MTP = 0x5c
IPPROTO_MUX = 0x12
IPPROTO_ND = 0x4d
IPPROTO_NHRP = 0x36
IPPROTO_NONE = 0x3b
IPPROTO_NSP = 0x1f
IPPROTO_NVPII = 0xb
IPPROTO_OSPFIGP = 0x59
IPPROTO_PGM = 0x71
IPPROTO_PIGP = 0x9
IPPROTO_PIM = 0x67
IPPROTO_PRM = 0x15
IPPROTO_PUP = 0xc
IPPROTO_PVP = 0x4b
IPPROTO_RAW = 0xff
IPPROTO_RCCMON = 0xa
IPPROTO_RDP = 0x1b
IPPROTO_ROUTING = 0x2b
IPPROTO_RSVP = 0x2e
IPPROTO_RVD = 0x42
IPPROTO_SATEXPAK = 0x40
IPPROTO_SATMON = 0x45
IPPROTO_SCCSP = 0x60
IPPROTO_SCTP = 0x84
IPPROTO_SDRP = 0x2a
IPPROTO_SEP = 0x21
IPPROTO_SRPC = 0x5a
IPPROTO_ST = 0x7
IPPROTO_SVMTP = 0x52
IPPROTO_SWIPE = 0x35
IPPROTO_TCF = 0x57
IPPROTO_TCP = 0x6
IPPROTO_TP = 0x1d
IPPROTO_TPXX = 0x27
IPPROTO_TRUNK1 = 0x17
IPPROTO_TRUNK2 = 0x18
IPPROTO_TTP = 0x54
IPPROTO_UDP = 0x11
IPPROTO_VINES = 0x53
IPPROTO_VISA = 0x46
IPPROTO_VMTP = 0x51
IPPROTO_WBEXPAK = 0x4f
IPPROTO_WBMON = 0x4e
IPPROTO_WSN = 0x4a
IPPROTO_XNET = 0xf
IPPROTO_XTP = 0x24
IPV6_2292DSTOPTS = 0x17
IPV6_2292HOPLIMIT = 0x14
IPV6_2292HOPOPTS = 0x16
IPV6_2292NEXTHOP = 0x15
IPV6_2292PKTINFO = 0x13
IPV6_2292PKTOPTIONS = 0x19
IPV6_2292RTHDR = 0x18
IPV6_BINDV6ONLY = 0x1b
IPV6_BOUND_IF = 0x7d
IPV6_CHECKSUM = 0x1a
IPV6_DEFAULT_MULTICAST_HOPS = 0x1
IPV6_DEFAULT_MULTICAST_LOOP = 0x1
IPV6_DEFHLIM = 0x40
IPV6_FAITH = 0x1d
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
IPV6_FLOW_ECN_MASK = 0x300
IPV6_FRAGTTL = 0x3c
IPV6_FW_ADD = 0x1e
IPV6_FW_DEL = 0x1f
IPV6_FW_FLUSH = 0x20
IPV6_FW_GET = 0x22
IPV6_FW_ZERO = 0x21
IPV6_HLIMDEC = 0x1
IPV6_IPSEC_POLICY = 0x1c
IPV6_JOIN_GROUP = 0xc
IPV6_LEAVE_GROUP = 0xd
IPV6_MAXHLIM = 0xff
IPV6_MAXOPTHDR = 0x800
IPV6_MAXPACKET = 0xffff
IPV6_MAX_GROUP_SRC_FILTER = 0x200
IPV6_MAX_MEMBERSHIPS = 0xfff
IPV6_MAX_SOCK_SRC_FILTER = 0x80
IPV6_MIN_MEMBERSHIPS = 0x1f
IPV6_MMTU = 0x500
IPV6_MULTICAST_HOPS = 0xa
IPV6_MULTICAST_IF = 0x9
IPV6_MULTICAST_LOOP = 0xb
IPV6_PORTRANGE = 0xe
IPV6_PORTRANGE_DEFAULT = 0x0
IPV6_PORTRANGE_HIGH = 0x1
IPV6_PORTRANGE_LOW = 0x2
IPV6_RECVTCLASS = 0x23
IPV6_RTHDR_LOOSE = 0x0
IPV6_RTHDR_STRICT = 0x1
IPV6_RTHDR_TYPE_0 = 0x0
IPV6_SOCKOPT_RESERVED1 = 0x3
IPV6_TCLASS = 0x24
IPV6_UNICAST_HOPS = 0x4
IPV6_V6ONLY = 0x1b
IPV6_VERSION = 0x60
IPV6_VERSION_MASK = 0xf0
IP_ADD_MEMBERSHIP = 0xc
IP_ADD_SOURCE_MEMBERSHIP = 0x46
IP_BLOCK_SOURCE = 0x48
IP_BOUND_IF = 0x19
IP_DEFAULT_MULTICAST_LOOP = 0x1
IP_DEFAULT_MULTICAST_TTL = 0x1
IP_DF = 0x4000
IP_DROP_MEMBERSHIP = 0xd
IP_DROP_SOURCE_MEMBERSHIP = 0x47
IP_DUMMYNET_CONFIGURE = 0x3c
IP_DUMMYNET_DEL = 0x3d
IP_DUMMYNET_FLUSH = 0x3e
IP_DUMMYNET_GET = 0x40
IP_FAITH = 0x16
IP_FW_ADD = 0x28
IP_FW_DEL = 0x29
IP_FW_FLUSH = 0x2a
IP_FW_GET = 0x2c
IP_FW_RESETLOG = 0x2d
IP_FW_ZERO = 0x2b
IP_HDRINCL = 0x2
IP_IPSEC_POLICY = 0x15
IP_MAXPACKET = 0xffff
IP_MAX_GROUP_SRC_FILTER = 0x200
IP_MAX_MEMBERSHIPS = 0xfff
IP_MAX_SOCK_MUTE_FILTER = 0x80
IP_MAX_SOCK_SRC_FILTER = 0x80
IP_MF = 0x2000
IP_MIN_MEMBERSHIPS = 0x1f
IP_MSFILTER = 0x4a
IP_MSS = 0x240
IP_MULTICAST_IF = 0x9
IP_MULTICAST_IFINDEX = 0x42
IP_MULTICAST_LOOP = 0xb
IP_MULTICAST_TTL = 0xa
IP_MULTICAST_VIF = 0xe
IP_NAT__XXX = 0x37
IP_OFFMASK = 0x1fff
IP_OLD_FW_ADD = 0x32
IP_OLD_FW_DEL = 0x33
IP_OLD_FW_FLUSH = 0x34
IP_OLD_FW_GET = 0x36
IP_OLD_FW_RESETLOG = 0x38
IP_OLD_FW_ZERO = 0x35
IP_OPTIONS = 0x1
IP_PKTINFO = 0x1a
IP_PORTRANGE = 0x13
IP_PORTRANGE_DEFAULT = 0x0
IP_PORTRANGE_HIGH = 0x1
IP_PORTRANGE_LOW = 0x2
IP_RECVDSTADDR = 0x7
IP_RECVIF = 0x14
IP_RECVOPTS = 0x5
IP_RECVPKTINFO = 0x1a
IP_RECVRETOPTS = 0x6
IP_RECVTOS = 0x1b
IP_RECVTTL = 0x18
IP_RETOPTS = 0x8
IP_RF = 0x8000
IP_RSVP_OFF = 0x10
IP_RSVP_ON = 0xf
IP_RSVP_VIF_OFF = 0x12
IP_RSVP_VIF_ON = 0x11
IP_STRIPHDR = 0x17
IP_TOS = 0x3
IP_TRAFFIC_MGT_BACKGROUND = 0x41
IP_TTL = 0x4
IP_UNBLOCK_SOURCE = 0x49
ISIG = 0x80
ISTRIP = 0x20
IUTF8 = 0x4000
IXANY = 0x800
IXOFF = 0x400
IXON = 0x200
LOCK_EX = 0x2
LOCK_NB = 0x4
LOCK_SH = 0x1
LOCK_UN = 0x8
MADV_CAN_REUSE = 0x9
MADV_DONTNEED = 0x4
MADV_FREE = 0x5
MADV_FREE_REUSABLE = 0x7
MADV_FREE_REUSE = 0x8
MADV_NORMAL = 0x0
MADV_PAGEOUT = 0xa
MADV_RANDOM = 0x1
MADV_SEQUENTIAL = 0x2
MADV_WILLNEED = 0x3
MADV_ZERO_WIRED_PAGES = 0x6
MAP_ANON = 0x1000
MAP_ANONYMOUS = 0x1000
MAP_COPY = 0x2
MAP_FILE = 0x0
MAP_FIXED = 0x10
MAP_HASSEMAPHORE = 0x200
MAP_JIT = 0x800
MAP_NOCACHE = 0x400
MAP_NOEXTEND = 0x100
MAP_NORESERVE = 0x40
MAP_PRIVATE = 0x2
MAP_RENAME = 0x20
MAP_RESERVED0080 = 0x80
MAP_RESILIENT_CODESIGN = 0x2000
MAP_RESILIENT_MEDIA = 0x4000
MAP_SHARED = 0x1
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MNT_ASYNC = 0x40
MNT_AUTOMOUNTED = 0x400000
MNT_CMDFLAGS = 0xf0000
MNT_CPROTECT = 0x80
MNT_DEFWRITE = 0x2000000
MNT_DONTBROWSE = 0x100000
MNT_DOVOLFS = 0x8000
MNT_DWAIT = 0x4
MNT_EXPORTED = 0x100
MNT_FORCE = 0x80000
MNT_IGNORE_OWNERSHIP = 0x200000
MNT_JOURNALED = 0x800000
MNT_LOCAL = 0x1000
MNT_MULTILABEL = 0x4000000
MNT_NOATIME = 0x10000000
MNT_NOBLOCK = 0x20000
MNT_NODEV = 0x10
MNT_NOEXEC = 0x4
MNT_NOSUID = 0x8
MNT_NOUSERXATTR = 0x1000000
MNT_NOWAIT = 0x2
MNT_QUARANTINE = 0x400
MNT_QUOTA = 0x2000
MNT_RDONLY = 0x1
MNT_RELOAD = 0x40000
MNT_ROOTFS = 0x4000
MNT_SYNCHRONOUS = 0x2
MNT_UNION = 0x20
MNT_UNKNOWNPERMISSIONS = 0x200000
MNT_UPDATE = 0x10000
MNT_VISFLAGMASK = 0x17f0f5ff
MNT_WAIT = 0x1
MSG_CTRUNC = 0x20
MSG_DONTROUTE = 0x4
MSG_DONTWAIT = 0x80
MSG_EOF = 0x100
MSG_EOR = 0x8
MSG_FLUSH = 0x400
MSG_HAVEMORE = 0x2000
MSG_HOLD = 0x800
MSG_NEEDSA = 0x10000
MSG_OOB = 0x1
MSG_PEEK = 0x2
MSG_RCVMORE = 0x4000
MSG_SEND = 0x1000
MSG_TRUNC = 0x10
MSG_WAITALL = 0x40
MSG_WAITSTREAM = 0x200
MS_ASYNC = 0x1
MS_DEACTIVATE = 0x8
MS_INVALIDATE = 0x2
MS_KILLPAGES = 0x4
MS_SYNC = 0x10
NAME_MAX = 0xff
NET_RT_DUMP = 0x1
NET_RT_DUMP2 = 0x7
NET_RT_FLAGS = 0x2
NET_RT_IFLIST = 0x3
NET_RT_IFLIST2 = 0x6
NET_RT_MAXID = 0xa
NET_RT_STAT = 0x4
NET_RT_TRASH = 0x5
NL0 = 0x0
NL1 = 0x100
NL2 = 0x200
NL3 = 0x300
NLDLY = 0x300
NOFLSH = 0x80000000
NOKERNINFO = 0x2000000
NOTE_ABSOLUTE = 0x8
NOTE_ATTRIB = 0x8
NOTE_BACKGROUND = 0x40
NOTE_CHILD = 0x4
NOTE_CRITICAL = 0x20
NOTE_DELETE = 0x1
NOTE_EXEC = 0x20000000
NOTE_EXIT = 0x80000000
NOTE_EXITSTATUS = 0x4000000
NOTE_EXIT_CSERROR = 0x40000
NOTE_EXIT_DECRYPTFAIL = 0x10000
NOTE_EXIT_DETAIL = 0x2000000
NOTE_EXIT_DETAIL_MASK = 0x70000
NOTE_EXIT_MEMORY = 0x20000
NOTE_EXIT_REPARENTED = 0x80000
NOTE_EXTEND = 0x4
NOTE_FFAND = 0x40000000
NOTE_FFCOPY = 0xc0000000
NOTE_FFCTRLMASK = 0xc0000000
NOTE_FFLAGSMASK = 0xffffff
NOTE_FFNOP = 0x0
NOTE_FFOR = 0x80000000
NOTE_FORK = 0x40000000
NOTE_FUNLOCK = 0x100
NOTE_LEEWAY = 0x10
NOTE_LINK = 0x10
NOTE_LOWAT = 0x1
NOTE_MACH_CONTINUOUS_TIME = 0x80
NOTE_NONE = 0x80
NOTE_NSECONDS = 0x4
NOTE_OOB = 0x2
NOTE_PCTRLMASK = -0x100000
NOTE_PDATAMASK = 0xfffff
NOTE_REAP = 0x10000000
NOTE_RENAME = 0x20
NOTE_REVOKE = 0x40
NOTE_SECONDS = 0x1
NOTE_SIGNAL = 0x8000000
NOTE_TRACK = 0x1
NOTE_TRACKERR = 0x2
NOTE_TRIGGER = 0x1000000
NOTE_USECONDS = 0x2
NOTE_VM_ERROR = 0x10000000
NOTE_VM_PRESSURE = 0x80000000
NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000
NOTE_VM_PRESSURE_TERMINATE = 0x40000000
NOTE_WRITE = 0x2
OCRNL = 0x10
OFDEL = 0x20000
OFILL = 0x80
ONLCR = 0x2
ONLRET = 0x40
ONOCR = 0x20
ONOEOT = 0x8
OPOST = 0x1
OXTABS = 0x4
O_ACCMODE = 0x3
O_ALERT = 0x20000000
O_APPEND = 0x8
O_ASYNC = 0x40
O_CLOEXEC = 0x1000000
O_CREAT = 0x200
O_DIRECTORY = 0x100000
O_DP_GETRAWENCRYPTED = 0x1
O_DP_GETRAWUNENCRYPTED = 0x2
O_DSYNC = 0x400000
O_EVTONLY = 0x8000
O_EXCL = 0x800
O_EXLOCK = 0x20
O_FSYNC = 0x80
O_NDELAY = 0x4
O_NOCTTY = 0x20000
O_NOFOLLOW = 0x100
O_NONBLOCK = 0x4
O_POPUP = 0x80000000
O_RDONLY = 0x0
O_RDWR = 0x2
O_SHLOCK = 0x10
O_SYMLINK = 0x200000
O_SYNC = 0x80
O_TRUNC = 0x400
O_WRONLY = 0x1
PARENB = 0x1000
PARMRK = 0x8
PARODD = 0x2000
PENDIN = 0x20000000
PRIO_PGRP = 0x1
PRIO_PROCESS = 0x0
PRIO_USER = 0x2
PROT_EXEC = 0x4
PROT_NONE = 0x0
PROT_READ = 0x1
PROT_WRITE = 0x2
PT_ATTACH = 0xa
PT_ATTACHEXC = 0xe
PT_CONTINUE = 0x7
PT_DENY_ATTACH = 0x1f
PT_DETACH = 0xb
PT_FIRSTMACH = 0x20
PT_FORCEQUOTA = 0x1e
PT_KILL = 0x8
PT_READ_D = 0x2
PT_READ_I = 0x1
PT_READ_U = 0x3
PT_SIGEXC = 0xc
PT_STEP = 0x9
PT_THUPDATE = 0xd
PT_TRACE_ME = 0x0
PT_WRITE_D = 0x5
PT_WRITE_I = 0x4
PT_WRITE_U = 0x6
RLIMIT_AS = 0x5
RLIMIT_CORE = 0x4
RLIMIT_CPU = 0x0
RLIMIT_CPU_USAGE_MONITOR = 0x2
RLIMIT_DATA = 0x2
RLIMIT_FSIZE = 0x1
RLIMIT_MEMLOCK = 0x6
RLIMIT_NOFILE = 0x8
RLIMIT_NPROC = 0x7
RLIMIT_RSS = 0x5
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0x7fffffffffffffff
RTAX_AUTHOR = 0x6
RTAX_BRD = 0x7
RTAX_DST = 0x0
RTAX_GATEWAY = 0x1
RTAX_GENMASK = 0x3
RTAX_IFA = 0x5
RTAX_IFP = 0x4
RTAX_MAX = 0x8
RTAX_NETMASK = 0x2
RTA_AUTHOR = 0x40
RTA_BRD = 0x80
RTA_DST = 0x1
RTA_GATEWAY = 0x2
RTA_GENMASK = 0x8
RTA_IFA = 0x20
RTA_IFP = 0x10
RTA_NETMASK = 0x4
RTF_BLACKHOLE = 0x1000
RTF_BROADCAST = 0x400000
RTF_CLONING = 0x100
RTF_CONDEMNED = 0x2000000
RTF_DELCLONE = 0x80
RTF_DONE = 0x40
RTF_DYNAMIC = 0x10
RTF_GATEWAY = 0x2
RTF_HOST = 0x4
RTF_IFREF = 0x4000000
RTF_IFSCOPE = 0x1000000
RTF_LLINFO = 0x400
RTF_LOCAL = 0x200000
RTF_MODIFIED = 0x20
RTF_MULTICAST = 0x800000
RTF_NOIFREF = 0x2000
RTF_PINNED = 0x100000
RTF_PRCLONING = 0x10000
RTF_PROTO1 = 0x8000
RTF_PROTO2 = 0x4000
RTF_PROTO3 = 0x40000
RTF_PROXY = 0x8000000
RTF_REJECT = 0x8
RTF_ROUTER = 0x10000000
RTF_STATIC = 0x800
RTF_UP = 0x1
RTF_WASCLONED = 0x20000
RTF_XRESOLVE = 0x200
RTM_ADD = 0x1
RTM_CHANGE = 0x3
RTM_DELADDR = 0xd
RTM_DELETE = 0x2
RTM_DELMADDR = 0x10
RTM_GET = 0x4
RTM_GET2 = 0x14
RTM_IFINFO = 0xe
RTM_IFINFO2 = 0x12
RTM_LOCK = 0x8
RTM_LOSING = 0x5
RTM_MISS = 0x7
RTM_NEWADDR = 0xc
RTM_NEWMADDR = 0xf
RTM_NEWMADDR2 = 0x13
RTM_OLDADD = 0x9
RTM_OLDDEL = 0xa
RTM_REDIRECT = 0x6
RTM_RESOLVE = 0xb
RTM_RTTUNIT = 0xf4240
RTM_VERSION = 0x5
RTV_EXPIRE = 0x4
RTV_HOPCOUNT = 0x2
RTV_MTU = 0x1
RTV_RPIPE = 0x8
RTV_RTT = 0x40
RTV_RTTVAR = 0x80
RTV_SPIPE = 0x10
RTV_SSTHRESH = 0x20
RUSAGE_CHILDREN = -0x1
RUSAGE_SELF = 0x0
SCM_CREDS = 0x3
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x2
SCM_TIMESTAMP_MONOTONIC = 0x4
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
SIOCADDMULTI = 0x80206931
SIOCAIFADDR = 0x8040691a
SIOCARPIPLL = 0xc0206928
SIOCATMARK = 0x40047307
SIOCAUTOADDR = 0xc0206926
SIOCAUTONETMASK = 0x80206927
SIOCDELMULTI = 0x80206932
SIOCDIFADDR = 0x80206919
SIOCDIFPHYADDR = 0x80206941
SIOCGDRVSPEC = 0xc028697b
SIOCGETVLAN = 0xc020697f
SIOCGHIWAT = 0x40047301
SIOCGIFADDR = 0xc0206921
SIOCGIFALTMTU = 0xc0206948
SIOCGIFASYNCMAP = 0xc020697c
SIOCGIFBOND = 0xc0206947
SIOCGIFBRDADDR = 0xc0206923
SIOCGIFCAP = 0xc020695b
SIOCGIFCONF = 0xc00c6924
SIOCGIFDEVMTU = 0xc0206944
SIOCGIFDSTADDR = 0xc0206922
SIOCGIFFLAGS = 0xc0206911
SIOCGIFGENERIC = 0xc020693a
SIOCGIFKPI = 0xc0206987
SIOCGIFMAC = 0xc0206982
SIOCGIFMEDIA = 0xc02c6938
SIOCGIFMETRIC = 0xc0206917
SIOCGIFMTU = 0xc0206933
SIOCGIFNETMASK = 0xc0206925
SIOCGIFPDSTADDR = 0xc0206940
SIOCGIFPHYS = 0xc0206935
SIOCGIFPSRCADDR = 0xc020693f
SIOCGIFSTATUS = 0xc331693d
SIOCGIFVLAN = 0xc020697f
SIOCGIFWAKEFLAGS = 0xc0206988
SIOCGLOWAT = 0x40047303
SIOCGPGRP = 0x40047309
SIOCIFCREATE = 0xc0206978
SIOCIFCREATE2 = 0xc020697a
SIOCIFDESTROY = 0x80206979
SIOCIFGCLONERS = 0xc0106981
SIOCRSLVMULTI = 0xc010693b
SIOCSDRVSPEC = 0x8028697b
SIOCSETVLAN = 0x8020697e
SIOCSHIWAT = 0x80047300
SIOCSIFADDR = 0x8020690c
SIOCSIFALTMTU = 0x80206945
SIOCSIFASYNCMAP = 0x8020697d
SIOCSIFBOND = 0x80206946
SIOCSIFBRDADDR = 0x80206913
SIOCSIFCAP = 0x8020695a
SIOCSIFDSTADDR = 0x8020690e
SIOCSIFFLAGS = 0x80206910
SIOCSIFGENERIC = 0x80206939
SIOCSIFKPI = 0x80206986
SIOCSIFLLADDR = 0x8020693c
SIOCSIFMAC = 0x80206983
SIOCSIFMEDIA = 0xc0206937
SIOCSIFMETRIC = 0x80206918
SIOCSIFMTU = 0x80206934
SIOCSIFNETMASK = 0x80206916
SIOCSIFPHYADDR = 0x8040693e
SIOCSIFPHYS = 0x80206936
SIOCSIFVLAN = 0x8020697e
SIOCSLOWAT = 0x80047302
SIOCSPGRP = 0x80047308
SOCK_DGRAM = 0x2
SOCK_MAXADDRLEN = 0xff
SOCK_RAW = 0x3
SOCK_RDM = 0x4
SOCK_SEQPACKET = 0x5
SOCK_STREAM = 0x1
SOL_SOCKET = 0xffff
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x2
SO_BROADCAST = 0x20
SO_DEBUG = 0x1
SO_DONTROUTE = 0x10
SO_DONTTRUNC = 0x2000
SO_ERROR = 0x1007
SO_KEEPALIVE = 0x8
SO_LABEL = 0x1010
SO_LINGER = 0x80
SO_LINGER_SEC = 0x1080
SO_NETSVC_MARKING_LEVEL = 0x1119
SO_NET_SERVICE_TYPE = 0x1116
SO_NKE = 0x1021
SO_NOADDRERR = 0x1023
SO_NOSIGPIPE = 0x1022
SO_NOTIFYCONFLICT = 0x1026
SO_NP_EXTENSIONS = 0x1083
SO_NREAD = 0x1020
SO_NUMRCVPKT = 0x1112
SO_NWRITE = 0x1024
SO_OOBINLINE = 0x100
SO_PEERLABEL = 0x1011
SO_RANDOMPORT = 0x1082
SO_RCVBUF = 0x1002
SO_RCVLOWAT = 0x1004
SO_RCVTIMEO = 0x1006
SO_REUSEADDR = 0x4
SO_REUSEPORT = 0x200
SO_REUSESHAREUID = 0x1025
SO_SNDBUF = 0x1001
SO_SNDLOWAT = 0x1003
SO_SNDTIMEO = 0x1005
SO_TIMESTAMP = 0x400
SO_TIMESTAMP_MONOTONIC = 0x800
SO_TYPE = 0x1008
SO_UPCALLCLOSEWAIT = 0x1027
SO_USELOOPBACK = 0x40
SO_WANTMORE = 0x4000
SO_WANTOOBFLAG = 0x8000
S_IEXEC = 0x40
S_IFBLK = 0x6000
S_IFCHR = 0x2000
S_IFDIR = 0x4000
S_IFIFO = 0x1000
S_IFLNK = 0xa000
S_IFMT = 0xf000
S_IFREG = 0x8000
S_IFSOCK = 0xc000
S_IFWHT = 0xe000
S_IREAD = 0x100
S_IRGRP = 0x20
S_IROTH = 0x4
S_IRUSR = 0x100
S_IRWXG = 0x38
S_IRWXO = 0x7
S_IRWXU = 0x1c0
S_ISGID = 0x400
S_ISTXT = 0x200
S_ISUID = 0x800
S_ISVTX = 0x200
S_IWGRP = 0x10
S_IWOTH = 0x2
S_IWRITE = 0x80
S_IWUSR = 0x80
S_IXGRP = 0x8
S_IXOTH = 0x1
S_IXUSR = 0x40
TAB0 = 0x0
TAB1 = 0x400
TAB2 = 0x800
TAB3 = 0x4
TABDLY = 0xc04
TCIFLUSH = 0x1
TCIOFF = 0x3
TCIOFLUSH = 0x3
TCION = 0x4
TCOFLUSH = 0x2
TCOOFF = 0x1
TCOON = 0x2
TCP_CONNECTIONTIMEOUT = 0x20
TCP_CONNECTION_INFO = 0x106
TCP_ENABLE_ECN = 0x104
TCP_FASTOPEN = 0x105
TCP_KEEPALIVE = 0x10
TCP_KEEPCNT = 0x102
TCP_KEEPINTVL = 0x101
TCP_MAXHLEN = 0x3c
TCP_MAXOLEN = 0x28
TCP_MAXSEG = 0x2
TCP_MAXWIN = 0xffff
TCP_MAX_SACK = 0x4
TCP_MAX_WINSHIFT = 0xe
TCP_MINMSS = 0xd8
TCP_MSS = 0x200
TCP_NODELAY = 0x1
TCP_NOOPT = 0x8
TCP_NOPUSH = 0x4
TCP_NOTSENT_LOWAT = 0x201
TCP_RXT_CONNDROPTIME = 0x80
TCP_RXT_FINDROP = 0x100
TCP_SENDMOREACKS = 0x103
TCSAFLUSH = 0x2
TIOCCBRK = 0x2000747a
TIOCCDTR = 0x20007478
TIOCCONS = 0x80047462
TIOCDCDTIMESTAMP = 0x40107458
TIOCDRAIN = 0x2000745e
TIOCDSIMICROCODE = 0x20007455
TIOCEXCL = 0x2000740d
TIOCEXT = 0x80047460
TIOCFLUSH = 0x80047410
TIOCGDRAINWAIT = 0x40047456
TIOCGETA = 0x40487413
TIOCGETD = 0x4004741a
TIOCGPGRP = 0x40047477
TIOCGWINSZ = 0x40087468
TIOCIXOFF = 0x20007480
TIOCIXON = 0x20007481
TIOCMBIC = 0x8004746b
TIOCMBIS = 0x8004746c
TIOCMGDTRWAIT = 0x4004745a
TIOCMGET = 0x4004746a
TIOCMODG = 0x40047403
TIOCMODS = 0x80047404
TIOCMSDTRWAIT = 0x8004745b
TIOCMSET = 0x8004746d
TIOCM_CAR = 0x40
TIOCM_CD = 0x40
TIOCM_CTS = 0x20
TIOCM_DSR = 0x100
TIOCM_DTR = 0x2
TIOCM_LE = 0x1
TIOCM_RI = 0x80
TIOCM_RNG = 0x80
TIOCM_RTS = 0x4
TIOCM_SR = 0x10
TIOCM_ST = 0x8
TIOCNOTTY = 0x20007471
TIOCNXCL = 0x2000740e
TIOCOUTQ = 0x40047473
TIOCPKT = 0x80047470
TIOCPKT_DATA = 0x0
TIOCPKT_DOSTOP = 0x20
TIOCPKT_FLUSHREAD = 0x1
TIOCPKT_FLUSHWRITE = 0x2
TIOCPKT_IOCTL = 0x40
TIOCPKT_NOSTOP = 0x10
TIOCPKT_START = 0x8
TIOCPKT_STOP = 0x4
TIOCPTYGNAME = 0x40807453
TIOCPTYGRANT = 0x20007454
TIOCPTYUNLK = 0x20007452
TIOCREMOTE = 0x80047469
TIOCSBRK = 0x2000747b
TIOCSCONS = 0x20007463
TIOCSCTTY = 0x20007461
TIOCSDRAINWAIT = 0x80047457
TIOCSDTR = 0x20007479
TIOCSETA = 0x80487414
TIOCSETAF = 0x80487416
TIOCSETAW = 0x80487415
TIOCSETD = 0x8004741b
TIOCSIG = 0x2000745f
TIOCSPGRP = 0x80047476
TIOCSTART = 0x2000746e
TIOCSTAT = 0x20007465
TIOCSTI = 0x80017472
TIOCSTOP = 0x2000746f
TIOCSWINSZ = 0x80087467
TIOCTIMESTAMP = 0x40107459
TIOCUCNTL = 0x80047466
TOSTOP = 0x400000
VDISCARD = 0xf
VDSUSP = 0xb
VEOF = 0x0
VEOL = 0x1
VEOL2 = 0x2
VERASE = 0x3
VINTR = 0x8
VKILL = 0x5
VLNEXT = 0xe
VMIN = 0x10
VM_LOADAVG = 0x2
VM_MACHFACTOR = 0x4
VM_MAXID = 0x6
VM_METER = 0x1
VM_SWAPUSAGE = 0x5
VQUIT = 0x9
VREPRINT = 0x6
VSTART = 0xc
VSTATUS = 0x12
VSTOP = 0xd
VSUSP = 0xa
VT0 = 0x0
VT1 = 0x10000
VTDLY = 0x10000
VTIME = 0x11
VWERASE = 0x4
WCONTINUED = 0x10
WCOREFLAG = 0x80
WEXITED = 0x4
WNOHANG = 0x1
WNOWAIT = 0x20
WORDSIZE = 0x40
WSTOPPED = 0x8
WUNTRACED = 0x2
)
// Errors
const (
E2BIG = syscall.Errno(0x7)
EACCES = syscall.Errno(0xd)
EADDRINUSE = syscall.Errno(0x30)
EADDRNOTAVAIL = syscall.Errno(0x31)
EAFNOSUPPORT = syscall.Errno(0x2f)
EAGAIN = syscall.Errno(0x23)
EALREADY = syscall.Errno(0x25)
EAUTH = syscall.Errno(0x50)
EBADARCH = syscall.Errno(0x56)
EBADEXEC = syscall.Errno(0x55)
EBADF = syscall.Errno(0x9)
EBADMACHO = syscall.Errno(0x58)
EBADMSG = syscall.Errno(0x5e)
EBADRPC = syscall.Errno(0x48)
EBUSY = syscall.Errno(0x10)
ECANCELED = syscall.Errno(0x59)
ECHILD = syscall.Errno(0xa)
ECONNABORTED = syscall.Errno(0x35)
ECONNREFUSED = syscall.Errno(0x3d)
ECONNRESET = syscall.Errno(0x36)
EDEADLK = syscall.Errno(0xb)
EDESTADDRREQ = syscall.Errno(0x27)
EDEVERR = syscall.Errno(0x53)
EDOM = syscall.Errno(0x21)
EDQUOT = syscall.Errno(0x45)
EEXIST = syscall.Errno(0x11)
EFAULT = syscall.Errno(0xe)
EFBIG = syscall.Errno(0x1b)
EFTYPE = syscall.Errno(0x4f)
EHOSTDOWN = syscall.Errno(0x40)
EHOSTUNREACH = syscall.Errno(0x41)
EIDRM = syscall.Errno(0x5a)
EILSEQ = syscall.Errno(0x5c)
EINPROGRESS = syscall.Errno(0x24)
EINTR = syscall.Errno(0x4)
EINVAL = syscall.Errno(0x16)
EIO = syscall.Errno(0x5)
EISCONN = syscall.Errno(0x38)
EISDIR = syscall.Errno(0x15)
ELAST = syscall.Errno(0x6a)
ELOOP = syscall.Errno(0x3e)
EMFILE = syscall.Errno(0x18)
EMLINK = syscall.Errno(0x1f)
EMSGSIZE = syscall.Errno(0x28)
EMULTIHOP = syscall.Errno(0x5f)
ENAMETOOLONG = syscall.Errno(0x3f)
ENEEDAUTH = syscall.Errno(0x51)
ENETDOWN = syscall.Errno(0x32)
ENETRESET = syscall.Errno(0x34)
ENETUNREACH = syscall.Errno(0x33)
ENFILE = syscall.Errno(0x17)
ENOATTR = syscall.Errno(0x5d)
ENOBUFS = syscall.Errno(0x37)
ENODATA = syscall.Errno(0x60)
ENODEV = syscall.Errno(0x13)
ENOENT = syscall.Errno(0x2)
ENOEXEC = syscall.Errno(0x8)
ENOLCK = syscall.Errno(0x4d)
ENOLINK = syscall.Errno(0x61)
ENOMEM = syscall.Errno(0xc)
ENOMSG = syscall.Errno(0x5b)
ENOPOLICY = syscall.Errno(0x67)
ENOPROTOOPT = syscall.Errno(0x2a)
ENOSPC = syscall.Errno(0x1c)
ENOSR = syscall.Errno(0x62)
ENOSTR = syscall.Errno(0x63)
ENOSYS = syscall.Errno(0x4e)
ENOTBLK = syscall.Errno(0xf)
ENOTCONN = syscall.Errno(0x39)
ENOTDIR = syscall.Errno(0x14)
ENOTEMPTY = syscall.Errno(0x42)
ENOTRECOVERABLE = syscall.Errno(0x68)
ENOTSOCK = syscall.Errno(0x26)
ENOTSUP = syscall.Errno(0x2d)
ENOTTY = syscall.Errno(0x19)
ENXIO = syscall.Errno(0x6)
EOPNOTSUPP = syscall.Errno(0x66)
EOVERFLOW = syscall.Errno(0x54)
EOWNERDEAD = syscall.Errno(0x69)
EPERM = syscall.Errno(0x1)
EPFNOSUPPORT = syscall.Errno(0x2e)
EPIPE = syscall.Errno(0x20)
EPROCLIM = syscall.Errno(0x43)
EPROCUNAVAIL = syscall.Errno(0x4c)
EPROGMISMATCH = syscall.Errno(0x4b)
EPROGUNAVAIL = syscall.Errno(0x4a)
EPROTO = syscall.Errno(0x64)
EPROTONOSUPPORT = syscall.Errno(0x2b)
EPROTOTYPE = syscall.Errno(0x29)
EPWROFF = syscall.Errno(0x52)
EQFULL = syscall.Errno(0x6a)
ERANGE = syscall.Errno(0x22)
EREMOTE = syscall.Errno(0x47)
EROFS = syscall.Errno(0x1e)
ERPCMISMATCH = syscall.Errno(0x49)
ESHLIBVERS = syscall.Errno(0x57)
ESHUTDOWN = syscall.Errno(0x3a)
ESOCKTNOSUPPORT = syscall.Errno(0x2c)
ESPIPE = syscall.Errno(0x1d)
ESRCH = syscall.Errno(0x3)
ESTALE = syscall.Errno(0x46)
ETIME = syscall.Errno(0x65)
ETIMEDOUT = syscall.Errno(0x3c)
ETOOMANYREFS = syscall.Errno(0x3b)
ETXTBSY = syscall.Errno(0x1a)
EUSERS = syscall.Errno(0x44)
EWOULDBLOCK = syscall.Errno(0x23)
EXDEV = syscall.Errno(0x12)
)
// Signals
const (
SIGABRT = syscall.Signal(0x6)
SIGALRM = syscall.Signal(0xe)
SIGBUS = syscall.Signal(0xa)
SIGCHLD = syscall.Signal(0x14)
SIGCONT = syscall.Signal(0x13)
SIGEMT = syscall.Signal(0x7)
SIGFPE = syscall.Signal(0x8)
SIGHUP = syscall.Signal(0x1)
SIGILL = syscall.Signal(0x4)
SIGINFO = syscall.Signal(0x1d)
SIGINT = syscall.Signal(0x2)
SIGIO = syscall.Signal(0x17)
SIGIOT = syscall.Signal(0x6)
SIGKILL = syscall.Signal(0x9)
SIGPIPE = syscall.Signal(0xd)
SIGPROF = syscall.Signal(0x1b)
SIGQUIT = syscall.Signal(0x3)
SIGSEGV = syscall.Signal(0xb)
SIGSTOP = syscall.Signal(0x11)
SIGSYS = syscall.Signal(0xc)
SIGTERM = syscall.Signal(0xf)
SIGTRAP = syscall.Signal(0x5)
SIGTSTP = syscall.Signal(0x12)
SIGTTIN = syscall.Signal(0x15)
SIGTTOU = syscall.Signal(0x16)
SIGURG = syscall.Signal(0x10)
SIGUSR1 = syscall.Signal(0x1e)
SIGUSR2 = syscall.Signal(0x1f)
SIGVTALRM = syscall.Signal(0x1a)
SIGWINCH = syscall.Signal(0x1c)
SIGXCPU = syscall.Signal(0x18)
SIGXFSZ = syscall.Signal(0x19)
)
// Error table
var errors = [...]string{
1: "operation not permitted",
2: "no such file or directory",
3: "no such process",
4: "interrupted system call",
5: "input/output error",
6: "device not configured",
7: "argument list too long",
8: "exec format error",
9: "bad file descriptor",
10: "no child processes",
11: "resource deadlock avoided",
12: "cannot allocate memory",
13: "permission denied",
14: "bad address",
15: "block device required",
16: "resource busy",
17: "file exists",
18: "cross-device link",
19: "operation not supported by device",
20: "not a directory",
21: "is a directory",
22: "invalid argument",
23: "too many open files in system",
24: "too many open files",
25: "inappropriate ioctl for device",
26: "text file busy",
27: "file too large",
28: "no space left on device",
29: "illegal seek",
30: "read-only file system",
31: "too many links",
32: "broken pipe",
33: "numerical argument out of domain",
34: "result too large",
35: "resource temporarily unavailable",
36: "operation now in progress",
37: "operation already in progress",
38: "socket operation on non-socket",
39: "destination address required",
40: "message too long",
41: "protocol wrong type for socket",
42: "protocol not available",
43: "protocol not supported",
44: "socket type not supported",
45: "operation not supported",
46: "protocol family not supported",
47: "address family not supported by protocol family",
48: "address already in use",
49: "can't assign requested address",
50: "network is down",
51: "network is unreachable",
52: "network dropped connection on reset",
53: "software caused connection abort",
54: "connection reset by peer",
55: "no buffer space available",
56: "socket is already connected",
57: "socket is not connected",
58: "can't send after socket shutdown",
59: "too many references: can't splice",
60: "operation timed out",
61: "connection refused",
62: "too many levels of symbolic links",
63: "file name too long",
64: "host is down",
65: "no route to host",
66: "directory not empty",
67: "too many processes",
68: "too many users",
69: "disc quota exceeded",
70: "stale NFS file handle",
71: "too many levels of remote in path",
72: "RPC struct is bad",
73: "RPC version wrong",
74: "RPC prog. not avail",
75: "program version wrong",
76: "bad procedure for program",
77: "no locks available",
78: "function not implemented",
79: "inappropriate file type or format",
80: "authentication error",
81: "need authenticator",
82: "device power is off",
83: "device error",
84: "value too large to be stored in data type",
85: "bad executable (or shared library)",
86: "bad CPU type in executable",
87: "shared library version mismatch",
88: "malformed Mach-o file",
89: "operation canceled",
90: "identifier removed",
91: "no message of desired type",
92: "illegal byte sequence",
93: "attribute not found",
94: "bad message",
95: "EMULTIHOP (Reserved)",
96: "no message available on STREAM",
97: "ENOLINK (Reserved)",
98: "no STREAM resources",
99: "not a STREAM",
100: "protocol error",
101: "STREAM ioctl timeout",
102: "operation not supported on socket",
103: "policy not found",
104: "state not recoverable",
105: "previous owner died",
106: "interface output queue is full",
}
// Signal table
var signals = [...]string{
1: "hangup",
2: "interrupt",
3: "quit",
4: "illegal instruction",
5: "trace/BPT trap",
6: "abort trap",
7: "EMT trap",
8: "floating point exception",
9: "killed",
10: "bus error",
11: "segmentation fault",
12: "bad system call",
13: "broken pipe",
14: "alarm clock",
15: "terminated",
16: "urgent I/O condition",
17: "suspended (signal)",
18: "suspended",
19: "continued",
20: "child exited",
21: "stopped (tty input)",
22: "stopped (tty output)",
23: "I/O possible",
24: "cputime limit exceeded",
25: "filesize limit exceeded",
26: "virtual timer expired",
27: "profiling timer expired",
28: "window size changes",
29: "information request",
30: "user defined signal 1",
31: "user defined signal 2",
}
| vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go | 0 | https://github.com/ethereum/go-ethereum/commit/cf05ef9106779da0df62c0c03312fc489171aaa5 | [
0.0008371007279492915,
0.00019682869606185704,
0.0001615872315596789,
0.00017132026550825685,
0.00007116919732652605
] |
{
"id": 0,
"code_window": [
"\ttab.mutex.Lock()\n",
"\tdefer tab.mutex.Unlock()\n",
"\n",
"\t// Find all non-empty buckets and get a fresh slice of their entries.\n",
"\tvar buckets [][]*Node\n",
"\tfor _, b := range tab.buckets {\n",
"\t\tif len(b.entries) > 0 {\n",
"\t\t\tbuckets = append(buckets, b.entries[:])\n",
"\t\t}\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfor _, b := range &tab.buckets {\n"
],
"file_path": "p2p/discover/table.go",
"type": "replace",
"edit_start_line_idx": 162
} | package bn256
import (
"math/big"
)
// twistPoint implements the elliptic curve y²=x³+3/ξ over GF(p²). Points are
// kept in Jacobian form and t=z² when valid. The group G₂ is the set of
// n-torsion points of this curve over GF(p²) (where n = Order)
type twistPoint struct {
x, y, z, t gfP2
}
var twistB = &gfP2{
gfP{0x38e7ecccd1dcff67, 0x65f0b37d93ce0d3e, 0xd749d0dd22ac00aa, 0x0141b9ce4a688d4d},
gfP{0x3bf938e377b802a8, 0x020b1b273633535d, 0x26b7edf049755260, 0x2514c6324384a86d},
}
// twistGen is the generator of group G₂.
var twistGen = &twistPoint{
gfP2{
gfP{0xafb4737da84c6140, 0x6043dd5a5802d8c4, 0x09e950fc52a02f86, 0x14fef0833aea7b6b},
gfP{0x8e83b5d102bc2026, 0xdceb1935497b0172, 0xfbb8264797811adf, 0x19573841af96503b},
},
gfP2{
gfP{0x64095b56c71856ee, 0xdc57f922327d3cbb, 0x55f935be33351076, 0x0da4a0e693fd6482},
gfP{0x619dfa9d886be9f6, 0xfe7fd297f59e9b78, 0xff9e1a62231b7dfe, 0x28fd7eebae9e4206},
},
gfP2{*newGFp(0), *newGFp(1)},
gfP2{*newGFp(0), *newGFp(1)},
}
func (c *twistPoint) String() string {
c.MakeAffine()
x, y := gfP2Decode(&c.x), gfP2Decode(&c.y)
return "(" + x.String() + ", " + y.String() + ")"
}
func (c *twistPoint) Set(a *twistPoint) {
c.x.Set(&a.x)
c.y.Set(&a.y)
c.z.Set(&a.z)
c.t.Set(&a.t)
}
// IsOnCurve returns true iff c is on the curve.
func (c *twistPoint) IsOnCurve() bool {
c.MakeAffine()
if c.IsInfinity() {
return true
}
y2, x3 := &gfP2{}, &gfP2{}
y2.Square(&c.y)
x3.Square(&c.x).Mul(x3, &c.x).Add(x3, twistB)
if *y2 != *x3 {
return false
}
cneg := &twistPoint{}
cneg.Mul(c, Order)
return cneg.z.IsZero()
}
func (c *twistPoint) SetInfinity() {
c.x.SetZero()
c.y.SetOne()
c.z.SetZero()
c.t.SetZero()
}
func (c *twistPoint) IsInfinity() bool {
return c.z.IsZero()
}
func (c *twistPoint) Add(a, b *twistPoint) {
// For additional comments, see the same function in curve.go.
if a.IsInfinity() {
c.Set(b)
return
}
if b.IsInfinity() {
c.Set(a)
return
}
// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/addition/add-2007-bl.op3
z12 := (&gfP2{}).Square(&a.z)
z22 := (&gfP2{}).Square(&b.z)
u1 := (&gfP2{}).Mul(&a.x, z22)
u2 := (&gfP2{}).Mul(&b.x, z12)
t := (&gfP2{}).Mul(&b.z, z22)
s1 := (&gfP2{}).Mul(&a.y, t)
t.Mul(&a.z, z12)
s2 := (&gfP2{}).Mul(&b.y, t)
h := (&gfP2{}).Sub(u2, u1)
xEqual := h.IsZero()
t.Add(h, h)
i := (&gfP2{}).Square(t)
j := (&gfP2{}).Mul(h, i)
t.Sub(s2, s1)
yEqual := t.IsZero()
if xEqual && yEqual {
c.Double(a)
return
}
r := (&gfP2{}).Add(t, t)
v := (&gfP2{}).Mul(u1, i)
t4 := (&gfP2{}).Square(r)
t.Add(v, v)
t6 := (&gfP2{}).Sub(t4, j)
c.x.Sub(t6, t)
t.Sub(v, &c.x) // t7
t4.Mul(s1, j) // t8
t6.Add(t4, t4) // t9
t4.Mul(r, t) // t10
c.y.Sub(t4, t6)
t.Add(&a.z, &b.z) // t11
t4.Square(t) // t12
t.Sub(t4, z12) // t13
t4.Sub(t, z22) // t14
c.z.Mul(t4, h)
}
func (c *twistPoint) Double(a *twistPoint) {
// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/doubling/dbl-2009-l.op3
A := (&gfP2{}).Square(&a.x)
B := (&gfP2{}).Square(&a.y)
C := (&gfP2{}).Square(B)
t := (&gfP2{}).Add(&a.x, B)
t2 := (&gfP2{}).Square(t)
t.Sub(t2, A)
t2.Sub(t, C)
d := (&gfP2{}).Add(t2, t2)
t.Add(A, A)
e := (&gfP2{}).Add(t, A)
f := (&gfP2{}).Square(e)
t.Add(d, d)
c.x.Sub(f, t)
t.Add(C, C)
t2.Add(t, t)
t.Add(t2, t2)
c.y.Sub(d, &c.x)
t2.Mul(e, &c.y)
c.y.Sub(t2, t)
t.Mul(&a.y, &a.z)
c.z.Add(t, t)
}
func (c *twistPoint) Mul(a *twistPoint, scalar *big.Int) {
sum, t := &twistPoint{}, &twistPoint{}
for i := scalar.BitLen(); i >= 0; i-- {
t.Double(sum)
if scalar.Bit(i) != 0 {
sum.Add(t, a)
} else {
sum.Set(t)
}
}
c.Set(sum)
}
func (c *twistPoint) MakeAffine() {
if c.z.IsOne() {
return
} else if c.z.IsZero() {
c.x.SetZero()
c.y.SetOne()
c.t.SetZero()
return
}
zInv := (&gfP2{}).Invert(&c.z)
t := (&gfP2{}).Mul(&c.y, zInv)
zInv2 := (&gfP2{}).Square(zInv)
c.y.Mul(t, zInv2)
t.Mul(&c.x, zInv2)
c.x.Set(t)
c.z.SetOne()
c.t.SetOne()
}
func (c *twistPoint) Neg(a *twistPoint) {
c.x.Set(&a.x)
c.y.Neg(&a.y)
c.z.Set(&a.z)
c.t.SetZero()
}
| crypto/bn256/cloudflare/twist.go | 0 | https://github.com/ethereum/go-ethereum/commit/cf05ef9106779da0df62c0c03312fc489171aaa5 | [
0.6883249878883362,
0.060482803732156754,
0.00016769542708061635,
0.00017434258188586682,
0.18664175271987915
] |
{
"id": 0,
"code_window": [
"\ttab.mutex.Lock()\n",
"\tdefer tab.mutex.Unlock()\n",
"\n",
"\t// Find all non-empty buckets and get a fresh slice of their entries.\n",
"\tvar buckets [][]*Node\n",
"\tfor _, b := range tab.buckets {\n",
"\t\tif len(b.entries) > 0 {\n",
"\t\t\tbuckets = append(buckets, b.entries[:])\n",
"\t\t}\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfor _, b := range &tab.buckets {\n"
],
"file_path": "p2p/discover/table.go",
"type": "replace",
"edit_start_line_idx": 162
} | package natpmp
import "time"
type callObserver interface {
observeCall(msg []byte, result []byte, err error)
}
// A caller that records the RPC call.
type recorder struct {
child caller
observer callObserver
}
func (n *recorder) call(msg []byte, timeout time.Duration) (result []byte, err error) {
result, err = n.child.call(msg, timeout)
n.observer.observeCall(msg, result, err)
return
}
| vendor/github.com/jackpal/go-nat-pmp/recorder.go | 0 | https://github.com/ethereum/go-ethereum/commit/cf05ef9106779da0df62c0c03312fc489171aaa5 | [
0.00017098052194342017,
0.00016979509382508695,
0.0001686096511548385,
0.00016979509382508695,
0.0000011854353942908347
] |
{
"id": 1,
"code_window": [
"func (tab *Table) copyLiveNodes() {\n",
"\ttab.mutex.Lock()\n",
"\tdefer tab.mutex.Unlock()\n",
"\n",
"\tnow := time.Now()\n",
"\tfor _, b := range tab.buckets {\n",
"\t\tfor _, n := range b.entries {\n",
"\t\t\tif now.Sub(n.addedAt) >= seedMinTableTime {\n",
"\t\t\t\ttab.db.updateNode(n)\n",
"\t\t\t}\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfor _, b := range &tab.buckets {\n"
],
"file_path": "p2p/discover/table.go",
"type": "replace",
"edit_start_line_idx": 510
} | // Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package trie
import (
"fmt"
"io"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
)
var indices = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "[17]"}
type node interface {
fstring(string) string
cache() (hashNode, bool)
canUnload(cachegen, cachelimit uint16) bool
}
type (
fullNode struct {
Children [17]node // Actual trie node data to encode/decode (needs custom encoder)
flags nodeFlag
}
shortNode struct {
Key []byte
Val node
flags nodeFlag
}
hashNode []byte
valueNode []byte
)
// nilValueNode is used when collapsing internal trie nodes for hashing, since
// unset children need to serialize correctly.
var nilValueNode = valueNode(nil)
// EncodeRLP encodes a full node into the consensus RLP format.
func (n *fullNode) EncodeRLP(w io.Writer) error {
var nodes [17]node
for i, child := range n.Children {
if child != nil {
nodes[i] = child
} else {
nodes[i] = nilValueNode
}
}
return rlp.Encode(w, nodes)
}
func (n *fullNode) copy() *fullNode { copy := *n; return © }
func (n *shortNode) copy() *shortNode { copy := *n; return © }
// nodeFlag contains caching-related metadata about a node.
type nodeFlag struct {
hash hashNode // cached hash of the node (may be nil)
gen uint16 // cache generation counter
dirty bool // whether the node has changes that must be written to the database
}
// canUnload tells whether a node can be unloaded.
func (n *nodeFlag) canUnload(cachegen, cachelimit uint16) bool {
return !n.dirty && cachegen-n.gen >= cachelimit
}
func (n *fullNode) canUnload(gen, limit uint16) bool { return n.flags.canUnload(gen, limit) }
func (n *shortNode) canUnload(gen, limit uint16) bool { return n.flags.canUnload(gen, limit) }
func (n hashNode) canUnload(uint16, uint16) bool { return false }
func (n valueNode) canUnload(uint16, uint16) bool { return false }
func (n *fullNode) cache() (hashNode, bool) { return n.flags.hash, n.flags.dirty }
func (n *shortNode) cache() (hashNode, bool) { return n.flags.hash, n.flags.dirty }
func (n hashNode) cache() (hashNode, bool) { return nil, true }
func (n valueNode) cache() (hashNode, bool) { return nil, true }
// Pretty printing.
func (n *fullNode) String() string { return n.fstring("") }
func (n *shortNode) String() string { return n.fstring("") }
func (n hashNode) String() string { return n.fstring("") }
func (n valueNode) String() string { return n.fstring("") }
func (n *fullNode) fstring(ind string) string {
resp := fmt.Sprintf("[\n%s ", ind)
for i, node := range n.Children {
if node == nil {
resp += fmt.Sprintf("%s: <nil> ", indices[i])
} else {
resp += fmt.Sprintf("%s: %v", indices[i], node.fstring(ind+" "))
}
}
return resp + fmt.Sprintf("\n%s] ", ind)
}
func (n *shortNode) fstring(ind string) string {
return fmt.Sprintf("{%x: %v} ", n.Key, n.Val.fstring(ind+" "))
}
func (n hashNode) fstring(ind string) string {
return fmt.Sprintf("<%x> ", []byte(n))
}
func (n valueNode) fstring(ind string) string {
return fmt.Sprintf("%x ", []byte(n))
}
func mustDecodeNode(hash, buf []byte, cachegen uint16) node {
n, err := decodeNode(hash, buf, cachegen)
if err != nil {
panic(fmt.Sprintf("node %x: %v", hash, err))
}
return n
}
// decodeNode parses the RLP encoding of a trie node.
func decodeNode(hash, buf []byte, cachegen uint16) (node, error) {
if len(buf) == 0 {
return nil, io.ErrUnexpectedEOF
}
elems, _, err := rlp.SplitList(buf)
if err != nil {
return nil, fmt.Errorf("decode error: %v", err)
}
switch c, _ := rlp.CountValues(elems); c {
case 2:
n, err := decodeShort(hash, elems, cachegen)
return n, wrapError(err, "short")
case 17:
n, err := decodeFull(hash, elems, cachegen)
return n, wrapError(err, "full")
default:
return nil, fmt.Errorf("invalid number of list elements: %v", c)
}
}
func decodeShort(hash, elems []byte, cachegen uint16) (node, error) {
kbuf, rest, err := rlp.SplitString(elems)
if err != nil {
return nil, err
}
flag := nodeFlag{hash: hash, gen: cachegen}
key := compactToHex(kbuf)
if hasTerm(key) {
// value node
val, _, err := rlp.SplitString(rest)
if err != nil {
return nil, fmt.Errorf("invalid value node: %v", err)
}
return &shortNode{key, append(valueNode{}, val...), flag}, nil
}
r, _, err := decodeRef(rest, cachegen)
if err != nil {
return nil, wrapError(err, "val")
}
return &shortNode{key, r, flag}, nil
}
func decodeFull(hash, elems []byte, cachegen uint16) (*fullNode, error) {
n := &fullNode{flags: nodeFlag{hash: hash, gen: cachegen}}
for i := 0; i < 16; i++ {
cld, rest, err := decodeRef(elems, cachegen)
if err != nil {
return n, wrapError(err, fmt.Sprintf("[%d]", i))
}
n.Children[i], elems = cld, rest
}
val, _, err := rlp.SplitString(elems)
if err != nil {
return n, err
}
if len(val) > 0 {
n.Children[16] = append(valueNode{}, val...)
}
return n, nil
}
const hashLen = len(common.Hash{})
func decodeRef(buf []byte, cachegen uint16) (node, []byte, error) {
kind, val, rest, err := rlp.Split(buf)
if err != nil {
return nil, buf, err
}
switch {
case kind == rlp.List:
// 'embedded' node reference. The encoding must be smaller
// than a hash in order to be valid.
if size := len(buf) - len(rest); size > hashLen {
err := fmt.Errorf("oversized embedded node (size is %d bytes, want size < %d)", size, hashLen)
return nil, buf, err
}
n, err := decodeNode(nil, buf, cachegen)
return n, rest, err
case kind == rlp.String && len(val) == 0:
// empty node
return nil, rest, nil
case kind == rlp.String && len(val) == 32:
return append(hashNode{}, val...), rest, nil
default:
return nil, nil, fmt.Errorf("invalid RLP string size %d (want 0 or 32)", len(val))
}
}
// wraps a decoding error with information about the path to the
// invalid child node (for debugging encoding issues).
type decodeError struct {
what error
stack []string
}
func wrapError(err error, ctx string) error {
if err == nil {
return nil
}
if decErr, ok := err.(*decodeError); ok {
decErr.stack = append(decErr.stack, ctx)
return decErr
}
return &decodeError{err, []string{ctx}}
}
func (err *decodeError) Error() string {
return fmt.Sprintf("%v (decode path: %s)", err.what, strings.Join(err.stack, "<-"))
}
| trie/node.go | 1 | https://github.com/ethereum/go-ethereum/commit/cf05ef9106779da0df62c0c03312fc489171aaa5 | [
0.9721493721008301,
0.0881885290145874,
0.0001616897206986323,
0.0003018675197381526,
0.24628527462482452
] |
{
"id": 1,
"code_window": [
"func (tab *Table) copyLiveNodes() {\n",
"\ttab.mutex.Lock()\n",
"\tdefer tab.mutex.Unlock()\n",
"\n",
"\tnow := time.Now()\n",
"\tfor _, b := range tab.buckets {\n",
"\t\tfor _, n := range b.entries {\n",
"\t\t\tif now.Sub(n.addedAt) >= seedMinTableTime {\n",
"\t\t\t\ttab.db.updateNode(n)\n",
"\t\t\t}\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfor _, b := range &tab.buckets {\n"
],
"file_path": "p2p/discover/table.go",
"type": "replace",
"edit_start_line_idx": 510
} | /**********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef _SECP256K1_FIELD_REPR_IMPL_H_
#define _SECP256K1_FIELD_REPR_IMPL_H_
#if defined HAVE_CONFIG_H
#include "libsecp256k1-config.h"
#endif
#include "util.h"
#include "num.h"
#include "field.h"
#if defined(USE_ASM_X86_64)
#include "field_5x52_asm_impl.h"
#else
#include "field_5x52_int128_impl.h"
#endif
/** Implements arithmetic modulo FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE FFFFFC2F,
* represented as 5 uint64_t's in base 2^52. The values are allowed to contain >52 each. In particular,
* each FieldElem has a 'magnitude' associated with it. Internally, a magnitude M means each element
* is at most M*(2^53-1), except the most significant one, which is limited to M*(2^49-1). All operations
* accept any input with magnitude at most M, and have different rules for propagating magnitude to their
* output.
*/
#ifdef VERIFY
static void secp256k1_fe_verify(const secp256k1_fe *a) {
const uint64_t *d = a->n;
int m = a->normalized ? 1 : 2 * a->magnitude, r = 1;
/* secp256k1 'p' value defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
r &= (d[0] <= 0xFFFFFFFFFFFFFULL * m);
r &= (d[1] <= 0xFFFFFFFFFFFFFULL * m);
r &= (d[2] <= 0xFFFFFFFFFFFFFULL * m);
r &= (d[3] <= 0xFFFFFFFFFFFFFULL * m);
r &= (d[4] <= 0x0FFFFFFFFFFFFULL * m);
r &= (a->magnitude >= 0);
r &= (a->magnitude <= 2048);
if (a->normalized) {
r &= (a->magnitude <= 1);
if (r && (d[4] == 0x0FFFFFFFFFFFFULL) && ((d[3] & d[2] & d[1]) == 0xFFFFFFFFFFFFFULL)) {
r &= (d[0] < 0xFFFFEFFFFFC2FULL);
}
}
VERIFY_CHECK(r == 1);
}
#endif
static void secp256k1_fe_normalize(secp256k1_fe *r) {
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
/* Reduce t4 at the start so there will be at most a single carry from the first pass */
uint64_t m;
uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
/* The first pass ensures the magnitude is 1, ... */
t0 += x * 0x1000003D1ULL;
t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; m = t1;
t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; m &= t2;
t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; m &= t3;
/* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
VERIFY_CHECK(t4 >> 49 == 0);
/* At most a single final reduction is needed; check if the value is >= the field characteristic */
x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL) & (m == 0xFFFFFFFFFFFFFULL)
& (t0 >= 0xFFFFEFFFFFC2FULL));
/* Apply the final reduction (for constant-time behaviour, we do it always) */
t0 += x * 0x1000003D1ULL;
t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;
/* If t4 didn't carry to bit 48 already, then it should have after any final reduction */
VERIFY_CHECK(t4 >> 48 == x);
/* Mask off the possible multiple of 2^256 from the final reduction */
t4 &= 0x0FFFFFFFFFFFFULL;
r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
secp256k1_fe_verify(r);
#endif
}
static void secp256k1_fe_normalize_weak(secp256k1_fe *r) {
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
/* Reduce t4 at the start so there will be at most a single carry from the first pass */
uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
/* The first pass ensures the magnitude is 1, ... */
t0 += x * 0x1000003D1ULL;
t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;
/* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
VERIFY_CHECK(t4 >> 49 == 0);
r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
#ifdef VERIFY
r->magnitude = 1;
secp256k1_fe_verify(r);
#endif
}
static void secp256k1_fe_normalize_var(secp256k1_fe *r) {
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
/* Reduce t4 at the start so there will be at most a single carry from the first pass */
uint64_t m;
uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
/* The first pass ensures the magnitude is 1, ... */
t0 += x * 0x1000003D1ULL;
t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; m = t1;
t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; m &= t2;
t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; m &= t3;
/* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
VERIFY_CHECK(t4 >> 49 == 0);
/* At most a single final reduction is needed; check if the value is >= the field characteristic */
x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL) & (m == 0xFFFFFFFFFFFFFULL)
& (t0 >= 0xFFFFEFFFFFC2FULL));
if (x) {
t0 += 0x1000003D1ULL;
t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;
/* If t4 didn't carry to bit 48 already, then it should have after any final reduction */
VERIFY_CHECK(t4 >> 48 == x);
/* Mask off the possible multiple of 2^256 from the final reduction */
t4 &= 0x0FFFFFFFFFFFFULL;
}
r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
secp256k1_fe_verify(r);
#endif
}
static int secp256k1_fe_normalizes_to_zero(secp256k1_fe *r) {
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
/* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
uint64_t z0, z1;
/* Reduce t4 at the start so there will be at most a single carry from the first pass */
uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
/* The first pass ensures the magnitude is 1, ... */
t0 += x * 0x1000003D1ULL;
t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; z0 = t0; z1 = t0 ^ 0x1000003D0ULL;
t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; z0 |= t1; z1 &= t1;
t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; z0 |= t2; z1 &= t2;
t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; z0 |= t3; z1 &= t3;
z0 |= t4; z1 &= t4 ^ 0xF000000000000ULL;
/* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
VERIFY_CHECK(t4 >> 49 == 0);
return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL);
}
static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r) {
uint64_t t0, t1, t2, t3, t4;
uint64_t z0, z1;
uint64_t x;
t0 = r->n[0];
t4 = r->n[4];
/* Reduce t4 at the start so there will be at most a single carry from the first pass */
x = t4 >> 48;
/* The first pass ensures the magnitude is 1, ... */
t0 += x * 0x1000003D1ULL;
/* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
z0 = t0 & 0xFFFFFFFFFFFFFULL;
z1 = z0 ^ 0x1000003D0ULL;
/* Fast return path should catch the majority of cases */
if ((z0 != 0ULL) & (z1 != 0xFFFFFFFFFFFFFULL)) {
return 0;
}
t1 = r->n[1];
t2 = r->n[2];
t3 = r->n[3];
t4 &= 0x0FFFFFFFFFFFFULL;
t1 += (t0 >> 52);
t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; z0 |= t1; z1 &= t1;
t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; z0 |= t2; z1 &= t2;
t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; z0 |= t3; z1 &= t3;
z0 |= t4; z1 &= t4 ^ 0xF000000000000ULL;
/* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
VERIFY_CHECK(t4 >> 49 == 0);
return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL);
}
SECP256K1_INLINE static void secp256k1_fe_set_int(secp256k1_fe *r, int a) {
r->n[0] = a;
r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0;
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
secp256k1_fe_verify(r);
#endif
}
SECP256K1_INLINE static int secp256k1_fe_is_zero(const secp256k1_fe *a) {
const uint64_t *t = a->n;
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
secp256k1_fe_verify(a);
#endif
return (t[0] | t[1] | t[2] | t[3] | t[4]) == 0;
}
SECP256K1_INLINE static int secp256k1_fe_is_odd(const secp256k1_fe *a) {
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
secp256k1_fe_verify(a);
#endif
return a->n[0] & 1;
}
SECP256K1_INLINE static void secp256k1_fe_clear(secp256k1_fe *a) {
int i;
#ifdef VERIFY
a->magnitude = 0;
a->normalized = 1;
#endif
for (i=0; i<5; i++) {
a->n[i] = 0;
}
}
static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) {
int i;
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
VERIFY_CHECK(b->normalized);
secp256k1_fe_verify(a);
secp256k1_fe_verify(b);
#endif
for (i = 4; i >= 0; i--) {
if (a->n[i] > b->n[i]) {
return 1;
}
if (a->n[i] < b->n[i]) {
return -1;
}
}
return 0;
}
static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) {
int i;
r->n[0] = r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0;
for (i=0; i<32; i++) {
int j;
for (j=0; j<2; j++) {
int limb = (8*i+4*j)/52;
int shift = (8*i+4*j)%52;
r->n[limb] |= (uint64_t)((a[31-i] >> (4*j)) & 0xF) << shift;
}
}
if (r->n[4] == 0x0FFFFFFFFFFFFULL && (r->n[3] & r->n[2] & r->n[1]) == 0xFFFFFFFFFFFFFULL && r->n[0] >= 0xFFFFEFFFFFC2FULL) {
return 0;
}
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
secp256k1_fe_verify(r);
#endif
return 1;
}
/** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) {
int i;
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
secp256k1_fe_verify(a);
#endif
for (i=0; i<32; i++) {
int j;
int c = 0;
for (j=0; j<2; j++) {
int limb = (8*i+4*j)/52;
int shift = (8*i+4*j)%52;
c |= ((a->n[limb] >> shift) & 0xF) << (4 * j);
}
r[31-i] = c;
}
}
SECP256K1_INLINE static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k1_fe *a, int m) {
#ifdef VERIFY
VERIFY_CHECK(a->magnitude <= m);
secp256k1_fe_verify(a);
#endif
r->n[0] = 0xFFFFEFFFFFC2FULL * 2 * (m + 1) - a->n[0];
r->n[1] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[1];
r->n[2] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[2];
r->n[3] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[3];
r->n[4] = 0x0FFFFFFFFFFFFULL * 2 * (m + 1) - a->n[4];
#ifdef VERIFY
r->magnitude = m + 1;
r->normalized = 0;
secp256k1_fe_verify(r);
#endif
}
SECP256K1_INLINE static void secp256k1_fe_mul_int(secp256k1_fe *r, int a) {
r->n[0] *= a;
r->n[1] *= a;
r->n[2] *= a;
r->n[3] *= a;
r->n[4] *= a;
#ifdef VERIFY
r->magnitude *= a;
r->normalized = 0;
secp256k1_fe_verify(r);
#endif
}
SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a) {
#ifdef VERIFY
secp256k1_fe_verify(a);
#endif
r->n[0] += a->n[0];
r->n[1] += a->n[1];
r->n[2] += a->n[2];
r->n[3] += a->n[3];
r->n[4] += a->n[4];
#ifdef VERIFY
r->magnitude += a->magnitude;
r->normalized = 0;
secp256k1_fe_verify(r);
#endif
}
static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b) {
#ifdef VERIFY
VERIFY_CHECK(a->magnitude <= 8);
VERIFY_CHECK(b->magnitude <= 8);
secp256k1_fe_verify(a);
secp256k1_fe_verify(b);
VERIFY_CHECK(r != b);
#endif
secp256k1_fe_mul_inner(r->n, a->n, b->n);
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 0;
secp256k1_fe_verify(r);
#endif
}
static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) {
#ifdef VERIFY
VERIFY_CHECK(a->magnitude <= 8);
secp256k1_fe_verify(a);
#endif
secp256k1_fe_sqr_inner(r->n, a->n);
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 0;
secp256k1_fe_verify(r);
#endif
}
static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) {
uint64_t mask0, mask1;
mask0 = flag + ~((uint64_t)0);
mask1 = ~mask0;
r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1);
#ifdef VERIFY
if (a->magnitude > r->magnitude) {
r->magnitude = a->magnitude;
}
r->normalized &= a->normalized;
#endif
}
static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) {
uint64_t mask0, mask1;
mask0 = flag + ~((uint64_t)0);
mask1 = ~mask0;
r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
}
static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a) {
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
#endif
r->n[0] = a->n[0] | a->n[1] << 52;
r->n[1] = a->n[1] >> 12 | a->n[2] << 40;
r->n[2] = a->n[2] >> 24 | a->n[3] << 28;
r->n[3] = a->n[3] >> 36 | a->n[4] << 16;
}
static SECP256K1_INLINE void secp256k1_fe_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a) {
r->n[0] = a->n[0] & 0xFFFFFFFFFFFFFULL;
r->n[1] = a->n[0] >> 52 | ((a->n[1] << 12) & 0xFFFFFFFFFFFFFULL);
r->n[2] = a->n[1] >> 40 | ((a->n[2] << 24) & 0xFFFFFFFFFFFFFULL);
r->n[3] = a->n[2] >> 28 | ((a->n[3] << 36) & 0xFFFFFFFFFFFFFULL);
r->n[4] = a->n[3] >> 16;
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
#endif
}
#endif
| crypto/secp256k1/libsecp256k1/src/field_5x52_impl.h | 0 | https://github.com/ethereum/go-ethereum/commit/cf05ef9106779da0df62c0c03312fc489171aaa5 | [
0.9660096764564514,
0.023400062695145607,
0.00016232587222475559,
0.00017459328228142112,
0.1410249024629593
] |
{
"id": 1,
"code_window": [
"func (tab *Table) copyLiveNodes() {\n",
"\ttab.mutex.Lock()\n",
"\tdefer tab.mutex.Unlock()\n",
"\n",
"\tnow := time.Now()\n",
"\tfor _, b := range tab.buckets {\n",
"\t\tfor _, n := range b.entries {\n",
"\t\t\tif now.Sub(n.addedAt) >= seedMinTableTime {\n",
"\t\t\t\ttab.db.updateNode(n)\n",
"\t\t\t}\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfor _, b := range &tab.buckets {\n"
],
"file_path": "p2p/discover/table.go",
"type": "replace",
"edit_start_line_idx": 510
} | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"bufio"
"io"
"net"
"net/http"
"net/url"
)
// DialError is an error that occurs while dialling a websocket server.
type DialError struct {
*Config
Err error
}
func (e *DialError) Error() string {
return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error()
}
// NewConfig creates a new WebSocket config for client connection.
func NewConfig(server, origin string) (config *Config, err error) {
config = new(Config)
config.Version = ProtocolVersionHybi13
config.Location, err = url.ParseRequestURI(server)
if err != nil {
return
}
config.Origin, err = url.ParseRequestURI(origin)
if err != nil {
return
}
config.Header = http.Header(make(map[string][]string))
return
}
// NewClient creates a new WebSocket client connection over rwc.
func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) {
br := bufio.NewReader(rwc)
bw := bufio.NewWriter(rwc)
err = hybiClientHandshake(config, br, bw)
if err != nil {
return
}
buf := bufio.NewReadWriter(br, bw)
ws = newHybiClientConn(config, buf, rwc)
return
}
// Dial opens a new client connection to a WebSocket.
func Dial(url_, protocol, origin string) (ws *Conn, err error) {
config, err := NewConfig(url_, origin)
if err != nil {
return nil, err
}
if protocol != "" {
config.Protocol = []string{protocol}
}
return DialConfig(config)
}
var portMap = map[string]string{
"ws": "80",
"wss": "443",
}
func parseAuthority(location *url.URL) string {
if _, ok := portMap[location.Scheme]; ok {
if _, _, err := net.SplitHostPort(location.Host); err != nil {
return net.JoinHostPort(location.Host, portMap[location.Scheme])
}
}
return location.Host
}
// DialConfig opens a new client connection to a WebSocket with a config.
func DialConfig(config *Config) (ws *Conn, err error) {
var client net.Conn
if config.Location == nil {
return nil, &DialError{config, ErrBadWebSocketLocation}
}
if config.Origin == nil {
return nil, &DialError{config, ErrBadWebSocketOrigin}
}
dialer := config.Dialer
if dialer == nil {
dialer = &net.Dialer{}
}
client, err = dialWithDialer(dialer, config)
if err != nil {
goto Error
}
ws, err = NewClient(config, client)
if err != nil {
client.Close()
goto Error
}
return
Error:
return nil, &DialError{config, err}
}
| vendor/golang.org/x/net/websocket/client.go | 0 | https://github.com/ethereum/go-ethereum/commit/cf05ef9106779da0df62c0c03312fc489171aaa5 | [
0.0005083224386908114,
0.00023606188187841326,
0.00016475064330734313,
0.00017412012675777078,
0.00011232728866161779
] |
{
"id": 1,
"code_window": [
"func (tab *Table) copyLiveNodes() {\n",
"\ttab.mutex.Lock()\n",
"\tdefer tab.mutex.Unlock()\n",
"\n",
"\tnow := time.Now()\n",
"\tfor _, b := range tab.buckets {\n",
"\t\tfor _, n := range b.entries {\n",
"\t\t\tif now.Sub(n.addedAt) >= seedMinTableTime {\n",
"\t\t\t\ttab.db.updateNode(n)\n",
"\t\t\t}\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfor _, b := range &tab.buckets {\n"
],
"file_path": "p2p/discover/table.go",
"type": "replace",
"edit_start_line_idx": 510
} | // Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"crypto/md5"
"crypto/sha1"
"hash"
)
// Well known Name Space IDs and UUIDs
var (
NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
NIL = Parse("00000000-0000-0000-0000-000000000000")
)
// NewHash returns a new UUID derived from the hash of space concatenated with
// data generated by h. The hash should be at least 16 byte in length. The
// first 16 bytes of the hash are used to form the UUID. The version of the
// UUID will be the lower 4 bits of version. NewHash is used to implement
// NewMD5 and NewSHA1.
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
h.Reset()
h.Write(space)
h.Write([]byte(data))
s := h.Sum(nil)
uuid := make([]byte, 16)
copy(uuid, s)
uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
return uuid
}
// NewMD5 returns a new MD5 (Version 3) UUID based on the
// supplied name space and data.
//
// NewHash(md5.New(), space, data, 3)
func NewMD5(space UUID, data []byte) UUID {
return NewHash(md5.New(), space, data, 3)
}
// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
// supplied name space and data.
//
// NewHash(sha1.New(), space, data, 5)
func NewSHA1(space UUID, data []byte) UUID {
return NewHash(sha1.New(), space, data, 5)
}
| vendor/github.com/pborman/uuid/hash.go | 0 | https://github.com/ethereum/go-ethereum/commit/cf05ef9106779da0df62c0c03312fc489171aaa5 | [
0.0001792182301869616,
0.0001709634525468573,
0.00016251821944024414,
0.0001719168940326199,
0.000005260538728180109
] |