hunk (dict) | file (string, lengths 0 to 11.8M) | file_path (string, lengths 2 to 234) | label (int64, 0 to 1) | commit_url (string, lengths 74 to 103) | dependency_score (sequence, length 5) |
---|---|---|---|---|---|
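Each row below pairs a nested `hunk` dict (a `code_window` of lines, per-line keep/replace `labels`, the `after_edit` replacement lines, `file_path`, `type`, and `edit_start_line_idx`) with one candidate file from the same commit, plus a binary label, the commit URL, and five dependency scores. As a rough illustration of that record shape, here is a minimal Go sketch; the field names follow the column and key names visible in the rows, but the struct layout, JSON tags, and the tiny inline example row are illustrative assumptions, not part of the dataset definition.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hunk mirrors the nested "hunk" dict: a window of code lines, per-line
// keep/replace labels, the replacement lines, and where the edit starts.
type Hunk struct {
	ID               int      `json:"id"`
	CodeWindow       []string `json:"code_window"`
	Labels           []string `json:"labels"`
	AfterEdit        []string `json:"after_edit"`
	FilePath         string   `json:"file_path"`
	Type             string   `json:"type"`
	EditStartLineIdx int      `json:"edit_start_line_idx"`
}

// Record mirrors one dataset row as laid out in the table above.
type Record struct {
	Hunk            Hunk      `json:"hunk"`
	File            string    `json:"file"`             // full candidate file contents
	FilePath        string    `json:"file_path"`        // path of the candidate file
	Label           int       `json:"label"`            // binary relevance label (0 or 1)
	CommitURL       string    `json:"commit_url"`
	DependencyScore []float64 `json:"dependency_score"` // five scores per row
}

func main() {
	// Tiny hypothetical row, only to show the shape; values are not taken from the dataset.
	raw := `{"hunk":{"id":3,"code_window":["\tif !ok {\n"],"labels":["keep"],"after_edit":[],"file_path":"hugolib/site_test.go","type":"replace","edit_start_line_idx":273},"file":"package hugolib\n","file_path":"hugolib/site_test.go","label":1,"commit_url":"https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64","dependency_score":[0.1,0.2,0.3,0.4,0.5]}`

	var r Record
	if err := json.Unmarshal([]byte(raw), &r); err != nil {
		panic(err)
	}
	fmt.Println(r.FilePath, r.Label, len(r.DependencyScore))
}
```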
{
"id": 3,
"code_window": [
"\tif !ok {\n",
"\t\tt.Fatalf(\"Unable to locate rendered content\")\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\tt.Fatalf(\"Unable to locate rendered content: %s\", test.file)\n"
],
"file_path": "hugolib/site_test.go",
"type": "replace",
"edit_start_line_idx": 273
} | package hugolib
import (
"bytes"
"fmt"
"html/template"
"strings"
"testing"
)
const (
TEMPLATE_TITLE = "{{ .Title }}"
PAGE_SIMPLE_TITLE = `---
title: simple template
---
content`
TEMPLATE_MISSING_FUNC = "{{ .Title | funcdoesnotexists }}"
TEMPLATE_FUNC = "{{ .Title | urlize }}"
TEMPLATE_CONTENT = "{{ .Content }}"
TEMPLATE_DATE = "{{ .Date }}"
INVALID_TEMPLATE_FORMAT_DATE = "{{ .Date.Format time.RFC3339 }}"
TEMPLATE_WITH_URL = "<a href=\"foobar.jpg\">Going</a>"
PAGE_URL_SPECIFIED = `---
title: simple template
url: "mycategory/my-whatever-content/"
---
content`
PAGE_WITH_MD = `---
title: page with md
---
# heading 1
text
## heading 2
more text
`
)
func pageMust(p *Page, err error) *Page {
if err != nil {
panic(err)
}
return p
}
func TestDegenerateRenderThingMissingTemplate(t *testing.T) {
p, _ := ReadFrom(strings.NewReader(PAGE_SIMPLE_TITLE), "content/a/file.md")
s := new(Site)
s.prepTemplates()
_, err := s.RenderThing(p, "foobar")
if err == nil {
t.Errorf("Expected err to be returned when missing the template.")
}
}
func TestAddInvalidTemplate(t *testing.T) {
s := new(Site)
s.prepTemplates()
err := s.addTemplate("missing", TEMPLATE_MISSING_FUNC)
if err == nil {
t.Fatalf("Expecting the template to return an error")
}
}
func matchRender(t *testing.T, s *Site, p *Page, tmplName string, expected string) {
content, err := s.RenderThing(p, tmplName)
if err != nil {
t.Fatalf("Unable to render template.")
}
if string(content.Bytes()) != expected {
t.Fatalf("Content did not match expected: %s. got: %s", expected, content)
}
}
func _TestAddSameTemplateTwice(t *testing.T) {
p := pageMust(ReadFrom(strings.NewReader(PAGE_SIMPLE_TITLE), "content/a/file.md"))
s := new(Site)
s.prepTemplates()
err := s.addTemplate("foo", TEMPLATE_TITLE)
if err != nil {
t.Fatalf("Unable to add template foo")
}
matchRender(t, s, p, "foo", "simple template")
err = s.addTemplate("foo", "NEW {{ .Title }}")
if err != nil {
t.Fatalf("Unable to add template foo: %s", err)
}
matchRender(t, s, p, "foo", "NEW simple template")
}
func TestRenderThing(t *testing.T) {
tests := []struct {
content string
template string
expected string
}{
{PAGE_SIMPLE_TITLE, TEMPLATE_TITLE, "simple template"},
{PAGE_SIMPLE_TITLE, TEMPLATE_FUNC, "simple-template"},
{PAGE_WITH_MD, TEMPLATE_CONTENT, "<h1>heading 1</h1>\n\n<p>text</p>\n\n<h2>heading 2</h2>\n\n<p>more text</p>\n"},
{SIMPLE_PAGE_RFC3339_DATE, TEMPLATE_DATE, "2013-05-17 16:59:30 +0000 UTC"},
}
s := new(Site)
s.prepTemplates()
for i, test := range tests {
p, err := ReadFrom(strings.NewReader(test.content), "content/a/file.md")
if err != nil {
t.Fatalf("Error parsing buffer: %s", err)
}
templateName := fmt.Sprintf("foobar%d", i)
err = s.addTemplate(templateName, test.template)
if err != nil {
t.Fatalf("Unable to add template")
}
p.Content = template.HTML(p.Content)
html, err2 := s.RenderThing(p, templateName)
if err2 != nil {
t.Errorf("Unable to render html: %s", err)
}
if string(html.Bytes()) != test.expected {
t.Errorf("Content does not match.\nExpected\n\t'%q'\ngot\n\t'%q'", test.expected, html)
}
}
}
func TestRenderThingOrDefault(t *testing.T) {
tests := []struct {
content string
missing bool
template string
expected string
}{
{PAGE_SIMPLE_TITLE, true, TEMPLATE_TITLE, "simple template"},
{PAGE_SIMPLE_TITLE, true, TEMPLATE_FUNC, "simple-template"},
{PAGE_SIMPLE_TITLE, false, TEMPLATE_TITLE, "simple template"},
{PAGE_SIMPLE_TITLE, false, TEMPLATE_FUNC, "simple-template"},
}
s := new(Site)
s.prepTemplates()
for i, test := range tests {
p, err := ReadFrom(strings.NewReader(PAGE_SIMPLE_TITLE), "content/a/file.md")
if err != nil {
t.Fatalf("Error parsing buffer: %s", err)
}
templateName := fmt.Sprintf("default%d", i)
err = s.addTemplate(templateName, test.template)
if err != nil {
t.Fatalf("Unable to add template")
}
var html *bytes.Buffer
var err2 error
if test.missing {
html, err2 = s.RenderThingOrDefault(p, "missing", templateName)
} else {
html, err2 = s.RenderThingOrDefault(p, templateName, "missing_default")
}
if err2 != nil {
t.Errorf("Unable to render html: %s", err)
}
if string(html.Bytes()) != test.expected {
t.Errorf("Content does not match. Expected '%s', got '%s'", test.expected, html)
}
}
}
func TestSetOutFile(t *testing.T) {
s := new(Site)
p := pageMust(ReadFrom(strings.NewReader(PAGE_URL_SPECIFIED), "content/a/file.md"))
s.setOutFile(p)
expected := "mycategory/my-whatever-content/index.html"
if p.OutFile != "mycategory/my-whatever-content/index.html" {
t.Errorf("Outfile does not match. Expected '%s', got '%s'", expected, p.OutFile)
}
}
func TestSkipRender(t *testing.T) {
files := make(map[string][]byte)
target := &InMemoryTarget{files: files}
sources := []byteSource{
{"sect/doc1.html", []byte("---\nmarkup: markdown\n---\n# title\nsome *content*")},
{"sect/doc2.html", []byte("<!doctype html><html><body>more content</body></html>")},
{"sect/doc3.md", []byte("# doc3\n*some* content")},
{"sect/doc4.md", []byte("---\ntitle: doc4\n---\n# doc4\n*some content*")},
{"sect/doc5.html", []byte("<!doctype html><html>{{ template \"head\" }}<body>body5</body></html>")},
}
s := &Site{
Target: target,
Config: Config{BaseUrl: "http://auth/bub/"},
Source: &inMemorySource{sources},
}
s.initializeSiteInfo()
s.prepTemplates()
must(s.addTemplate("_default/single.html", "{{.Content}}"))
must(s.addTemplate("head", "<head><script src=\"script.js\"></script></head>"))
if err := s.CreatePages(); err != nil {
t.Fatalf("Unable to create pages: %s", err)
}
if err := s.BuildSiteMeta(); err != nil {
t.Fatalf("Unable to build site metadata: %s", err)
}
if err := s.RenderPages(); err != nil {
t.Fatalf("Unable to render pages. %s", err)
}
tests := []struct {
doc string
expected string
}{
{"sect/doc1.html", "<html><head></head><body><h1>title</h1>\n\n<p>some <em>content</em></p>\n</body></html>"},
{"sect/doc2.html", "<!DOCTYPE html><html><head></head><body>more content</body></html>"},
{"sect/doc3.html", "<html><head></head><body><h1>doc3</h1>\n\n<p><em>some</em> content</p>\n</body></html>"},
{"sect/doc4.html", "<html><head></head><body><h1>doc4</h1>\n\n<p><em>some content</em></p>\n</body></html>"},
{"sect/doc5.html", "<!DOCTYPE html><html><head><script src=\"http://auth/bub/script.js\"></script></head><body>body5</body></html>"},
}
for _, test := range tests {
content, ok := target.files[test.doc]
if !ok {
t.Fatalf("Did not find %s in target. %v", test.doc, target.files)
}
if !bytes.Equal(content, []byte(test.expected)) {
t.Errorf("%s content expected:\n%q\ngot:\n%q", test.doc, test.expected, string(content))
}
}
}
func TestAbsUrlify(t *testing.T) {
files := make(map[string][]byte)
target := &InMemoryTarget{files: files}
s := &Site{
Target: target,
Config: Config{BaseUrl: "http://auth/bub/"},
Source: &inMemorySource{urlFakeSource},
}
s.initializeSiteInfo()
s.prepTemplates()
must(s.addTemplate("blue/single.html", TEMPLATE_WITH_URL))
if err := s.CreatePages(); err != nil {
t.Fatalf("Unable to create pages: %s", err)
}
if err := s.BuildSiteMeta(); err != nil {
t.Fatalf("Unable to build site metadata: %s", err)
}
if err := s.RenderPages(); err != nil {
t.Fatalf("Unable to render pages. %s", err)
}
content, ok := target.files["content/blue/slug-doc-1.html"]
if !ok {
t.Fatalf("Unable to locate rendered content")
}
expected := "<html><head></head><body><a href=\"http://auth/bub/foobar.jpg\">Going</a></body></html>"
if string(content) != expected {
t.Errorf("AbsUrlify content expected:\n%q\ngot\n%q", expected, string(content))
}
}
| hugolib/site_test.go | 1 | https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64 | [
0.8728101253509521,
0.0315857008099556,
0.0001664464798523113,
0.0003941857721656561,
0.15900175273418427
] |
{
"id": 3,
"code_window": [
"\tif !ok {\n",
"\t\tt.Fatalf(\"Unable to locate rendered content\")\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\tt.Fatalf(\"Unable to locate rendered content: %s\", test.file)\n"
],
"file_path": "hugolib/site_test.go",
"type": "replace",
"edit_start_line_idx": 273
} | // Copyright © 2013 Steve Francia <[email protected]>.
//
// Licensed under the Simple Public License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://opensource.org/licenses/Simple-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hugolib
import (
"bytes"
"fmt"
"github.com/spf13/hugo/template/bundle"
"strings"
"unicode"
)
var _ = fmt.Println
type ShortcodeFunc func([]string) string
type Shortcode struct {
Name string
Func ShortcodeFunc
}
type ShortcodeWithPage struct {
Params interface{}
Page *Page
}
type Shortcodes map[string]ShortcodeFunc
func ShortcodesHandle(stringToParse string, p *Page, t bundle.Template) string {
posStart := strings.Index(stringToParse, "{{%")
if posStart > 0 {
posEnd := strings.Index(stringToParse[posStart:], "%}}") + posStart
if posEnd > posStart {
name, par := SplitParams(stringToParse[posStart+3 : posEnd])
params := Tokenize(par)
var data = &ShortcodeWithPage{Params: params, Page: p}
newString := stringToParse[:posStart] + ShortcodeRender(name, data, t) + ShortcodesHandle(stringToParse[posEnd+3:], p, t)
return newString
}
}
return stringToParse
}
func StripShortcodes(stringToParse string) string {
posStart := strings.Index(stringToParse, "{{%")
if posStart > 0 {
posEnd := strings.Index(stringToParse[posStart:], "%}}") + posStart
if posEnd > posStart {
newString := stringToParse[:posStart] + StripShortcodes(stringToParse[posEnd+3:])
return newString
}
}
return stringToParse
}
func Tokenize(in string) interface{} {
first := strings.Fields(in)
var final = make([]string, 0)
var keys = make([]string, 0)
inQuote := false
start := 0
for i, v := range first {
index := strings.Index(v, "=")
if !inQuote {
if index > 1 {
keys = append(keys, v[:index])
v = v[index+1:]
}
}
if !strings.HasPrefix(v, "“") && !inQuote {
final = append(final, v)
} else if inQuote && strings.HasSuffix(v, "”") && !strings.HasSuffix(v, "\\\"") {
first[i] = v[:len(v)-7]
final = append(final, strings.Join(first[start:i+1], " "))
inQuote = false
} else if strings.HasPrefix(v, "“") && !inQuote {
if strings.HasSuffix(v, "”") {
final = append(final, v[7:len(v)-7])
} else {
start = i
first[i] = v[7:]
inQuote = true
}
}
// No closing "... just make remainder the final token
if inQuote && i == len(first) {
final = append(final, first[start:len(first)]...)
}
}
if len(keys) > 0 {
var m = make(map[string]string)
for i, k := range keys {
m[k] = final[i]
}
return m
}
return final
}
func SplitParams(in string) (name string, par2 string) {
i := strings.IndexFunc(strings.TrimSpace(in), unicode.IsSpace)
if i < 1 {
return strings.TrimSpace(in), ""
}
return strings.TrimSpace(in[:i+1]), strings.TrimSpace(in[i+1:])
}
func ShortcodeRender(name string, data *ShortcodeWithPage, t bundle.Template) string {
buffer := new(bytes.Buffer)
t.ExecuteTemplate(buffer, "shortcodes/"+name+".html", data)
return buffer.String()
}
| hugolib/shortcode.go | 0 | https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64 | [
0.005556690040975809,
0.000703167577739805,
0.00016808666987344623,
0.0001799071324057877,
0.0013971914304420352
] |
{
"id": 3,
"code_window": [
"\tif !ok {\n",
"\t\tt.Fatalf(\"Unable to locate rendered content\")\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\tt.Fatalf(\"Unable to locate rendered content: %s\", test.file)\n"
],
"file_path": "hugolib/site_test.go",
"type": "replace",
"edit_start_line_idx": 273
} | ---
title: "Aliases"
date: "2013-07-09"
aliases:
- /doc/redirects/
- /doc/alias/
- /doc/aliases/
---
For people migrating existing published content to Hugo theres a good chance
you need a mechanism to handle redirecting old urls.
Luckily, this can be handled easily with aliases in Hugo.
## Example
**content/posts/my-awesome-blog-post.md**
---
aliases:
- /posts/my-original-url/
- /2010/even-earlier-url.html
---
Now when you go to any of the aliases locations they
will redirect to the page.
## Important Behaviors
1. *Hugo makes no assumptions about aliases. They also don't change based
on your UglyUrls setting. You need to provide absolute path to your webroot and the
complete filename or directory.*
2. *Aliases are rendered prior to any content and will be overwritten by
any content with the same location.*
| docs/content/extras/aliases.md | 0 | https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64 | [
0.0002338585036341101,
0.0001854020229075104,
0.00016794617113191634,
0.0001699017157079652,
0.000027991911338176578
] |
{
"id": 3,
"code_window": [
"\tif !ok {\n",
"\t\tt.Fatalf(\"Unable to locate rendered content\")\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\tt.Fatalf(\"Unable to locate rendered content: %s\", test.file)\n"
],
"file_path": "hugolib/site_test.go",
"type": "replace",
"edit_start_line_idx": 273
} | // Copyright © 2013 Steve Francia <[email protected]>.
//
// Licensed under the Simple Public License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://opensource.org/licenses/Simple-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hugolib
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"github.com/BurntSushi/toml"
"github.com/spf13/hugo/parser"
helper "github.com/spf13/hugo/template"
"github.com/spf13/hugo/template/bundle"
"github.com/theplant/blackfriday"
"html/template"
"io"
"launchpad.net/goyaml"
"path"
"sort"
"strings"
"time"
)
type Page struct {
Status string
Images []string
Content template.HTML
Summary template.HTML
RawMarkdown string // TODO should be []byte
Params map[string]interface{}
contentType string
Draft bool
Aliases []string
Tmpl bundle.Template
Markup string
renderable bool
PageMeta
File
Position
Node
}
type File struct {
FileName, OutFile, Extension string
}
type PageMeta struct {
WordCount int
FuzzyWordCount int
}
type Position struct {
Prev *Page
Next *Page
}
type Pages []*Page
func (p Pages) Len() int { return len(p) }
func (p Pages) Less(i, j int) bool { return p[i].Date.Unix() > p[j].Date.Unix() }
func (p Pages) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// TODO eliminate unnecessary things
func (p Pages) Sort() { sort.Sort(p) }
func (p Pages) Limit(n int) Pages { return p[0:n] }
func getSummaryString(content []byte) ([]byte, bool) {
if bytes.Contains(content, summaryDivider) {
return bytes.Split(content, summaryDivider)[0], false
} else {
plainContent := StripHTML(StripShortcodes(string(content)))
return []byte(TruncateWordsToWholeSentence(plainContent, summaryLength)), true
}
}
// TODO abstract further to support loading from more
// than just files on disk. Should load reader (file, []byte)
func newPage(filename string) *Page {
page := Page{contentType: "",
File: File{FileName: filename, Extension: "html"},
Node: Node{Keywords: make([]string, 10, 30)},
Params: make(map[string]interface{})}
page.Date, _ = time.Parse("20060102", "20080101")
page.guessSection()
return &page
}
func StripHTML(s string) string {
output := ""
// Shortcut strings with no tags in them
if !strings.ContainsAny(s, "<>") {
output = s
} else {
s = strings.Replace(s, "\n", " ", -1)
s = strings.Replace(s, "</p>", " \n", -1)
s = strings.Replace(s, "<br>", " \n", -1)
s = strings.Replace(s, "</br>", " \n", -1)
// Walk through the string removing all tags
b := new(bytes.Buffer)
inTag := false
for _, r := range s {
switch r {
case '<':
inTag = true
case '>':
inTag = false
default:
if !inTag {
b.WriteRune(r)
}
}
}
output = b.String()
}
return output
}
func (p *Page) IsRenderable() bool {
return p.renderable
}
func (p *Page) guessSection() {
if p.Section == "" {
x := strings.Split(p.FileName, "/")
if len(x) > 1 {
if section := x[len(x)-2]; section != "content" {
p.Section = section
}
}
}
}
func (page *Page) Type() string {
if page.contentType != "" {
return page.contentType
}
page.guessSection()
if x := page.Section; x != "" {
return x
}
return "page"
}
func (page *Page) Layout(l ...string) string {
layout := ""
if len(l) == 0 {
layout = "single"
} else {
layout = l[0]
}
if x := page.layout; x != "" {
return x
}
return strings.ToLower(page.Type()) + "/" + layout + ".html"
}
func ReadFrom(buf io.Reader, name string) (page *Page, err error) {
if len(name) == 0 {
return nil, errors.New("Zero length page name")
}
p := newPage(name)
if err = p.parse(buf); err != nil {
return
}
p.analyzePage()
return p, nil
}
func (p *Page) analyzePage() {
p.WordCount = TotalWords(p.RawMarkdown)
p.FuzzyWordCount = int((p.WordCount+100)/100) * 100
}
func (p *Page) Permalink() template.HTML {
baseUrl := string(p.Site.BaseUrl)
section := strings.TrimSpace(p.Section)
pSlug := strings.TrimSpace(p.Slug)
pUrl := strings.TrimSpace(p.Url)
var permalink string
if len(pSlug) > 0 {
if p.Site.Config.UglyUrls {
permalink = section + "/" + p.Slug + "." + p.Extension
} else {
permalink = section + "/" + p.Slug + "/"
}
} else if len(pUrl) > 2 {
permalink = pUrl
} else {
_, t := path.Split(p.FileName)
if p.Site.Config.UglyUrls {
x := replaceExtension(strings.TrimSpace(t), p.Extension)
permalink = section + "/" + x
} else {
file, _ := fileExt(strings.TrimSpace(t))
permalink = section + "/" + file
}
}
return template.HTML(MakePermalink(baseUrl, permalink))
}
func (page *Page) handleTomlMetaData(datum []byte) (interface{}, error) {
m := map[string]interface{}{}
datum = removeTomlIdentifier(datum)
if _, err := toml.Decode(string(datum), &m); err != nil {
return m, fmt.Errorf("Invalid TOML in %s \nError parsing page meta data: %s", page.FileName, err)
}
return m, nil
}
func removeTomlIdentifier(datum []byte) []byte {
return bytes.Replace(datum, []byte("+++"), []byte(""), -1)
}
func (page *Page) handleYamlMetaData(datum []byte) (interface{}, error) {
m := map[string]interface{}{}
if err := goyaml.Unmarshal(datum, &m); err != nil {
return m, fmt.Errorf("Invalid YAML in %s \nError parsing page meta data: %s", page.FileName, err)
}
return m, nil
}
func (page *Page) handleJsonMetaData(datum []byte) (interface{}, error) {
var f interface{}
if err := json.Unmarshal(datum, &f); err != nil {
return f, fmt.Errorf("Invalid JSON in %v \nError parsing page meta data: %s", page.FileName, err)
}
return f, nil
}
func (page *Page) update(f interface{}) error {
m := f.(map[string]interface{})
for k, v := range m {
switch strings.ToLower(k) {
case "title":
page.Title = interfaceToString(v)
case "description":
page.Description = interfaceToString(v)
case "slug":
page.Slug = helper.Urlize(interfaceToString(v))
case "url":
if url := interfaceToString(v); strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") {
return fmt.Errorf("Only relative urls are supported, %v provided", url)
}
page.Url = helper.Urlize(interfaceToString(v))
case "type":
page.contentType = interfaceToString(v)
case "keywords":
page.Keywords = interfaceArrayToStringArray(v)
case "date", "pubdate":
page.Date = interfaceToStringToDate(v)
case "draft":
page.Draft = interfaceToBool(v)
case "layout":
page.layout = interfaceToString(v)
case "markup":
page.Markup = interfaceToString(v)
case "aliases":
page.Aliases = interfaceArrayToStringArray(v)
for _, alias := range page.Aliases {
if strings.HasPrefix(alias, "http://") || strings.HasPrefix(alias, "https://") {
return fmt.Errorf("Only relative aliases are supported, %v provided", alias)
}
}
case "status":
page.Status = interfaceToString(v)
default:
// If not one of the explicit values, store in Params
switch vv := v.(type) {
case string: // handle string values
page.Params[strings.ToLower(k)] = vv
default: // handle array of strings as well
switch vvv := vv.(type) {
case []interface{}:
var a = make([]string, len(vvv))
for i, u := range vvv {
a[i] = interfaceToString(u)
}
page.Params[strings.ToLower(k)] = a
}
}
}
}
return nil
}
func (page *Page) GetParam(key string) interface{} {
v := page.Params[strings.ToLower(key)]
if v == nil {
return nil
}
switch v.(type) {
case string:
return interfaceToString(v)
case []string:
return v
}
return nil
}
type frontmatterType struct {
markstart, markend []byte
parse func([]byte) (interface{}, error)
includeMark bool
}
const YAML_DELIM = "---"
const TOML_DELIM = "+++"
func (page *Page) detectFrontMatter(mark rune) (f *frontmatterType) {
switch mark {
case '-':
return &frontmatterType{[]byte(YAML_DELIM), []byte(YAML_DELIM), page.handleYamlMetaData, false}
case '+':
return &frontmatterType{[]byte(TOML_DELIM), []byte(TOML_DELIM), page.handleTomlMetaData, false}
case '{':
return &frontmatterType{[]byte{'{'}, []byte{'}'}, page.handleJsonMetaData, true}
default:
return nil
}
}
func (p *Page) Render(layout ...string) template.HTML {
curLayout := ""
if len(layout) > 0 {
curLayout = layout[0]
}
return template.HTML(string(p.ExecuteTemplate(curLayout).Bytes()))
}
func (p *Page) ExecuteTemplate(layout string) *bytes.Buffer {
l := p.Layout(layout)
buffer := new(bytes.Buffer)
p.Tmpl.ExecuteTemplate(buffer, l, p)
return buffer
}
func (page *Page) guessMarkupType() string {
if page.Markup != "" {
return page.Markup
}
if strings.HasSuffix(page.FileName, ".md") {
return "md"
}
return "unknown"
}
func (page *Page) parse(reader io.Reader) error {
p, err := parser.ReadFrom(reader)
if err != nil {
return err
}
page.renderable = p.IsRenderable()
front := p.FrontMatter()
if len(front) != 0 {
fm := page.detectFrontMatter(rune(front[0]))
meta, err := fm.parse(front)
if err != nil {
return err
}
if err = page.update(meta); err != nil {
return err
}
}
switch page.guessMarkupType() {
case "md", "markdown", "mdown":
page.convertMarkdown(bytes.NewReader(p.Content()))
case "rst":
page.convertRestructuredText(bytes.NewReader(p.Content()))
case "html":
fallthrough
default:
page.Content = template.HTML(p.Content())
}
return nil
}
func (page *Page) convertMarkdown(lines io.Reader) {
b := new(bytes.Buffer)
b.ReadFrom(lines)
content := b.Bytes()
page.Content = template.HTML(string(blackfriday.MarkdownCommon(RemoveSummaryDivider(content))))
summary, plain := getSummaryString(content)
if plain {
page.Summary = template.HTML(string(summary))
} else {
page.Summary = template.HTML(string(blackfriday.MarkdownCommon(summary)))
}
}
func (page *Page) convertRestructuredText(lines io.Reader) {
b := new(bytes.Buffer)
b.ReadFrom(lines)
content := b.Bytes()
page.Content = template.HTML(getRstContent(content))
summary, plain := getSummaryString(content)
if plain {
page.Summary = template.HTML(string(summary))
} else {
page.Summary = template.HTML(getRstContent(summary))
}
}
| hugolib/page.go | 0 | https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64 | [
0.0017177738482132554,
0.00026977763627655804,
0.00016669617616571486,
0.00017195494729094207,
0.00027550928643904626
] |
{
"id": 4,
"code_window": [
"\t}\n",
"\n",
"\texpected := \"<html><head></head><body><a href=\\\"http://auth/bub/foobar.jpg\\\">Going</a></body></html>\"\n",
"\tif string(content) != expected {\n",
"\t\tt.Errorf(\"AbsUrlify content expected:\\n%q\\ngot\\n%q\", expected, string(content))\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\texpected := test.expected\n"
],
"file_path": "hugolib/site_test.go",
"type": "replace",
"edit_start_line_idx": 276
} | package transform
import (
htmltran "code.google.com/p/go-html-transform/html/transform"
"io"
"net/url"
)
type Transformer struct {
BaseURL string
}
func (t *Transformer) Apply(r io.Reader, w io.Writer) (err error) {
var tr *htmltran.Transformer
if tr, err = htmltran.NewFromReader(r); err != nil {
return
}
if err = t.absUrlify(tr, elattr{"a", "href"}, elattr{"script", "src"}); err != nil {
return
}
return tr.Render(w)
}
type elattr struct {
tag, attr string
}
func (t *Transformer) absUrlify(tr *htmltran.Transformer, selectors ...elattr) (err error) {
var baseURL, inURL *url.URL
if baseURL, err = url.Parse(t.BaseURL); err != nil {
return
}
replace := func(in string) string {
if inURL, err = url.Parse(in); err != nil {
return in + "?"
}
return baseURL.ResolveReference(inURL).String()
}
for _, el := range selectors {
if err = tr.Apply(htmltran.TransformAttrib(el.attr, replace), el.tag); err != nil {
return
}
}
return
}
| transform/post.go | 1 | https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64 | [
0.001231579459272325,
0.00042824019328691065,
0.00016098392370622605,
0.00017209944780915976,
0.00040288924355991185
] |
{
"id": 4,
"code_window": [
"\t}\n",
"\n",
"\texpected := \"<html><head></head><body><a href=\\\"http://auth/bub/foobar.jpg\\\">Going</a></body></html>\"\n",
"\tif string(content) != expected {\n",
"\t\tt.Errorf(\"AbsUrlify content expected:\\n%q\\ngot\\n%q\", expected, string(content))\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\texpected := test.expected\n"
],
"file_path": "hugolib/site_test.go",
"type": "replace",
"edit_start_line_idx": 276
} | language: go
go:
- 1.1
script:
- go test ./...
- go build
- ./hugo -s docs/
| .travis.yml | 0 | https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64 | [
0.00017391752044204623,
0.00017391752044204623,
0.00017391752044204623,
0.00017391752044204623,
0
] |
{
"id": 4,
"code_window": [
"\t}\n",
"\n",
"\texpected := \"<html><head></head><body><a href=\\\"http://auth/bub/foobar.jpg\\\">Going</a></body></html>\"\n",
"\tif string(content) != expected {\n",
"\t\tt.Errorf(\"AbsUrlify content expected:\\n%q\\ngot\\n%q\", expected, string(content))\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\texpected := test.expected\n"
],
"file_path": "hugolib/site_test.go",
"type": "replace",
"edit_start_line_idx": 276
} | ---
title: "Roadmap"
date: "2013-07-01"
aliases: ["/doc/roadmap/"]
---
In no particular order, here is what we are working on:
* Pagination
* Support for top level pages (other than homepage)
* Better error handling
* Syntax highlighting
* Commands
* Actions (eg. hugo create page)
* Related Posts
* Support for other formats
| docs/content/meta/roadmap.md | 0 | https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64 | [
0.0001672896178206429,
0.00016581866657361388,
0.00016434771532658488,
0.00016581866657361388,
0.0000014709512470290065
] |
{
"id": 4,
"code_window": [
"\t}\n",
"\n",
"\texpected := \"<html><head></head><body><a href=\\\"http://auth/bub/foobar.jpg\\\">Going</a></body></html>\"\n",
"\tif string(content) != expected {\n",
"\t\tt.Errorf(\"AbsUrlify content expected:\\n%q\\ngot\\n%q\", expected, string(content))\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\texpected := test.expected\n"
],
"file_path": "hugolib/site_test.go",
"type": "replace",
"edit_start_line_idx": 276
} | package target
import (
"testing"
)
func TestHTMLRedirectAlias(t *testing.T) {
var o Translator
o = new(HTMLRedirectAlias)
tests := []struct {
value string
expected string
}{
{"", ""},
{"s", "s/index.html"},
{"/", "/index.html"},
{"alias 1", "alias-1/index.html"},
{"alias 2/", "alias-2/index.html"},
{"alias 3.html", "alias-3.html"},
{"alias4.html", "alias4.html"},
{"/alias 5.html", "/alias-5.html"},
}
for _, test := range tests {
path, err := o.Translate(test.value)
if err != nil {
t.Fatalf("Translate returned an error: %s", err)
}
if path != test.expected {
t.Errorf("Expected: %s, got: %s", test.expected, path)
}
}
}
| target/alias_test.go | 0 | https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64 | [
0.0002725186350289732,
0.00019498527399264276,
0.0001669603807386011,
0.0001702310109976679,
0.000044784046622226015
] |
{
"id": 5,
"code_window": [
"\tif string(content) != expected {\n",
"\t\tt.Errorf(\"AbsUrlify content expected:\\n%q\\ngot\\n%q\", expected, string(content))\n",
"\t}\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add"
],
"after_edit": [
"}"
],
"file_path": "hugolib/site_test.go",
"type": "add",
"edit_start_line_idx": 281
} | package hugolib
import (
"bytes"
"fmt"
"html/template"
"strings"
"testing"
)
const (
TEMPLATE_TITLE = "{{ .Title }}"
PAGE_SIMPLE_TITLE = `---
title: simple template
---
content`
TEMPLATE_MISSING_FUNC = "{{ .Title | funcdoesnotexists }}"
TEMPLATE_FUNC = "{{ .Title | urlize }}"
TEMPLATE_CONTENT = "{{ .Content }}"
TEMPLATE_DATE = "{{ .Date }}"
INVALID_TEMPLATE_FORMAT_DATE = "{{ .Date.Format time.RFC3339 }}"
TEMPLATE_WITH_URL = "<a href=\"foobar.jpg\">Going</a>"
PAGE_URL_SPECIFIED = `---
title: simple template
url: "mycategory/my-whatever-content/"
---
content`
PAGE_WITH_MD = `---
title: page with md
---
# heading 1
text
## heading 2
more text
`
)
func pageMust(p *Page, err error) *Page {
if err != nil {
panic(err)
}
return p
}
func TestDegenerateRenderThingMissingTemplate(t *testing.T) {
p, _ := ReadFrom(strings.NewReader(PAGE_SIMPLE_TITLE), "content/a/file.md")
s := new(Site)
s.prepTemplates()
_, err := s.RenderThing(p, "foobar")
if err == nil {
t.Errorf("Expected err to be returned when missing the template.")
}
}
func TestAddInvalidTemplate(t *testing.T) {
s := new(Site)
s.prepTemplates()
err := s.addTemplate("missing", TEMPLATE_MISSING_FUNC)
if err == nil {
t.Fatalf("Expecting the template to return an error")
}
}
func matchRender(t *testing.T, s *Site, p *Page, tmplName string, expected string) {
content, err := s.RenderThing(p, tmplName)
if err != nil {
t.Fatalf("Unable to render template.")
}
if string(content.Bytes()) != expected {
t.Fatalf("Content did not match expected: %s. got: %s", expected, content)
}
}
func _TestAddSameTemplateTwice(t *testing.T) {
p := pageMust(ReadFrom(strings.NewReader(PAGE_SIMPLE_TITLE), "content/a/file.md"))
s := new(Site)
s.prepTemplates()
err := s.addTemplate("foo", TEMPLATE_TITLE)
if err != nil {
t.Fatalf("Unable to add template foo")
}
matchRender(t, s, p, "foo", "simple template")
err = s.addTemplate("foo", "NEW {{ .Title }}")
if err != nil {
t.Fatalf("Unable to add template foo: %s", err)
}
matchRender(t, s, p, "foo", "NEW simple template")
}
func TestRenderThing(t *testing.T) {
tests := []struct {
content string
template string
expected string
}{
{PAGE_SIMPLE_TITLE, TEMPLATE_TITLE, "simple template"},
{PAGE_SIMPLE_TITLE, TEMPLATE_FUNC, "simple-template"},
{PAGE_WITH_MD, TEMPLATE_CONTENT, "<h1>heading 1</h1>\n\n<p>text</p>\n\n<h2>heading 2</h2>\n\n<p>more text</p>\n"},
{SIMPLE_PAGE_RFC3339_DATE, TEMPLATE_DATE, "2013-05-17 16:59:30 +0000 UTC"},
}
s := new(Site)
s.prepTemplates()
for i, test := range tests {
p, err := ReadFrom(strings.NewReader(test.content), "content/a/file.md")
if err != nil {
t.Fatalf("Error parsing buffer: %s", err)
}
templateName := fmt.Sprintf("foobar%d", i)
err = s.addTemplate(templateName, test.template)
if err != nil {
t.Fatalf("Unable to add template")
}
p.Content = template.HTML(p.Content)
html, err2 := s.RenderThing(p, templateName)
if err2 != nil {
t.Errorf("Unable to render html: %s", err)
}
if string(html.Bytes()) != test.expected {
t.Errorf("Content does not match.\nExpected\n\t'%q'\ngot\n\t'%q'", test.expected, html)
}
}
}
func TestRenderThingOrDefault(t *testing.T) {
tests := []struct {
content string
missing bool
template string
expected string
}{
{PAGE_SIMPLE_TITLE, true, TEMPLATE_TITLE, "simple template"},
{PAGE_SIMPLE_TITLE, true, TEMPLATE_FUNC, "simple-template"},
{PAGE_SIMPLE_TITLE, false, TEMPLATE_TITLE, "simple template"},
{PAGE_SIMPLE_TITLE, false, TEMPLATE_FUNC, "simple-template"},
}
s := new(Site)
s.prepTemplates()
for i, test := range tests {
p, err := ReadFrom(strings.NewReader(PAGE_SIMPLE_TITLE), "content/a/file.md")
if err != nil {
t.Fatalf("Error parsing buffer: %s", err)
}
templateName := fmt.Sprintf("default%d", i)
err = s.addTemplate(templateName, test.template)
if err != nil {
t.Fatalf("Unable to add template")
}
var html *bytes.Buffer
var err2 error
if test.missing {
html, err2 = s.RenderThingOrDefault(p, "missing", templateName)
} else {
html, err2 = s.RenderThingOrDefault(p, templateName, "missing_default")
}
if err2 != nil {
t.Errorf("Unable to render html: %s", err)
}
if string(html.Bytes()) != test.expected {
t.Errorf("Content does not match. Expected '%s', got '%s'", test.expected, html)
}
}
}
func TestSetOutFile(t *testing.T) {
s := new(Site)
p := pageMust(ReadFrom(strings.NewReader(PAGE_URL_SPECIFIED), "content/a/file.md"))
s.setOutFile(p)
expected := "mycategory/my-whatever-content/index.html"
if p.OutFile != "mycategory/my-whatever-content/index.html" {
t.Errorf("Outfile does not match. Expected '%s', got '%s'", expected, p.OutFile)
}
}
func TestSkipRender(t *testing.T) {
files := make(map[string][]byte)
target := &InMemoryTarget{files: files}
sources := []byteSource{
{"sect/doc1.html", []byte("---\nmarkup: markdown\n---\n# title\nsome *content*")},
{"sect/doc2.html", []byte("<!doctype html><html><body>more content</body></html>")},
{"sect/doc3.md", []byte("# doc3\n*some* content")},
{"sect/doc4.md", []byte("---\ntitle: doc4\n---\n# doc4\n*some content*")},
{"sect/doc5.html", []byte("<!doctype html><html>{{ template \"head\" }}<body>body5</body></html>")},
}
s := &Site{
Target: target,
Config: Config{BaseUrl: "http://auth/bub/"},
Source: &inMemorySource{sources},
}
s.initializeSiteInfo()
s.prepTemplates()
must(s.addTemplate("_default/single.html", "{{.Content}}"))
must(s.addTemplate("head", "<head><script src=\"script.js\"></script></head>"))
if err := s.CreatePages(); err != nil {
t.Fatalf("Unable to create pages: %s", err)
}
if err := s.BuildSiteMeta(); err != nil {
t.Fatalf("Unable to build site metadata: %s", err)
}
if err := s.RenderPages(); err != nil {
t.Fatalf("Unable to render pages. %s", err)
}
tests := []struct {
doc string
expected string
}{
{"sect/doc1.html", "<html><head></head><body><h1>title</h1>\n\n<p>some <em>content</em></p>\n</body></html>"},
{"sect/doc2.html", "<!DOCTYPE html><html><head></head><body>more content</body></html>"},
{"sect/doc3.html", "<html><head></head><body><h1>doc3</h1>\n\n<p><em>some</em> content</p>\n</body></html>"},
{"sect/doc4.html", "<html><head></head><body><h1>doc4</h1>\n\n<p><em>some content</em></p>\n</body></html>"},
{"sect/doc5.html", "<!DOCTYPE html><html><head><script src=\"http://auth/bub/script.js\"></script></head><body>body5</body></html>"},
}
for _, test := range tests {
content, ok := target.files[test.doc]
if !ok {
t.Fatalf("Did not find %s in target. %v", test.doc, target.files)
}
if !bytes.Equal(content, []byte(test.expected)) {
t.Errorf("%s content expected:\n%q\ngot:\n%q", test.doc, test.expected, string(content))
}
}
}
func TestAbsUrlify(t *testing.T) {
files := make(map[string][]byte)
target := &InMemoryTarget{files: files}
s := &Site{
Target: target,
Config: Config{BaseUrl: "http://auth/bub/"},
Source: &inMemorySource{urlFakeSource},
}
s.initializeSiteInfo()
s.prepTemplates()
must(s.addTemplate("blue/single.html", TEMPLATE_WITH_URL))
if err := s.CreatePages(); err != nil {
t.Fatalf("Unable to create pages: %s", err)
}
if err := s.BuildSiteMeta(); err != nil {
t.Fatalf("Unable to build site metadata: %s", err)
}
if err := s.RenderPages(); err != nil {
t.Fatalf("Unable to render pages. %s", err)
}
content, ok := target.files["content/blue/slug-doc-1.html"]
if !ok {
t.Fatalf("Unable to locate rendered content")
}
expected := "<html><head></head><body><a href=\"http://auth/bub/foobar.jpg\">Going</a></body></html>"
if string(content) != expected {
t.Errorf("AbsUrlify content expected:\n%q\ngot\n%q", expected, string(content))
}
}
| hugolib/site_test.go | 1 | https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64 | [
0.9815766215324402,
0.04199123755097389,
0.00016566572594456375,
0.0001758971338858828,
0.1807321012020111
] |
{
"id": 5,
"code_window": [
"\tif string(content) != expected {\n",
"\t\tt.Errorf(\"AbsUrlify content expected:\\n%q\\ngot\\n%q\", expected, string(content))\n",
"\t}\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add"
],
"after_edit": [
"}"
],
"file_path": "hugolib/site_test.go",
"type": "add",
"edit_start_line_idx": 281
} | package parser
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"unicode"
)
const (
HTML_LEAD = "<"
YAML_LEAD = "-"
YAML_DELIM_UNIX = "---\n"
YAML_DELIM_DOS = "---\r\n"
TOML_LEAD = "+"
TOML_DELIM_UNIX = "+++\n"
TOML_DELIM_DOS = "+++\r\n"
JAVA_LEAD = "{"
)
var (
delims = [][]byte{
[]byte(YAML_DELIM_UNIX),
[]byte(YAML_DELIM_DOS),
[]byte(TOML_DELIM_UNIX),
[]byte(TOML_DELIM_DOS),
[]byte(JAVA_LEAD),
}
unixEnding = []byte("\n")
dosEnding = []byte("\r\n")
)
type FrontMatter []byte
type Content []byte
type Page interface {
FrontMatter() FrontMatter
Content() Content
IsRenderable() bool
}
type page struct {
render bool
frontmatter FrontMatter
content Content
}
func (p *page) Content() Content {
return p.content
}
func (p *page) FrontMatter() FrontMatter {
return p.frontmatter
}
func (p *page) IsRenderable() bool {
return p.render
}
// ReadFrom reads the content from an io.Reader and constructs a page.
func ReadFrom(r io.Reader) (p Page, err error) {
reader := bufio.NewReader(r)
if err = chompWhitespace(reader); err != nil {
return
}
firstLine, err := peekLine(reader)
if err != nil {
return
}
newp := new(page)
newp.render = shouldRender(firstLine)
if newp.render && isFrontMatterDelim(firstLine) {
left, right := determineDelims(firstLine)
fm, err := extractFrontMatterDelims(reader, left, right)
if err != nil {
return nil, err
}
newp.frontmatter = fm
}
content, err := extractContent(reader)
if err != nil {
return nil, err
}
newp.content = content
return newp, nil
}
func chompWhitespace(r io.RuneScanner) (err error) {
for {
c, _, err := r.ReadRune()
if err != nil {
return err
}
if !unicode.IsSpace(c) {
r.UnreadRune()
return nil
}
}
return
}
func peekLine(r *bufio.Reader) (line []byte, err error) {
firstFive, err := r.Peek(5)
if err != nil {
return
}
idx := bytes.IndexByte(firstFive, '\n')
if idx == -1 {
return firstFive, nil
}
idx += 1 // include newline.
return firstFive[:idx], nil
}
func shouldRender(lead []byte) (frontmatter bool) {
if len(lead) <= 0 {
return
}
if bytes.Equal(lead[:1], []byte(HTML_LEAD)) {
return
}
return true
}
func isFrontMatterDelim(data []byte) bool {
for _, d := range delims {
if bytes.HasPrefix(data, d) {
return true
}
}
return false
}
func determineDelims(firstLine []byte) (left, right []byte) {
switch len(firstLine) {
case 4:
if firstLine[0] == YAML_LEAD[0] {
return []byte(YAML_DELIM_UNIX), []byte(YAML_DELIM_UNIX)
}
return []byte(TOML_DELIM_UNIX), []byte(TOML_DELIM_UNIX)
case 5:
if firstLine[0] == YAML_LEAD[0] {
return []byte(YAML_DELIM_DOS), []byte(YAML_DELIM_DOS)
}
return []byte(TOML_DELIM_DOS), []byte(TOML_DELIM_DOS)
case 3:
fallthrough
case 2:
fallthrough
case 1:
return []byte(JAVA_LEAD), []byte("}")
default:
panic(fmt.Sprintf("Unable to determine delims from %q", firstLine))
}
return
}
func extractFrontMatterDelims(r *bufio.Reader, left, right []byte) (fm FrontMatter, err error) {
var (
c byte
level int = 0
bytesRead int = 0
sameDelim = bytes.Equal(left, right)
)
wr := new(bytes.Buffer)
for {
if c, err = r.ReadByte(); err != nil {
return nil, fmt.Errorf("Unable to read frontmatter at filepos %d: %s", bytesRead, err)
}
bytesRead += 1
switch c {
case left[0]:
var (
buf []byte = []byte{c}
remaining []byte
)
if remaining, err = r.Peek(len(left) - 1); err != nil {
return nil, err
}
buf = append(buf, remaining...)
if bytes.Equal(buf, left) {
if sameDelim {
if level == 0 {
level = 1
} else {
level = 0
}
} else {
level += 1
}
}
if _, err = wr.Write([]byte{c}); err != nil {
return nil, err
}
if level == 0 {
if _, err = r.Read(remaining); err != nil {
return nil, err
}
if _, err = wr.Write(remaining); err != nil {
return nil, err
}
}
case right[0]:
match, err := matches(r, wr, []byte{c}, right)
if err != nil {
return nil, err
}
if match {
level -= 1
}
default:
if err = wr.WriteByte(c); err != nil {
return nil, err
}
}
if level == 0 && !unicode.IsSpace(rune(c)) {
if err = chompWhitespace(r); err != nil {
if err != io.EOF {
return nil, err
}
}
return wr.Bytes(), nil
}
}
return nil, errors.New("Could not find front matter.")
}
func matches_quick(buf, expected []byte) (ok bool, err error) {
return bytes.Equal(expected, buf), nil
}
func matches(r *bufio.Reader, wr io.Writer, c, expected []byte) (ok bool, err error) {
if len(expected) == 1 {
if _, err = wr.Write(c); err != nil {
return
}
return bytes.Equal(c, expected), nil
}
buf := make([]byte, len(expected)-1)
if buf, err = r.Peek(len(expected) - 1); err != nil {
return
}
buf = append(c, buf...)
return bytes.Equal(expected, buf), nil
}
func extractContent(r io.Reader) (content Content, err error) {
wr := new(bytes.Buffer)
if _, err = wr.ReadFrom(r); err != nil {
return
}
return wr.Bytes(), nil
}
| parser/page.go | 0 | https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64 | [
0.019966762512922287,
0.0015873877564445138,
0.00016699939442332834,
0.0001737840211717412,
0.004176952410489321
] |
{
"id": 5,
"code_window": [
"\tif string(content) != expected {\n",
"\t\tt.Errorf(\"AbsUrlify content expected:\\n%q\\ngot\\n%q\", expected, string(content))\n",
"\t}\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add"
],
"after_edit": [
"}"
],
"file_path": "hugolib/site_test.go",
"type": "add",
"edit_start_line_idx": 281
} | package source
import (
"io"
"os"
"path/filepath"
)
type Input interface {
Files() []*File
}
type File struct {
Name string
Contents io.Reader
}
type Filesystem struct {
files []*File
Base string
AvoidPaths []string
}
func (f *Filesystem) Files() []*File {
f.captureFiles()
return f.files
}
func (f *Filesystem) add(name string, reader io.Reader) {
name = filepath.ToSlash(name)
f.files = append(f.files, &File{Name: name, Contents: reader})
}
func (f *Filesystem) captureFiles() {
walker := func(path string, fi os.FileInfo, err error) error {
if err != nil {
return nil
}
if fi.IsDir() {
if f.avoid(path) {
return filepath.SkipDir
}
return nil
} else {
if ignoreDotFile(path) {
return nil
}
file, err := os.Open(path)
if err != nil {
return err
}
f.add(path, file)
return nil
}
}
filepath.Walk(f.Base, walker)
}
func (f *Filesystem) avoid(path string) bool {
for _, avoid := range f.AvoidPaths {
if avoid == path {
return true
}
}
return false
}
func ignoreDotFile(path string) bool {
return filepath.Base(path)[0] == '.'
}
| source/filesystem.go | 0 | https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64 | [
0.00019786776101682335,
0.00017491103790234774,
0.00016986737318802625,
0.00017216648848261684,
0.000008780876669334248
] |
{
"id": 5,
"code_window": [
"\tif string(content) != expected {\n",
"\t\tt.Errorf(\"AbsUrlify content expected:\\n%q\\ngot\\n%q\", expected, string(content))\n",
"\t}\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add"
],
"after_edit": [
"}"
],
"file_path": "hugolib/site_test.go",
"type": "add",
"edit_start_line_idx": 281
} | package source
import (
"testing"
)
func TestIgnoreDotFiles(t *testing.T) {
tests := []struct {
path string
ignore bool
}{
{"barfoo.md", false},
{"foobar/barfoo.md", false},
{"foobar/.barfoo.md", true},
{".barfoo.md", true},
{".md", true},
{"", true},
}
for _, test := range tests {
if ignored := ignoreDotFile(test.path); test.ignore != ignored {
t.Errorf("File not ignored. Expected: %t, got: %t", test.ignore, ignored)
}
}
}
| source/content_directory_test.go | 0 | https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64 | [
0.00024975178530439734,
0.00019887315283995122,
0.00017139804549515247,
0.00017546962772030383,
0.000036015004297951236
] |
{
"id": 6,
"code_window": [
"\treplace := func(in string) string {\n",
"\t\tif inURL, err = url.Parse(in); err != nil {\n",
"\t\t\treturn in + \"?\"\n",
"\t\t}\n",
"\t\treturn baseURL.ResolveReference(inURL).String()\n",
"\t}\n",
"\n",
"\tfor _, el := range selectors {\n",
"\t\tif err = tr.Apply(htmltran.TransformAttrib(el.attr, replace), el.tag); err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif fragmentOnly(inURL) {\n",
"\t\t\treturn in\n",
"\t\t}\n"
],
"file_path": "transform/post.go",
"type": "add",
"edit_start_line_idx": 41
} | package transform
import (
htmltran "code.google.com/p/go-html-transform/html/transform"
"io"
"net/url"
)
type Transformer struct {
BaseURL string
}
func (t *Transformer) Apply(r io.Reader, w io.Writer) (err error) {
var tr *htmltran.Transformer
if tr, err = htmltran.NewFromReader(r); err != nil {
return
}
if err = t.absUrlify(tr, elattr{"a", "href"}, elattr{"script", "src"}); err != nil {
return
}
return tr.Render(w)
}
type elattr struct {
tag, attr string
}
func (t *Transformer) absUrlify(tr *htmltran.Transformer, selectors ...elattr) (err error) {
var baseURL, inURL *url.URL
if baseURL, err = url.Parse(t.BaseURL); err != nil {
return
}
replace := func(in string) string {
if inURL, err = url.Parse(in); err != nil {
return in + "?"
}
return baseURL.ResolveReference(inURL).String()
}
for _, el := range selectors {
if err = tr.Apply(htmltran.TransformAttrib(el.attr, replace), el.tag); err != nil {
return
}
}
return
}
| transform/post.go | 1 | https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64 | [
0.9976266026496887,
0.33337223529815674,
0.00016836175927892327,
0.003164305817335844,
0.4686439037322998
] |
{
"id": 6,
"code_window": [
"\treplace := func(in string) string {\n",
"\t\tif inURL, err = url.Parse(in); err != nil {\n",
"\t\t\treturn in + \"?\"\n",
"\t\t}\n",
"\t\treturn baseURL.ResolveReference(inURL).String()\n",
"\t}\n",
"\n",
"\tfor _, el := range selectors {\n",
"\t\tif err = tr.Apply(htmltran.TransformAttrib(el.attr, replace), el.tag); err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif fragmentOnly(inURL) {\n",
"\t\t\treturn in\n",
"\t\t}\n"
],
"file_path": "transform/post.go",
"type": "add",
"edit_start_line_idx": 41
} | ---
title: "Sections"
date: "2013-07-01"
---
Hugo thinks that you organize your content with a purpose. The same structure
that works to organize your source content is used to organize the rendered
site ( [see organization](/content/organization) ). Following this pattern Hugo
uses the top level of your content organization as **the Section**.
The following example site uses two sections, "post" and "quote".
.
└── content
├── post
| ├── firstpost.md // <- http://site.com/post/firstpost/
| ├── happy
| | └── happiness.md // <- http://site.com/happy/happiness/
| └── secondpost.md // <- http://site.com/post/secondpost/
└── quote
├── first.md // <- http://site.com/quote/first/
└── second.md // <- http://site.com/quote/second/
*Regardless of location on disk, the section can be provided in the front matter
which will affect the destination location*.
## Sections and Types
By default everything created within a section will use the content type
that matches the section name.
Section defined in the front matter have the same impact.
To change the type of a given piece of content simply define the type
in the front matter.
If a layout for a given type hasn't been provided a default type template will
be used instead provided is exists.
| docs/content/content/sections.md | 0 | https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64 | [
0.00017413754540029913,
0.00017064002167899162,
0.00016111032164189965,
0.00017312433919869363,
0.000004922555035591358
] |
{
"id": 6,
"code_window": [
"\treplace := func(in string) string {\n",
"\t\tif inURL, err = url.Parse(in); err != nil {\n",
"\t\t\treturn in + \"?\"\n",
"\t\t}\n",
"\t\treturn baseURL.ResolveReference(inURL).String()\n",
"\t}\n",
"\n",
"\tfor _, el := range selectors {\n",
"\t\tif err = tr.Apply(htmltran.TransformAttrib(el.attr, replace), el.tag); err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif fragmentOnly(inURL) {\n",
"\t\t\treturn in\n",
"\t\t}\n"
],
"file_path": "transform/post.go",
"type": "add",
"edit_start_line_idx": 41
} | package target
import (
"bytes"
helpers "github.com/spf13/hugo/template"
"html/template"
"path"
"strings"
)
const ALIAS = "<!DOCTYPE html><html><head><link rel=\"canonical\" href=\"{{ .Permalink }}\"/><meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\" /><meta http-equiv=\"refresh\" content=\"0;url={{ .Permalink }}\" /></head></html>"
const ALIAS_XHTML = "<!DOCTYPE html><html xmlns=\"http://www.w3.org/1999/xhtml\"><head><link rel=\"canonical\" href=\"{{ .Permalink }}\"/><meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\" /><meta http-equiv=\"refresh\" content=\"0;url={{ .Permalink }}\" /></head></html>"
var DefaultAliasTemplates *template.Template
func init() {
DefaultAliasTemplates = template.New("")
template.Must(DefaultAliasTemplates.New("alias").Parse(ALIAS))
template.Must(DefaultAliasTemplates.New("alias-xhtml").Parse(ALIAS_XHTML))
}
type AliasPublisher interface {
Translator
Publish(string, template.HTML) error
}
type HTMLRedirectAlias struct {
PublishDir string
Templates *template.Template
}
func (h *HTMLRedirectAlias) Translate(alias string) (aliasPath string, err error) {
if len(alias) <= 0 {
return
}
if strings.HasSuffix(alias, "/") {
alias = alias + "index.html"
} else if !strings.HasSuffix(alias, ".html") {
alias = alias + "/index.html"
}
return path.Join(h.PublishDir, helpers.Urlize(alias)), nil
}
type AliasNode struct {
Permalink template.HTML
}
func (h *HTMLRedirectAlias) Publish(path string, permalink template.HTML) (err error) {
if path, err = h.Translate(path); err != nil {
return
}
t := "alias"
if strings.HasSuffix(path, ".xhtml") {
t = "alias-xhtml"
}
template := DefaultAliasTemplates
if h.Templates != nil {
template = h.Templates
}
buffer := new(bytes.Buffer)
err = template.ExecuteTemplate(buffer, t, &AliasNode{permalink})
if err != nil {
return
}
return writeToDisk(path, buffer)
}
| target/htmlredirect.go | 0 | https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64 | [
0.0005110430647619069,
0.00022350368089973927,
0.00016640823741909117,
0.00017056688375305384,
0.00011259019811404869
] |
{
"id": 6,
"code_window": [
"\treplace := func(in string) string {\n",
"\t\tif inURL, err = url.Parse(in); err != nil {\n",
"\t\t\treturn in + \"?\"\n",
"\t\t}\n",
"\t\treturn baseURL.ResolveReference(inURL).String()\n",
"\t}\n",
"\n",
"\tfor _, el := range selectors {\n",
"\t\tif err = tr.Apply(htmltran.TransformAttrib(el.attr, replace), el.tag); err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif fragmentOnly(inURL) {\n",
"\t\t\treturn in\n",
"\t\t}\n"
],
"file_path": "transform/post.go",
"type": "add",
"edit_start_line_idx": 41
} | package hugolib
import (
"os"
"strings"
)
func fileExt(path string) (file, ext string) {
if strings.Contains(path, ".") {
i := len(path) - 1
for path[i] != '.' {
i--
}
return path[:i], path[i+1:]
}
return path, ""
}
func replaceExtension(path string, newExt string) string {
f, _ := fileExt(path)
return f + "." + newExt
}
// Check if Exists && is Directory
func dirExists(path string) (bool, error) {
fi, err := os.Stat(path)
if err == nil && fi.IsDir() {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
| hugolib/path.go | 0 | https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64 | [
0.0016909206751734018,
0.0005470660980790854,
0.0001641380222281441,
0.00016660285473335534,
0.0006604057853110135
] |
{
"id": 7,
"code_window": [
"\n",
"\treturn\n",
"}\n"
],
"labels": [
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"func fragmentOnly(u *url.URL) bool {\n",
"\treturn u.Fragment != \"\" && u.Scheme == \"\" && u.Opaque == \"\" && u.User == nil && u.Host == \"\" && u.Path == \"\" && u.Path == \"\" && u.RawQuery == \"\"\n",
"}"
],
"file_path": "transform/post.go",
"type": "add",
"edit_start_line_idx": 52
} | package hugolib
import (
"bytes"
"fmt"
"html/template"
"strings"
"testing"
)
const (
TEMPLATE_TITLE = "{{ .Title }}"
PAGE_SIMPLE_TITLE = `---
title: simple template
---
content`
TEMPLATE_MISSING_FUNC = "{{ .Title | funcdoesnotexists }}"
TEMPLATE_FUNC = "{{ .Title | urlize }}"
TEMPLATE_CONTENT = "{{ .Content }}"
TEMPLATE_DATE = "{{ .Date }}"
INVALID_TEMPLATE_FORMAT_DATE = "{{ .Date.Format time.RFC3339 }}"
TEMPLATE_WITH_URL = "<a href=\"foobar.jpg\">Going</a>"
PAGE_URL_SPECIFIED = `---
title: simple template
url: "mycategory/my-whatever-content/"
---
content`
PAGE_WITH_MD = `---
title: page with md
---
# heading 1
text
## heading 2
more text
`
)
func pageMust(p *Page, err error) *Page {
if err != nil {
panic(err)
}
return p
}
func TestDegenerateRenderThingMissingTemplate(t *testing.T) {
p, _ := ReadFrom(strings.NewReader(PAGE_SIMPLE_TITLE), "content/a/file.md")
s := new(Site)
s.prepTemplates()
_, err := s.RenderThing(p, "foobar")
if err == nil {
t.Errorf("Expected err to be returned when missing the template.")
}
}
func TestAddInvalidTemplate(t *testing.T) {
s := new(Site)
s.prepTemplates()
err := s.addTemplate("missing", TEMPLATE_MISSING_FUNC)
if err == nil {
t.Fatalf("Expecting the template to return an error")
}
}
func matchRender(t *testing.T, s *Site, p *Page, tmplName string, expected string) {
content, err := s.RenderThing(p, tmplName)
if err != nil {
t.Fatalf("Unable to render template.")
}
if string(content.Bytes()) != expected {
t.Fatalf("Content did not match expected: %s. got: %s", expected, content)
}
}
func _TestAddSameTemplateTwice(t *testing.T) {
p := pageMust(ReadFrom(strings.NewReader(PAGE_SIMPLE_TITLE), "content/a/file.md"))
s := new(Site)
s.prepTemplates()
err := s.addTemplate("foo", TEMPLATE_TITLE)
if err != nil {
t.Fatalf("Unable to add template foo")
}
matchRender(t, s, p, "foo", "simple template")
err = s.addTemplate("foo", "NEW {{ .Title }}")
if err != nil {
t.Fatalf("Unable to add template foo: %s", err)
}
matchRender(t, s, p, "foo", "NEW simple template")
}
func TestRenderThing(t *testing.T) {
tests := []struct {
content string
template string
expected string
}{
{PAGE_SIMPLE_TITLE, TEMPLATE_TITLE, "simple template"},
{PAGE_SIMPLE_TITLE, TEMPLATE_FUNC, "simple-template"},
{PAGE_WITH_MD, TEMPLATE_CONTENT, "<h1>heading 1</h1>\n\n<p>text</p>\n\n<h2>heading 2</h2>\n\n<p>more text</p>\n"},
{SIMPLE_PAGE_RFC3339_DATE, TEMPLATE_DATE, "2013-05-17 16:59:30 +0000 UTC"},
}
s := new(Site)
s.prepTemplates()
for i, test := range tests {
p, err := ReadFrom(strings.NewReader(test.content), "content/a/file.md")
if err != nil {
t.Fatalf("Error parsing buffer: %s", err)
}
templateName := fmt.Sprintf("foobar%d", i)
err = s.addTemplate(templateName, test.template)
if err != nil {
t.Fatalf("Unable to add template")
}
p.Content = template.HTML(p.Content)
html, err2 := s.RenderThing(p, templateName)
if err2 != nil {
t.Errorf("Unable to render html: %s", err)
}
if string(html.Bytes()) != test.expected {
t.Errorf("Content does not match.\nExpected\n\t'%q'\ngot\n\t'%q'", test.expected, html)
}
}
}
func TestRenderThingOrDefault(t *testing.T) {
tests := []struct {
content string
missing bool
template string
expected string
}{
{PAGE_SIMPLE_TITLE, true, TEMPLATE_TITLE, "simple template"},
{PAGE_SIMPLE_TITLE, true, TEMPLATE_FUNC, "simple-template"},
{PAGE_SIMPLE_TITLE, false, TEMPLATE_TITLE, "simple template"},
{PAGE_SIMPLE_TITLE, false, TEMPLATE_FUNC, "simple-template"},
}
s := new(Site)
s.prepTemplates()
for i, test := range tests {
p, err := ReadFrom(strings.NewReader(PAGE_SIMPLE_TITLE), "content/a/file.md")
if err != nil {
t.Fatalf("Error parsing buffer: %s", err)
}
templateName := fmt.Sprintf("default%d", i)
err = s.addTemplate(templateName, test.template)
if err != nil {
t.Fatalf("Unable to add template")
}
var html *bytes.Buffer
var err2 error
if test.missing {
html, err2 = s.RenderThingOrDefault(p, "missing", templateName)
} else {
html, err2 = s.RenderThingOrDefault(p, templateName, "missing_default")
}
if err2 != nil {
t.Errorf("Unable to render html: %s", err)
}
if string(html.Bytes()) != test.expected {
t.Errorf("Content does not match. Expected '%s', got '%s'", test.expected, html)
}
}
}
func TestSetOutFile(t *testing.T) {
s := new(Site)
p := pageMust(ReadFrom(strings.NewReader(PAGE_URL_SPECIFIED), "content/a/file.md"))
s.setOutFile(p)
expected := "mycategory/my-whatever-content/index.html"
if p.OutFile != "mycategory/my-whatever-content/index.html" {
t.Errorf("Outfile does not match. Expected '%s', got '%s'", expected, p.OutFile)
}
}
func TestSkipRender(t *testing.T) {
files := make(map[string][]byte)
target := &InMemoryTarget{files: files}
sources := []byteSource{
{"sect/doc1.html", []byte("---\nmarkup: markdown\n---\n# title\nsome *content*")},
{"sect/doc2.html", []byte("<!doctype html><html><body>more content</body></html>")},
{"sect/doc3.md", []byte("# doc3\n*some* content")},
{"sect/doc4.md", []byte("---\ntitle: doc4\n---\n# doc4\n*some content*")},
{"sect/doc5.html", []byte("<!doctype html><html>{{ template \"head\" }}<body>body5</body></html>")},
}
s := &Site{
Target: target,
Config: Config{BaseUrl: "http://auth/bub/"},
Source: &inMemorySource{sources},
}
s.initializeSiteInfo()
s.prepTemplates()
must(s.addTemplate("_default/single.html", "{{.Content}}"))
must(s.addTemplate("head", "<head><script src=\"script.js\"></script></head>"))
if err := s.CreatePages(); err != nil {
t.Fatalf("Unable to create pages: %s", err)
}
if err := s.BuildSiteMeta(); err != nil {
t.Fatalf("Unable to build site metadata: %s", err)
}
if err := s.RenderPages(); err != nil {
t.Fatalf("Unable to render pages. %s", err)
}
tests := []struct {
doc string
expected string
}{
{"sect/doc1.html", "<html><head></head><body><h1>title</h1>\n\n<p>some <em>content</em></p>\n</body></html>"},
{"sect/doc2.html", "<!DOCTYPE html><html><head></head><body>more content</body></html>"},
{"sect/doc3.html", "<html><head></head><body><h1>doc3</h1>\n\n<p><em>some</em> content</p>\n</body></html>"},
{"sect/doc4.html", "<html><head></head><body><h1>doc4</h1>\n\n<p><em>some content</em></p>\n</body></html>"},
{"sect/doc5.html", "<!DOCTYPE html><html><head><script src=\"http://auth/bub/script.js\"></script></head><body>body5</body></html>"},
}
for _, test := range tests {
content, ok := target.files[test.doc]
if !ok {
t.Fatalf("Did not find %s in target. %v", test.doc, target.files)
}
if !bytes.Equal(content, []byte(test.expected)) {
t.Errorf("%s content expected:\n%q\ngot:\n%q", test.doc, test.expected, string(content))
}
}
}
func TestAbsUrlify(t *testing.T) {
files := make(map[string][]byte)
target := &InMemoryTarget{files: files}
s := &Site{
Target: target,
Config: Config{BaseUrl: "http://auth/bub/"},
Source: &inMemorySource{urlFakeSource},
}
s.initializeSiteInfo()
s.prepTemplates()
must(s.addTemplate("blue/single.html", TEMPLATE_WITH_URL))
if err := s.CreatePages(); err != nil {
t.Fatalf("Unable to create pages: %s", err)
}
if err := s.BuildSiteMeta(); err != nil {
t.Fatalf("Unable to build site metadata: %s", err)
}
if err := s.RenderPages(); err != nil {
t.Fatalf("Unable to render pages. %s", err)
}
content, ok := target.files["content/blue/slug-doc-1.html"]
if !ok {
t.Fatalf("Unable to locate rendered content")
}
expected := "<html><head></head><body><a href=\"http://auth/bub/foobar.jpg\">Going</a></body></html>"
if string(content) != expected {
t.Errorf("AbsUrlify content expected:\n%q\ngot\n%q", expected, string(content))
}
}
| hugolib/site_test.go | 1 | https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64 | [
0.013202721253037453,
0.0007918994524516165,
0.00016497858450748026,
0.00019187573343515396,
0.0023677244316786528
] |
{
"id": 7,
"code_window": [
"\n",
"\treturn\n",
"}\n"
],
"labels": [
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"func fragmentOnly(u *url.URL) bool {\n",
"\treturn u.Fragment != \"\" && u.Scheme == \"\" && u.Opaque == \"\" && u.User == nil && u.Host == \"\" && u.Path == \"\" && u.Path == \"\" && u.RawQuery == \"\"\n",
"}"
],
"file_path": "transform/post.go",
"type": "add",
"edit_start_line_idx": 52
} | package target
import (
"testing"
)
func TestFileTranslator(t *testing.T) {
tests := []struct {
content string
expected string
}{
{"/", "index.html"},
{"index.html", "index/index.html"},
{"foo", "foo/index.html"},
{"foo.html", "foo/index.html"},
{"foo.xhtml", "foo/index.xhtml"},
{"section", "section/index.html"},
{"section/", "section/index.html"},
{"section/foo", "section/foo/index.html"},
{"section/foo.html", "section/foo/index.html"},
{"section/foo.rss", "section/foo/index.rss"},
}
for _, test := range tests {
f := new(Filesystem)
dest, err := f.Translate(test.content)
if err != nil {
t.Fatalf("Translate returned and unexpected err: %s", err)
}
if dest != test.expected {
t.Errorf("Tranlate expected return: %s, got: %s", test.expected, dest)
}
}
}
func TestFileTranslatorBase(t *testing.T) {
tests := []struct {
content string
expected string
}{
{"/", "a/base/index.html"},
}
for _, test := range tests {
f := &Filesystem{PublishDir: "a/base"}
fts := &Filesystem{PublishDir: "a/base/"}
for _, fs := range []*Filesystem{f, fts} {
dest, err := fs.Translate(test.content)
if err != nil {
t.Fatalf("Translated returned and err: %s", err)
}
if dest != test.expected {
t.Errorf("Translate expected: %s, got: %s", test.expected, dest)
}
}
}
}
func TestTranslateUglyUrls(t *testing.T) {
tests := []struct {
content string
expected string
}{
{"foo.html", "foo.html"},
{"/", "index.html"},
{"section", "section.html"},
{"index.html", "index.html"},
}
for _, test := range tests {
f := &Filesystem{UglyUrls: true}
dest, err := f.Translate(test.content)
if err != nil {
t.Fatalf("Translate returned an unexpected err: %s", err)
}
if dest != test.expected {
t.Errorf("Translate expected return: %s, got: %s", test.expected, dest)
}
}
}
func TestTranslateDefaultExtension(t *testing.T) {
f := &Filesystem{DefaultExtension: ".foobar"}
dest, _ := f.Translate("baz")
if dest != "baz/index.foobar" {
t.Errorf("Translate expected return: %s, got %s", "baz/index.foobar", dest)
}
}
| target/file_test.go | 0 | https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64 | [
0.0003405533207114786,
0.00020860449876636267,
0.0001671054633334279,
0.00017932557966560125,
0.00005250361209618859
] |
{
"id": 7,
"code_window": [
"\n",
"\treturn\n",
"}\n"
],
"labels": [
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"func fragmentOnly(u *url.URL) bool {\n",
"\treturn u.Fragment != \"\" && u.Scheme == \"\" && u.Opaque == \"\" && u.User == nil && u.Host == \"\" && u.Path == \"\" && u.Path == \"\" && u.RawQuery == \"\"\n",
"}"
],
"file_path": "transform/post.go",
"type": "add",
"edit_start_line_idx": 52
} | package hugolib
import (
"path"
"strings"
"testing"
)
var SIMPLE_PAGE_YAML = `---
contenttype: ""
---
Sample Text
`
func TestDegenerateMissingFolderInPageFilename(t *testing.T) {
p, err := ReadFrom(strings.NewReader(SIMPLE_PAGE_YAML), path.Join("foobar"))
if err != nil {
t.Fatalf("Error in ReadFrom")
}
if p.Section != "" {
t.Fatalf("No section should be set for a file path: foobar")
}
}
func TestNewPageWithFilePath(t *testing.T) {
toCheck := []struct {
input string
section string
layout string
}{
{path.Join("sub", "foobar.html"), "sub", "sub/single.html"},
{path.Join("content", "sub", "foobar.html"), "sub", "sub/single.html"},
{path.Join("content", "dub", "sub", "foobar.html"), "sub", "sub/single.html"},
}
for _, el := range toCheck {
p, err := ReadFrom(strings.NewReader(SIMPLE_PAGE_YAML), el.input)
p.guessSection()
if err != nil {
t.Fatalf("Reading from SIMPLE_PAGE_YAML resulted in an error: %s", err)
}
if p.Section != el.section {
t.Fatalf("Section not set to %s for page %s. Got: %s", el.section, el.input, p.Section)
}
if p.Layout() != el.layout {
t.Fatalf("Layout incorrect. Expected: '%s', Got: '%s'", el.layout, p.Layout())
}
}
}
| hugolib/path_seperators_test.go | 0 | https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64 | [
0.0002806828706525266,
0.00021330425806809217,
0.00017067004228010774,
0.0002064259024336934,
0.00004052741496707313
] |
{
"id": 7,
"code_window": [
"\n",
"\treturn\n",
"}\n"
],
"labels": [
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"func fragmentOnly(u *url.URL) bool {\n",
"\treturn u.Fragment != \"\" && u.Scheme == \"\" && u.Opaque == \"\" && u.User == nil && u.Host == \"\" && u.Path == \"\" && u.Path == \"\" && u.RawQuery == \"\"\n",
"}"
],
"file_path": "transform/post.go",
"type": "add",
"edit_start_line_idx": 52
} | # Hugo
A Fast and Flexible Static Site Generator built with love by [spf13](http://spf13.com)
and [friends](http://github.com/spf13/hugo/graphs/contributors) in Go.
[](https://travis-ci.org/spf13/hugo)
## Overview
Hugo is a static site generator written in GoLang. It is optimized for
speed, easy use and configurability. Hugo takes a directory with content and
templates and renders them into a full html website.
Hugo makes use of markdown files with front matter for meta data.
A typical website of moderate size can be
rendered in a fraction of a second. A good rule of thumb is that Hugo
takes around 1 millisecond for each piece of content.
It is written to work well with any
kind of website including blogs, tumbles and docs.
**Complete documentation is available at [Hugo Documentation](http://hugo.spf13.com).**
# Getting Started
## Installing Hugo
Hugo is written in GoLang with support for Windows, Linux, FreeBSD and OSX.
The latest release can be found at [hugo releases](https://github.com/spf13/hugo/releases).
We currently build for Windows, Linux, FreeBSD and OS X for x64
and 386 architectures.
### Installing Hugo (binary)
Installation is very easy. Simply download the appropriate version for your
platform from [hugo releases](https://github.com/spf13/hugo/releases).
Once downloaded it can be run from anywhere. You don't need to install
it into a global location. This works well for shared hosts and other systems
where you don't have a privileged account.
Ideally you should install it somewhere in your path for easy use. `/usr/local/bin`
is the most probable location.
*The Hugo executible has no external dependencies.*
### Installing from source
#### Dependencies
* Git
* Go 1.1+
* Mercurial
* Bazaar
#### Clone locally (for contributors):
git clone https://github.com/spf13/hugo
cd hugo
go get
Because go expects all of your libraries to be found in either $GOROOT or $GOPATH,
it's helpful to symlink the project to one of the following paths:
* ln -s /path/to/your/hugo $GOPATH/src/github.com/spf13/hugo
* ln -s /path/to/your/hugo $GOROOT/src/pkg/github.com/spf13/hugo
#### Get directly from Github:
If you only want to build from source, it's even easier.
go get github.com/spf13/hugo
#### Building Hugo
cd /path/to/hugo
go build -o hugo main.go
mv hugo /usr/local/bin/
#### Running Hugo
cd /path/to/hugo
go install github.com/spf13/hugo/hugolibs
go run main.go
**Complete documentation is available at [Hugo Documentation](http://hugo.spf13.com).**
| README.md | 0 | https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64 | [
0.00017130395281128585,
0.0001687218464212492,
0.00016493594739586115,
0.00016922717622946948,
0.000002016862254095031
] |
{
"id": 0,
"code_window": [
"\t\tDiskMonitor: execinfra.NewMonitor(\n",
"\t\t\tctx, ds.ParentDiskMonitor, \"flow-disk-monitor\",\n",
"\t\t),\n",
"\t\tPreserveFlowSpecs: localState.PreserveFlowSpecs,\n",
"\t}\n",
"\n",
"\tif localState.IsLocal && localState.Collection != nil {\n",
"\t\t// If we were passed a descs.Collection to use, then take it. In this case,\n",
"\t\t// the caller will handle releasing the used descriptors, so we don't need\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/distsql/server.go",
"type": "replace",
"edit_start_line_idx": 490
} | // Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"fmt"
"math"
"runtime"
"sync"
"sync/atomic"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangecache"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/rpc/nodedialer"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/sql/colflow"
"github.com/cockroachdb/cockroach/pkg/sql/contention"
"github.com/cockroachdb/cockroach/pkg/sql/contentionpb"
"github.com/cockroachdb/cockroach/pkg/sql/distsql"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra/execopnode"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/flowinfra"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/physicalplan"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/rowexec"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/contextutil"
"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/metric"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/ring"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/errors"
pbtypes "github.com/gogo/protobuf/types"
)
var settingDistSQLNumRunners = settings.RegisterIntSetting(
settings.TenantWritable,
"sql.distsql.num_runners",
"determines the number of DistSQL runner goroutines used for issuing SetupFlow RPCs",
// We use GOMAXPROCS instead of NumCPU because the former could be adjusted
// based on cgroup limits (see cgroups.AdjustMaxProcs).
//
// The choice of the default multiple of 4 was made in order to get the
// original value of 16 on machines with 4 CPUs.
4*int64(runtime.GOMAXPROCS(0)), /* defaultValue */
func(v int64) error {
if v < 0 {
return errors.Errorf("cannot be set to a negative value: %d", v)
}
if v > distSQLNumRunnersMax {
return errors.Errorf("cannot be set to a value exceeding %d: %d", distSQLNumRunnersMax, v)
}
return nil
},
)
// Somewhat arbitrary upper bound.
var distSQLNumRunnersMax = 256 * int64(runtime.GOMAXPROCS(0))
// runnerRequest is the request that is sent (via a channel) to a worker.
type runnerRequest struct {
ctx context.Context
nodeDialer *nodedialer.Dialer
flowReq *execinfrapb.SetupFlowRequest
sqlInstanceID base.SQLInstanceID
resultChan chan<- runnerResult
}
// runnerResult is returned by a worker (via a channel) for each received
// request.
type runnerResult struct {
nodeID base.SQLInstanceID
err error
}
func (req runnerRequest) run() {
res := runnerResult{nodeID: req.sqlInstanceID}
conn, err := req.nodeDialer.Dial(req.ctx, roachpb.NodeID(req.sqlInstanceID), rpc.DefaultClass)
if err != nil {
res.err = err
} else {
client := execinfrapb.NewDistSQLClient(conn)
// TODO(radu): do we want a timeout here?
if sp := tracing.SpanFromContext(req.ctx); sp != nil && !sp.IsNoop() {
req.flowReq.TraceInfo = sp.Meta().ToProto()
}
resp, err := client.SetupFlow(req.ctx, req.flowReq)
if err != nil {
res.err = err
} else {
res.err = resp.Error.ErrorDetail(req.ctx)
}
}
req.resultChan <- res
}
type runnerCoordinator struct {
// runnerChan is used by the DistSQLPlanner to send out requests (for
// running SetupFlow RPCs) to a pool of workers.
runnerChan chan runnerRequest
// newDesiredNumWorkers is used to notify the coordinator that the size of
// the pool of workers might have changed.
newDesiredNumWorkers chan int64
atomics struct {
// numWorkers tracks the number of workers running at the moment. This
// needs to be accessed atomically, but only because of the usage in
// tests.
numWorkers int64
}
}
func (c *runnerCoordinator) init(ctx context.Context, stopper *stop.Stopper, sv *settings.Values) {
// This channel has to be unbuffered because we want to only be able to send
// requests if a worker is actually there to receive them.
c.runnerChan = make(chan runnerRequest)
stopWorkerChan := make(chan struct{})
worker := func(context.Context) {
for {
select {
case req := <-c.runnerChan:
req.run()
case <-stopWorkerChan:
return
}
}
}
stopChan := stopper.ShouldQuiesce()
// This is a buffered channel because we will be sending on it from the
// callback when the corresponding setting changes. The buffer size of 1
// should be sufficient, but we use a larger buffer out of caution (in case
// the cluster setting is updated rapidly) - in order to not block the
// goroutine that is updating the settings.
c.newDesiredNumWorkers = make(chan int64, 4)
// setNewNumWorkers sets the new target size of the pool of workers.
setNewNumWorkers := func(newNumWorkers int64) {
select {
case c.newDesiredNumWorkers <- newNumWorkers:
case <-stopChan:
			// If the server is quiescing, then the new size of the pool doesn't
// matter.
return
}
}
// Whenever the corresponding setting is updated, we need to notify the
// coordinator.
// NB: runnerCoordinator.init is called once per server lifetime so this
// won't leak an unbounded number of OnChange callbacks.
settingDistSQLNumRunners.SetOnChange(sv, func(ctx context.Context) {
setNewNumWorkers(settingDistSQLNumRunners.Get(sv))
})
// We need to set the target pool size based on the current setting
// explicitly since the OnChange callback won't ever be called for the
// initial value - the setting initialization has already been performed
// before we registered the OnChange callback.
setNewNumWorkers(settingDistSQLNumRunners.Get(sv))
// Spin up the coordinator goroutine.
_ = stopper.RunAsyncTask(ctx, "distsql-runner-coordinator", func(context.Context) {
// Make sure to stop all workers when the coordinator exits.
defer close(stopWorkerChan)
for {
select {
case newNumWorkers := <-c.newDesiredNumWorkers:
for {
numWorkers := atomic.LoadInt64(&c.atomics.numWorkers)
if numWorkers == newNumWorkers {
break
}
if numWorkers < newNumWorkers {
// Need to spin another worker.
err := stopper.RunAsyncTask(ctx, "distsql-runner", worker)
if err != nil {
return
}
atomic.AddInt64(&c.atomics.numWorkers, 1)
} else {
// Need to stop one of the workers.
select {
case stopWorkerChan <- struct{}{}:
atomic.AddInt64(&c.atomics.numWorkers, -1)
case <-stopChan:
return
}
}
}
case <-stopChan:
return
}
}
})
}
// To allow for canceling flows via CancelDeadFlows RPC on different nodes
// simultaneously, we use a pool of workers.
const numCancelingWorkers = 4
func (dsp *DistSQLPlanner) initCancelingWorkers(initCtx context.Context) {
dsp.cancelFlowsCoordinator.workerWait = make(chan struct{}, numCancelingWorkers)
const cancelRequestTimeout = 10 * time.Second
for i := 0; i < numCancelingWorkers; i++ {
workerID := i + 1
_ = dsp.stopper.RunAsyncTask(initCtx, "distsql-canceling-worker", func(parentCtx context.Context) {
stopChan := dsp.stopper.ShouldQuiesce()
for {
select {
case <-stopChan:
return
case <-dsp.cancelFlowsCoordinator.workerWait:
req, sqlInstanceID := dsp.cancelFlowsCoordinator.getFlowsToCancel()
if req == nil {
// There are no flows to cancel at the moment. This
// shouldn't really happen.
log.VEventf(parentCtx, 2, "worker %d woke up but didn't find any flows to cancel", workerID)
continue
}
log.VEventf(parentCtx, 2, "worker %d is canceling at most %d flows on node %d", workerID, len(req.FlowIDs), sqlInstanceID)
// TODO: Double check that we only ever cancel flows on SQL nodes/pods here.
conn, err := dsp.podNodeDialer.Dial(parentCtx, roachpb.NodeID(sqlInstanceID), rpc.DefaultClass)
if err != nil {
// We failed to dial the node, so we give up given that
// our cancellation is best effort. It is possible that
// the node is dead anyway.
continue
}
client := execinfrapb.NewDistSQLClient(conn)
_ = contextutil.RunWithTimeout(
parentCtx,
"cancel dead flows",
cancelRequestTimeout,
func(ctx context.Context) error {
_, _ = client.CancelDeadFlows(ctx, req)
return nil
})
}
}
})
}
}
type deadFlowsOnNode struct {
ids []execinfrapb.FlowID
sqlInstanceID base.SQLInstanceID
}
// cancelFlowsCoordinator is responsible for batching up the requests to cancel
// remote flows initiated on the behalf of the current node when the local flows
// errored out.
type cancelFlowsCoordinator struct {
mu struct {
syncutil.Mutex
// deadFlowsByNode is a ring of pointers to deadFlowsOnNode objects.
deadFlowsByNode ring.Buffer
}
// workerWait should be used by canceling workers to block until there are
// some dead flows to cancel.
workerWait chan struct{}
}
// getFlowsToCancel returns a request to cancel some dead flows on a particular
// node. If there are no dead flows to cancel, it returns nil, 0. Safe for
// concurrent usage.
func (c *cancelFlowsCoordinator) getFlowsToCancel() (
*execinfrapb.CancelDeadFlowsRequest,
base.SQLInstanceID,
) {
c.mu.Lock()
defer c.mu.Unlock()
if c.mu.deadFlowsByNode.Len() == 0 {
return nil, base.SQLInstanceID(0)
}
deadFlows := c.mu.deadFlowsByNode.GetFirst().(*deadFlowsOnNode)
c.mu.deadFlowsByNode.RemoveFirst()
req := &execinfrapb.CancelDeadFlowsRequest{
FlowIDs: deadFlows.ids,
}
return req, deadFlows.sqlInstanceID
}
// addFlowsToCancel adds all remote flows from flows map to be canceled via
// CancelDeadFlows RPC. Safe for concurrent usage.
func (c *cancelFlowsCoordinator) addFlowsToCancel(
flows map[base.SQLInstanceID]*execinfrapb.FlowSpec,
) {
c.mu.Lock()
for sqlInstanceID, f := range flows {
if sqlInstanceID != f.Gateway {
// c.mu.deadFlowsByNode.Len() is at most the number of nodes in the
// cluster, so a linear search for the node ID should be
// sufficiently fast.
found := false
for j := 0; j < c.mu.deadFlowsByNode.Len(); j++ {
deadFlows := c.mu.deadFlowsByNode.Get(j).(*deadFlowsOnNode)
if sqlInstanceID == deadFlows.sqlInstanceID {
deadFlows.ids = append(deadFlows.ids, f.FlowID)
found = true
break
}
}
if !found {
c.mu.deadFlowsByNode.AddLast(&deadFlowsOnNode{
ids: []execinfrapb.FlowID{f.FlowID},
sqlInstanceID: sqlInstanceID,
})
}
}
}
queueLength := c.mu.deadFlowsByNode.Len()
c.mu.Unlock()
// Notify the canceling workers that there are some flows to cancel (we send
// on the channel at most the length of the queue number of times in order
// to not wake up the workers uselessly). Note that we do it in a
// non-blocking fashion (because the workers might be busy canceling other
// flows at the moment). Also because the channel is buffered, they won't go
// to sleep once they are done.
numWorkersToWakeUp := numCancelingWorkers
if numWorkersToWakeUp > queueLength {
numWorkersToWakeUp = queueLength
}
for i := 0; i < numWorkersToWakeUp; i++ {
select {
case c.workerWait <- struct{}{}:
default:
// We have filled the buffer of the channel, so there is no need to
// try to send any more notifications.
return
}
}
}
// setupFlows sets up all the flows specified in flows using the provided state.
// It will first attempt to set up all remote flows using the dsp workers if
// available or sequentially if not, and then finally set up the gateway flow,
// whose output is the DistSQLReceiver provided. This flow is then returned to
// be run.
func (dsp *DistSQLPlanner) setupFlows(
ctx context.Context,
evalCtx *extendedEvalContext,
leafInputState *roachpb.LeafTxnInputState,
flows map[base.SQLInstanceID]*execinfrapb.FlowSpec,
recv *DistSQLReceiver,
localState distsql.LocalState,
collectStats bool,
statementSQL string,
) (context.Context, flowinfra.Flow, execopnode.OpChains, error) {
thisNodeID := dsp.gatewaySQLInstanceID
_, ok := flows[thisNodeID]
if !ok {
return nil, nil, nil, errors.AssertionFailedf("missing gateway flow")
}
if localState.IsLocal && len(flows) != 1 {
return nil, nil, nil, errors.AssertionFailedf("IsLocal set but there's multiple flows")
}
const setupFlowRequestStmtMaxLength = 500
if len(statementSQL) > setupFlowRequestStmtMaxLength {
statementSQL = statementSQL[:setupFlowRequestStmtMaxLength]
}
setupReq := execinfrapb.SetupFlowRequest{
LeafTxnInputState: leafInputState,
Version: execinfra.Version,
EvalContext: execinfrapb.MakeEvalContext(&evalCtx.Context),
TraceKV: evalCtx.Tracing.KVTracingEnabled(),
CollectStats: collectStats,
StatementSQL: statementSQL,
}
if vectorizeMode := evalCtx.SessionData().VectorizeMode; vectorizeMode != sessiondatapb.VectorizeOff {
// Now we determine whether the vectorized engine supports the flow
// specs.
for _, spec := range flows {
if err := colflow.IsSupported(vectorizeMode, spec); err != nil {
log.VEventf(ctx, 2, "failed to vectorize: %s", err)
if vectorizeMode == sessiondatapb.VectorizeExperimentalAlways {
return nil, nil, nil, err
}
// Vectorization is not supported for this flow, so we override
// the setting.
setupReq.EvalContext.SessionData.VectorizeMode = sessiondatapb.VectorizeOff
break
}
}
}
// Start all the flows except the flow on this node (there is always a flow
// on this node).
var resultChan chan runnerResult
if len(flows) > 1 {
resultChan = make(chan runnerResult, len(flows)-1)
for nodeID, flowSpec := range flows {
if nodeID == thisNodeID {
// Skip this node.
continue
}
req := setupReq
req.Flow = *flowSpec
runReq := runnerRequest{
ctx: ctx,
nodeDialer: dsp.podNodeDialer,
flowReq: &req,
sqlInstanceID: nodeID,
resultChan: resultChan,
}
// Send out a request to the workers; if no worker is available, run
// directly.
select {
case dsp.runnerCoordinator.runnerChan <- runReq:
default:
runReq.run()
}
}
}
// Now set up the flow on this node.
setupReq.Flow = *flows[thisNodeID]
var batchReceiver execinfra.BatchReceiver
if recv.batchWriter != nil {
// Use the DistSQLReceiver as an execinfra.BatchReceiver only if the
// former has the corresponding writer set.
batchReceiver = recv
}
ctx, flow, opChains, firstErr := dsp.distSQLSrv.SetupLocalSyncFlow(ctx, evalCtx.Mon, &setupReq, recv, batchReceiver, localState)
// Now wait for all the flows to be scheduled on remote nodes. Note that we
// are not waiting for the flows themselves to complete.
for i := 0; i < len(flows)-1; i++ {
res := <-resultChan
if firstErr == nil {
firstErr = res.err
}
// TODO(radu): accumulate the flows that we failed to set up and move them
// into the local flow.
}
// Note that we need to return the local flow even if firstErr is non-nil so
// that the local flow is properly cleaned up.
return ctx, flow, opChains, firstErr
}
const clientRejectedMsg string = "client rejected when attempting to run DistSQL plan"
// Run executes a physical plan. The plan should have been finalized using
// FinalizePlan.
//
// All errors encountered are reported to the DistSQLReceiver's resultWriter.
// Additionally, if the error is a "communication error" (an error encountered
// while using that resultWriter), the error is also stored in
// DistSQLReceiver.commErr. That can be tested to see if a client session needs
// to be closed.
//
// Args:
// - txn is the transaction in which the plan will run. If nil, the different
// processors are expected to manage their own internal transactions.
// - evalCtx is the evaluation context in which the plan will run. It might be
// mutated.
// - finishedSetupFn, if non-nil, is called synchronously after all the
// processors have successfully started up.
//
// It returns a non-nil (although it can be a noop when an error is
// encountered) cleanup function that must be called in order to release the
// resources.
func (dsp *DistSQLPlanner) Run(
ctx context.Context,
planCtx *PlanningCtx,
txn *kv.Txn,
plan *PhysicalPlan,
recv *DistSQLReceiver,
evalCtx *extendedEvalContext,
finishedSetupFn func(),
) (cleanup func()) {
cleanup = func() {}
flows := plan.GenerateFlowSpecs()
defer func() {
for _, flowSpec := range flows {
physicalplan.ReleaseFlowSpec(flowSpec)
}
}()
if _, ok := flows[dsp.gatewaySQLInstanceID]; !ok {
recv.SetError(errors.Errorf("expected to find gateway flow"))
return cleanup
}
var (
localState distsql.LocalState
leafInputState *roachpb.LeafTxnInputState
)
// NB: putting part of evalCtx in localState means it might be mutated down
// the line.
localState.EvalContext = &evalCtx.Context
localState.IsLocal = planCtx.isLocal
localState.Txn = txn
localState.LocalProcs = plan.LocalProcessors
// If we need to perform some operation on the flow specs, we want to
// preserve the specs during the flow setup.
localState.PreserveFlowSpecs = planCtx.saveFlows != nil
// If we have access to a planner and are currently being used to plan
// statements in a user transaction, then take the descs.Collection to resolve
// types with during flow execution. This is necessary to do in the case of
// a transaction that has already created or updated some types. If we do not
// use the local descs.Collection, we would attempt to acquire a lease on
// modified types when accessing them, which would error out.
if planCtx.planner != nil && !planCtx.planner.isInternalPlanner {
localState.Collection = planCtx.planner.Descriptors()
}
// noMutations indicates whether we know for sure that the plan doesn't have
// any mutations. If we don't have the access to the planner (which can be
// the case not on the main query execution path, i.e. BulkIO, CDC, etc),
// then we are ignorant of the details of the execution plan, so we choose
// to be on the safe side and mark 'noMutations' as 'false'.
noMutations := planCtx.planner != nil && !planCtx.planner.curPlan.flags.IsSet(planFlagContainsMutation)
if txn == nil {
// Txn can be nil in some cases, like BulkIO flows. In such a case, we
// cannot create a LeafTxn, so we cannot parallelize scans.
planCtx.parallelizeScansIfLocal = false
} else {
if planCtx.isLocal && noMutations && planCtx.parallelizeScansIfLocal {
// Even though we have a single flow on the gateway node, we might
// have decided to parallelize the scans. If that's the case, we
// will need to use the Leaf txn.
for _, flow := range flows {
localState.HasConcurrency = localState.HasConcurrency || execinfra.HasParallelProcessors(flow)
}
}
if noMutations {
// Even if planCtx.isLocal is false (which is the case when we think
// it's worth distributing the query), we need to go through the
// processors to figure out whether any of them have concurrency.
//
// However, the concurrency requires the usage of LeafTxns which is
// only acceptable if we don't have any mutations in the plan.
// TODO(yuzefovich): we could be smarter here and allow the usage of
// the RootTxn by the mutations while still using the Streamer (that
// gets a LeafTxn) iff the plan is such that there is no concurrency
// between the root and the leaf txns.
//
// At the moment of writing, this is only relevant whenever the
// Streamer API might be used by some of the processors. The
// Streamer internally can have concurrency, so it expects to be
// given a LeafTxn. In order for that LeafTxn to be created later,
// during the flow setup, we need to populate leafInputState below,
// so we tell the localState that there is concurrency.
if execinfra.CanUseStreamer(dsp.st) {
for _, proc := range plan.Processors {
if jr := proc.Spec.Core.JoinReader; jr != nil {
// Both index and lookup joins, with and without
// ordering, are executed via the Streamer API that has
// concurrency.
localState.HasConcurrency = true
break
}
}
}
}
if localState.MustUseLeafTxn() {
// Set up leaf txns using the txnCoordMeta if we need to.
tis, err := txn.GetLeafTxnInputStateOrRejectClient(ctx)
if err != nil {
log.Infof(ctx, "%s: %s", clientRejectedMsg, err)
recv.SetError(err)
return cleanup
}
if tis == nil {
recv.SetError(errors.AssertionFailedf(
"leafInputState is nil when txn is non-nil and we must use the leaf txn",
))
return cleanup
}
leafInputState = tis
}
}
if !planCtx.skipDistSQLDiagramGeneration && log.ExpensiveLogEnabled(ctx, 2) {
var stmtStr string
if planCtx.planner != nil && planCtx.planner.stmt.AST != nil {
stmtStr = planCtx.planner.stmt.String()
}
_, url, err := execinfrapb.GeneratePlanDiagramURL(stmtStr, flows, execinfrapb.DiagramFlags{})
if err != nil {
log.VEventf(ctx, 2, "error generating diagram: %s", err)
} else {
log.VEventf(ctx, 2, "plan diagram URL:\n%s", url.String())
}
}
log.VEvent(ctx, 2, "running DistSQL plan")
dsp.distSQLSrv.ServerConfig.Metrics.QueryStart()
defer dsp.distSQLSrv.ServerConfig.Metrics.QueryStop()
recv.outputTypes = plan.GetResultTypes()
recv.contendedQueryMetric = dsp.distSQLSrv.Metrics.ContendedQueriesCount
if len(flows) == 1 {
// We ended up planning everything locally, regardless of whether we
// intended to distribute or not.
localState.IsLocal = true
} else {
defer func() {
if recv.resultWriter.Err() != nil {
// The execution of this query encountered some error, so we
// will eagerly cancel all scheduled flows on the remote nodes
// (if they haven't been started yet) because they are now dead.
// TODO(yuzefovich): consider whether augmenting
// ConnectInboundStream to keep track of the streams that
// initiated FlowStream RPC is worth it - the flows containing
// such streams must have been started, so there is no point in
// trying to cancel them this way. This will allow us to reduce
// the size of the CancelDeadFlows request and speed up the
// lookup on the remote node whether a particular dead flow
// should be canceled. However, this improves the unhappy case,
				// but it'll slow down the happy case - by introducing additional
// tracking.
dsp.cancelFlowsCoordinator.addFlowsToCancel(flows)
}
}()
}
// Currently, we get the statement only if there is a planner available in
// the planCtx which is the case only on the "main" query path (for
// user-issued queries).
// TODO(yuzefovich): propagate the statement in all cases.
var statementSQL string
if planCtx.planner != nil {
statementSQL = planCtx.planner.stmt.StmtNoConstants
}
ctx, flow, opChains, err := dsp.setupFlows(
ctx, evalCtx, leafInputState, flows, recv, localState, planCtx.collectExecStats, statementSQL,
)
// Make sure that the local flow is always cleaned up if it was created.
if flow != nil {
cleanup = func() {
flow.Cleanup(ctx)
}
}
if err != nil {
recv.SetError(err)
return cleanup
}
if finishedSetupFn != nil {
finishedSetupFn()
}
if planCtx.planner != nil && flow.IsVectorized() {
planCtx.planner.curPlan.flags.Set(planFlagVectorized)
}
if planCtx.saveFlows != nil {
if err := planCtx.saveFlows(flows, opChains); err != nil {
recv.SetError(err)
return cleanup
}
}
// Check that flows that were forced to be planned locally and didn't need
// to have concurrency don't actually have it.
//
// This is important, since these flows are forced to use the RootTxn (since
// they might have mutations), and the RootTxn does not permit concurrency.
// For such flows, we were supposed to have fused everything.
if txn != nil && !localState.MustUseLeafTxn() && flow.ConcurrentTxnUse() {
recv.SetError(errors.AssertionFailedf(
"unexpected concurrency for a flow that was forced to be planned locally"))
return cleanup
}
// TODO(radu): this should go through the flow scheduler.
flow.Run(ctx, func() {})
// TODO(yuzefovich): it feels like this closing should happen after
	// PlanAndRun. We should refactor this and get rid of the ignoreClose field.
if planCtx.planner != nil && !planCtx.ignoreClose {
// planCtx can change before the cleanup function is executed, so we make
// a copy of the planner and bind it to the function.
curPlan := &planCtx.planner.curPlan
return func() {
// We need to close the planNode tree we translated into a DistSQL plan
// before flow.Cleanup, which closes memory accounts that expect to be
// emptied.
curPlan.close(ctx)
flow.Cleanup(ctx)
}
}
// ignoreClose is set to true meaning that someone else will handle the
// closing of the current plan, so we simply clean up the flow.
return cleanup
}
// DistSQLReceiver is an execinfra.RowReceiver and execinfra.BatchReceiver that
// writes results to a rowResultWriter and batchResultWriter, respectively. This
// is where the DistSQL execution meets the SQL Session - the result writer
// comes from a client Session.
//
// DistSQLReceiver also updates the RangeDescriptorCache in response to DistSQL
// metadata about misplanned ranges.
type DistSQLReceiver struct {
ctx context.Context
// These two interfaces refer to the same object, but batchWriter might be
// unset (resultWriter is always set). These are used to send the results
// to.
resultWriter rowResultWriter
batchWriter batchResultWriter
stmtType tree.StatementReturnType
// outputTypes are the types of the result columns produced by the plan.
outputTypes []*types.T
// existsMode indicates that the caller is only interested in the existence
// of a single row. Used by subqueries in EXISTS mode.
existsMode bool
// discardRows is set when we want to discard rows (for testing/benchmarks).
// See EXECUTE .. DISCARD ROWS.
discardRows bool
// commErr keeps track of the error received from interacting with the
// resultWriter. This represents a "communication error" and as such is unlike
// query execution errors: when the DistSQLReceiver is used within a SQL
// session, such errors mean that we have to bail on the session.
// Query execution errors are reported to the resultWriter. For some client's
// convenience, communication errors are also reported to the resultWriter.
//
// Once set, no more rows are accepted.
commErr error
row tree.Datums
status execinfra.ConsumerStatus
alloc tree.DatumAlloc
closed bool
rangeCache *rangecache.RangeCache
tracing *SessionTracing
// cleanup will be called when the DistSQLReceiver is Release()'d back to
// its sync.Pool.
cleanup func()
// The transaction in which the flow producing data for this
// receiver runs. The DistSQLReceiver updates the transaction in
// response to RetryableTxnError's and when distributed processors
// pass back LeafTxnFinalState objects via ProducerMetas. Nil if no
// transaction should be updated on errors (i.e. if the flow overall
// doesn't run in a transaction).
txn *kv.Txn
// A handler for clock signals arriving from remote nodes. This should update
// this node's clock.
clockUpdater clockUpdater
stats *topLevelQueryStats
expectedRowsRead int64
progressAtomic *uint64
// contendedQueryMetric is a Counter that is incremented at most once if the
// query produces at least one contention event.
contendedQueryMetric *metric.Counter
// contentionRegistry is a Registry that contention events are added to.
contentionRegistry *contention.Registry
testingKnobs struct {
// pushCallback, if set, will be called every time DistSQLReceiver.Push
// is called, with the same arguments.
pushCallback func(rowenc.EncDatumRow, *execinfrapb.ProducerMetadata)
}
}
// rowResultWriter is a subset of CommandResult to be used with the
// DistSQLReceiver. It's implemented by RowResultWriter.
type rowResultWriter interface {
// AddRow writes a result row.
// Note that the caller owns the row slice and might reuse it.
AddRow(ctx context.Context, row tree.Datums) error
IncrementRowsAffected(ctx context.Context, n int)
SetError(error)
Err() error
}
// batchResultWriter is a subset of CommandResult to be used with the
// DistSQLReceiver when the consumer can operate on columnar batches directly.
type batchResultWriter interface {
AddBatch(context.Context, coldata.Batch) error
}
// MetadataResultWriter is used to stream metadata rather than row results in a
// DistSQL flow.
type MetadataResultWriter interface {
AddMeta(ctx context.Context, meta *execinfrapb.ProducerMetadata)
}
// MetadataCallbackWriter wraps a rowResultWriter to stream metadata in a
// DistSQL flow. It executes a given callback when metadata is added.
type MetadataCallbackWriter struct {
rowResultWriter
fn func(ctx context.Context, meta *execinfrapb.ProducerMetadata) error
}
// AddMeta implements the MetadataResultWriter interface.
func (w *MetadataCallbackWriter) AddMeta(ctx context.Context, meta *execinfrapb.ProducerMetadata) {
if err := w.fn(ctx, meta); err != nil {
w.SetError(err)
}
}
// NewMetadataCallbackWriter creates a new MetadataCallbackWriter.
func NewMetadataCallbackWriter(
rowResultWriter rowResultWriter,
metaFn func(ctx context.Context, meta *execinfrapb.ProducerMetadata) error,
) *MetadataCallbackWriter {
return &MetadataCallbackWriter{rowResultWriter: rowResultWriter, fn: metaFn}
}
// NewMetadataOnlyMetadataCallbackWriter creates a new MetadataCallbackWriter
// that uses errOnlyResultWriter and only supports receiving
// execinfrapb.ProducerMetadata.
func NewMetadataOnlyMetadataCallbackWriter() *MetadataCallbackWriter {
return NewMetadataCallbackWriter(
&errOnlyResultWriter{},
func(ctx context.Context, meta *execinfrapb.ProducerMetadata) error {
return nil
},
)
}
// errOnlyResultWriter is a rowResultWriter and batchResultWriter that only
// supports receiving an error. All other functions that deal with producing
// results panic.
type errOnlyResultWriter struct {
err error
}
var _ rowResultWriter = &errOnlyResultWriter{}
var _ batchResultWriter = &errOnlyResultWriter{}
func (w *errOnlyResultWriter) SetError(err error) {
w.err = err
}
func (w *errOnlyResultWriter) Err() error {
return w.err
}
func (w *errOnlyResultWriter) AddRow(ctx context.Context, row tree.Datums) error {
panic("AddRow not supported by errOnlyResultWriter")
}
func (w *errOnlyResultWriter) AddBatch(ctx context.Context, batch coldata.Batch) error {
panic("AddBatch not supported by errOnlyResultWriter")
}
func (w *errOnlyResultWriter) IncrementRowsAffected(ctx context.Context, n int) {
panic("IncrementRowsAffected not supported by errOnlyResultWriter")
}
// RowResultWriter is a thin wrapper around a RowContainer.
type RowResultWriter struct {
rowContainer *rowContainerHelper
rowsAffected int
err error
}
var _ rowResultWriter = &RowResultWriter{}
// NewRowResultWriter creates a new RowResultWriter.
func NewRowResultWriter(rowContainer *rowContainerHelper) *RowResultWriter {
return &RowResultWriter{rowContainer: rowContainer}
}
// IncrementRowsAffected implements the rowResultWriter interface.
func (b *RowResultWriter) IncrementRowsAffected(ctx context.Context, n int) {
b.rowsAffected += n
}
// AddRow implements the rowResultWriter interface.
func (b *RowResultWriter) AddRow(ctx context.Context, row tree.Datums) error {
if b.rowContainer != nil {
return b.rowContainer.AddRow(ctx, row)
}
return nil
}
// SetError is part of the rowResultWriter interface.
func (b *RowResultWriter) SetError(err error) {
b.err = err
}
// Err is part of the rowResultWriter interface.
func (b *RowResultWriter) Err() error {
return b.err
}
// CallbackResultWriter is a rowResultWriter that runs a callback function
// on AddRow.
type CallbackResultWriter struct {
fn func(ctx context.Context, row tree.Datums) error
rowsAffected int
err error
}
var _ rowResultWriter = &CallbackResultWriter{}
// NewCallbackResultWriter creates a new CallbackResultWriter.
func NewCallbackResultWriter(
fn func(ctx context.Context, row tree.Datums) error,
) *CallbackResultWriter {
return &CallbackResultWriter{fn: fn}
}
// IncrementRowsAffected is part of the rowResultWriter interface.
func (c *CallbackResultWriter) IncrementRowsAffected(ctx context.Context, n int) {
c.rowsAffected += n
}
// AddRow is part of the rowResultWriter interface.
func (c *CallbackResultWriter) AddRow(ctx context.Context, row tree.Datums) error {
return c.fn(ctx, row)
}
// SetError is part of the rowResultWriter interface.
func (c *CallbackResultWriter) SetError(err error) {
c.err = err
}
// Err is part of the rowResultWriter interface.
func (c *CallbackResultWriter) Err() error {
return c.err
}
var _ execinfra.RowReceiver = &DistSQLReceiver{}
var _ execinfra.BatchReceiver = &DistSQLReceiver{}
var receiverSyncPool = sync.Pool{
New: func() interface{} {
return &DistSQLReceiver{}
},
}
// ClockUpdater describes an object that can be updated with an observed
// timestamp. Usually wraps an hlc.Clock.
type clockUpdater interface {
// Update updates this ClockUpdater with the observed hlc.Timestamp.
Update(observedTS hlc.ClockTimestamp)
}
// MakeDistSQLReceiver creates a DistSQLReceiver.
//
// ctx is the Context that the receiver will use throughout its
// lifetime. resultWriter is the container where the results will be
// stored. If only the row count is needed, this can be nil.
//
// txn is the transaction in which the producer flow runs; it will be updated
// on errors. Nil if the flow overall doesn't run in a transaction.
func MakeDistSQLReceiver(
ctx context.Context,
resultWriter rowResultWriter,
stmtType tree.StatementReturnType,
rangeCache *rangecache.RangeCache,
txn *kv.Txn,
clockUpdater clockUpdater,
tracing *SessionTracing,
contentionRegistry *contention.Registry,
testingPushCallback func(rowenc.EncDatumRow, *execinfrapb.ProducerMetadata),
) *DistSQLReceiver {
consumeCtx, cleanup := tracing.TraceExecConsume(ctx)
r := receiverSyncPool.Get().(*DistSQLReceiver)
// Check whether the result writer supports pushing batches into it directly
// without having to materialize them.
var batchWriter batchResultWriter
if commandResult, ok := resultWriter.(RestrictedCommandResult); ok {
if commandResult.SupportsAddBatch() {
batchWriter = commandResult
}
}
*r = DistSQLReceiver{
ctx: consumeCtx,
cleanup: cleanup,
resultWriter: resultWriter,
batchWriter: batchWriter,
rangeCache: rangeCache,
txn: txn,
clockUpdater: clockUpdater,
stats: &topLevelQueryStats{},
stmtType: stmtType,
tracing: tracing,
contentionRegistry: contentionRegistry,
}
r.testingKnobs.pushCallback = testingPushCallback
return r
}
// Release releases this DistSQLReceiver back to the pool.
func (r *DistSQLReceiver) Release() {
r.cleanup()
*r = DistSQLReceiver{}
receiverSyncPool.Put(r)
}
// clone clones the receiver for running sub- and post-queries. Not all fields
// are cloned. The receiver should be released when no longer needed.
func (r *DistSQLReceiver) clone() *DistSQLReceiver {
ret := receiverSyncPool.Get().(*DistSQLReceiver)
*ret = DistSQLReceiver{
ctx: r.ctx,
cleanup: func() {},
rangeCache: r.rangeCache,
txn: r.txn,
clockUpdater: r.clockUpdater,
stats: r.stats,
stmtType: tree.Rows,
tracing: r.tracing,
contentionRegistry: r.contentionRegistry,
}
return ret
}
// SetError provides a convenient way for a client to pass in an error, thus
// pretending that a query execution error happened. The error is passed along
// to the resultWriter.
//
// The status of DistSQLReceiver is updated accordingly.
func (r *DistSQLReceiver) SetError(err error) {
r.resultWriter.SetError(err)
// If we encountered an error, we will transition to draining unless we were
// canceled.
if r.ctx.Err() != nil {
log.VEventf(r.ctx, 1, "encountered error (transitioning to shutting down): %v", r.ctx.Err())
r.status = execinfra.ConsumerClosed
} else {
log.VEventf(r.ctx, 1, "encountered error (transitioning to draining): %v", err)
r.status = execinfra.DrainRequested
}
}
// pushMeta takes in non-empty metadata object and pushes it to the result
// writer. Possibly updated status is returned.
func (r *DistSQLReceiver) pushMeta(meta *execinfrapb.ProducerMetadata) execinfra.ConsumerStatus {
if metaWriter, ok := r.resultWriter.(MetadataResultWriter); ok {
metaWriter.AddMeta(r.ctx, meta)
}
if meta.LeafTxnFinalState != nil {
if r.txn != nil {
if r.txn.ID() == meta.LeafTxnFinalState.Txn.ID {
if err := r.txn.UpdateRootWithLeafFinalState(r.ctx, meta.LeafTxnFinalState); err != nil {
r.SetError(err)
}
}
} else {
r.SetError(
errors.Errorf("received a leaf final state (%s); but have no root", meta.LeafTxnFinalState))
}
}
if meta.Err != nil {
// Check if the error we just received should take precedence over a
// previous error (if any).
if roachpb.ErrPriority(meta.Err) > roachpb.ErrPriority(r.resultWriter.Err()) {
if r.txn != nil {
if retryErr := (*roachpb.UnhandledRetryableError)(nil); errors.As(meta.Err, &retryErr) {
// Update the txn in response to remote errors. In the non-DistSQL
// world, the TxnCoordSender handles "unhandled" retryable errors,
// but this one is coming from a distributed SQL node, which has
// left the handling up to the root transaction.
meta.Err = r.txn.UpdateStateOnRemoteRetryableErr(r.ctx, &retryErr.PErr)
// Update the clock with information from the error. On non-DistSQL
// code paths, the DistSender does this.
// TODO(andrei): We don't propagate clock signals on success cases
// through DistSQL; we should. We also don't propagate them through
// non-retryable errors; we also should.
if r.clockUpdater != nil {
r.clockUpdater.Update(retryErr.PErr.Now)
}
}
}
r.SetError(meta.Err)
}
}
if len(meta.Ranges) > 0 {
r.rangeCache.Insert(r.ctx, meta.Ranges...)
}
if len(meta.TraceData) > 0 {
if span := tracing.SpanFromContext(r.ctx); span != nil {
span.ImportRemoteRecording(meta.TraceData)
}
var ev roachpb.ContentionEvent
for i := range meta.TraceData {
meta.TraceData[i].Structured(func(any *pbtypes.Any, _ time.Time) {
if !pbtypes.Is(any, &ev) {
return
}
if err := pbtypes.UnmarshalAny(any, &ev); err != nil {
return
}
if r.contendedQueryMetric != nil {
// Increment the contended query metric at most once
// if the query sees at least one contention event.
r.contendedQueryMetric.Inc(1)
r.contendedQueryMetric = nil
}
contentionEvent := contentionpb.ExtendedContentionEvent{
BlockingEvent: ev,
}
if r.txn != nil {
contentionEvent.WaitingTxnID = r.txn.ID()
}
r.contentionRegistry.AddContentionEvent(contentionEvent)
})
}
}
if meta.Metrics != nil {
r.stats.bytesRead += meta.Metrics.BytesRead
r.stats.rowsRead += meta.Metrics.RowsRead
r.stats.rowsWritten += meta.Metrics.RowsWritten
if r.progressAtomic != nil && r.expectedRowsRead != 0 {
progress := float64(r.stats.rowsRead) / float64(r.expectedRowsRead)
atomic.StoreUint64(r.progressAtomic, math.Float64bits(progress))
}
meta.Metrics.Release()
}
// Release the meta object. It is unsafe for use after this call.
meta.Release()
return r.status
}
// handleCommErr handles the communication error (the one returned when
// attempting to add data to the result writer).
func (r *DistSQLReceiver) handleCommErr(commErr error) {
	// ErrLimitedResultClosed and errIEResultChannelClosed are not real
	// errors; they are signals to stop distsql and return success to the
// client (that's why we don't set the error on the resultWriter).
if errors.Is(commErr, ErrLimitedResultClosed) {
log.VEvent(r.ctx, 1, "encountered ErrLimitedResultClosed (transitioning to draining)")
r.status = execinfra.DrainRequested
} else if errors.Is(commErr, errIEResultChannelClosed) {
log.VEvent(r.ctx, 1, "encountered errIEResultChannelClosed (transitioning to draining)")
r.status = execinfra.DrainRequested
} else {
// Set the error on the resultWriter to notify the consumer about
// it. Most clients don't care to differentiate between
// communication errors and query execution errors, so they can
// simply inspect resultWriter.Err().
r.SetError(commErr)
// The only client that needs to know that a communication error and
// not a query execution error has occurred is
// connExecutor.execWithDistSQLEngine which will inspect r.commErr
// on its own and will shut down the connection.
//
// We don't need to shut down the connection if there's a
// portal-related error. This is definitely a layering violation,
// but is part of some accepted technical debt (see comments on
// sql/pgwire.limitedCommandResult.moreResultsNeeded). Instead of
// changing the signature of AddRow, we have a sentinel error that
// is handled specially here.
if !errors.Is(commErr, ErrLimitedResultNotSupported) {
r.commErr = commErr
}
}
}
// Push is part of the execinfra.RowReceiver interface.
func (r *DistSQLReceiver) Push(
row rowenc.EncDatumRow, meta *execinfrapb.ProducerMetadata,
) execinfra.ConsumerStatus {
if r.testingKnobs.pushCallback != nil {
r.testingKnobs.pushCallback(row, meta)
}
if meta != nil {
return r.pushMeta(meta)
}
if r.resultWriter.Err() == nil && r.ctx.Err() != nil {
r.SetError(r.ctx.Err())
}
if r.status != execinfra.NeedMoreRows {
return r.status
}
if r.stmtType != tree.Rows {
n := int(tree.MustBeDInt(row[0].Datum))
// We only need the row count. planNodeToRowSource is set up to handle
// ensuring that the last stage in the pipeline will return a single-column
// row with the row count in it, so just grab that and exit.
r.resultWriter.IncrementRowsAffected(r.ctx, n)
return r.status
}
if r.discardRows {
// Discard rows.
return r.status
}
if r.existsMode {
// In "exists" mode, the consumer is only looking for whether a single
// row is pushed or not, so the contents do not matter.
r.row = []tree.Datum{}
log.VEvent(r.ctx, 2, `a row is pushed in "exists" mode, so transition to draining`)
r.status = execinfra.DrainRequested
} else {
if r.row == nil {
r.row = make(tree.Datums, len(row))
}
for i, encDatum := range row {
err := encDatum.EnsureDecoded(r.outputTypes[i], &r.alloc)
if err != nil {
r.SetError(err)
return r.status
}
r.row[i] = encDatum.Datum
}
}
r.tracing.TraceExecRowsResult(r.ctx, r.row)
if commErr := r.resultWriter.AddRow(r.ctx, r.row); commErr != nil {
r.handleCommErr(commErr)
}
return r.status
}
// PushBatch is part of the execinfra.BatchReceiver interface.
func (r *DistSQLReceiver) PushBatch(
batch coldata.Batch, meta *execinfrapb.ProducerMetadata,
) execinfra.ConsumerStatus {
if meta != nil {
return r.pushMeta(meta)
}
if r.resultWriter.Err() == nil && r.ctx.Err() != nil {
r.SetError(r.ctx.Err())
}
if r.status != execinfra.NeedMoreRows {
return r.status
}
if batch.Length() == 0 {
// Nothing to do on the zero-length batch.
return r.status
}
if r.stmtType != tree.Rows {
// We only need the row count. planNodeToRowSource is set up to handle
// ensuring that the last stage in the pipeline will return a single-column
// row with the row count in it, so just grab that and exit.
r.resultWriter.IncrementRowsAffected(r.ctx, int(batch.ColVec(0).Int64()[0]))
return r.status
}
if r.discardRows {
// Discard rows.
return r.status
}
if r.existsMode {
// Exists mode is only used by the subqueries which currently don't
// support pushing batches.
panic("unsupported exists mode for PushBatch")
}
r.tracing.TraceExecBatchResult(r.ctx, batch)
if commErr := r.batchWriter.AddBatch(r.ctx, batch); commErr != nil {
r.handleCommErr(commErr)
}
return r.status
}
var (
// ErrLimitedResultNotSupported is an error produced by pgwire
// indicating an unsupported feature of row count limits was attempted.
ErrLimitedResultNotSupported = unimplemented.NewWithIssue(40195, "multiple active portals not supported")
// ErrLimitedResultClosed is a sentinel error produced by pgwire
// indicating the portal should be closed without error.
ErrLimitedResultClosed = errors.New("row count limit closed")
)
// ProducerDone is part of the execinfra.RowReceiver interface.
func (r *DistSQLReceiver) ProducerDone() {
if r.closed {
panic("double close")
}
r.closed = true
}
// PlanAndRunAll combines running the main query, subqueries, and cascades/checks.
// If an error is returned, the connection needs to stop processing queries.
// Query execution errors are stored in recv; they are not returned.
func (dsp *DistSQLPlanner) PlanAndRunAll(
ctx context.Context,
evalCtx *extendedEvalContext,
planCtx *PlanningCtx,
planner *planner,
recv *DistSQLReceiver,
evalCtxFactory func() *extendedEvalContext,
) error {
if len(planner.curPlan.subqueryPlans) != 0 {
// Create a separate memory account for the results of the subqueries.
// Note that we intentionally defer the closure of the account until we
// return from this method (after the main query is executed).
subqueryResultMemAcc := planner.EvalContext().Mon.MakeBoundAccount()
defer subqueryResultMemAcc.Close(ctx)
if !dsp.PlanAndRunSubqueries(
ctx, planner, evalCtxFactory, planner.curPlan.subqueryPlans, recv, &subqueryResultMemAcc,
// Skip the diagram generation since on this "main" query path we
// can get it via the statement bundle.
true, /* skipDistSQLDiagramGeneration */
) {
return recv.commErr
}
}
recv.discardRows = planner.instrumentation.ShouldDiscardRows()
// We pass in whether or not we wanted to distribute this plan, which tells
// the planner whether or not to plan remote table readers.
cleanup := dsp.PlanAndRun(
ctx, evalCtx, planCtx, planner.txn, planner.curPlan.main, recv,
)
// Note that we're not cleaning up right away because postqueries might
// need to have access to the main query tree.
defer cleanup()
if recv.commErr != nil || recv.resultWriter.Err() != nil {
return recv.commErr
}
dsp.PlanAndRunCascadesAndChecks(
ctx, planner, evalCtxFactory, &planner.curPlan.planComponents, recv,
)
return recv.commErr
}
// PlanAndRunSubqueries returns false if an error was encountered and sets that
// error in the provided receiver. Note that if false is returned, then this
// function will have closed all the subquery plans because it assumes that the
// caller will not try to run the main plan given that the subqueries'
// evaluation failed.
// - subqueryResultMemAcc must be a non-nil memory account that the result of
// subqueries' evaluation will be registered with. It is the caller's
// responsibility to shrink (or close) the account accordingly, once the
// references to those results are lost.
func (dsp *DistSQLPlanner) PlanAndRunSubqueries(
ctx context.Context,
planner *planner,
evalCtxFactory func() *extendedEvalContext,
subqueryPlans []subquery,
recv *DistSQLReceiver,
subqueryResultMemAcc *mon.BoundAccount,
skipDistSQLDiagramGeneration bool,
) bool {
for planIdx, subqueryPlan := range subqueryPlans {
if err := dsp.planAndRunSubquery(
ctx,
planIdx,
subqueryPlan,
planner,
evalCtxFactory(),
subqueryPlans,
recv,
subqueryResultMemAcc,
skipDistSQLDiagramGeneration,
); err != nil {
recv.SetError(err)
// Usually we leave the closure of subqueries to occur when the
// whole plan is being closed (i.e. planTop.close); however, since
// we've encountered an error, we might never get to the point of
// closing the whole plan, so we choose to defensively close the
// subqueries here.
for i := range subqueryPlans {
subqueryPlans[i].plan.Close(ctx)
}
return false
}
}
return true
}
// subqueryResultMemAcc must be a non-nil memory account that the result of the
// subquery's evaluation will be registered with. It is the caller's
// responsibility to shrink it (or close it) accordingly, once the references to
// those results are lost.
func (dsp *DistSQLPlanner) planAndRunSubquery(
ctx context.Context,
planIdx int,
subqueryPlan subquery,
planner *planner,
evalCtx *extendedEvalContext,
subqueryPlans []subquery,
recv *DistSQLReceiver,
subqueryResultMemAcc *mon.BoundAccount,
skipDistSQLDiagramGeneration bool,
) error {
subqueryMonitor := mon.NewMonitor(
"subquery",
mon.MemoryResource,
dsp.distSQLSrv.Metrics.CurBytesCount,
dsp.distSQLSrv.Metrics.MaxBytesHist,
-1, /* use default block size */
noteworthyMemoryUsageBytes,
dsp.distSQLSrv.Settings,
)
subqueryMonitor.StartNoReserved(ctx, evalCtx.Mon)
defer subqueryMonitor.Stop(ctx)
subqueryMemAccount := subqueryMonitor.MakeBoundAccount()
defer subqueryMemAccount.Close(ctx)
distributeSubquery := getPlanDistribution(
ctx, planner, planner.execCfg.NodeInfo.NodeID, planner.SessionData().DistSQLMode, subqueryPlan.plan,
).WillDistribute()
distribute := DistributionType(DistributionTypeNone)
if distributeSubquery {
distribute = DistributionTypeAlways
}
subqueryPlanCtx := dsp.NewPlanningCtx(ctx, evalCtx, planner, planner.txn,
distribute)
subqueryPlanCtx.stmtType = tree.Rows
subqueryPlanCtx.skipDistSQLDiagramGeneration = skipDistSQLDiagramGeneration
if planner.instrumentation.ShouldSaveFlows() {
subqueryPlanCtx.saveFlows = subqueryPlanCtx.getDefaultSaveFlowsFunc(ctx, planner, planComponentTypeSubquery)
}
subqueryPlanCtx.traceMetadata = planner.instrumentation.traceMetadata
subqueryPlanCtx.collectExecStats = planner.instrumentation.ShouldCollectExecStats()
// Don't close the top-level plan from subqueries - someone else will handle
// that.
subqueryPlanCtx.ignoreClose = true
subqueryPhysPlan, physPlanCleanup, err := dsp.createPhysPlan(ctx, subqueryPlanCtx, subqueryPlan.plan)
defer physPlanCleanup()
if err != nil {
return err
}
dsp.finalizePlanWithRowCount(subqueryPlanCtx, subqueryPhysPlan, subqueryPlan.rowCount)
// TODO(arjun): #28264: We set up a row container, wrap it in a row
	// receiver, and use it to serialize the results of the subquery. The type
// of the results stored in the container depends on the type of the subquery.
subqueryRecv := recv.clone()
defer subqueryRecv.Release()
var typs []*types.T
if subqueryPlan.execMode == rowexec.SubqueryExecModeExists {
subqueryRecv.existsMode = true
typs = []*types.T{}
} else {
typs = subqueryPhysPlan.GetResultTypes()
}
var rows rowContainerHelper
rows.Init(typs, evalCtx, "subquery" /* opName */)
defer rows.Close(ctx)
// TODO(yuzefovich): consider implementing batch receiving result writer.
subqueryRowReceiver := NewRowResultWriter(&rows)
subqueryRecv.resultWriter = subqueryRowReceiver
subqueryPlans[planIdx].started = true
dsp.Run(ctx, subqueryPlanCtx, planner.txn, subqueryPhysPlan, subqueryRecv, evalCtx, nil /* finishedSetupFn */)()
if err := subqueryRowReceiver.Err(); err != nil {
return err
}
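	// alreadyAccountedFor tracks how many bytes have been incrementally
	// registered with subqueryResultMemAcc while materializing the result; it
	// is reconciled against the final result's footprint below.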
var alreadyAccountedFor int64
switch subqueryPlan.execMode {
case rowexec.SubqueryExecModeExists:
		// For EXISTS expressions, all we want to know is whether there is at least one row.
hasRows := rows.Len() != 0
subqueryPlans[planIdx].result = tree.MakeDBool(tree.DBool(hasRows))
case rowexec.SubqueryExecModeAllRows, rowexec.SubqueryExecModeAllRowsNormalized:
// TODO(yuzefovich): this is unfortunate - we're materializing all
// buffered rows into a single tuple kept in memory. Refactor it.
var result tree.DTuple
iterator := newRowContainerIterator(ctx, rows, typs)
defer iterator.Close()
for {
row, err := iterator.Next()
if err != nil {
return err
}
if row == nil {
break
}
var toAppend tree.Datum
if row.Len() == 1 {
// This seems hokey, but if we don't do this then the subquery expands
// to a tuple of tuples instead of a tuple of values and an expression
// like "k IN (SELECT foo FROM bar)" will fail because we're comparing
// a single value against a tuple.
toAppend = row[0]
} else {
toAppend = &tree.DTuple{D: row}
}
// Perform memory accounting for this datum. We do this in an
// incremental fashion since we might be materializing a lot of data
// into a single result tuple, and the memory accounting below might
// come too late.
size := int64(toAppend.Size())
alreadyAccountedFor += size
if err = subqueryResultMemAcc.Grow(ctx, size); err != nil {
return err
}
result.D = append(result.D, toAppend)
}
if subqueryPlan.execMode == rowexec.SubqueryExecModeAllRowsNormalized {
// During the normalization, we will remove duplicate elements which
// we've already accounted for. That's ok because below we will
// reconcile the incremental accounting with the final result's
// memory footprint.
result.Normalize(&evalCtx.Context)
}
subqueryPlans[planIdx].result = &result
case rowexec.SubqueryExecModeOneRow:
switch rows.Len() {
case 0:
subqueryPlans[planIdx].result = tree.DNull
case 1:
iterator := newRowContainerIterator(ctx, rows, typs)
defer iterator.Close()
row, err := iterator.Next()
if err != nil {
return err
}
if row == nil {
return errors.AssertionFailedf("iterator didn't return a row although container len is 1")
}
switch row.Len() {
case 1:
subqueryPlans[planIdx].result = row[0]
default:
subqueryPlans[planIdx].result = &tree.DTuple{D: row}
}
default:
return pgerror.Newf(pgcode.CardinalityViolation,
"more than one row returned by a subquery used as an expression")
}
default:
return fmt.Errorf("unexpected subqueryExecMode: %d", subqueryPlan.execMode)
}
// Account for the result of the subquery using the separate memory account
// since it outlives the execution of the subquery itself.
actualSize := int64(subqueryPlans[planIdx].result.Size())
if actualSize >= alreadyAccountedFor {
if err := subqueryResultMemAcc.Grow(ctx, actualSize-alreadyAccountedFor); err != nil {
return err
}
} else {
// We've accounted for more than the actual result needs. For example,
// this could occur in rowexec.SubqueryExecModeAllRowsNormalized mode
// with many duplicate elements.
subqueryResultMemAcc.Shrink(ctx, alreadyAccountedFor-actualSize)
}
return nil
}
// PlanAndRun generates a physical plan from a planNode tree and executes it. It
// assumes that the tree is supported (see CheckSupport).
//
// All errors encountered are reported to the DistSQLReceiver's resultWriter.
// Additionally, if the error is a "communication error" (an error encountered
// while using that resultWriter), the error is also stored in
// DistSQLReceiver.commErr. That can be tested to see if a client session needs
// to be closed.
//
// It returns a non-nil (although it can be a noop when an error is
// encountered) cleanup function that must be called once the planTop AST is no
// longer needed and can be closed. Note that this function also cleans up the
// flow, which is unfortunate but is caused by the sharing of memory monitors
// between planning and execution - cleaning up the flow wants to close the
// monitor, but it cannot do so because the AST needs to live longer and still
// uses the same monitor. That's why we end up in a situation where, in order to
// clean up the flow, we need to close the AST first, but we can only do that
// after PlanAndRun returns.
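//
// Illustrative call pattern (a sketch based on PlanAndRunAll above; error
// handling elided):
//
//	cleanup := dsp.PlanAndRun(ctx, evalCtx, planCtx, txn, plan.main, recv)
//	// Postqueries may still need the main plan tree, so don't clean up yet.
//	defer cleanup()
//	if recv.commErr != nil || recv.resultWriter.Err() != nil { /* handle the error */ }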
func (dsp *DistSQLPlanner) PlanAndRun(
ctx context.Context,
evalCtx *extendedEvalContext,
planCtx *PlanningCtx,
txn *kv.Txn,
plan planMaybePhysical,
recv *DistSQLReceiver,
) (cleanup func()) {
log.VEventf(ctx, 2, "creating DistSQL plan with isLocal=%v", planCtx.isLocal)
physPlan, physPlanCleanup, err := dsp.createPhysPlan(ctx, planCtx, plan)
if err != nil {
recv.SetError(err)
return func() {
// Make sure to close the current plan in case of a physical
// planning error. Usually, this is done in runCleanup() below, but
// we won't get to that point, so we have to do so here.
planCtx.planner.curPlan.close(ctx)
physPlanCleanup()
}
}
dsp.finalizePlanWithRowCount(planCtx, physPlan, planCtx.planner.curPlan.mainRowCount)
recv.expectedRowsRead = int64(physPlan.TotalEstimatedScannedRows)
runCleanup := dsp.Run(ctx, planCtx, txn, physPlan, recv, evalCtx, nil /* finishedSetupFn */)
return func() {
runCleanup()
physPlanCleanup()
}
}
// PlanAndRunCascadesAndChecks runs any cascade and check queries.
//
// Because cascades can themselves generate more cascades or check queries, this
// method can append to plan.cascades and plan.checkPlans (and all these plans
// must be closed later).
//
// Returns false if an error was encountered and sets that error in the provided
// receiver.
func (dsp *DistSQLPlanner) PlanAndRunCascadesAndChecks(
ctx context.Context,
planner *planner,
evalCtxFactory func() *extendedEvalContext,
plan *planComponents,
recv *DistSQLReceiver,
) bool {
if len(plan.cascades) == 0 && len(plan.checkPlans) == 0 {
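		// Nothing to plan or run.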
return false
}
prevSteppingMode := planner.Txn().ConfigureStepping(ctx, kv.SteppingEnabled)
defer func() { _ = planner.Txn().ConfigureStepping(ctx, prevSteppingMode) }()
// We treat plan.cascades as a queue.
for i := 0; i < len(plan.cascades); i++ {
		// The original bufferNode is stored in plan.cascades[i].Buffer; we can
		// refer to it directly.
// TODO(radu): this requires keeping all previous plans "alive" until the
// very end. We may want to make copies of the buffer nodes and clean up
// everything else.
buf := plan.cascades[i].Buffer
var numBufferedRows int
if buf != nil {
numBufferedRows = buf.(*bufferNode).rows.rows.Len()
if numBufferedRows == 0 {
// No rows were actually modified.
continue
}
}
log.VEventf(ctx, 2, "executing cascade for constraint %s", plan.cascades[i].FKName)
// We place a sequence point before every cascade, so
// that each subsequent cascade can observe the writes
// by the previous step.
// TODO(radu): the cascades themselves can have more cascades; if any of
// those fall back to legacy cascades code, it will disable stepping. So we
// have to reenable stepping each time.
_ = planner.Txn().ConfigureStepping(ctx, kv.SteppingEnabled)
if err := planner.Txn().Step(ctx); err != nil {
recv.SetError(err)
return false
}
evalCtx := evalCtxFactory()
execFactory := newExecFactory(planner)
// The cascading query is allowed to autocommit only if it is the last
// cascade and there are no check queries to run.
allowAutoCommit := planner.autoCommit
if len(plan.checkPlans) > 0 || i < len(plan.cascades)-1 {
allowAutoCommit = false
}
cascadePlan, err := plan.cascades[i].PlanFn(
ctx, &planner.semaCtx, &evalCtx.Context, execFactory,
buf, numBufferedRows, allowAutoCommit,
)
if err != nil {
recv.SetError(err)
return false
}
cp := cascadePlan.(*planComponents)
plan.cascades[i].plan = cp.main
if len(cp.subqueryPlans) > 0 {
recv.SetError(errors.AssertionFailedf("cascades should not have subqueries"))
return false
}
// Queue any new cascades.
if len(cp.cascades) > 0 {
plan.cascades = append(plan.cascades, cp.cascades...)
}
// Collect any new checks.
if len(cp.checkPlans) > 0 {
plan.checkPlans = append(plan.checkPlans, cp.checkPlans...)
}
// In cyclical reference situations, the number of cascading operations can
// be arbitrarily large. To avoid OOM, we enforce a limit. This is also a
// safeguard in case we have a bug that results in an infinite cascade loop.
if limit := int(evalCtx.SessionData().OptimizerFKCascadesLimit); len(plan.cascades) > limit {
telemetry.Inc(sqltelemetry.CascadesLimitReached)
err := pgerror.Newf(pgcode.TriggeredActionException, "cascades limit (%d) reached", limit)
recv.SetError(err)
return false
}
if err := dsp.planAndRunPostquery(
ctx,
cp.main,
planner,
evalCtx,
recv,
); err != nil {
recv.SetError(err)
return false
}
}
if len(plan.checkPlans) == 0 {
return true
}
// We place a sequence point before the checks, so that they observe the
// writes of the main query and/or any cascades.
// TODO(radu): the cascades themselves can have more cascades; if any of
// those fall back to legacy cascades code, it will disable stepping. So we
// have to reenable stepping each time.
_ = planner.Txn().ConfigureStepping(ctx, kv.SteppingEnabled)
if err := planner.Txn().Step(ctx); err != nil {
recv.SetError(err)
return false
}
for i := range plan.checkPlans {
log.VEventf(ctx, 2, "executing check query %d out of %d", i+1, len(plan.checkPlans))
if err := dsp.planAndRunPostquery(
ctx,
plan.checkPlans[i].plan,
planner,
evalCtxFactory(),
recv,
); err != nil {
recv.SetError(err)
return false
}
}
return true
}
// planAndRunPostquery runs a cascade or check query.
func (dsp *DistSQLPlanner) planAndRunPostquery(
ctx context.Context,
postqueryPlan planMaybePhysical,
planner *planner,
evalCtx *extendedEvalContext,
recv *DistSQLReceiver,
) error {
postqueryMonitor := mon.NewMonitor(
"postquery",
mon.MemoryResource,
dsp.distSQLSrv.Metrics.CurBytesCount,
dsp.distSQLSrv.Metrics.MaxBytesHist,
-1, /* use default block size */
noteworthyMemoryUsageBytes,
dsp.distSQLSrv.Settings,
)
postqueryMonitor.StartNoReserved(ctx, evalCtx.Mon)
defer postqueryMonitor.Stop(ctx)
postqueryMemAccount := postqueryMonitor.MakeBoundAccount()
defer postqueryMemAccount.Close(ctx)
distributePostquery := getPlanDistribution(
ctx, planner, planner.execCfg.NodeInfo.NodeID, planner.SessionData().DistSQLMode, postqueryPlan,
).WillDistribute()
distribute := DistributionType(DistributionTypeNone)
if distributePostquery {
distribute = DistributionTypeAlways
}
postqueryPlanCtx := dsp.NewPlanningCtx(ctx, evalCtx, planner, planner.txn, distribute)
postqueryPlanCtx.stmtType = tree.Rows
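	// As with subqueries, don't close the plan from here - its closure is
	// handled elsewhere.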
postqueryPlanCtx.ignoreClose = true
// Postqueries are only executed on the main query path where we skip the
// diagram generation.
postqueryPlanCtx.skipDistSQLDiagramGeneration = true
if planner.instrumentation.ShouldSaveFlows() {
postqueryPlanCtx.saveFlows = postqueryPlanCtx.getDefaultSaveFlowsFunc(ctx, planner, planComponentTypePostquery)
}
postqueryPlanCtx.traceMetadata = planner.instrumentation.traceMetadata
postqueryPlanCtx.collectExecStats = planner.instrumentation.ShouldCollectExecStats()
postqueryPhysPlan, physPlanCleanup, err := dsp.createPhysPlan(ctx, postqueryPlanCtx, postqueryPlan)
defer physPlanCleanup()
if err != nil {
return err
}
dsp.FinalizePlan(postqueryPlanCtx, postqueryPhysPlan)
postqueryRecv := recv.clone()
defer postqueryRecv.Release()
// TODO(yuzefovich): at the moment, errOnlyResultWriter is sufficient here,
	// but that may not be the case when we support cascades through the optimizer.
postqueryResultWriter := &errOnlyResultWriter{}
postqueryRecv.resultWriter = postqueryResultWriter
postqueryRecv.batchWriter = postqueryResultWriter
dsp.Run(ctx, postqueryPlanCtx, planner.txn, postqueryPhysPlan, postqueryRecv, evalCtx, nil /* finishedSetupFn */)()
return postqueryRecv.resultWriter.Err()
}
| pkg/sql/distsql_running.go | 1 | https://github.com/cockroachdb/cockroach/commit/4a3592d16fc45d28640451115293119b8ff3cfe8 | [
0.0061272429302334785,
0.00028356051188893616,
0.00016100250650197268,
0.00017008042777888477,
0.0005114087252877653
] |
{
"id": 0,
"code_window": [
"\t\tDiskMonitor: execinfra.NewMonitor(\n",
"\t\t\tctx, ds.ParentDiskMonitor, \"flow-disk-monitor\",\n",
"\t\t),\n",
"\t\tPreserveFlowSpecs: localState.PreserveFlowSpecs,\n",
"\t}\n",
"\n",
"\tif localState.IsLocal && localState.Collection != nil {\n",
"\t\t// If we were passed a descs.Collection to use, then take it. In this case,\n",
"\t\t// the caller will handle releasing the used descriptors, so we don't need\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/distsql/server.go",
"type": "replace",
"edit_start_line_idx": 490
} | #!/usr/bin/env bash
set -euo pipefail
dir="$(dirname $(dirname $(dirname $(dirname $(dirname "${0}")))))"
source "$dir/teamcity-support.sh" # For $root, would_stress
source "$dir/teamcity-bazel-support.sh" # For run_bazel
if would_stress; then
tc_start_block "Run stress tests"
run_bazel env BUILD_VCS_NUMBER="$BUILD_VCS_NUMBER" build/teamcity/cockroach/ci/tests/maybe_stress_impl.sh stressrace
tc_end_block "Run stress tests"
fi
| build/teamcity/cockroach/ci/tests/maybe_stressrace.sh | 0 | https://github.com/cockroachdb/cockroach/commit/4a3592d16fc45d28640451115293119b8ff3cfe8 | [
0.00017644508625380695,
0.0001756185374688357,
0.00017479198868386447,
0.0001756185374688357,
8.265487849712372e-7
] |
{
"id": 0,
"code_window": [
"\t\tDiskMonitor: execinfra.NewMonitor(\n",
"\t\t\tctx, ds.ParentDiskMonitor, \"flow-disk-monitor\",\n",
"\t\t),\n",
"\t\tPreserveFlowSpecs: localState.PreserveFlowSpecs,\n",
"\t}\n",
"\n",
"\tif localState.IsLocal && localState.Collection != nil {\n",
"\t\t// If we were passed a descs.Collection to use, then take it. In this case,\n",
"\t\t// the caller will handle releasing the used descriptors, so we don't need\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/distsql/server.go",
"type": "replace",
"edit_start_line_idx": 490
} | #! /usr/bin/env expect -f
source [file join [file dirname $argv0] common.tcl]
spawn /bin/bash
send "PS1=':''/# '\r"
eexpect ":/# "
# Perform command-line checking for logging flags. We cannot use a
# regular unit test for this, because the logging flags are declared
# for the global `CommandLine` object of package `flag`, and any
# errors when parsing flags in that context cause the (test) process
# to exit entirely (it has errorHandling set to ExitOnError).
start_test "Check that log files are created by default in the store directory."
send "$argv start-single-node --insecure --store=path=logs/mystore\r"
eexpect "node starting"
interrupt
eexpect ":/# "
send "ls logs/mystore/logs\r"
eexpect "cockroach.log"
eexpect ":/# "
end_test
start_test "Check that an empty -log-dir disables file logging."
send "$argv start-single-node --insecure --store=path=logs/mystore2 --log-dir=\r"
eexpect "node starting"
interrupt
eexpect ":/# "
send "ls logs/mystore2/logs 2>/dev/null | grep -vE 'heap_profiler|goroutine_dump|inflight_trace_dump' | wc -l\r"
eexpect "0"
eexpect ":/# "
end_test
start_test "Check that leading tildes are properly rejected."
send "$argv start-single-node --insecure -s=path=logs/db --log-dir=\~/blah\r"
eexpect "log directory cannot start with '~'"
eexpect ":/# "
end_test
start_test "Check that the user can override."
send "$argv start-single-node --insecure -s=path=logs/db --log-dir=logs/blah/\~/blah\r"
eexpect "logs: *blah/~/blah"
interrupt
eexpect ":/# "
end_test
start_test "Check that TRUE and FALSE are valid values for the severity flags."
send "$argv start-single-node --insecure -s=path=logs/db --logtostderr=false\r"
eexpect "node starting"
interrupt
eexpect ":/# "
send "$argv start-single-node --insecure -s=path=logs/db --logtostderr=true\r"
eexpect "node starting"
interrupt
eexpect ":/# "
send "$argv start-single-node --insecure -s=path=logs/db --logtostderr=2\r"
eexpect "node starting"
interrupt
eexpect ":/# "
send "$argv start-single-node --insecure -s=path=logs/db --logtostderr=cantparse\r"
eexpect "parsing \"cantparse\": invalid syntax"
eexpect ":/# "
end_test
start_test "Check that conflicting legacy and new flags are properly rejected for server commands"
send "$argv start-single-node --insecure --logtostderr=true --log=abc\r"
eexpect "log is incompatible with legacy discrete logging flag"
eexpect ":/# "
end_test
start_test "Check that conflicting legacy and new flags are properly rejected for client commands"
send "$argv sql --insecure --logtostderr=true --log=abc\r"
eexpect "log is incompatible with legacy discrete logging flag"
eexpect ":/# "
end_test
start_test "Check that the log flag is properly recognized for non-server commands"
send "$argv debug reset-quorum 123 --log='sinks: {stderr: {format: json }}'\r"
eexpect "\"severity\":\"ERROR\""
eexpect "connection to server failed"
eexpect ":/# "
send "exit 0\r"
eexpect eof
| pkg/cli/interactive_tests/test_log_flags.tcl | 0 | https://github.com/cockroachdb/cockroach/commit/4a3592d16fc45d28640451115293119b8ff3cfe8 | [
0.00017621439474169165,
0.0001723332388792187,
0.00016670458717271686,
0.0001735049154376611,
0.000003538983946782537
] |
{
"id": 0,
"code_window": [
"\t\tDiskMonitor: execinfra.NewMonitor(\n",
"\t\t\tctx, ds.ParentDiskMonitor, \"flow-disk-monitor\",\n",
"\t\t),\n",
"\t\tPreserveFlowSpecs: localState.PreserveFlowSpecs,\n",
"\t}\n",
"\n",
"\tif localState.IsLocal && localState.Collection != nil {\n",
"\t\t// If we were passed a descs.Collection to use, then take it. In this case,\n",
"\t\t// the caller will handle releasing the used descriptors, so we don't need\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/distsql/server.go",
"type": "replace",
"edit_start_line_idx": 490
} | // Copyright 2020 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
import React from "react";
import { shallow } from "enzyme";
import { createMemoryHistory, History } from "history";
import { match as Match } from "react-router-dom";
import { ClusterVisualization } from "./index";
import { Breadcrumbs } from "./breadcrumbs";
describe("ClusterVisualization", () => {
describe("parse tiers params from URL path", () => {
let history: History;
let match: Match;
beforeEach(() => {
history = createMemoryHistory();
match = {
path: "/overview/map",
params: {},
url: "http://localhost/overview/map",
isExact: true,
};
});
// parsed tiers from params are not stored in state and passed directly to <Breadcrumbs />
// component so we can validate the parsed result by checking Breadcrumbs props.
it("parses tiers as empty array for /overview/map path", () => {
const wrapper = shallow(
<ClusterVisualization
history={history}
location={history.location}
clusterDataError={null}
enterpriseEnabled={true}
licenseDataExists={true}
match={match}
/>,
);
history.push("/overview/map");
wrapper.update();
expect(wrapper.find(Breadcrumbs).prop("tiers").length).toBe(0);
});
it("parses multiple tiers in path for `/overview/map/region=us-west/az=a` path", () => {
history.push("/overview/map/region=us-west/az=a");
const wrapper = shallow(
<ClusterVisualization
history={history}
location={history.location}
clusterDataError={null}
enterpriseEnabled={true}
licenseDataExists={true}
match={match}
/>,
);
wrapper.update();
const expectedTiers = [
{ key: "region", value: "us-west" },
{ key: "az", value: "a" },
];
expect(wrapper.find(Breadcrumbs).prop("tiers")).toEqual(expectedTiers);
});
});
});
| pkg/ui/workspaces/db-console/ccl/src/views/clusterviz/containers/map/index.spec.tsx | 0 | https://github.com/cockroachdb/cockroach/commit/4a3592d16fc45d28640451115293119b8ff3cfe8 | [
0.00017875384946819395,
0.00017540622502565384,
0.00016973784659057856,
0.0001757366262609139,
0.0000023580462311656447
] |
{
"id": 1,
"code_window": [
"\tTxn *kv.Txn\n",
"\n",
"\t// LocalProcs is an array of planNodeToRowSource processors. It's in order and\n",
"\t// will be indexed into by the RowSourceIdx field in LocalPlanNodeSpec.\n",
"\tLocalProcs []execinfra.LocalProcessor\n",
"\n",
"\t// PreserveFlowSpecs is true when the flow setup code needs to be careful\n",
"\t// when modifying the specifications of processors.\n",
"\tPreserveFlowSpecs bool\n",
"}\n",
"\n",
"// MustUseLeafTxn returns true if a LeafTxn must be used. It is valid to call\n",
"// this method only after IsLocal and HasConcurrency have been set correctly.\n",
"func (l LocalState) MustUseLeafTxn() bool {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/distsql/server.go",
"type": "replace",
"edit_start_line_idx": 553
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// This file lives here instead of sql/flowinfra to avoid an import cycle.
package execinfra
import (
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/evalcatalog"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/stop"
)
// FlowCtx encompasses the configuration parameters needed for various flow
// components.
type FlowCtx struct {
AmbientContext log.AmbientContext
Cfg *ServerConfig
// ID is a unique identifier for a flow.
ID execinfrapb.FlowID
// EvalCtx is used by all the processors in the flow to evaluate expressions.
// Processors that intend to evaluate expressions with this EvalCtx should
// get a copy with NewEvalCtx instead of storing a pointer to this one
	// directly (since some processors mutate the EvalContext they use).
//
// TODO(andrei): Get rid of this field and pass a non-shared EvalContext to
// cores of the processors that need it.
EvalCtx *eval.Context
// The transaction in which kv operations performed by processors in the flow
// must be performed. Processors in the Flow will use this txn concurrently.
// This field is generally not nil, except for flows that don't run in a
// higher-level txn (like backfills).
Txn *kv.Txn
// MakeLeafTxn returns a new LeafTxn, different from Txn.
MakeLeafTxn func() (*kv.Txn, error)
// Descriptors is used to look up leased table descriptors and to construct
// transaction bound TypeResolvers to resolve type references during flow
// setup. It is not safe for concurrent use and is intended to be used only
// during flow setup and initialization. The Descriptors object is initialized
// when the FlowContext is created on the gateway node using the planner's
	// descs.Collection and is created on remote nodes with a new descs.Collection.
// In the latter case, after the flow is complete, all descriptors leased from
// this object must be released.
Descriptors *descs.Collection
// EvalCatalogBuiltins is initialized if the flow context is remote and the
// above descs.Collection is non-nil. It is referenced in the eval.Context
// in order to provide catalog access to builtins.
EvalCatalogBuiltins evalcatalog.Builtins
// IsDescriptorsCleanupRequired is set if Descriptors needs to release the
// leases it acquired after the flow is complete.
IsDescriptorsCleanupRequired bool
// nodeID is the ID of the node on which the processors using this FlowCtx
// run.
NodeID *base.SQLIDContainer
// TraceKV is true if KV tracing was requested by the session.
TraceKV bool
// CollectStats is true if execution stats collection was requested.
CollectStats bool
// Local is true if this flow is being run as part of a local-only query.
Local bool
// Gateway is true if this flow is being run on the gateway node.
Gateway bool
// DiskMonitor is this flow's disk monitor. All disk usage for this flow must
// be registered through this monitor.
DiskMonitor *mon.BytesMonitor
// PreserveFlowSpecs is true when the flow setup code needs to be careful
// when modifying the specifications of processors.
PreserveFlowSpecs bool
}
// NewEvalCtx returns a modifiable copy of the FlowCtx's EvalContext.
// Processors should use this method any time they need to store a pointer to
// the EvalContext, since processors may mutate the EvalContext. Specifically,
// every processor that runs ProcOutputHelper.Init must pass in a modifiable
// EvalContext, since it stores that EvalContext in its exprHelpers and mutates
// them at runtime to ensure expressions are evaluated with the correct indexed
// var context.
func (ctx *FlowCtx) NewEvalCtx() *eval.Context {
evalCopy := ctx.EvalCtx.Copy()
return evalCopy
}
// TestingKnobs returns the distsql testing knobs for this flow context.
func (ctx *FlowCtx) TestingKnobs() TestingKnobs {
return ctx.Cfg.TestingKnobs
}
// Stopper returns the stopper for this flowCtx.
func (ctx *FlowCtx) Stopper() *stop.Stopper {
return ctx.Cfg.Stopper
}
// Codec returns the SQL codec for this flowCtx.
func (ctx *FlowCtx) Codec() keys.SQLCodec {
return ctx.EvalCtx.Codec
}
// TableDescriptor returns a catalog.TableDescriptor object for the given
// descriptor proto, using the descriptors collection if it is available.
func (ctx *FlowCtx) TableDescriptor(desc *descpb.TableDescriptor) catalog.TableDescriptor {
if desc == nil {
return nil
}
if ctx != nil && ctx.Descriptors != nil && ctx.Txn != nil {
leased, _ := ctx.Descriptors.GetLeasedImmutableTableByID(ctx.EvalCtx.Ctx(), ctx.Txn, desc.ID)
if leased != nil && leased.GetVersion() == desc.Version {
return leased
}
}
return tabledesc.NewUnsafeImmutable(desc)
}
// NewTypeResolver creates a new TypeResolver that is bound under the input
// transaction. It returns a nil resolver if the FlowCtx doesn't hold a
// descs.Collection object.
func (ctx *FlowCtx) NewTypeResolver(txn *kv.Txn) descs.DistSQLTypeResolver {
if ctx == nil || ctx.Descriptors == nil {
return descs.DistSQLTypeResolver{}
}
return descs.NewDistSQLTypeResolver(ctx.Descriptors, txn)
}
// NewSemaContext creates a new SemaContext with a TypeResolver bound to the
// input transaction.
func (ctx *FlowCtx) NewSemaContext(txn *kv.Txn) *tree.SemaContext {
resolver := ctx.NewTypeResolver(txn)
semaCtx := tree.MakeSemaContext()
semaCtx.TypeResolver = &resolver
return &semaCtx
}
// ProcessorComponentID returns a ComponentID for the given processor in this
// flow.
func (ctx *FlowCtx) ProcessorComponentID(procID int32) execinfrapb.ComponentID {
return execinfrapb.ProcessorComponentID(ctx.NodeID.SQLInstanceID(), ctx.ID, procID)
}
// StreamComponentID returns a ComponentID for the given stream in this flow.
// The stream must originate from the node associated with this FlowCtx.
func (ctx *FlowCtx) StreamComponentID(streamID execinfrapb.StreamID) execinfrapb.ComponentID {
return execinfrapb.StreamComponentID(ctx.NodeID.SQLInstanceID(), ctx.ID, streamID)
}
| pkg/sql/execinfra/flow_context.go | 1 | https://github.com/cockroachdb/cockroach/commit/4a3592d16fc45d28640451115293119b8ff3cfe8 | [
0.02432052418589592,
0.002945711137726903,
0.00016396782302763313,
0.00044548441655933857,
0.00568192545324564
] |
{
"id": 1,
"code_window": [
"\tTxn *kv.Txn\n",
"\n",
"\t// LocalProcs is an array of planNodeToRowSource processors. It's in order and\n",
"\t// will be indexed into by the RowSourceIdx field in LocalPlanNodeSpec.\n",
"\tLocalProcs []execinfra.LocalProcessor\n",
"\n",
"\t// PreserveFlowSpecs is true when the flow setup code needs to be careful\n",
"\t// when modifying the specifications of processors.\n",
"\tPreserveFlowSpecs bool\n",
"}\n",
"\n",
"// MustUseLeafTxn returns true if a LeafTxn must be used. It is valid to call\n",
"// this method only after IsLocal and HasConcurrency have been set correctly.\n",
"func (l LocalState) MustUseLeafTxn() bool {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/distsql/server.go",
"type": "replace",
"edit_start_line_idx": 553
} | // Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tests_test
import (
"bytes"
"context"
gosql "database/sql"
"fmt"
"math/rand"
"reflect"
"runtime"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
kv2 "github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
)
type kvInterface interface {
Insert(rows, run int) error
Update(rows, run int) error
Delete(rows, run int) error
Scan(rows, run int) error
prep(rows int, initData bool) error
done()
}
// kvNative uses the native client package to implement kvInterface.
type kvNative struct {
db *kv2.DB
epoch int
prefix string
doneFn func()
}
func newKVNative(b *testing.B) kvInterface {
s, _, db := serverutils.StartServer(b, base.TestServerArgs{})
// Note that using the local client.DB isn't a strictly fair
// comparison with SQL as we want these client requests to be sent
// over the network.
return &kvNative{
db: db,
doneFn: func() {
s.Stopper().Stop(context.Background())
},
}
}
func (kv *kvNative) Insert(rows, run int) error {
firstRow := rows * run
lastRow := rows * (run + 1)
err := kv.db.Txn(context.Background(), func(ctx context.Context, txn *kv2.Txn) error {
b := txn.NewBatch()
for i := firstRow; i < lastRow; i++ {
b.Put(fmt.Sprintf("%s%08d", kv.prefix, i), i)
}
return txn.CommitInBatch(ctx, b)
})
return err
}
func (kv *kvNative) Update(rows, run int) error {
err := kv.db.Txn(context.Background(), func(ctx context.Context, txn *kv2.Txn) error {
// Read all values in a batch.
b := txn.NewBatch()
// Don't permute the rows, to be similar to SQL which sorts the spans in a
// batch.
for i := 0; i < rows; i++ {
b.GetForUpdate(fmt.Sprintf("%s%08d", kv.prefix, i))
}
if err := txn.Run(ctx, b); err != nil {
return err
}
// Now add one to each value and add as puts to write batch.
wb := txn.NewBatch()
for i, result := range b.Results {
v := result.Rows[0].ValueInt()
wb.Put(fmt.Sprintf("%s%08d", kv.prefix, i), v+1)
}
return txn.CommitInBatch(ctx, wb)
})
return err
}
func (kv *kvNative) Delete(rows, run int) error {
firstRow := rows * run
lastRow := rows * (run + 1)
err := kv.db.Txn(context.Background(), func(ctx context.Context, txn *kv2.Txn) error {
b := txn.NewBatch()
for i := firstRow; i < lastRow; i++ {
b.Del(fmt.Sprintf("%s%08d", kv.prefix, i))
}
return txn.CommitInBatch(ctx, b)
})
return err
}
func (kv *kvNative) Scan(rows, run int) error {
var kvs []kv2.KeyValue
err := kv.db.Txn(context.Background(), func(ctx context.Context, txn *kv2.Txn) error {
var err error
kvs, err = txn.Scan(ctx, fmt.Sprintf("%s%08d", kv.prefix, 0), fmt.Sprintf("%s%08d", kv.prefix, rows), int64(rows))
return err
})
if len(kvs) != rows {
return errors.Errorf("expected %d rows; got %d", rows, len(kvs))
}
return err
}
func (kv *kvNative) prep(rows int, initData bool) error {
kv.epoch++
kv.prefix = fmt.Sprintf("%d/", kv.epoch)
if !initData {
return nil
}
err := kv.db.Txn(context.Background(), func(ctx context.Context, txn *kv2.Txn) error {
b := txn.NewBatch()
for i := 0; i < rows; i++ {
b.Put(fmt.Sprintf("%s%08d", kv.prefix, i), i)
}
return txn.CommitInBatch(ctx, b)
})
return err
}
func (kv *kvNative) done() {
kv.doneFn()
}
// kvSQL is a SQL-based implementation of the KV interface.
type kvSQL struct {
db *gosql.DB
buf bytes.Buffer
doneFn func()
}
func newKVSQL(b *testing.B) kvInterface {
s, db, _ := serverutils.StartServer(b, base.TestServerArgs{UseDatabase: "bench"})
if _, err := db.Exec(`CREATE DATABASE IF NOT EXISTS bench`); err != nil {
b.Fatal(err)
}
kv := &kvSQL{}
kv.db = db
kv.doneFn = func() {
s.Stopper().Stop(context.Background())
}
return kv
}
func (kv *kvSQL) Insert(rows, run int) error {
firstRow := rows * run
defer kv.buf.Reset()
kv.buf.WriteString(`INSERT INTO bench.kv VALUES `)
for i := 0; i < rows; i++ {
if i > 0 {
kv.buf.WriteString(", ")
}
fmt.Fprintf(&kv.buf, "('%08d', %d)", i+firstRow, i)
}
_, err := kv.db.Exec(kv.buf.String())
return err
}
func (kv *kvSQL) Update(rows, run int) error {
return kv.UpdateWithShift(rows, 0)
}
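// UpdateWithShift updates `rows` rows whose keys start at offset rows*run,
// visiting the keys in a random order within a single UPDATE statement.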
func (kv *kvSQL) UpdateWithShift(rows, run int) error {
startRow := rows * run
perm := rand.Perm(rows)
defer kv.buf.Reset()
kv.buf.WriteString(`UPDATE bench.kv SET v = v + 1 WHERE k IN (`)
for j := 0; j < rows; j++ {
if j > 0 {
kv.buf.WriteString(", ")
}
fmt.Fprintf(&kv.buf, `'%08d'`, startRow+perm[j])
}
kv.buf.WriteString(`)`)
_, err := kv.db.Exec(kv.buf.String())
return err
}
func (kv *kvSQL) Delete(rows, run int) error {
firstRow := rows * run
defer kv.buf.Reset()
kv.buf.WriteString(`DELETE FROM bench.kv WHERE k IN (`)
for j := 0; j < rows; j++ {
if j > 0 {
kv.buf.WriteString(", ")
}
fmt.Fprintf(&kv.buf, `'%08d'`, j+firstRow)
}
kv.buf.WriteString(`)`)
_, err := kv.db.Exec(kv.buf.String())
return err
}
func (kv *kvSQL) Scan(count, run int) error {
rows, err := kv.db.Query(fmt.Sprintf("SELECT * FROM bench.kv LIMIT %d", count))
if err != nil {
return err
}
n := 0
for rows.Next() {
n++
}
rows.Close()
if err := rows.Err(); err != nil {
return err
}
if n != count {
return errors.Errorf("unexpected result count: %d (expected %d)", n, count)
}
return nil
}
func (kv *kvSQL) prep(rows int, initData bool) error {
if _, err := kv.db.Exec(`DROP TABLE IF EXISTS bench.kv`); err != nil {
return err
}
schema := `
CREATE TABLE IF NOT EXISTS bench.kv (
k STRING PRIMARY KEY,
v INT,
FAMILY (k, v)
)
`
if _, err := kv.db.Exec(schema); err != nil {
return err
}
if !initData {
return nil
}
defer kv.buf.Reset()
kv.buf.WriteString(`INSERT INTO bench.kv VALUES `)
numRowsInBatch := 0
for i := 0; i < rows; i++ {
if numRowsInBatch > 0 {
kv.buf.WriteString(", ")
}
fmt.Fprintf(&kv.buf, "('%08d', %d)", i, i)
numRowsInBatch++
// Break initial inserts into batches of 1000 rows, since some tests can
// overflow the batch limits.
if numRowsInBatch > 1000 {
if _, err := kv.db.Exec(kv.buf.String()); err != nil {
return err
}
kv.buf.Reset()
kv.buf.WriteString(`INSERT INTO bench.kv VALUES `)
numRowsInBatch = 0
}
}
var err error
if numRowsInBatch > 0 {
if _, err = kv.db.Exec(kv.buf.String()); err != nil {
return err
}
}
// Ensure stats are up-to-date.
_, err = kv.db.Exec("ANALYZE bench.kv")
return err
}
func (kv *kvSQL) done() {
kv.doneFn()
}
func BenchmarkKV(b *testing.B) {
defer log.Scope(b).Close(b)
for i, opFn := range []func(kvInterface, int, int) error{
kvInterface.Insert,
kvInterface.Update,
kvInterface.Delete,
kvInterface.Scan,
} {
opName := runtime.FuncForPC(reflect.ValueOf(opFn).Pointer()).Name()
opName = strings.TrimPrefix(opName, "github.com/cockroachdb/cockroach/pkg/sql/tests_test.kvInterface.")
b.Run(opName, func(b *testing.B) {
for _, kvFn := range []func(*testing.B) kvInterface{
newKVNative,
newKVSQL,
} {
kvTyp := runtime.FuncForPC(reflect.ValueOf(kvFn).Pointer()).Name()
kvTyp = strings.TrimPrefix(kvTyp, "github.com/cockroachdb/cockroach/pkg/sql/tests_test.newKV")
b.Run(kvTyp, func(b *testing.B) {
for _, rows := range []int{1, 10, 100, 1000, 10000} {
b.Run(fmt.Sprintf("rows=%d", rows), func(b *testing.B) {
kv := kvFn(b)
defer kv.done()
if err := kv.prep(rows, i != 0 /* Insert */ && i != 2 /* Delete */); err != nil {
b.Fatal(err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
if err := opFn(kv, rows, i); err != nil {
b.Fatal(err)
}
}
b.StopTimer()
})
}
})
}
})
}
}
// This is a narrower and tweaked version of BenchmarkKV/Update/SQL that does
// SQL queries of the form UPDATE bench.kv SET v = v + 1 WHERE k IN (...).
// This will eventually be merged back into BenchmarkKV above, but we are
// keeping it separate for now since changing BenchmarkKV reasonably for all
// numbers of rows is tricky due to the optimization that switches scans for a
// large set specified by the SQL IN operator to full table scans that are not
// bounded by the [smallest, largest] key in the set.
//
// TODO(sumeer): The wide scan issue seems fixed by running "ANALYZE bench.kv"
// in kvSQL.prep. Confirm that this is indeed sufficient, and also change
// kvNative to be consistent with kvSQL.
//
// This benchmark does updates to 100 existing rows in a table. The number of
// versions is limited, to be more realistic (unlike BenchmarkKV which keeps
// updating the same rows for all benchmark iterations).
//
// The benchmarks stresses KV and storage performance involving medium size
// batches of work (in this case batches of 100, due to the 100 rows being
// updated), and can be considered a reasonable proxy for KV and storage
// performance (and to target improvements in those layers). We are not
// focusing on smaller batches because latency improvements for queries that
// are small and already fast are not really beneficial to the user.
// Specifically, these transactional update queries run as a 1PC transaction
// and do two significant pieces of work in storage:
// - A read-only batch with 100 ScanRequests (this should eventually be
// optimized by SQL to 100 GetRequests
// https://github.com/cockroachdb/cockroach/issues/46758). The spans in the
// batch are in sorted order. At the storage layer, the same iterator is
// reused across the requests in a batch, and results in the following
// sequence of calls repeated a 100 times: SetBounds, SeekGE, <iterate>.
// The <iterate> part is looking for the next MVCCKey (not version) within
// the span, and will not find such a key, but needs to step over the
// versions of the key that it did find. This exercises the
// pebbleMVCCScanner's itersBeforeSeek optimization, and will only involve
// Next calls if the versions are <= 5. Else it will Seek after doing Next 5
// times. That is, if there are k versions per key and k <= 5, <iterate> will
// be k Next calls. If k > 5, there will be 5 Next calls followed by a
// SeekGE. The maxVersions=8 benchmark below has some iterations that will
// need to do this seek.
// - A write batch with 100 PutRequests, again in sorted order. At
// the storage layer, the same iterator will get reused across the requests
// in a batch, and results in 100 SeekPrefixGE calls to that iterator.
// Note that in this case the Distinct batch optimization is not being used.
// Even the experimental approach in
// https://github.com/sumeerbhola/cockroach/commit/eeeec51bd40ef47e743dc0c9ca47cf15710bae09
// indicates that we cannot use an unindexed Pebble batch (which would have
// been an optimization).
// This workload has keys that are clustered in the storage key space. Also,
// the volume of data is small, so the Pebble iterator stack is not deep. Both
// these things may not be representative of the real world. I like to run
// this with -benchtime 5000x which increases the amount of data in the
// engine, and causes data to be spilled from the memtable into sstables, and
// keeps the engine size consistent across runs.
//
// TODO(sumeer): consider disabling load-based splitting so that tests with
// large number of iterations, and hence large volume of data, stay
// predictable.
func BenchmarkKVAndStorageUsingSQL(b *testing.B) {
defer log.Scope(b).Close(b)
const rowsToUpdate = 100
for _, maxVersions := range []int{2, 4, 8} {
b.Run(fmt.Sprintf("versions=%d", maxVersions), func(b *testing.B) {
kv := newKVSQL(b).(*kvSQL)
defer kv.done()
numUpdatesToRow := maxVersions - 1
// We only need ceil(b.N/numUpdatesToRow) * rowsToUpdate, but create the
// maximum number of rows needed by the smallest setting of maxVersions
// so that the comparison across settings is not as affected by the size
// of the engine. Otherwise the benchmark with maxVersions=2 creates
// more keys, resulting in more files in the engine, which makes it
// slower.
rowsToInit := b.N * rowsToUpdate
if err := kv.prep(rowsToInit, true); err != nil {
b.Fatal(err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
if err := kv.UpdateWithShift(rowsToUpdate, i/numUpdatesToRow); err != nil {
b.Fatal(err)
}
}
b.StopTimer()
})
}
}
| pkg/sql/tests/kv_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/4a3592d16fc45d28640451115293119b8ff3cfe8 | [
0.0015653157606720924,
0.00032119412207975984,
0.00016166074783541262,
0.0001710098295006901,
0.00033771462040022016
] |
{
"id": 1,
"code_window": [
"\tTxn *kv.Txn\n",
"\n",
"\t// LocalProcs is an array of planNodeToRowSource processors. It's in order and\n",
"\t// will be indexed into by the RowSourceIdx field in LocalPlanNodeSpec.\n",
"\tLocalProcs []execinfra.LocalProcessor\n",
"\n",
"\t// PreserveFlowSpecs is true when the flow setup code needs to be careful\n",
"\t// when modifying the specifications of processors.\n",
"\tPreserveFlowSpecs bool\n",
"}\n",
"\n",
"// MustUseLeafTxn returns true if a LeafTxn must be used. It is valid to call\n",
"// this method only after IsLocal and HasConcurrency have been set correctly.\n",
"func (l LocalState) MustUseLeafTxn() bool {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/distsql/server.go",
"type": "replace",
"edit_start_line_idx": 553
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
export * from "./infoTooltip";
| pkg/ui/workspaces/db-console/src/components/infoTooltip/index.tsx | 0 | https://github.com/cockroachdb/cockroach/commit/4a3592d16fc45d28640451115293119b8ff3cfe8 | [
0.00017820161883719265,
0.00017235595441889018,
0.0001665102900005877,
0.00017235595441889018,
0.000005845664418302476
] |
{
"id": 1,
"code_window": [
"\tTxn *kv.Txn\n",
"\n",
"\t// LocalProcs is an array of planNodeToRowSource processors. It's in order and\n",
"\t// will be indexed into by the RowSourceIdx field in LocalPlanNodeSpec.\n",
"\tLocalProcs []execinfra.LocalProcessor\n",
"\n",
"\t// PreserveFlowSpecs is true when the flow setup code needs to be careful\n",
"\t// when modifying the specifications of processors.\n",
"\tPreserveFlowSpecs bool\n",
"}\n",
"\n",
"// MustUseLeafTxn returns true if a LeafTxn must be used. It is valid to call\n",
"// this method only after IsLocal and HasConcurrency have been set correctly.\n",
"func (l LocalState) MustUseLeafTxn() bool {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/distsql/server.go",
"type": "replace",
"edit_start_line_idx": 553
} | # Test splits for simple tables with indexes, dropping some and then adding
# some new others.
exec-sql
CREATE DATABASE db;
CREATE TABLE db.t(i INT PRIMARY KEY, j INT);
----
# We should observe splits at the table start, index start, and start of
# (non-existent) next index.
splits database=db table=t
----
+ 1 between start of table and start of 1st index
+ 1 for 1st index
+ 1 between end of 1st index and end of table
= 3
# Create a couple of indexes.
exec-sql
CREATE INDEX idx2 ON db.t (j);
CREATE INDEX idx3 ON db.t (j);
----
# We should observe splits for each one, in addition to what we had earlier. We
# should also observe gaps between indexes.
splits database=db table=t
----
+ 1 between start of table and start of 1st index
+ 1 for 1st index
+ 1 for 2nd index
+ 1 for 3rd index
+ 2 gap(s) between 3 indexes
+ 1 between end of 3rd index and end of table
= 7
# Drop an index to create a "gap" in the keyspace.
exec-sql
DROP INDEX db.t@idx2;
----
# The gap should appear.
splits database=db table=t
----
+ 1 between start of table and start of 1st index
+ 1 for 1st index
+ 1 for 2nd index
+ 1 gap(s) between 2 indexes
+ 1 between end of 2nd index and end of table
= 5
# Create another index to make sure it appears as expected.
exec-sql
CREATE INDEX idx6 ON db.t (j);
----
splits database=db table=t
----
+ 1 between start of table and start of 1st index
+ 1 for 1st index
+ 1 for 2nd index
+ 1 for 3rd index
+ 2 gap(s) between 3 indexes
+ 1 between end of 3rd index and end of table
= 7
| pkg/ccl/spanconfigccl/spanconfigsplitterccl/testdata/indexes | 0 | https://github.com/cockroachdb/cockroach/commit/4a3592d16fc45d28640451115293119b8ff3cfe8 | [
0.00017027862486429513,
0.00016720242274459451,
0.0001643789146328345,
0.00016767288616392761,
0.0000019949563920818036
] |
{
"id": 2,
"code_window": [
"\tlocalState.IsLocal = planCtx.isLocal\n",
"\tlocalState.Txn = txn\n",
"\tlocalState.LocalProcs = plan.LocalProcessors\n",
"\t// If we need to perform some operation on the flow specs, we want to\n",
"\t// preserve the specs during the flow setup.\n",
"\tlocalState.PreserveFlowSpecs = planCtx.saveFlows != nil\n",
"\t// If we have access to a planner and are currently being used to plan\n",
"\t// statements in a user transaction, then take the descs.Collection to resolve\n",
"\t// types with during flow execution. This is necessary to do in the case of\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/distsql_running.go",
"type": "replace",
"edit_start_line_idx": 520
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// This file lives here instead of sql/flowinfra to avoid an import cycle.
package execinfra
import (
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/evalcatalog"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/stop"
)
// FlowCtx encompasses the configuration parameters needed for various flow
// components.
type FlowCtx struct {
AmbientContext log.AmbientContext
Cfg *ServerConfig
// ID is a unique identifier for a flow.
ID execinfrapb.FlowID
// EvalCtx is used by all the processors in the flow to evaluate expressions.
// Processors that intend to evaluate expressions with this EvalCtx should
// get a copy with NewEvalCtx instead of storing a pointer to this one
	// directly (since some processors mutate the EvalContext they use).
//
// TODO(andrei): Get rid of this field and pass a non-shared EvalContext to
// cores of the processors that need it.
EvalCtx *eval.Context
// The transaction in which kv operations performed by processors in the flow
// must be performed. Processors in the Flow will use this txn concurrently.
// This field is generally not nil, except for flows that don't run in a
// higher-level txn (like backfills).
Txn *kv.Txn
// MakeLeafTxn returns a new LeafTxn, different from Txn.
MakeLeafTxn func() (*kv.Txn, error)
// Descriptors is used to look up leased table descriptors and to construct
// transaction bound TypeResolvers to resolve type references during flow
// setup. It is not safe for concurrent use and is intended to be used only
// during flow setup and initialization. The Descriptors object is initialized
// when the FlowContext is created on the gateway node using the planner's
	// descs.Collection and is created on remote nodes with a new descs.Collection.
// In the latter case, after the flow is complete, all descriptors leased from
// this object must be released.
Descriptors *descs.Collection
// EvalCatalogBuiltins is initialized if the flow context is remote and the
// above descs.Collection is non-nil. It is referenced in the eval.Context
// in order to provide catalog access to builtins.
EvalCatalogBuiltins evalcatalog.Builtins
// IsDescriptorsCleanupRequired is set if Descriptors needs to release the
// leases it acquired after the flow is complete.
IsDescriptorsCleanupRequired bool
// nodeID is the ID of the node on which the processors using this FlowCtx
// run.
NodeID *base.SQLIDContainer
// TraceKV is true if KV tracing was requested by the session.
TraceKV bool
// CollectStats is true if execution stats collection was requested.
CollectStats bool
// Local is true if this flow is being run as part of a local-only query.
Local bool
// Gateway is true if this flow is being run on the gateway node.
Gateway bool
// DiskMonitor is this flow's disk monitor. All disk usage for this flow must
// be registered through this monitor.
DiskMonitor *mon.BytesMonitor
// PreserveFlowSpecs is true when the flow setup code needs to be careful
// when modifying the specifications of processors.
PreserveFlowSpecs bool
}
// NewEvalCtx returns a modifiable copy of the FlowCtx's EvalContext.
// Processors should use this method any time they need to store a pointer to
// the EvalContext, since processors may mutate the EvalContext. Specifically,
// every processor that runs ProcOutputHelper.Init must pass in a modifiable
// EvalContext, since it stores that EvalContext in its exprHelpers and mutates
// them at runtime to ensure expressions are evaluated with the correct indexed
// var context.
func (ctx *FlowCtx) NewEvalCtx() *eval.Context {
evalCopy := ctx.EvalCtx.Copy()
return evalCopy
}
// TestingKnobs returns the distsql testing knobs for this flow context.
func (ctx *FlowCtx) TestingKnobs() TestingKnobs {
return ctx.Cfg.TestingKnobs
}
// Stopper returns the stopper for this flowCtx.
func (ctx *FlowCtx) Stopper() *stop.Stopper {
return ctx.Cfg.Stopper
}
// Codec returns the SQL codec for this flowCtx.
func (ctx *FlowCtx) Codec() keys.SQLCodec {
return ctx.EvalCtx.Codec
}
// TableDescriptor returns a catalog.TableDescriptor object for the given
// descriptor proto, using the descriptors collection if it is available.
func (ctx *FlowCtx) TableDescriptor(desc *descpb.TableDescriptor) catalog.TableDescriptor {
if desc == nil {
return nil
}
if ctx != nil && ctx.Descriptors != nil && ctx.Txn != nil {
leased, _ := ctx.Descriptors.GetLeasedImmutableTableByID(ctx.EvalCtx.Ctx(), ctx.Txn, desc.ID)
if leased != nil && leased.GetVersion() == desc.Version {
return leased
}
}
return tabledesc.NewUnsafeImmutable(desc)
}
// NewTypeResolver creates a new TypeResolver that is bound under the input
// transaction. It returns a nil resolver if the FlowCtx doesn't hold a
// descs.Collection object.
func (ctx *FlowCtx) NewTypeResolver(txn *kv.Txn) descs.DistSQLTypeResolver {
if ctx == nil || ctx.Descriptors == nil {
return descs.DistSQLTypeResolver{}
}
return descs.NewDistSQLTypeResolver(ctx.Descriptors, txn)
}
// NewSemaContext creates a new SemaContext with a TypeResolver bound to the
// input transaction.
func (ctx *FlowCtx) NewSemaContext(txn *kv.Txn) *tree.SemaContext {
resolver := ctx.NewTypeResolver(txn)
semaCtx := tree.MakeSemaContext()
semaCtx.TypeResolver = &resolver
return &semaCtx
}
// ProcessorComponentID returns a ComponentID for the given processor in this
// flow.
func (ctx *FlowCtx) ProcessorComponentID(procID int32) execinfrapb.ComponentID {
return execinfrapb.ProcessorComponentID(ctx.NodeID.SQLInstanceID(), ctx.ID, procID)
}
// StreamComponentID returns a ComponentID for the given stream in this flow.
// The stream must originate from the node associated with this FlowCtx.
func (ctx *FlowCtx) StreamComponentID(streamID execinfrapb.StreamID) execinfrapb.ComponentID {
return execinfrapb.StreamComponentID(ctx.NodeID.SQLInstanceID(), ctx.ID, streamID)
}
| pkg/sql/execinfra/flow_context.go | 1 | https://github.com/cockroachdb/cockroach/commit/4a3592d16fc45d28640451115293119b8ff3cfe8 | [
0.005991545505821705,
0.0015291079180315137,
0.00016862944175954908,
0.0009127521188929677,
0.0015681541990488768
] |
{
"id": 2,
"code_window": [
"\tlocalState.IsLocal = planCtx.isLocal\n",
"\tlocalState.Txn = txn\n",
"\tlocalState.LocalProcs = plan.LocalProcessors\n",
"\t// If we need to perform some operation on the flow specs, we want to\n",
"\t// preserve the specs during the flow setup.\n",
"\tlocalState.PreserveFlowSpecs = planCtx.saveFlows != nil\n",
"\t// If we have access to a planner and are currently being used to plan\n",
"\t// statements in a user transaction, then take the descs.Collection to resolve\n",
"\t// types with during flow execution. This is necessary to do in the case of\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/distsql_running.go",
"type": "replace",
"edit_start_line_idx": 520
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package util
import (
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
)
func TestEveryN(t *testing.T) {
start := timeutil.Now()
en := EveryN{N: time.Minute}
testCases := []struct {
t time.Duration // time since start
expected bool
}{
{0, true}, // the first attempt to log should always succeed
{0, false},
{time.Second, false},
{time.Minute - 1, false},
{time.Minute, true},
{time.Minute, false},
{time.Minute + 30*time.Second, false},
{10 * time.Minute, true},
{10 * time.Minute, false},
{10*time.Minute + 59*time.Second, false},
{11 * time.Minute, true},
}
for _, tc := range testCases {
if a, e := en.ShouldProcess(start.Add(tc.t)), tc.expected; a != e {
t.Errorf("ShouldProcess(%v) got %v, want %v", tc.t, a, e)
}
}
}
| pkg/util/every_n_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/4a3592d16fc45d28640451115293119b8ff3cfe8 | [
0.00017969717737287283,
0.00017333836876787245,
0.00017038712394423783,
0.00017117641982622445,
0.0000035530365494196303
] |
{
"id": 2,
"code_window": [
"\tlocalState.IsLocal = planCtx.isLocal\n",
"\tlocalState.Txn = txn\n",
"\tlocalState.LocalProcs = plan.LocalProcessors\n",
"\t// If we need to perform some operation on the flow specs, we want to\n",
"\t// preserve the specs during the flow setup.\n",
"\tlocalState.PreserveFlowSpecs = planCtx.saveFlows != nil\n",
"\t// If we have access to a planner and are currently being used to plan\n",
"\t// statements in a user transaction, then take the descs.Collection to resolve\n",
"\t// types with during flow execution. This is necessary to do in the case of\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/distsql_running.go",
"type": "replace",
"edit_start_line_idx": 520
} | debug doctor examine zipdir testdata/doctor/debugzip
----
debug doctor examine zipdir testdata/doctor/debugzip 21.1-52
WARNING: errors occurred during the production of system.jobs.txt, contents may be missing or incomplete.
Examining 37 descriptors and 42 namespace entries...
ParentID 52, ParentSchemaID 29: relation "users" (53): referenced database ID 52: referenced descriptor not found
ParentID 52, ParentSchemaID 29: relation "vehicles" (54): referenced database ID 52: referenced descriptor not found
ParentID 52, ParentSchemaID 29: relation "rides" (55): referenced database ID 52: referenced descriptor not found
ParentID 52, ParentSchemaID 29: relation "vehicle_location_histories" (56): referenced database ID 52: referenced descriptor not found
ParentID 52, ParentSchemaID 29: relation "promo_codes" (57): referenced database ID 52: referenced descriptor not found
ParentID 52, ParentSchemaID 29: relation "user_promo_codes" (58): referenced database ID 52: referenced descriptor not found
ParentID 0, ParentSchemaID 0: namespace entry "movr" (52): descriptor not found
Examining 2 jobs...
job 587337426984566785: running schema change GC refers to missing table descriptor(s) [59]; existing descriptors that still need to be dropped []; job safe to delete: true.
ERROR: validation failed
| pkg/cli/testdata/doctor/test_examine_zipdir | 0 | https://github.com/cockroachdb/cockroach/commit/4a3592d16fc45d28640451115293119b8ff3cfe8 | [
0.000176036061020568,
0.00017496402142569423,
0.00017389198183082044,
0.00017496402142569423,
0.000001072039594873786
] |
{
"id": 2,
"code_window": [
"\tlocalState.IsLocal = planCtx.isLocal\n",
"\tlocalState.Txn = txn\n",
"\tlocalState.LocalProcs = plan.LocalProcessors\n",
"\t// If we need to perform some operation on the flow specs, we want to\n",
"\t// preserve the specs during the flow setup.\n",
"\tlocalState.PreserveFlowSpecs = planCtx.saveFlows != nil\n",
"\t// If we have access to a planner and are currently being used to plan\n",
"\t// statements in a user transaction, then take the descs.Collection to resolve\n",
"\t// types with during flow execution. This is necessary to do in the case of\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/distsql_running.go",
"type": "replace",
"edit_start_line_idx": 520
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package faker
import (
"fmt"
"strconv"
"golang.org/x/exp/rand"
)
type addressFaker struct {
streetAddress *weightedEntries
streetSuffix *weightedEntries
name nameFaker
}
// StreetAddress returns a random en_US street address.
func (f *addressFaker) StreetAddress(rng *rand.Rand) string {
return f.streetAddress.Rand(rng).(func(rng *rand.Rand) string)(rng)
}
func (f *addressFaker) buildingNumber(rng *rand.Rand) string {
return strconv.Itoa(randInt(rng, 1000, 99999))
}
func (f *addressFaker) streetName(rng *rand.Rand) string {
return fmt.Sprintf(`%s %s`, f.firstOrLastName(rng), f.streetSuffix.Rand(rng))
}
func (f *addressFaker) firstOrLastName(rng *rand.Rand) string {
switch rng.Intn(3) {
case 0:
return f.name.firstNameFemale.Rand(rng).(string)
case 1:
return f.name.firstNameMale.Rand(rng).(string)
case 2:
return f.name.lastName.Rand(rng).(string)
}
panic(`unreachable`)
}
func secondaryAddress(rng *rand.Rand) string {
switch rng.Intn(2) {
case 0:
return fmt.Sprintf(`Apt. %d`, rng.Intn(100))
case 1:
return fmt.Sprintf(`Suite %d`, rng.Intn(100))
}
panic(`unreachable`)
}
func newAddressFaker(name nameFaker) addressFaker {
f := addressFaker{name: name}
f.streetSuffix = streetSuffix()
f.streetAddress = makeWeightedEntries(
func(rng *rand.Rand) string {
return fmt.Sprintf(`%s %s`, f.buildingNumber(rng), f.streetName(rng))
}, 0.5,
func(rng *rand.Rand) string {
return fmt.Sprintf(`%s %s %s`,
f.buildingNumber(rng), f.streetName(rng), secondaryAddress(rng))
}, 0.5,
)
return f
}
| pkg/workload/faker/address.go | 0 | https://github.com/cockroachdb/cockroach/commit/4a3592d16fc45d28640451115293119b8ff3cfe8 | [
0.00017916168144438416,
0.00017143282457254827,
0.00016513376613147557,
0.000171038816915825,
0.000004816343334823614
] |
{
"id": 3,
"code_window": [
"\t// DiskMonitor is this flow's disk monitor. All disk usage for this flow must\n",
"\t// be registered through this monitor.\n",
"\tDiskMonitor *mon.BytesMonitor\n",
"\n",
"\t// PreserveFlowSpecs is true when the flow setup code needs to be careful\n",
"\t// when modifying the specifications of processors.\n",
"\tPreserveFlowSpecs bool\n",
"}\n",
"\n",
"// NewEvalCtx returns a modifiable copy of the FlowCtx's EvalContext.\n",
"// Processors should use this method any time they need to store a pointer to\n",
"// the EvalContext, since processors may mutate the EvalContext. Specifically,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/execinfra/flow_context.go",
"type": "replace",
"edit_start_line_idx": 97
} | // Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package distsql
import (
"context"
"io"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/gossip"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/colflow"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra/execopnode"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/faketreeeval"
"github.com/cockroachdb/cockroach/pkg/sql/flowinfra"
"github.com/cockroachdb/cockroach/pkg/sql/rowflow"
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/cockroach/pkg/util/tracing/grpcinterceptor"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/redact"
)
// minFlowDrainWait is the minimum amount of time a draining server allows for
// any incoming flows to be registered. It acts as a grace period in which the
// draining server waits for its gossiped draining state to be received by other
// nodes.
const minFlowDrainWait = 1 * time.Second
// MultiTenancyIssueNo is the issue tracking DistSQL's Gossip and
// NodeID dependencies.
//
// See https://github.com/cockroachdb/cockroach/issues/47900.
const MultiTenancyIssueNo = 47900
var noteworthyMemoryUsageBytes = envutil.EnvOrDefaultInt64("COCKROACH_NOTEWORTHY_DISTSQL_MEMORY_USAGE", 1024*1024 /* 1MB */)
// ServerImpl implements the server for the distributed SQL APIs.
type ServerImpl struct {
execinfra.ServerConfig
flowRegistry *flowinfra.FlowRegistry
flowScheduler *flowinfra.FlowScheduler
memMonitor *mon.BytesMonitor
regexpCache *tree.RegexpCache
}
var _ execinfrapb.DistSQLServer = &ServerImpl{}
// NewServer instantiates a DistSQLServer.
func NewServer(
ctx context.Context, cfg execinfra.ServerConfig, flowScheduler *flowinfra.FlowScheduler,
) *ServerImpl {
ds := &ServerImpl{
ServerConfig: cfg,
regexpCache: tree.NewRegexpCache(512),
flowRegistry: flowinfra.NewFlowRegistry(),
flowScheduler: flowScheduler,
memMonitor: mon.NewMonitor(
"distsql",
mon.MemoryResource,
cfg.Metrics.CurBytesCount,
cfg.Metrics.MaxBytesHist,
-1, /* increment: use default block size */
noteworthyMemoryUsageBytes,
cfg.Settings,
),
}
ds.memMonitor.StartNoReserved(ctx, cfg.ParentMemoryMonitor)
// We have to initialize the flow scheduler at the same time we're creating
// the DistSQLServer because the latter will be registered as a gRPC service
// right away, so the RPCs might start coming in pretty much right after the
// current method returns. See #66330.
ds.flowScheduler.Init(ds.Metrics)
return ds
}
// Start launches workers for the server.
//
// Note that the initialization of the server required for performing the
// incoming RPCs needs to go into NewServer above because once that method
// returns, the server is registered as a gRPC service and needs to be fully
// initialized. For example, the initialization of the flow scheduler has to
// happen in NewServer.
func (ds *ServerImpl) Start() {
// Gossip the version info so that other nodes don't plan incompatible flows
// for us.
if g, ok := ds.ServerConfig.Gossip.Optional(MultiTenancyIssueNo); ok {
if nodeID, ok := ds.ServerConfig.NodeID.OptionalNodeID(); ok {
if err := g.AddInfoProto(
gossip.MakeDistSQLNodeVersionKey(base.SQLInstanceID(nodeID)),
&execinfrapb.DistSQLVersionGossipInfo{
Version: execinfra.Version,
MinAcceptedVersion: execinfra.MinAcceptedVersion,
},
0, // ttl - no expiration
); err != nil {
panic(err)
}
}
}
if err := ds.setDraining(false); err != nil {
panic(err)
}
ds.flowScheduler.Start()
}
// NumRemoteFlowsInQueue returns the number of remote flows scheduled to run on
// this server which are currently in the queue of the flow scheduler.
func (ds *ServerImpl) NumRemoteFlowsInQueue() int {
return ds.flowScheduler.NumFlowsInQueue()
}
// NumRemoteRunningFlows returns the number of remote flows currently running on
// this server.
func (ds *ServerImpl) NumRemoteRunningFlows() int {
return ds.flowScheduler.NumRunningFlows()
}
// SetCancelDeadFlowsCallback sets a testing callback that will be executed by
// the flow scheduler at the end of CancelDeadFlows call. The callback must be
// concurrency-safe.
func (ds *ServerImpl) SetCancelDeadFlowsCallback(cb func(int)) {
ds.flowScheduler.TestingKnobs.CancelDeadFlowsCallback = cb
}
// TODO(yuzefovich): remove this setting in 23.1.
var cancelRunningQueriesAfterFlowDrainWait = settings.RegisterBoolSetting(
settings.TenantWritable,
"sql.distsql.drain.cancel_after_wait.enabled",
"determines whether queries that are still running on a node being drained "+
"are forcefully canceled after waiting the 'server.shutdown.query_wait' period",
true,
)
// Drain changes the node's draining state through gossip and drains the
// server's flowRegistry. See flowRegistry.Drain for more details.
func (ds *ServerImpl) Drain(
ctx context.Context, flowDrainWait time.Duration, reporter func(int, redact.SafeString),
) {
if err := ds.setDraining(true); err != nil {
log.Warningf(ctx, "unable to gossip distsql draining state: %v", err)
}
flowWait := flowDrainWait
minWait := minFlowDrainWait
if ds.ServerConfig.TestingKnobs.DrainFast {
flowWait = 0
minWait = 0
} else if g, ok := ds.Gossip.Optional(MultiTenancyIssueNo); !ok || len(g.Outgoing()) == 0 {
// If there is only one node in the cluster (us), there's no need to
// wait a minimum time for the draining state to be gossiped.
minWait = 0
}
cancelStillRunning := cancelRunningQueriesAfterFlowDrainWait.Get(&ds.Settings.SV)
ds.flowRegistry.Drain(flowWait, minWait, reporter, cancelStillRunning)
}
// setDraining changes the node's draining state through gossip to the provided
// state.
func (ds *ServerImpl) setDraining(drain bool) error {
nodeID, ok := ds.ServerConfig.NodeID.OptionalNodeID()
if !ok {
// Ignore draining requests when running on behalf of a tenant.
// NB: intentionally swallow the error or the server will fatal.
_ = MultiTenancyIssueNo // related issue
return nil
}
if g, ok := ds.ServerConfig.Gossip.Optional(MultiTenancyIssueNo); ok {
return g.AddInfoProto(
gossip.MakeDistSQLDrainingKey(base.SQLInstanceID(nodeID)),
&execinfrapb.DistSQLDrainingInfo{
Draining: drain,
},
0, // ttl - no expiration
)
}
return nil
}
// FlowVerIsCompatible checks a flow's version is compatible with this node's
// DistSQL version.
func FlowVerIsCompatible(
flowVer, minAcceptedVersion, serverVersion execinfrapb.DistSQLVersion,
) bool {
return flowVer >= minAcceptedVersion && flowVer <= serverVersion
}
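// Worked example (illustrative only; the version numbers are made up): with
// MinAcceptedVersion = 30 and Version = 40 on this node,
//
//	FlowVerIsCompatible(35, 30, 40) // true: 35 falls within [30, 40]
//	FlowVerIsCompatible(29, 30, 40) // false: older than MinAcceptedVersion
//	FlowVerIsCompatible(41, 30, 40) // false: newer than this node's Version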
// setupFlow creates a Flow.
//
// Args:
// reserved: Specifies the upfront memory reservation that the flow takes
// ownership of. This account is already closed if an error is returned or
// will be closed through Flow.Cleanup.
// localState: Specifies if the flow runs entirely on this node and, if it does,
// specifies the txn and other attributes.
//
// Note: unless an error is returned, the returned context contains a span that
// must be finished through Flow.Cleanup.
func (ds *ServerImpl) setupFlow(
ctx context.Context,
parentSpan *tracing.Span,
parentMonitor *mon.BytesMonitor,
reserved *mon.BoundAccount,
req *execinfrapb.SetupFlowRequest,
rowSyncFlowConsumer execinfra.RowReceiver,
batchSyncFlowConsumer execinfra.BatchReceiver,
localState LocalState,
) (retCtx context.Context, _ flowinfra.Flow, _ execopnode.OpChains, retErr error) {
var sp *tracing.Span // will be Finish()ed by Flow.Cleanup()
var monitor *mon.BytesMonitor // will be closed in Flow.Cleanup()
onFlowCleanup := func() {
reserved.Close(retCtx)
}
// Make sure that we clean up all resources (which in the happy case are
// cleaned up in Flow.Cleanup()) if an error is encountered.
defer func() {
if retErr != nil {
if sp != nil {
sp.Finish()
}
if monitor != nil {
monitor.Stop(ctx)
}
onFlowCleanup()
retCtx = tracing.ContextWithSpan(ctx, nil)
}
}()
if !FlowVerIsCompatible(req.Version, execinfra.MinAcceptedVersion, execinfra.Version) {
err := errors.Errorf(
"version mismatch in flow request: %d; this node accepts %d through %d",
req.Version, execinfra.MinAcceptedVersion, execinfra.Version,
)
log.Warningf(ctx, "%v", err)
return ctx, nil, nil, err
}
const opName = "flow"
if parentSpan == nil {
ctx, sp = ds.Tracer.StartSpanCtx(ctx, opName)
} else if localState.IsLocal {
// If we're a local flow, we don't need a "follows from" relationship: we're
// going to run this flow synchronously.
// TODO(andrei): localState.IsLocal is not quite the right thing to use.
// If that field is unset, we might still want to create a child span if
// this flow is run synchronously.
ctx, sp = ds.Tracer.StartSpanCtx(ctx, opName, tracing.WithParent(parentSpan))
} else {
// We use FollowsFrom because the flow's span outlives the SetupFlow request.
ctx, sp = ds.Tracer.StartSpanCtx(
ctx,
opName,
tracing.WithParent(parentSpan),
tracing.WithFollowsFrom(),
)
}
monitor = mon.NewMonitor(
"flow",
mon.MemoryResource,
ds.Metrics.CurBytesCount,
ds.Metrics.MaxBytesHist,
-1, /* use default block size */
noteworthyMemoryUsageBytes,
ds.Settings,
)
monitor.Start(ctx, parentMonitor, reserved)
makeLeaf := func() (*kv.Txn, error) {
tis := req.LeafTxnInputState
if tis == nil {
// This must be a flow running for some bulk-io operation that doesn't use
// a txn.
return nil, nil
}
if tis.Txn.Status != roachpb.PENDING {
return nil, errors.AssertionFailedf("cannot create flow in non-PENDING txn: %s",
tis.Txn)
}
// The flow will run in a LeafTxn because we do not want each distributed
// Txn to heartbeat the transaction.
return kv.NewLeafTxn(ctx, ds.DB, roachpb.NodeID(req.Flow.Gateway), tis), nil
}
var evalCtx *eval.Context
var leafTxn *kv.Txn
if localState.EvalContext != nil {
// If we're running on the gateway, then we'll reuse already existing
// eval context. This is the case even if the query is distributed -
// this allows us to avoid an unnecessary deserialization of the eval
// context proto.
evalCtx = localState.EvalContext
// We're about to mutate the evalCtx and we want to restore its original
// state once the flow cleans up. Note that we could have made a copy of
// the whole evalContext, but that isn't free, so we choose to restore
// the original state in order to avoid performance regressions.
origMon := evalCtx.Mon
origTxn := evalCtx.Txn
oldOnFlowCleanup := onFlowCleanup
onFlowCleanup = func() {
evalCtx.Mon = origMon
evalCtx.Txn = origTxn
oldOnFlowCleanup()
}
evalCtx.Mon = monitor
if localState.MustUseLeafTxn() {
var err error
leafTxn, err = makeLeaf()
if err != nil {
return nil, nil, nil, err
}
// Update the Txn field early (before f.SetTxn() below) since some
// processors capture the field in their constructor (see #41992).
evalCtx.Txn = leafTxn
}
} else {
if localState.IsLocal {
return nil, nil, nil, errors.AssertionFailedf(
"EvalContext expected to be populated when IsLocal is set")
}
sd, err := sessiondata.UnmarshalNonLocal(req.EvalContext.SessionData)
if err != nil {
return ctx, nil, nil, err
}
// It's important to populate evalCtx.Txn early. We'll write it again in the
// f.SetTxn() call below, but by then it will already have been captured by
// processors.
leafTxn, err = makeLeaf()
if err != nil {
return nil, nil, nil, err
}
evalCtx = &eval.Context{
Settings: ds.ServerConfig.Settings,
SessionDataStack: sessiondata.NewStack(sd),
ClusterID: ds.ServerConfig.LogicalClusterID.Get(),
ClusterName: ds.ServerConfig.ClusterName,
NodeID: ds.ServerConfig.NodeID,
Codec: ds.ServerConfig.Codec,
ReCache: ds.regexpCache,
Mon: monitor,
Locality: ds.ServerConfig.Locality,
Tracer: ds.ServerConfig.Tracer,
// Most processors will override this Context with their own context in
			// ProcessorBase.StartInternal().
Context: ctx,
Planner: &faketreeeval.DummyEvalPlanner{},
PrivilegedAccessor: &faketreeeval.DummyPrivilegedAccessor{},
SessionAccessor: &faketreeeval.DummySessionAccessor{},
ClientNoticeSender: &faketreeeval.DummyClientNoticeSender{},
Sequence: &faketreeeval.DummySequenceOperators{},
Tenant: &faketreeeval.DummyTenantOperator{},
Regions: &faketreeeval.DummyRegionOperator{},
Txn: leafTxn,
SQLLivenessReader: ds.ServerConfig.SQLLivenessReader,
SQLStatsController: ds.ServerConfig.SQLStatsController,
SchemaTelemetryController: ds.ServerConfig.SchemaTelemetryController,
IndexUsageStatsController: ds.ServerConfig.IndexUsageStatsController,
RangeStatsFetcher: ds.ServerConfig.RangeStatsFetcher,
}
evalCtx.SetStmtTimestamp(timeutil.Unix(0 /* sec */, req.EvalContext.StmtTimestampNanos))
evalCtx.SetTxnTimestamp(timeutil.Unix(0 /* sec */, req.EvalContext.TxnTimestampNanos))
}
// Create the FlowCtx for the flow.
flowCtx := ds.newFlowContext(
ctx, req.Flow.FlowID, evalCtx, makeLeaf, req.TraceKV, req.CollectStats, localState, req.Flow.Gateway == ds.NodeID.SQLInstanceID(),
)
// req always contains the desired vectorize mode, regardless of whether we
// have non-nil localState.EvalContext. We don't want to update EvalContext
// itself when the vectorize mode needs to be changed because we would need
// to restore the original value which can have data races under stress.
isVectorized := req.EvalContext.SessionData.VectorizeMode != sessiondatapb.VectorizeOff
f := newFlow(
flowCtx, sp, ds.flowRegistry, rowSyncFlowConsumer, batchSyncFlowConsumer,
localState.LocalProcs, isVectorized, onFlowCleanup, req.StatementSQL,
)
opt := flowinfra.FuseNormally
if !localState.MustUseLeafTxn() {
// If there are no remote flows and the local flow doesn't have any
// concurrency, fuse everything. This is needed in order for us to be
		// able to use the RootTxn for the flow's execution; the RootTxn
// doesn't allow for concurrent operations. Local flows with mutations
// need to use the RootTxn.
opt = flowinfra.FuseAggressively
}
var opChains execopnode.OpChains
var err error
ctx, opChains, err = f.Setup(ctx, &req.Flow, opt)
if err != nil {
log.Errorf(ctx, "error setting up flow: %s", err)
return ctx, nil, nil, err
}
if !f.IsLocal() {
flowCtx.AmbientContext.AddLogTag("f", f.GetFlowCtx().ID.Short())
ctx = flowCtx.AmbientContext.AnnotateCtx(ctx)
telemetry.Inc(sqltelemetry.DistSQLExecCounter)
}
if f.IsVectorized() {
telemetry.Inc(sqltelemetry.VecExecCounter)
}
// Figure out what txn the flow needs to run in, if any. For gateway flows
// that have no remote flows and also no concurrency, the (root) txn comes
// from localState.Txn if we haven't already created a leaf txn. Otherwise,
// we create, if necessary, a txn based on the request's LeafTxnInputState.
var txn *kv.Txn
if localState.IsLocal && !f.ConcurrentTxnUse() && leafTxn == nil {
txn = localState.Txn
} else {
// If I haven't created the leaf already, do it now.
if leafTxn == nil {
leafTxn, err = makeLeaf()
if err != nil {
return nil, nil, nil, err
}
}
txn = leafTxn
}
// TODO(andrei): We're about to overwrite f.EvalCtx.Txn, but the existing
// field has already been captured by various processors and operators that
// have already made a copy of the EvalCtx. In case this is not the gateway,
// we had already set the LeafTxn on the EvalCtx above, so it's OK. In case
// this is the gateway, if we're running with the RootTxn, then again it was
// set above so it's fine. If we're using a LeafTxn on the gateway, though,
// then the processors have erroneously captured the Root. See #41992.
f.SetTxn(txn)
return ctx, f, opChains, nil
}
// newFlowContext creates a new FlowCtx that can be used during execution of
// a flow.
func (ds *ServerImpl) newFlowContext(
ctx context.Context,
id execinfrapb.FlowID,
evalCtx *eval.Context,
makeLeafTxn func() (*kv.Txn, error),
traceKV bool,
collectStats bool,
localState LocalState,
isGatewayNode bool,
) execinfra.FlowCtx {
// TODO(radu): we should sanity check some of these fields.
flowCtx := execinfra.FlowCtx{
AmbientContext: ds.AmbientContext,
Cfg: &ds.ServerConfig,
ID: id,
EvalCtx: evalCtx,
Txn: evalCtx.Txn,
MakeLeafTxn: makeLeafTxn,
NodeID: ds.ServerConfig.NodeID,
TraceKV: traceKV,
CollectStats: collectStats,
Local: localState.IsLocal,
Gateway: isGatewayNode,
// The flow disk monitor is a child of the server's and is closed on
// Cleanup.
DiskMonitor: execinfra.NewMonitor(
ctx, ds.ParentDiskMonitor, "flow-disk-monitor",
),
PreserveFlowSpecs: localState.PreserveFlowSpecs,
}
if localState.IsLocal && localState.Collection != nil {
// If we were passed a descs.Collection to use, then take it. In this case,
// the caller will handle releasing the used descriptors, so we don't need
		// to clean up the descriptors when cleaning up the flow.
flowCtx.Descriptors = localState.Collection
} else {
// If we weren't passed a descs.Collection, then make a new one. We are
// responsible for cleaning it up and releasing any accessed descriptors
// on flow cleanup.
flowCtx.Descriptors = ds.CollectionFactory.NewCollection(ctx, descs.NewTemporarySchemaProvider(evalCtx.SessionDataStack), nil /* monitor */)
flowCtx.IsDescriptorsCleanupRequired = true
flowCtx.EvalCatalogBuiltins.Init(evalCtx.Codec, evalCtx.Txn, flowCtx.Descriptors)
evalCtx.CatalogBuiltins = &flowCtx.EvalCatalogBuiltins
}
return flowCtx
}
func newFlow(
flowCtx execinfra.FlowCtx,
sp *tracing.Span,
flowReg *flowinfra.FlowRegistry,
rowSyncFlowConsumer execinfra.RowReceiver,
batchSyncFlowConsumer execinfra.BatchReceiver,
localProcessors []execinfra.LocalProcessor,
isVectorized bool,
onFlowCleanup func(),
statementSQL string,
) flowinfra.Flow {
base := flowinfra.NewFlowBase(flowCtx, sp, flowReg, rowSyncFlowConsumer, batchSyncFlowConsumer, localProcessors, onFlowCleanup, statementSQL)
if isVectorized {
return colflow.NewVectorizedFlow(base)
}
return rowflow.NewRowBasedFlow(base)
}
// LocalState carries information that is required to set up a flow with wrapped
// planNodes.
type LocalState struct {
EvalContext *eval.Context
// Collection is set if this flow is running on the gateway as part of user
// SQL session. It is the current descs.Collection of the planner executing
// the flow.
Collection *descs.Collection
// IsLocal is set if the flow is running on the gateway and there are no
// remote flows.
IsLocal bool
// HasConcurrency indicates whether the local flow uses multiple goroutines.
HasConcurrency bool
// Txn is filled in on the gateway only. It is the RootTxn that the query is running in.
// This will be used directly by the flow if the flow has no concurrency and IsLocal is set.
// If there is concurrency, a LeafTxn will be created.
Txn *kv.Txn
// LocalProcs is an array of planNodeToRowSource processors. It's in order and
// will be indexed into by the RowSourceIdx field in LocalPlanNodeSpec.
LocalProcs []execinfra.LocalProcessor
// PreserveFlowSpecs is true when the flow setup code needs to be careful
// when modifying the specifications of processors.
PreserveFlowSpecs bool
}
// MustUseLeafTxn returns true if a LeafTxn must be used. It is valid to call
// this method only after IsLocal and HasConcurrency have been set correctly.
func (l LocalState) MustUseLeafTxn() bool {
return !l.IsLocal || l.HasConcurrency
}
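// Illustrative note (not from the original source): the rule above reduces to
// a small truth table, for example
//
//	LocalState{IsLocal: true, HasConcurrency: false}.MustUseLeafTxn() // false: the RootTxn may be used
//	LocalState{IsLocal: true, HasConcurrency: true}.MustUseLeafTxn()  // true: local concurrency forces a LeafTxn
//	LocalState{IsLocal: false}.MustUseLeafTxn()                       // true: any remote flow forces a LeafTxn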
// SetupLocalSyncFlow sets up a synchronous flow on the current (planning) node,
// connecting the sync response output stream to the given RowReceiver. It's
// used by the gateway node to set up the flows local to it. The flow is not
// started. The flow will be associated with the given context.
// Note: the returned context contains a span that must be finished through
// Flow.Cleanup.
func (ds *ServerImpl) SetupLocalSyncFlow(
ctx context.Context,
parentMonitor *mon.BytesMonitor,
req *execinfrapb.SetupFlowRequest,
output execinfra.RowReceiver,
batchOutput execinfra.BatchReceiver,
localState LocalState,
) (context.Context, flowinfra.Flow, execopnode.OpChains, error) {
ctx, f, opChains, err := ds.setupFlow(
ctx, tracing.SpanFromContext(ctx), parentMonitor, &mon.BoundAccount{}, /* reserved */
req, output, batchOutput, localState,
)
if err != nil {
return nil, nil, nil, err
}
return ctx, f, opChains, err
}
// setupSpanForIncomingRPC creates a span for a SetupFlow RPC. The caller must
// finish the returned span.
//
// For most other RPCs, there's a gRPC server interceptor that opens spans based
// on trace info passed as gRPC metadata. But the SetupFlow RPC is common and so
// we have a more efficient implementation based on tracing information being
// passed in the request proto.
func (ds *ServerImpl) setupSpanForIncomingRPC(
ctx context.Context, req *execinfrapb.SetupFlowRequest,
) (context.Context, *tracing.Span) {
tr := ds.ServerConfig.AmbientContext.Tracer
parentSpan := tracing.SpanFromContext(ctx)
if parentSpan != nil {
// It's not expected to have a span in the context since the gRPC server
// interceptor that generally opens spans exempts this particular RPC. Note
// that this method is not called for flows local to the gateway.
return tr.StartSpanCtx(ctx, grpcinterceptor.SetupFlowMethodName,
tracing.WithParent(parentSpan),
tracing.WithServerSpanKind)
}
if !req.TraceInfo.Empty() {
return tr.StartSpanCtx(ctx, grpcinterceptor.SetupFlowMethodName,
tracing.WithRemoteParentFromTraceInfo(&req.TraceInfo),
tracing.WithServerSpanKind)
}
// For backwards compatibility with 21.2, if tracing info was passed as
// gRPC metadata, we use it.
remoteParent, err := grpcinterceptor.ExtractSpanMetaFromGRPCCtx(ctx, tr)
if err != nil {
log.Warningf(ctx, "error extracting tracing info from gRPC: %s", err)
}
return tr.StartSpanCtx(ctx, grpcinterceptor.SetupFlowMethodName,
tracing.WithRemoteParentFromSpanMeta(remoteParent),
tracing.WithServerSpanKind)
}
// SetupFlow is part of the execinfrapb.DistSQLServer interface.
func (ds *ServerImpl) SetupFlow(
ctx context.Context, req *execinfrapb.SetupFlowRequest,
) (*execinfrapb.SimpleResponse, error) {
log.VEventf(ctx, 1, "received SetupFlow request from n%v for flow %v", req.Flow.Gateway, req.Flow.FlowID)
_, rpcSpan := ds.setupSpanForIncomingRPC(ctx, req)
defer rpcSpan.Finish()
// Note: the passed context will be canceled when this RPC completes, so we
// can't associate it with the flow.
ctx = ds.AnnotateCtx(context.Background())
if err := func() error {
// Reserve some memory for this remote flow which is a poor man's
// admission control based on the RAM usage.
reserved := ds.memMonitor.MakeBoundAccount()
err := reserved.Grow(ctx, mon.DefaultPoolAllocationSize)
if err != nil {
return err
}
var f flowinfra.Flow
ctx, f, _, err = ds.setupFlow(
ctx, rpcSpan, ds.memMonitor, &reserved, req, nil, /* rowSyncFlowConsumer */
nil /* batchSyncFlowConsumer */, LocalState{},
)
if err != nil {
return err
}
return ds.flowScheduler.ScheduleFlow(ctx, f)
}(); err != nil {
// We return flow deployment errors in the response so that they are
// packaged correctly over the wire. If we return them directly to this
// function, they become part of an rpc error.
return &execinfrapb.SimpleResponse{Error: execinfrapb.NewError(ctx, err)}, nil
}
return &execinfrapb.SimpleResponse{}, nil
}
// CancelDeadFlows is part of the execinfrapb.DistSQLServer interface.
func (ds *ServerImpl) CancelDeadFlows(
_ context.Context, req *execinfrapb.CancelDeadFlowsRequest,
) (*execinfrapb.SimpleResponse, error) {
ds.flowScheduler.CancelDeadFlows(req)
return &execinfrapb.SimpleResponse{}, nil
}
func (ds *ServerImpl) flowStreamInt(
ctx context.Context, stream execinfrapb.DistSQL_FlowStreamServer,
) error {
// Receive the first message.
msg, err := stream.Recv()
if err != nil {
if err == io.EOF {
return errors.AssertionFailedf("missing header message")
}
return err
}
if msg.Header == nil {
return errors.AssertionFailedf("no header in first message")
}
flowID := msg.Header.FlowID
streamID := msg.Header.StreamID
if log.V(1) {
log.Infof(ctx, "connecting inbound stream %s/%d", flowID.Short(), streamID)
}
f, streamStrategy, cleanup, err := ds.flowRegistry.ConnectInboundStream(
ctx, flowID, streamID, stream, flowinfra.SettingFlowStreamTimeout.Get(&ds.Settings.SV),
)
if err != nil {
return err
}
defer cleanup()
log.VEventf(ctx, 1, "connected inbound stream %s/%d", flowID.Short(), streamID)
return streamStrategy.Run(f.AmbientContext.AnnotateCtx(ctx), stream, msg, f)
}
// FlowStream is part of the execinfrapb.DistSQLServer interface.
func (ds *ServerImpl) FlowStream(stream execinfrapb.DistSQL_FlowStreamServer) error {
ctx := ds.AnnotateCtx(stream.Context())
err := ds.flowStreamInt(ctx, stream)
if err != nil && log.V(2) {
// flowStreamInt may return an error during normal operation (e.g. a flow
// was canceled as part of a graceful teardown). Log this error at the INFO
// level behind a verbose flag for visibility.
log.Infof(ctx, "%v", err)
}
return err
}
| pkg/sql/distsql/server.go | 1 | https://github.com/cockroachdb/cockroach/commit/4a3592d16fc45d28640451115293119b8ff3cfe8 | [
0.009674468077719212,
0.0010089062852784991,
0.00016218169184867293,
0.00029241933953016996,
0.0018254476599395275
] |
{
"id": 3,
"code_window": [
"\t// DiskMonitor is this flow's disk monitor. All disk usage for this flow must\n",
"\t// be registered through this monitor.\n",
"\tDiskMonitor *mon.BytesMonitor\n",
"\n",
"\t// PreserveFlowSpecs is true when the flow setup code needs to be careful\n",
"\t// when modifying the specifications of processors.\n",
"\tPreserveFlowSpecs bool\n",
"}\n",
"\n",
"// NewEvalCtx returns a modifiable copy of the FlowCtx's EvalContext.\n",
"// Processors should use this method any time they need to store a pointer to\n",
"// the EvalContext, since processors may mutate the EvalContext. Specifically,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/execinfra/flow_context.go",
"type": "replace",
"edit_start_line_idx": 97
} | with_clause ::=
'WITH' ( ( ( table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) 'AS' '(' preparable_stmt ')' | table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) 'AS' ( 'MATERIALIZED' | 'NOT' 'MATERIALIZED' ) '(' preparable_stmt ')' ) ) ( ( ',' ( table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) 'AS' '(' preparable_stmt ')' | table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) 'AS' ( 'MATERIALIZED' | 'NOT' 'MATERIALIZED' ) '(' preparable_stmt ')' ) ) )* ) ( insert_stmt | update_stmt | delete_stmt | upsert_stmt | select_stmt )
| 'WITH' 'RECURSIVE' ( ( ( table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) 'AS' '(' preparable_stmt ')' | table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) 'AS' ( 'MATERIALIZED' | 'NOT' 'MATERIALIZED' ) '(' preparable_stmt ')' ) ) ( ( ',' ( table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) 'AS' '(' preparable_stmt ')' | table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) 'AS' ( 'MATERIALIZED' | 'NOT' 'MATERIALIZED' ) '(' preparable_stmt ')' ) ) )* ) ( insert_stmt | update_stmt | delete_stmt | upsert_stmt | select_stmt )
| docs/generated/sql/bnf/with_clause.bnf | 0 | https://github.com/cockroachdb/cockroach/commit/4a3592d16fc45d28640451115293119b8ff3cfe8 | [
0.00016526026593055576,
0.00016526026593055576,
0.00016526026593055576,
0.00016526026593055576,
0
] |
{
"id": 3,
"code_window": [
"\t// DiskMonitor is this flow's disk monitor. All disk usage for this flow must\n",
"\t// be registered through this monitor.\n",
"\tDiskMonitor *mon.BytesMonitor\n",
"\n",
"\t// PreserveFlowSpecs is true when the flow setup code needs to be careful\n",
"\t// when modifying the specifications of processors.\n",
"\tPreserveFlowSpecs bool\n",
"}\n",
"\n",
"// NewEvalCtx returns a modifiable copy of the FlowCtx's EvalContext.\n",
"// Processors should use this method any time they need to store a pointer to\n",
"// the EvalContext, since processors may mutate the EvalContext. Specifically,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/execinfra/flow_context.go",
"type": "replace",
"edit_start_line_idx": 97
} | template
package main
func a() {
x := b(true)
x = b(false)
}
// execgen:template<t>
func b(t bool) int {
if t {
x = 3
} else {
x = 4
}
return x
}
----
----
package main
func a() {
x := b_true()
x = b_false()
}
const _ = "template_b"
func b_true() int {
x = 3
return x
}
func b_false() int {
x = 4
return x
}
----
----
template
package main
func a() {
x := b(3, true, true)
x = b(6, true, false)
b(4, false, true)
b(5, false, false)
}
// execgen:inline
// execgen:template<t, u>
func b(a int, t bool, u bool) int {
var x int
if t {
x = 3
} else {
x = 4
if u {
x += 1
}
}
return x
}
----
----
package main
func a() {
x := b_true_true(3)
x = b_true_false(6)
b_false_true(4)
b_false_false(5)
}
// execgen:inline
const _ = "template_b"
// execgen:inline
func b_true_true(a int) int {
var x int
x = 3
return x
}
// execgen:inline
func b_true_false(a int) int {
var x int
x = 3
return x
}
// execgen:inline
func b_false_true(a int) int {
var x int
x = 4
x += 1
return x
}
// execgen:inline
func b_false_false(a int) int {
var x int
x = 4
return x
}
----
----
# Test templates calling each other.
template
package main
func main() {
a(1, true)
a(1, false)
}
// execgen:inline
// execgen:template<y>
func a(x int, y bool) {
b(x, y, true)
b(x, y, false)
}
// execgen:inline
// execgen:template<y, z>
func b(x int, y bool, z bool) int {
if y {
if z {
fmt.Println("y and z")
} else {
fmt.Println("y not z")
}
} else {
if z {
fmt.Println("not y and z")
} else {
fmt.Println("not y not z")
}
}
}
----
----
package main
func main() {
a_true(1)
a_false(1)
}
// execgen:inline
const _ = "template_a"
// execgen:inline
const _ = "template_b"
// execgen:inline
func a_true(x int) {
b_true_true(x)
b_true_false(x)
}
// execgen:inline
func a_false(x int) {
b_false_true(x)
b_false_false(x)
}
// execgen:inline
func b_true_true(x int) int {
fmt.Println("y and z")
}
// execgen:inline
func b_true_false(x int) int {
fmt.Println("y not z")
}
// execgen:inline
func b_false_true(x int) int {
fmt.Println("not y and z")
}
// execgen:inline
func b_false_false(x int) int {
fmt.Println("not y not z")
}
----
----
# Test templates calling each other in reverse order.
template
package main
func main() {
a(1, true)
a(1, false)
}
// execgen:inline
// execgen:template<y, z>
func b(x int, y bool, z bool) int {
if y {
if z {
fmt.Println("y and z")
} else {
fmt.Println("y not z")
}
} else {
if z {
fmt.Println("not y and z")
} else {
fmt.Println("not y not z")
}
}
}
// execgen:inline
// execgen:template<y>
func a(x int, y bool) {
b(x, y, true)
b(x, y, false)
}
----
----
package main
func main() {
a_true(1)
a_false(1)
}
// execgen:inline
const _ = "template_b"
// execgen:inline
const _ = "template_a"
// execgen:inline
func a_true(x int) {
b_true_true(x)
b_true_false(x)
}
// execgen:inline
func a_false(x int) {
b_false_true(x)
b_false_false(x)
}
// execgen:inline
func b_true_true(x int) int {
fmt.Println("y and z")
}
// execgen:inline
func b_true_false(x int) int {
fmt.Println("y not z")
}
// execgen:inline
func b_false_true(x int) int {
fmt.Println("not y and z")
}
// execgen:inline
func b_false_false(x int) int {
fmt.Println("not y not z")
}
----
----
# Test non-bool templates and execgen:switch
template
package main
func main() {
a(1, blah.Foo)
a(1, *blah.Bar)
a(1, *blah.Bar)
}
// execgen:inline
// execgen:template<y>
func a(x int, y blah.Derp) {
// execgen:switch
switch y {
case blah.Foo:
fmt.Println("foo")
b(x, true, y)
case *blah.Bar:
fmt.Println("bar")
b(x, false, y)
}
}
// execgen:template<b, y>
func b(x int, b bool, y blah.Derp) {
if !b {
switch y {
case blah.Foo:
fmt.Println("foo false")
case *blah.Bar:
fmt.Println("bar false")
}
} else {
switch y {
case blah.Foo:
fmt.Println("foo true")
case *blah.Bar:
fmt.Println("bar true")
}
}
}
----
----
package main
func main() {
a_blahDOTFoo(1)
a_STARblahDOTBar(1)
a_STARblahDOTBar(1)
}
// execgen:inline
const _ = "template_a"
const _ = "template_b"
// execgen:inline
func a_blahDOTFoo(x int) {
fmt.Println("foo")
b_true_blahDOTFoo(x)
}
// execgen:inline
func a_STARblahDOTBar(x int) {
fmt.Println("bar")
b_false_STARblahDOTBar(x)
}
func b_true_blahDOTFoo(x int) {
fmt.Println("foo true")
}
func b_false_STARblahDOTBar(x int) {
fmt.Println("bar false")
}
----
----
# Test an example type template
template
package main
// execgen:template<family, width>
func frobnicateColumn(col coldata.Vec, family types.Family, width int32) {
switch family {
case types.Int:
switch width {
case 32:
fmt.Println("i'm an int32!", col.Int32()[0])
}
case types.Interval, types.Interval2:
fmt.Println("I'm an interval!", col.Interval()[0])
}
}
func otherFunc(col coldata.Vec) {
frobnicateColumn(col, types.Int, 32)
frobnicateColumn(col, types.Interval, 0)
frobnicateColumn(col, types.Interval2, 0)
}
----
----
package main
const _ = "template_frobnicateColumn"
func otherFunc(col coldata.Vec) {
frobnicateColumn_typesDOTInt_32(col)
frobnicateColumn_typesDOTInterval_0(col)
frobnicateColumn_typesDOTInterval2_0(col)
}
func frobnicateColumn_typesDOTInt_32(col coldata.Vec) {
fmt.Println("i'm an int32!", col.Int32()[0])
}
func frobnicateColumn_typesDOTInterval_0(col coldata.Vec) {
fmt.Println("I'm an interval!", col.Interval()[0])
}
func frobnicateColumn_typesDOTInterval2_0(col coldata.Vec) {
fmt.Println("I'm an interval!", col.Interval()[0])
}
----
----
# Don't drop comments.
template
package main
func a() {
b(true)
b(false)
}
// execgen:template<t>
// execgen:inline
func b(t bool) int {
if t {
//gcassert:bce
}
col[i] = 20
}
----
----
package main
func a() {
b_true()
b_false()
}
// execgen:inline
const _ = "template_b"
// execgen:inline
func b_true() int {
//gcassert:bce
col[i] = 20
}
// execgen:inline
func b_false() int {
col[i] = 20
}
----
----
# Do not include comment decoration in template function names.
template
package main
func a() {
b(true /* t */)
}
// execgen:inline
// execgen:template<t>
func b(t bool) int {
if t {
return 0
} else {
return 1
}
}
----
----
package main
func a() {
b_true()
}
// execgen:inline
const _ = "template_b"
// execgen:inline
func b_true() int {
return 0
}
----
----
| pkg/sql/colexec/execgen/testdata/template | 0 | https://github.com/cockroachdb/cockroach/commit/4a3592d16fc45d28640451115293119b8ff3cfe8 | [
0.00017781311180442572,
0.00016919051995500922,
0.00015823224384803325,
0.00017067210865207016,
0.000004258503395249136
] |
{
"id": 3,
"code_window": [
"\t// DiskMonitor is this flow's disk monitor. All disk usage for this flow must\n",
"\t// be registered through this monitor.\n",
"\tDiskMonitor *mon.BytesMonitor\n",
"\n",
"\t// PreserveFlowSpecs is true when the flow setup code needs to be careful\n",
"\t// when modifying the specifications of processors.\n",
"\tPreserveFlowSpecs bool\n",
"}\n",
"\n",
"// NewEvalCtx returns a modifiable copy of the FlowCtx's EvalContext.\n",
"// Processors should use this method any time they need to store a pointer to\n",
"// the EvalContext, since processors may mutate the EvalContext. Specifically,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/execinfra/flow_context.go",
"type": "replace",
"edit_start_line_idx": 97
} | statement ok
SET CLUSTER SETTING sql.cross_db_views.enabled = TRUE
statement ok
SET CLUSTER SETTING sql.cross_db_sequence_references.enabled = TRUE
query TTTTT
SHOW DATABASES
----
defaultdb root NULL {} NULL
postgres root NULL {} NULL
system node NULL {} NULL
test root NULL {} NULL
query TTTB
SHOW GRANTS ON DATABASE test
----
test admin ALL true
test public CONNECT false
test root ALL true
statement ok
CREATE TABLE kv (
k INT PRIMARY KEY,
v INT
)
statement ok
INSERT INTO kv VALUES (1, 2), (3, 4), (5, 6), (7, 8)
query II rowsort
SELECT * FROM kv
----
1 2
3 4
5 6
7 8
statement ok
SET sql_safe_updates = TRUE;
statement error RENAME DATABASE on current database
ALTER DATABASE test RENAME TO u
statement ok
SET sql_safe_updates = FALSE;
ALTER DATABASE test RENAME TO u
statement error pgcode 3D000 database "test" does not exist
SELECT * FROM kv
statement error target database or schema does not exist
SHOW GRANTS ON DATABASE test
query TTTTT
SHOW DATABASES
----
defaultdb root NULL {} NULL
postgres root NULL {} NULL
system node NULL {} NULL
u root NULL {} NULL
# check the name in descriptor is also changed
query TTTB
SHOW GRANTS ON DATABASE u
----
u admin ALL true
u public CONNECT false
u root ALL true
statement ok
SET DATABASE = u
query II rowsort
SELECT * FROM kv
----
1 2
3 4
5 6
7 8
statement error pgcode 42601 empty database name
ALTER DATABASE "" RENAME TO u
statement error pgcode 42601 empty database name
ALTER DATABASE u RENAME TO ""
statement ok
ALTER DATABASE u RENAME TO u
statement ok
CREATE DATABASE t
statement error the new database name "u" already exists
ALTER DATABASE t RENAME TO u
statement ok
GRANT ALL ON DATABASE t TO testuser
user testuser
statement error must be owner of database t
ALTER DATABASE t RENAME TO v
query TTTTT
SHOW DATABASES
----
defaultdb root NULL {} NULL
postgres root NULL {} NULL
t root NULL {} NULL
u root NULL {} NULL
# Test that owners can rename databases as long as they have the CREATEDB
# privilege.
user root
statement ok
ALTER USER testuser CREATEDB
user testuser
statement ok
CREATE DATABASE testuserdb
statement ok
ALTER DATABASE testuserdb RENAME TO testuserdb2
user root
statement ok
ALTER USER testuser NOCREATEDB
user testuser
statement error permission denied to rename database
ALTER DATABASE testuserdb2 RENAME TO testuserdb3
user root
statement ok
DROP DATABASE testuserdb2
# Test that renames aren't allowed while views refer to any of a DB's tables,
# both for views in that database and for views in a different database.
statement ok
CREATE VIEW t.v AS SELECT k,v FROM u.kv
query TTTTIT
SHOW TABLES FROM u
----
public kv table root 0 NULL
statement error cannot rename database because relation "t.public.v" depends on relation "u.public.kv"
ALTER DATABASE u RENAME TO v
statement ok
DROP VIEW t.v
statement ok
ALTER DATABASE u RENAME TO v
statement ok
CREATE VIEW v.v AS SELECT k,v FROM v.kv
statement error cannot rename database because relation "v.public.v" depends on relation "v.public.kv"\s.*you can drop "v.public.v" instead
ALTER DATABASE v RENAME TO u
# Check that the default databases can be renamed like any other.
statement ok
ALTER DATABASE defaultdb RENAME TO w;
ALTER DATABASE postgres RENAME TO defaultdb;
ALTER DATABASE w RENAME TO postgres
query TTTTT
SHOW DATABASES
----
defaultdb root NULL {} NULL
postgres root NULL {} NULL
system node NULL {} NULL
t root NULL {} NULL
v root NULL {} NULL
statement ok
SET vectorize=on
query T
EXPLAIN ALTER DATABASE v RENAME TO x
----
distribution: local
vectorized: true
·
• alter database
statement ok
RESET vectorize
# Verify that the EXPLAIN above does not actually rename the database (#30543)
query TTTTT colnames
SHOW DATABASES
----
database_name owner primary_region regions survival_goal
defaultdb root NULL {} NULL
postgres root NULL {} NULL
system node NULL {} NULL
t root NULL {} NULL
v root NULL {} NULL
# Test dependent sequences on different databases upon renames
# are allowed now, as well as testing
# renaming databases with sequences in the same DB is successful.
subtest regression_45411
statement ok
CREATE DATABASE db1; CREATE SEQUENCE db1.seq
statement ok
CREATE DATABASE db2; CREATE TABLE db2.tbl (a int DEFAULT nextval('db1.seq'))
statement ok
ALTER DATABASE db1 RENAME TO db3
statement ok
DROP DATABASE db2 CASCADE;
statement ok
DROP DATABASE db3 CASCADE
statement ok
CREATE DATABASE db1;
statement ok
CREATE SEQUENCE db1.a_seq;
CREATE SEQUENCE db1.b_seq;
statement ok
USE db1;
statement ok
CREATE TABLE db1.a (a int default nextval('a_seq') + nextval('b_seq') + 1); ALTER DATABASE db1 RENAME TO db2; USE db2;
statement ok
DROP TABLE db2.a;
statement ok
CREATE TABLE db2.a (a int default nextval('a_seq') + nextval('db2.b_seq') + 1);
statement ok
ALTER DATABASE db2 RENAME TO db1;
statement ok
ALTER DATABASE db1 RENAME TO db2
statement ok
DROP TABLE db2.a;
statement ok
CREATE TABLE db2.a (a int default nextval('a_seq') + nextval('db2.public.b_seq') + 1);
statement ok
ALTER DATABASE db2 RENAME TO db1; ALTER DATABASE db1 RENAME TO db2
statement ok
DROP TABLE db2.a;
statement ok
CREATE TABLE db2.a (a int default nextval('a_seq') + nextval('public.b_seq') + 1);
statement ok
ALTER DATABASE db2 RENAME TO db1
statement ok
USE defaultdb; DROP DATABASE db1 CASCADE
| pkg/sql/logictest/testdata/logic_test/rename_database | 0 | https://github.com/cockroachdb/cockroach/commit/4a3592d16fc45d28640451115293119b8ff3cfe8 | [
0.00017423521785531193,
0.00016880770272109658,
0.00016254781803581864,
0.0001694895327091217,
0.0000033740584512997884
] |
{
"id": 0,
"code_window": [
"\t\thigh = kv.Key(high).PrefixNext()\n",
"\t}\n",
"\treturn low, high\n",
"}\n",
"\n",
"// SplitRangesBySign split the ranges into two parts:\n",
"// 1. signedRanges is less or equal than maxInt64\n",
"// 2. unsignedRanges is greater than maxInt64\n",
"// We do that because the encoding of tikv key takes every key as a int. As a result MaxUInt64 is indeed\n",
"// small than zero. So we must\n",
"// 1. pick the range that straddles the MaxInt64\n",
"// 2. split that range into two parts : smaller than max int64 and greater than it.\n",
"// 3. if the ascent order is required, return signed first, vice versa.\n",
"// 4. if no order is required, is better to return the unsigned one. That's because it's the normal order\n",
"// of tikv scan.\n",
"func SplitRangesBySign(ranges []*ranger.Range, keepOrder bool, desc bool, isCommonHandle bool) ([]*ranger.Range, []*ranger.Range) {\n",
"\tif isCommonHandle || len(ranges) == 0 || ranges[0].LowVal[0].Kind() == types.KindInt64 {\n",
"\t\treturn ranges, nil\n",
"\t}\n",
"\tidx := sort.Search(len(ranges), func(i int) bool { return ranges[i].HighVal[0].GetUint64() > math.MaxInt64 })\n",
"\tif idx == len(ranges) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// SplitRangesAcrossInt64Boundary split the ranges into two groups:\n",
"// 1. signedRanges is less or equal than MaxInt64\n",
"// 2. unsignedRanges is greater than MaxInt64\n",
"//\n",
"// We do this because every key of tikv is encoded as an int64. As a result, MaxUInt64 is small than zero when\n",
"// interpreted as an int64 variable.\n",
"//\n",
"// This function does the following:\n",
"// 1. split ranges into two groups as described above.\n",
"// 2. if there's a range that straddles the int64 boundary, split it into two ranges, which results in one smaller and\n",
"// one greater than MaxInt64.\n",
"//\n",
"// if `KeepOrder` is false, we merge the two groups of ranges into one group, to save an rpc call later\n",
"// if `desc` is false, return signed ranges first, vice versa.\n",
"func SplitRangesAcrossInt64Boundary(ranges []*ranger.Range, keepOrder bool, desc bool, isCommonHandle bool) ([]*ranger.Range, []*ranger.Range) {\n"
],
"file_path": "distsql/request_builder.go",
"type": "replace",
"edit_start_line_idx": 395
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"bytes"
"context"
"fmt"
"math"
"math/rand"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/cznic/mathutil"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/distsql"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/store/tikv"
tikvstore "github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/pingcap/tipb/go-tipb"
"go.uber.org/zap"
)
var _ Executor = &AnalyzeExec{}
// AnalyzeExec represents Analyze executor.
type AnalyzeExec struct {
baseExecutor
tasks []*analyzeTask
wg *sync.WaitGroup
opts map[ast.AnalyzeOptionType]uint64
}
var (
// RandSeed is the seed for randing package.
// It's public for test.
RandSeed = int64(1)
)
const (
maxRegionSampleSize = 1000
maxSketchSize = 10000
)
// Next implements the Executor Next interface.
func (e *AnalyzeExec) Next(ctx context.Context, req *chunk.Chunk) error {
concurrency, err := getBuildStatsConcurrency(e.ctx)
if err != nil {
return err
}
taskCh := make(chan *analyzeTask, len(e.tasks))
resultCh := make(chan analyzeResult, len(e.tasks))
e.wg.Add(concurrency)
for i := 0; i < concurrency; i++ {
go e.analyzeWorker(taskCh, resultCh, i == 0)
}
for _, task := range e.tasks {
statistics.AddNewAnalyzeJob(task.job)
}
for _, task := range e.tasks {
taskCh <- task
}
close(taskCh)
statsHandle := domain.GetDomain(e.ctx).StatsHandle()
panicCnt := 0
pruneMode := variable.PartitionPruneMode(e.ctx.GetSessionVars().PartitionPruneMode.Load())
	// needGlobalStats is used to indicate whether we should merge the partition-level stats to global-level stats.
needGlobalStats := pruneMode == variable.Dynamic
type globalStatsKey struct {
tableID int64
indexID int64
}
type globalStatsInfo struct {
isIndex int
// When the `isIndex == 0`, the idxID will be the column ID.
// Otherwise, the idxID will be the index ID.
idxID int64
statsVersion int
}
// globalStatsMap is a map used to store which partition tables and the corresponding indexes need global-level stats.
	// The key of the map is the structure used to store the tableID and indexID.
	// The value of the map is some additional information needed to build global-level stats.
globalStatsMap := make(map[globalStatsKey]globalStatsInfo)
	finishJobWithLogFn := func(ctx context.Context, job *statistics.AnalyzeJob, meetError bool) {
		if job == nil {
			return
		}
		job.Finish(meetError)
		logutil.Logger(ctx).Info(fmt.Sprintf("analyze table `%s`.`%s` has %s", job.DBName, job.TableName, job.State),
			zap.String("partition", job.PartitionName),
			zap.String("job info", job.JobInfo),
			zap.Time("start time", job.StartTime),
			zap.Time("end time", job.EndTime),
			zap.String("cost", job.EndTime.Sub(job.StartTime).String()))
	}
for panicCnt < concurrency {
result, ok := <-resultCh
if !ok {
break
}
if result.Err != nil {
err = result.Err
if err == errAnalyzeWorkerPanic {
panicCnt++
} else {
logutil.Logger(ctx).Error("analyze failed", zap.Error(err))
}
finishJobWithLogFn(ctx, result.job, true)
continue
}
statisticsID := result.TableID.GetStatisticsID()
for i, hg := range result.Hist {
if result.TableID.IsPartitionTable() && needGlobalStats {
				// If this result does not belong to an index, set idxID to -1 to distinguish it from index stats.
idxID := int64(-1)
if result.IsIndex != 0 {
idxID = hg.ID
}
globalStatsID := globalStatsKey{result.TableID.TableID, idxID}
if _, ok := globalStatsMap[globalStatsID]; !ok {
globalStatsMap[globalStatsID] = globalStatsInfo{result.IsIndex, hg.ID, result.StatsVer}
}
}
err1 := statsHandle.SaveStatsToStorage(statisticsID, result.Count, result.IsIndex, hg, result.Cms[i], result.TopNs[i], result.Fms[i], result.StatsVer, 1)
if err1 != nil {
err = err1
logutil.Logger(ctx).Error("save stats to storage failed", zap.Error(err))
finishJobWithLogFn(ctx, result.job, true)
continue
}
}
if err1 := statsHandle.SaveExtendedStatsToStorage(statisticsID, result.ExtStats, false); err1 != nil {
err = err1
logutil.Logger(ctx).Error("save extended stats to storage failed", zap.Error(err))
finishJobWithLogFn(ctx, result.job, true)
} else {
finishJobWithLogFn(ctx, result.job, false)
}
}
for _, task := range e.tasks {
statistics.MoveToHistory(task.job)
}
if err != nil {
return err
}
if needGlobalStats {
for globalStatsID, info := range globalStatsMap {
globalStats, err := statsHandle.MergePartitionStats2GlobalStatsByTableID(e.ctx, e.opts, infoschema.GetInfoSchema(e.ctx), globalStatsID.tableID, info.isIndex, info.idxID)
if err != nil {
if types.ErrPartitionStatsMissing.Equal(err) {
// When we find some partition-level stats are missing, we need to report warning.
e.ctx.GetSessionVars().StmtCtx.AppendWarning(err)
continue
}
return err
}
for i := 0; i < globalStats.Num; i++ {
hg, cms, topN, fms := globalStats.Hg[i], globalStats.Cms[i], globalStats.TopN[i], globalStats.Fms[i]
err = statsHandle.SaveStatsToStorage(globalStatsID.tableID, globalStats.Count, info.isIndex, hg, cms, topN, fms, info.statsVersion, 1)
if err != nil {
logutil.Logger(ctx).Error("save global-level stats to storage failed", zap.Error(err))
}
}
}
}
return statsHandle.Update(infoschema.GetInfoSchema(e.ctx))
}
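// getBuildStatsConcurrency returns the concurrency for building stats, read from the tidb_build_stats_concurrency session variable.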
func getBuildStatsConcurrency(ctx sessionctx.Context) (int, error) {
sessionVars := ctx.GetSessionVars()
concurrency, err := variable.GetSessionSystemVar(sessionVars, variable.TiDBBuildStatsConcurrency)
if err != nil {
return 0, err
}
c, err := strconv.ParseInt(concurrency, 10, 64)
return int(c), err
}
type taskType int
const (
colTask taskType = iota
idxTask
fastTask
pkIncrementalTask
idxIncrementalTask
)
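// analyzeTask is the unit of work consumed by analyze workers; which exec field is used depends on taskType.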
type analyzeTask struct {
taskType taskType
idxExec *AnalyzeIndexExec
colExec *AnalyzeColumnsExec
fastExec *AnalyzeFastExec
idxIncrementalExec *analyzeIndexIncrementalExec
colIncrementalExec *analyzePKIncrementalExec
job *statistics.AnalyzeJob
}
var errAnalyzeWorkerPanic = errors.New("analyze worker panic")
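// analyzeWorker consumes tasks from taskCh and sends the built statistics to resultCh.
// Panics are recovered and reported as errAnalyzeWorkerPanic. The worker started with
// isCloseChanThread waits for all workers to finish and then closes resultCh.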
func (e *AnalyzeExec) analyzeWorker(taskCh <-chan *analyzeTask, resultCh chan<- analyzeResult, isCloseChanThread bool) {
var task *analyzeTask
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
stackSize := runtime.Stack(buf, false)
buf = buf[:stackSize]
logutil.BgLogger().Error("analyze worker panicked", zap.String("stack", string(buf)))
metrics.PanicCounter.WithLabelValues(metrics.LabelAnalyze).Inc()
resultCh <- analyzeResult{
Err: errAnalyzeWorkerPanic,
job: task.job,
}
}
e.wg.Done()
if isCloseChanThread {
e.wg.Wait()
close(resultCh)
}
}()
for {
var ok bool
task, ok = <-taskCh
if !ok {
break
}
task.job.Start()
switch task.taskType {
case colTask:
task.colExec.job = task.job
for _, result := range analyzeColumnsPushdown(task.colExec) {
resultCh <- result
}
case idxTask:
task.idxExec.job = task.job
resultCh <- analyzeIndexPushdown(task.idxExec)
case fastTask:
			task.fastExec.job = task.job
for _, result := range analyzeFastExec(task.fastExec) {
resultCh <- result
}
case pkIncrementalTask:
task.colIncrementalExec.job = task.job
resultCh <- analyzePKIncremental(task.colIncrementalExec)
case idxIncrementalTask:
task.idxIncrementalExec.job = task.job
resultCh <- analyzeIndexIncremental(task.idxIncrementalExec)
}
}
}
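// analyzeIndexPushdown builds the statistics of a single index by pushing the analyze request down to TiKV.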
func analyzeIndexPushdown(idxExec *AnalyzeIndexExec) analyzeResult {
ranges := ranger.FullRange()
	// For a single-column index, we do not load null rows from TiKV, so the built histogram would not include
	// null values, and its `NullCount` is set from the result of another distsql call that fetches the null rows.
	// For a multi-column index, we cannot define null for the rows, so we still use the full range, and rows
	// containing null fields will exist in the built histograms. Note that the `NullCount` of histograms for
	// multi-column indexes is always 0 in that case.
if len(idxExec.idxInfo.Columns) == 1 {
ranges = ranger.FullNotNullRange()
}
hist, cms, fms, topN, err := idxExec.buildStats(ranges, true)
if err != nil {
return analyzeResult{Err: err, job: idxExec.job}
}
var statsVer = statistics.Version1
if idxExec.analyzePB.IdxReq.Version != nil {
statsVer = int(*idxExec.analyzePB.IdxReq.Version)
}
result := analyzeResult{
TableID: idxExec.tableID,
Hist: []*statistics.Histogram{hist},
Cms: []*statistics.CMSketch{cms},
TopNs: []*statistics.TopN{topN},
Fms: []*statistics.FMSketch{fms},
IsIndex: 1,
job: idxExec.job,
StatsVer: statsVer,
}
result.Count = hist.NullCount
if hist.Len() > 0 {
result.Count += hist.Buckets[hist.Len()-1].Count
}
if topN.TotalCount() > 0 {
result.Count += int64(topN.TotalCount())
}
return result
}
// AnalyzeIndexExec represents analyze index push down executor.
type AnalyzeIndexExec struct {
ctx sessionctx.Context
tableID core.AnalyzeTableID
idxInfo *model.IndexInfo
isCommonHandle bool
concurrency int
analyzePB *tipb.AnalyzeReq
result distsql.SelectResult
countNullRes distsql.SelectResult
opts map[ast.AnalyzeOptionType]uint64
job *statistics.AnalyzeJob
}
// fetchAnalyzeResult builds and dispatches the `kv.Request` from given ranges, and stores the `SelectResult`
// in corresponding fields based on the input `isNullRange` argument, which indicates if the range is the
// special null range for single-column index to get the null count.
func (e *AnalyzeIndexExec) fetchAnalyzeResult(ranges []*ranger.Range, isNullRange bool) error {
var builder distsql.RequestBuilder
var kvReqBuilder *distsql.RequestBuilder
if e.isCommonHandle && e.idxInfo.Primary {
kvReqBuilder = builder.SetHandleRangesForTables(e.ctx.GetSessionVars().StmtCtx, []int64{e.tableID.GetStatisticsID()}, true, ranges, nil)
} else {
kvReqBuilder = builder.SetIndexRangesForTables(e.ctx.GetSessionVars().StmtCtx, []int64{e.tableID.GetStatisticsID()}, e.idxInfo.ID, ranges)
}
kvReq, err := kvReqBuilder.
SetAnalyzeRequest(e.analyzePB).
SetStartTS(math.MaxUint64).
SetKeepOrder(true).
SetConcurrency(e.concurrency).
Build()
if err != nil {
return err
}
ctx := context.TODO()
result, err := distsql.Analyze(ctx, e.ctx.GetClient(), kvReq, e.ctx.GetSessionVars().KVVars, e.ctx.GetSessionVars().InRestrictedSQL, e.ctx.GetSessionVars().StmtCtx.MemTracker)
if err != nil {
return err
}
if isNullRange {
e.countNullRes = result
} else {
e.result = result
}
return nil
}
func (e *AnalyzeIndexExec) open(ranges []*ranger.Range, considerNull bool) error {
err := e.fetchAnalyzeResult(ranges, false)
if err != nil {
return err
}
if considerNull && len(e.idxInfo.Columns) == 1 {
ranges = ranger.NullRange()
err = e.fetchAnalyzeResult(ranges, true)
if err != nil {
return err
}
}
return nil
}
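// updateIndexResult merges one AnalyzeIndexResp into the accumulated histogram, CMSketch, FMSketch and TopN.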
func updateIndexResult(
ctx *stmtctx.StatementContext,
resp *tipb.AnalyzeIndexResp,
job *statistics.AnalyzeJob,
hist *statistics.Histogram,
cms *statistics.CMSketch,
fms *statistics.FMSketch,
topn *statistics.TopN,
idxInfo *model.IndexInfo,
numBuckets int,
numTopN int,
statsVer int,
) (
*statistics.Histogram,
*statistics.CMSketch,
*statistics.FMSketch,
*statistics.TopN,
error,
) {
var err error
needCMS := cms != nil
respHist := statistics.HistogramFromProto(resp.Hist)
if job != nil {
job.Update(int64(respHist.TotalRowCount()))
}
hist, err = statistics.MergeHistograms(ctx, hist, respHist, numBuckets, statsVer)
if err != nil {
return nil, nil, nil, nil, err
}
if needCMS {
if resp.Cms == nil {
logutil.Logger(context.TODO()).Warn("nil CMS in response", zap.String("table", idxInfo.Table.O), zap.String("index", idxInfo.Name.O))
} else {
cm, tmpTopN := statistics.CMSketchAndTopNFromProto(resp.Cms)
if err := cms.MergeCMSketch(cm); err != nil {
return nil, nil, nil, nil, err
}
statistics.MergeTopNAndUpdateCMSketch(topn, tmpTopN, cms, uint32(numTopN))
}
}
if fms != nil && resp.Collector != nil && resp.Collector.FmSketch != nil {
fms.MergeFMSketch(statistics.FMSketchFromProto(resp.Collector.FmSketch))
}
return hist, cms, fms, topn, nil
}
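// buildStatsFromResult reads all raw responses from the given SelectResult and merges them into a histogram,
// an FMSketch, and, when needCMS is true, a CMSketch and TopN.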
func (e *AnalyzeIndexExec) buildStatsFromResult(result distsql.SelectResult, needCMS bool) (*statistics.Histogram, *statistics.CMSketch, *statistics.FMSketch, *statistics.TopN, error) {
failpoint.Inject("buildStatsFromResult", func(val failpoint.Value) {
if val.(bool) {
failpoint.Return(nil, nil, nil, nil, errors.New("mock buildStatsFromResult error"))
}
})
hist := &statistics.Histogram{}
var cms *statistics.CMSketch
var topn *statistics.TopN
if needCMS {
cms = statistics.NewCMSketch(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]))
topn = statistics.NewTopN(int(e.opts[ast.AnalyzeOptNumTopN]))
}
fms := statistics.NewFMSketch(maxSketchSize)
statsVer := statistics.Version1
if e.analyzePB.IdxReq.Version != nil {
statsVer = int(*e.analyzePB.IdxReq.Version)
}
for {
data, err := result.NextRaw(context.TODO())
if err != nil {
return nil, nil, nil, nil, err
}
if data == nil {
break
}
resp := &tipb.AnalyzeIndexResp{}
err = resp.Unmarshal(data)
if err != nil {
return nil, nil, nil, nil, err
}
hist, cms, fms, topn, err = updateIndexResult(e.ctx.GetSessionVars().StmtCtx, resp, e.job, hist, cms, fms, topn,
e.idxInfo, int(e.opts[ast.AnalyzeOptNumBuckets]), int(e.opts[ast.AnalyzeOptNumTopN]), statsVer)
if err != nil {
return nil, nil, nil, nil, err
}
}
if needCMS && topn.TotalCount() > 0 {
hist.RemoveVals(topn.TopN)
}
if needCMS && cms != nil {
cms.CalcDefaultValForAnalyze(uint64(hist.NDV))
}
return hist, cms, fms, topn, nil
}
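// buildStats builds the index statistics from the pushed-down results and, when a null-range request was issued,
// fills the histogram's NullCount from it.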
func (e *AnalyzeIndexExec) buildStats(ranges []*ranger.Range, considerNull bool) (hist *statistics.Histogram, cms *statistics.CMSketch, fms *statistics.FMSketch, topN *statistics.TopN, err error) {
if err = e.open(ranges, considerNull); err != nil {
return nil, nil, nil, nil, err
}
defer func() {
err1 := closeAll(e.result, e.countNullRes)
if err == nil {
err = err1
}
}()
hist, cms, fms, topN, err = e.buildStatsFromResult(e.result, true)
if err != nil {
return nil, nil, nil, nil, err
}
if e.countNullRes != nil {
nullHist, _, _, _, err := e.buildStatsFromResult(e.countNullRes, false)
if err != nil {
return nil, nil, nil, nil, err
}
if l := nullHist.Len(); l > 0 {
hist.NullCount = nullHist.Buckets[l-1].Count
}
}
hist.ID = e.idxInfo.ID
return hist, cms, fms, topN, nil
}
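// analyzeColumnsPushdown builds the column statistics (and the handle statistics when needed) by pushing the
// analyze request down to TiKV.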
func analyzeColumnsPushdown(colExec *AnalyzeColumnsExec) []analyzeResult {
var ranges []*ranger.Range
if hc := colExec.handleCols; hc != nil {
if hc.IsInt() {
ranges = ranger.FullIntRange(mysql.HasUnsignedFlag(hc.GetCol(0).RetType.Flag))
} else {
ranges = ranger.FullNotNullRange()
}
} else {
ranges = ranger.FullIntRange(false)
}
collExtStats := colExec.ctx.GetSessionVars().EnableExtendedStats
hists, cms, topNs, fms, extStats, err := colExec.buildStats(ranges, collExtStats)
if err != nil {
return []analyzeResult{{Err: err, job: colExec.job}}
}
if hasPkHist(colExec.handleCols) {
PKresult := analyzeResult{
TableID: colExec.tableID,
Hist: hists[:1],
Cms: cms[:1],
TopNs: topNs[:1],
Fms: fms[:1],
ExtStats: nil,
job: nil,
StatsVer: statistics.Version1,
}
PKresult.Count = int64(PKresult.Hist[0].TotalRowCount())
restResult := analyzeResult{
TableID: colExec.tableID,
Hist: hists[1:],
Cms: cms[1:],
TopNs: topNs[1:],
Fms: fms[1:],
ExtStats: extStats,
job: colExec.job,
StatsVer: colExec.analyzeVer,
}
restResult.Count = PKresult.Count
return []analyzeResult{PKresult, restResult}
}
var result []analyzeResult
if colExec.analyzePB.Tp == tipb.AnalyzeType_TypeMixed {
result = append(result, analyzeResult{
TableID: colExec.tableID,
Hist: []*statistics.Histogram{hists[0]},
Cms: []*statistics.CMSketch{cms[0]},
TopNs: []*statistics.TopN{topNs[0]},
Fms: []*statistics.FMSketch{nil},
IsIndex: 1,
job: colExec.job,
StatsVer: colExec.analyzeVer,
})
hists = hists[1:]
cms = cms[1:]
topNs = topNs[1:]
}
colResult := analyzeResult{
TableID: colExec.tableID,
Hist: hists,
Cms: cms,
TopNs: topNs,
Fms: fms,
ExtStats: extStats,
job: colExec.job,
StatsVer: colExec.analyzeVer,
}
colResult.Count = int64(colResult.Hist[0].TotalRowCount())
if colResult.StatsVer == statistics.Version2 {
colResult.Count += int64(topNs[0].TotalCount())
}
return append(result, colResult)
}
// AnalyzeColumnsExec represents Analyze columns push down executor.
type AnalyzeColumnsExec struct {
ctx sessionctx.Context
tableID core.AnalyzeTableID
colsInfo []*model.ColumnInfo
handleCols core.HandleCols
concurrency int
analyzePB *tipb.AnalyzeReq
commonHandle *model.IndexInfo
resultHandler *tableResultHandler
opts map[ast.AnalyzeOptionType]uint64
job *statistics.AnalyzeJob
analyzeVer int
}
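// open splits the ranges at the int64 boundary when necessary, builds a distsql request for each part, and
// opens the table result handler over the results.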
func (e *AnalyzeColumnsExec) open(ranges []*ranger.Range) error {
e.resultHandler = &tableResultHandler{}
firstPartRanges, secondPartRanges := distsql.SplitRangesBySign(ranges, true, false, !hasPkHist(e.handleCols))
firstResult, err := e.buildResp(firstPartRanges)
if err != nil {
return err
}
if len(secondPartRanges) == 0 {
e.resultHandler.open(nil, firstResult)
return nil
}
var secondResult distsql.SelectResult
secondResult, err = e.buildResp(secondPartRanges)
if err != nil {
return err
}
e.resultHandler.open(firstResult, secondResult)
return nil
}
func (e *AnalyzeColumnsExec) buildResp(ranges []*ranger.Range) (distsql.SelectResult, error) {
var builder distsql.RequestBuilder
reqBuilder := builder.SetHandleRangesForTables(e.ctx.GetSessionVars().StmtCtx, []int64{e.tableID.GetStatisticsID()}, e.handleCols != nil && !e.handleCols.IsInt(), ranges, nil)
// Always set KeepOrder of the request to be true, in order to compute
// correct `correlation` of columns.
kvReq, err := reqBuilder.
SetAnalyzeRequest(e.analyzePB).
SetStartTS(math.MaxUint64).
SetKeepOrder(true).
SetConcurrency(e.concurrency).
Build()
if err != nil {
return nil, err
}
ctx := context.TODO()
result, err := distsql.Analyze(ctx, e.ctx.GetClient(), kvReq, e.ctx.GetSessionVars().KVVars, e.ctx.GetSessionVars().InRestrictedSQL, e.ctx.GetSessionVars().StmtCtx.MemTracker)
if err != nil {
return nil, err
}
return result, nil
}
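// buildStats reads the pushed-down responses and builds the histogram, CMSketch, TopN and FMSketch for each
// column (and for the common handle in mixed analyze), plus extended stats when requested.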
func (e *AnalyzeColumnsExec) buildStats(ranges []*ranger.Range, needExtStats bool) (hists []*statistics.Histogram, cms []*statistics.CMSketch, topNs []*statistics.TopN, fms []*statistics.FMSketch, extStats *statistics.ExtendedStatsColl, err error) {
if err = e.open(ranges); err != nil {
return nil, nil, nil, nil, nil, err
}
defer func() {
if err1 := e.resultHandler.Close(); err1 != nil {
hists = nil
cms = nil
extStats = nil
err = err1
}
}()
var handleHist *statistics.Histogram
var handleCms *statistics.CMSketch
var handleFms *statistics.FMSketch
var handleTopn *statistics.TopN
statsVer := statistics.Version1
if e.analyzePB.Tp == tipb.AnalyzeType_TypeMixed {
handleHist = &statistics.Histogram{}
handleCms = statistics.NewCMSketch(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]))
handleTopn = statistics.NewTopN(int(e.opts[ast.AnalyzeOptNumTopN]))
handleFms = statistics.NewFMSketch(maxSketchSize)
if e.analyzePB.IdxReq.Version != nil {
statsVer = int(*e.analyzePB.IdxReq.Version)
}
}
pkHist := &statistics.Histogram{}
collectors := make([]*statistics.SampleCollector, len(e.colsInfo))
for i := range collectors {
collectors[i] = &statistics.SampleCollector{
IsMerger: true,
FMSketch: statistics.NewFMSketch(maxSketchSize),
MaxSampleSize: int64(e.opts[ast.AnalyzeOptNumSamples]),
CMSketch: statistics.NewCMSketch(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth])),
}
}
for {
data, err1 := e.resultHandler.nextRaw(context.TODO())
if err1 != nil {
return nil, nil, nil, nil, nil, err1
}
if data == nil {
break
}
sc := e.ctx.GetSessionVars().StmtCtx
var colResp *tipb.AnalyzeColumnsResp
if e.analyzePB.Tp == tipb.AnalyzeType_TypeMixed {
resp := &tipb.AnalyzeMixedResp{}
err = resp.Unmarshal(data)
if err != nil {
return nil, nil, nil, nil, nil, err
}
colResp = resp.ColumnsResp
handleHist, handleCms, handleFms, handleTopn, err = updateIndexResult(sc, resp.IndexResp, nil, handleHist,
handleCms, handleFms, handleTopn, e.commonHandle, int(e.opts[ast.AnalyzeOptNumBuckets]),
int(e.opts[ast.AnalyzeOptNumTopN]), statsVer)
if err != nil {
return nil, nil, nil, nil, nil, err
}
} else {
colResp = &tipb.AnalyzeColumnsResp{}
err = colResp.Unmarshal(data)
}
rowCount := int64(0)
if hasPkHist(e.handleCols) {
respHist := statistics.HistogramFromProto(colResp.PkHist)
rowCount = int64(respHist.TotalRowCount())
pkHist, err = statistics.MergeHistograms(sc, pkHist, respHist, int(e.opts[ast.AnalyzeOptNumBuckets]), statistics.Version1)
if err != nil {
return nil, nil, nil, nil, nil, err
}
}
for i, rc := range colResp.Collectors {
respSample := statistics.SampleCollectorFromProto(rc)
rowCount = respSample.Count + respSample.NullCount
collectors[i].MergeSampleCollector(sc, respSample)
}
e.job.Update(rowCount)
}
timeZone := e.ctx.GetSessionVars().Location()
if hasPkHist(e.handleCols) {
pkInfo := e.handleCols.GetCol(0)
pkHist.ID = pkInfo.ID
err = pkHist.DecodeTo(pkInfo.RetType, timeZone)
if err != nil {
return nil, nil, nil, nil, nil, err
}
hists = append(hists, pkHist)
cms = append(cms, nil)
topNs = append(topNs, nil)
fms = append(fms, nil)
}
for i, col := range e.colsInfo {
if e.analyzeVer < 2 {
// In analyze version 2, we don't collect TopN this way. We will collect TopN from samples in `BuildColumnHistAndTopN()` below.
err := collectors[i].ExtractTopN(uint32(e.opts[ast.AnalyzeOptNumTopN]), e.ctx.GetSessionVars().StmtCtx, &col.FieldType, timeZone)
if err != nil {
return nil, nil, nil, nil, nil, err
}
topNs = append(topNs, collectors[i].TopN)
}
for j, s := range collectors[i].Samples {
collectors[i].Samples[j].Ordinal = j
collectors[i].Samples[j].Value, err = tablecodec.DecodeColumnValue(s.Value.GetBytes(), &col.FieldType, timeZone)
if err != nil {
return nil, nil, nil, nil, nil, err
}
			// When collation is enabled, we store the Key representation of the sampled data. So we set the kind to `Bytes` here
			// to avoid converting it to its Key representation again.
if collectors[i].Samples[j].Value.Kind() == types.KindString {
collectors[i].Samples[j].Value.SetBytes(collectors[i].Samples[j].Value.GetBytes())
}
}
var hg *statistics.Histogram
var err error
var topn *statistics.TopN
if e.analyzeVer < 2 {
hg, err = statistics.BuildColumn(e.ctx, int64(e.opts[ast.AnalyzeOptNumBuckets]), col.ID, collectors[i], &col.FieldType)
} else {
hg, topn, err = statistics.BuildColumnHistAndTopN(e.ctx, int(e.opts[ast.AnalyzeOptNumBuckets]), int(e.opts[ast.AnalyzeOptNumTopN]), col.ID, collectors[i], &col.FieldType)
topNs = append(topNs, topn)
}
if err != nil {
return nil, nil, nil, nil, nil, err
}
hists = append(hists, hg)
collectors[i].CMSketch.CalcDefaultValForAnalyze(uint64(hg.NDV))
cms = append(cms, collectors[i].CMSketch)
fms = append(fms, collectors[i].FMSketch)
}
if needExtStats {
statsHandle := domain.GetDomain(e.ctx).StatsHandle()
extStats, err = statsHandle.BuildExtendedStats(e.tableID.GetStatisticsID(), e.colsInfo, collectors)
if err != nil {
return nil, nil, nil, nil, nil, err
}
}
if handleHist != nil {
handleHist.ID = e.commonHandle.ID
if handleTopn != nil && handleTopn.TotalCount() > 0 {
handleHist.RemoveVals(handleTopn.TopN)
}
if handleCms != nil {
handleCms.CalcDefaultValForAnalyze(uint64(handleHist.NDV))
}
hists = append([]*statistics.Histogram{handleHist}, hists...)
cms = append([]*statistics.CMSketch{handleCms}, cms...)
fms = append([]*statistics.FMSketch{handleFms}, fms...)
topNs = append([]*statistics.TopN{handleTopn}, topNs...)
}
return hists, cms, topNs, fms, extStats, nil
}
func hasPkHist(handleCols core.HandleCols) bool {
return handleCols != nil && handleCols.IsInt()
}
func pkColsCount(handleCols core.HandleCols) int {
if handleCols == nil {
return 0
}
return handleCols.NumCols()
}
var (
fastAnalyzeHistogramSample = metrics.FastAnalyzeHistogram.WithLabelValues(metrics.LblGeneral, "sample")
fastAnalyzeHistogramAccessRegions = metrics.FastAnalyzeHistogram.WithLabelValues(metrics.LblGeneral, "access_regions")
fastAnalyzeHistogramScanKeys = metrics.FastAnalyzeHistogram.WithLabelValues(metrics.LblGeneral, "scan_keys")
)
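// analyzeFastExec runs fast analyze and wraps the sampled statistics into analyzeResults, one per index plus
// one covering the handle and columns.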
func analyzeFastExec(exec *AnalyzeFastExec) []analyzeResult {
hists, cms, topNs, fms, err := exec.buildStats()
if err != nil {
return []analyzeResult{{Err: err, job: exec.job}}
}
var results []analyzeResult
pkColCount := pkColsCount(exec.handleCols)
if len(exec.idxsInfo) > 0 {
for i := pkColCount + len(exec.colsInfo); i < len(hists); i++ {
idxResult := analyzeResult{
TableID: exec.tableID,
Hist: []*statistics.Histogram{hists[i]},
Cms: []*statistics.CMSketch{cms[i]},
TopNs: []*statistics.TopN{topNs[i]},
Fms: []*statistics.FMSketch{nil},
IsIndex: 1,
Count: hists[i].NullCount,
job: exec.job,
StatsVer: statistics.Version1,
}
if hists[i].Len() > 0 {
idxResult.Count += hists[i].Buckets[hists[i].Len()-1].Count
}
if exec.rowCount != 0 {
idxResult.Count = exec.rowCount
}
results = append(results, idxResult)
}
}
hist := hists[0]
colResult := analyzeResult{
TableID: exec.tableID,
Hist: hists[:pkColCount+len(exec.colsInfo)],
Cms: cms[:pkColCount+len(exec.colsInfo)],
TopNs: topNs[:pkColCount+len(exec.colsInfo)],
Fms: fms[:pkColCount+len(exec.colsInfo)],
Count: hist.NullCount,
job: exec.job,
StatsVer: statistics.Version1,
}
if hist.Len() > 0 {
colResult.Count += hist.Buckets[hist.Len()-1].Count
}
if exec.rowCount != 0 {
colResult.Count = exec.rowCount
}
results = append(results, colResult)
return results
}
// AnalyzeFastExec represents Fast Analyze executor.
type AnalyzeFastExec struct {
ctx sessionctx.Context
tableID core.AnalyzeTableID
handleCols core.HandleCols
colsInfo []*model.ColumnInfo
idxsInfo []*model.IndexInfo
concurrency int
opts map[ast.AnalyzeOptionType]uint64
tblInfo *model.TableInfo
cache *tikv.RegionCache
wg *sync.WaitGroup
rowCount int64
sampCursor int32
sampTasks []*tikv.KeyLocation
scanTasks []*tikv.KeyLocation
collectors []*statistics.SampleCollector
randSeed int64
job *statistics.AnalyzeJob
estSampStep uint32
}
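// calculateEstimateSampleStep estimates the sample step as historyRowCount / sampleSize. The historical row
// count comes from the stats cache if the table was analyzed before; otherwise a count(*) is executed.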
func (e *AnalyzeFastExec) calculateEstimateSampleStep() (err error) {
exec := e.ctx.(sqlexec.RestrictedSQLExecutor)
var stmt ast.StmtNode
stmt, err = exec.ParseWithParams(context.TODO(), "select flag from mysql.stats_histograms where table_id = %?", e.tableID.GetStatisticsID())
if err != nil {
return
}
var rows []chunk.Row
rows, _, err = exec.ExecRestrictedStmt(context.TODO(), stmt)
if err != nil {
return
}
var historyRowCount uint64
hasBeenAnalyzed := len(rows) != 0 && rows[0].GetInt64(0) == statistics.AnalyzeFlag
if hasBeenAnalyzed {
historyRowCount = uint64(domain.GetDomain(e.ctx).StatsHandle().GetPartitionStats(e.tblInfo, e.tableID.GetStatisticsID()).Count)
} else {
dbInfo, ok := domain.GetDomain(e.ctx).InfoSchema().SchemaByTable(e.tblInfo)
if !ok {
err = errors.Errorf("database not found for table '%s'", e.tblInfo.Name)
return
}
var rollbackFn func() error
rollbackFn, err = e.activateTxnForRowCount()
if err != nil {
return
}
defer func() {
if rollbackFn != nil {
err = rollbackFn()
}
}()
sql := new(strings.Builder)
sqlexec.MustFormatSQL(sql, "select count(*) from %n.%n", dbInfo.Name.L, e.tblInfo.Name.L)
if e.tblInfo.ID != e.tableID.GetStatisticsID() {
for _, definition := range e.tblInfo.Partition.Definitions {
if definition.ID == e.tableID.GetStatisticsID() {
sqlexec.MustFormatSQL(sql, " partition(%n)", definition.Name.L)
break
}
}
}
var rs sqlexec.RecordSet
rs, err = e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql.String())
if err != nil {
return
}
if rs == nil {
err = errors.Trace(errors.Errorf("empty record set"))
return
}
defer terror.Call(rs.Close)
chk := rs.NewChunk()
err = rs.Next(context.TODO(), chk)
if err != nil {
return
}
e.rowCount = chk.GetRow(0).GetInt64(0)
historyRowCount = uint64(e.rowCount)
}
totalSampSize := e.opts[ast.AnalyzeOptNumSamples]
e.estSampStep = uint32(historyRowCount / totalSampSize)
return
}
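// activateTxnForRowCount ensures there is an active low-priority transaction for the count(*) query; if a new
// transaction is started here, the returned rollbackFn rolls it back.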
func (e *AnalyzeFastExec) activateTxnForRowCount() (rollbackFn func() error, err error) {
txn, err := e.ctx.Txn(true)
if err != nil {
if kv.ErrInvalidTxn.Equal(err) {
_, err := e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "begin")
if err != nil {
return nil, errors.Trace(err)
}
rollbackFn = func() error {
_, err := e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "rollback")
return err
}
} else {
return nil, errors.Trace(err)
}
}
txn.SetOption(tikvstore.Priority, tikvstore.PriorityLow)
txn.SetOption(tikvstore.IsolationLevel, tikvstore.RC)
txn.SetOption(tikvstore.NotFillCache, true)
return rollbackFn, nil
}
// buildSampTask builds the sample tasks.
func (e *AnalyzeFastExec) buildSampTask() (err error) {
bo := tikv.NewBackofferWithVars(context.Background(), 500, nil)
store, _ := e.ctx.GetStore().(tikv.Storage)
e.cache = store.GetRegionCache()
accessRegionsCounter := 0
pid := e.tableID.GetStatisticsID()
startKey, endKey := tablecodec.GetTableHandleKeyRange(pid)
targetKey := startKey
for {
// Search for the region which contains the targetKey.
loc, err := e.cache.LocateKey(bo, targetKey)
if err != nil {
return err
}
if bytes.Compare(endKey, loc.StartKey) < 0 {
break
}
accessRegionsCounter++
// Set the next search key.
targetKey = loc.EndKey
		// If all the KV pairs in the region belong to the table, add the region to the sample tasks.
if bytes.Compare(startKey, loc.StartKey) <= 0 && len(loc.EndKey) != 0 && bytes.Compare(loc.EndKey, endKey) <= 0 {
e.sampTasks = append(e.sampTasks, loc)
continue
}
e.scanTasks = append(e.scanTasks, loc)
if bytes.Compare(loc.StartKey, startKey) < 0 {
loc.StartKey = startKey
}
if bytes.Compare(endKey, loc.EndKey) < 0 || len(loc.EndKey) == 0 {
loc.EndKey = endKey
break
}
}
fastAnalyzeHistogramAccessRegions.Observe(float64(accessRegionsCounter))
return nil
}
func (e *AnalyzeFastExec) decodeValues(handle kv.Handle, sValue []byte, wantCols map[int64]*types.FieldType) (values map[int64]types.Datum, err error) {
loc := e.ctx.GetSessionVars().Location()
values, err = tablecodec.DecodeRowToDatumMap(sValue, wantCols, loc)
if err != nil || e.handleCols == nil {
return values, err
}
wantCols = make(map[int64]*types.FieldType, e.handleCols.NumCols())
handleColIDs := make([]int64, e.handleCols.NumCols())
for i := 0; i < e.handleCols.NumCols(); i++ {
c := e.handleCols.GetCol(i)
handleColIDs[i] = c.ID
wantCols[c.ID] = c.RetType
}
return tablecodec.DecodeHandleToDatumMap(handle, handleColIDs, wantCols, loc, values)
}
func (e *AnalyzeFastExec) getValueByInfo(colInfo *model.ColumnInfo, values map[int64]types.Datum) (types.Datum, error) {
val, ok := values[colInfo.ID]
if !ok {
return table.GetColOriginDefaultValue(e.ctx, colInfo)
}
return val, nil
}
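// updateCollectorSamples decodes one sampled key-value pair and stores the decoded values at position samplePos
// in the collectors of the handle, the columns and the indexes.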
func (e *AnalyzeFastExec) updateCollectorSamples(sValue []byte, sKey kv.Key, samplePos int32) (err error) {
var handle kv.Handle
handle, err = tablecodec.DecodeRowKey(sKey)
if err != nil {
return err
}
// Decode cols for analyze table
wantCols := make(map[int64]*types.FieldType, len(e.colsInfo))
for _, col := range e.colsInfo {
wantCols[col.ID] = &col.FieldType
}
	// Pre-build the index->columns relationship and refill wantCols with any missing index columns (for analyzing indexes).
index2Cols := make([][]*model.ColumnInfo, len(e.idxsInfo))
for i, idxInfo := range e.idxsInfo {
for _, idxCol := range idxInfo.Columns {
colInfo := e.tblInfo.Columns[idxCol.Offset]
index2Cols[i] = append(index2Cols[i], colInfo)
wantCols[colInfo.ID] = &colInfo.FieldType
}
}
// Decode the cols value in order.
var values map[int64]types.Datum
values, err = e.decodeValues(handle, sValue, wantCols)
if err != nil {
return err
}
// Update the primary key collector.
pkColsCount := pkColsCount(e.handleCols)
for i := 0; i < pkColsCount; i++ {
col := e.handleCols.GetCol(i)
v, ok := values[col.ID]
if !ok {
return errors.Trace(errors.Errorf("Primary key column not found"))
}
if e.collectors[i].Samples[samplePos] == nil {
e.collectors[i].Samples[samplePos] = &statistics.SampleItem{}
}
e.collectors[i].Samples[samplePos].Handle = handle
e.collectors[i].Samples[samplePos].Value = v
}
// Update the columns' collectors.
for j, colInfo := range e.colsInfo {
v, err := e.getValueByInfo(colInfo, values)
if err != nil {
return err
}
if e.collectors[pkColsCount+j].Samples[samplePos] == nil {
e.collectors[pkColsCount+j].Samples[samplePos] = &statistics.SampleItem{}
}
e.collectors[pkColsCount+j].Samples[samplePos].Handle = handle
e.collectors[pkColsCount+j].Samples[samplePos].Value = v
}
// Update the indexes' collectors.
for j, idxInfo := range e.idxsInfo {
idxVals := make([]types.Datum, 0, len(idxInfo.Columns))
cols := index2Cols[j]
for _, colInfo := range cols {
v, err := e.getValueByInfo(colInfo, values)
if err != nil {
return err
}
idxVals = append(idxVals, v)
}
var bytes []byte
bytes, err = codec.EncodeKey(e.ctx.GetSessionVars().StmtCtx, bytes, idxVals...)
if err != nil {
return err
}
if e.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos] == nil {
e.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos] = &statistics.SampleItem{}
}
e.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos].Handle = handle
e.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos].Value = types.NewBytesDatum(bytes)
}
return nil
}
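// handleBatchSeekResponse feeds the sampled key-value pairs of one region into the collectors until the
// requested sample count is reached.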
func (e *AnalyzeFastExec) handleBatchSeekResponse(kvMap map[string][]byte) (err error) {
length := int32(len(kvMap))
newCursor := atomic.AddInt32(&e.sampCursor, length)
samplePos := newCursor - length
for sKey, sValue := range kvMap {
exceedNeededSampleCounts := uint64(samplePos) >= e.opts[ast.AnalyzeOptNumSamples]
if exceedNeededSampleCounts {
atomic.StoreInt32(&e.sampCursor, int32(e.opts[ast.AnalyzeOptNumSamples]))
break
}
err = e.updateCollectorSamples(sValue, kv.Key(sKey), samplePos)
if err != nil {
return err
}
samplePos++
}
return nil
}
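// handleScanIter scans all key-value pairs from the iterator and fills the collectors using reservoir sampling.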
func (e *AnalyzeFastExec) handleScanIter(iter kv.Iterator) (scanKeysSize int, err error) {
rander := rand.New(rand.NewSource(e.randSeed))
sampleSize := int64(e.opts[ast.AnalyzeOptNumSamples])
for ; iter.Valid() && err == nil; err = iter.Next() {
// reservoir sampling
scanKeysSize++
randNum := rander.Int63n(int64(e.sampCursor) + int64(scanKeysSize))
if randNum > sampleSize && e.sampCursor == int32(sampleSize) {
continue
}
p := rander.Int31n(int32(sampleSize))
if e.sampCursor < int32(sampleSize) {
p = e.sampCursor
e.sampCursor++
}
err = e.updateCollectorSamples(iter.Value(), iter.Key(), p)
if err != nil {
return
}
}
return
}
func (e *AnalyzeFastExec) handleScanTasks(bo *tikv.Backoffer) (keysSize int, err error) {
snapshot := e.ctx.GetStore().GetSnapshot(kv.MaxVersion)
if e.ctx.GetSessionVars().GetReplicaRead().IsFollowerRead() {
snapshot.SetOption(tikvstore.ReplicaRead, tikvstore.ReplicaReadFollower)
}
for _, t := range e.scanTasks {
iter, err := snapshot.Iter(kv.Key(t.StartKey), kv.Key(t.EndKey))
if err != nil {
return keysSize, err
}
size, err := e.handleScanIter(iter)
keysSize += size
if err != nil {
return keysSize, err
}
}
return keysSize, nil
}
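// handleSampTasks is run by each worker goroutine. It iterates its share of the sample tasks with the
// SampleStep snapshot option so that only roughly one key per step is returned.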
func (e *AnalyzeFastExec) handleSampTasks(workID int, step uint32, err *error) {
defer e.wg.Done()
snapshot := e.ctx.GetStore().GetSnapshot(kv.MaxVersion)
snapshot.SetOption(tikvstore.NotFillCache, true)
snapshot.SetOption(tikvstore.IsolationLevel, tikvstore.RC)
snapshot.SetOption(tikvstore.Priority, tikvstore.PriorityLow)
if e.ctx.GetSessionVars().GetReplicaRead().IsFollowerRead() {
snapshot.SetOption(tikvstore.ReplicaRead, tikvstore.ReplicaReadFollower)
}
rander := rand.New(rand.NewSource(e.randSeed))
for i := workID; i < len(e.sampTasks); i += e.concurrency {
task := e.sampTasks[i]
// randomize the estimate step in range [step - 2 * sqrt(step), step]
if step > 4 { // 2*sqrt(x) < x
lower, upper := step-uint32(2*math.Sqrt(float64(step))), step
step = uint32(rander.Intn(int(upper-lower))) + lower
}
snapshot.SetOption(tikvstore.SampleStep, step)
kvMap := make(map[string][]byte)
var iter kv.Iterator
iter, *err = snapshot.Iter(kv.Key(task.StartKey), kv.Key(task.EndKey))
if *err != nil {
return
}
for iter.Valid() {
kvMap[string(iter.Key())] = iter.Value()
*err = iter.Next()
if *err != nil {
return
}
}
fastAnalyzeHistogramSample.Observe(float64(len(kvMap)))
*err = e.handleBatchSeekResponse(kvMap)
if *err != nil {
return
}
}
}
func (e *AnalyzeFastExec) buildColumnStats(ID int64, collector *statistics.SampleCollector, tp *types.FieldType, rowCount int64) (*statistics.Histogram, *statistics.CMSketch, *statistics.TopN, *statistics.FMSketch, error) {
sc := e.ctx.GetSessionVars().StmtCtx
data := make([][]byte, 0, len(collector.Samples))
fmSketch := statistics.NewFMSketch(maxSketchSize)
for i, sample := range collector.Samples {
sample.Ordinal = i
if sample.Value.IsNull() {
collector.NullCount++
continue
}
err := fmSketch.InsertValue(sc, sample.Value)
if err != nil {
return nil, nil, nil, nil, err
}
bytes, err := tablecodec.EncodeValue(sc, nil, sample.Value)
if err != nil {
return nil, nil, nil, nil, err
}
data = append(data, bytes)
}
// Build CMSketch.
cmSketch, topN, ndv, scaleRatio := statistics.NewCMSketchAndTopN(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]), data, uint32(e.opts[ast.AnalyzeOptNumTopN]), uint64(rowCount))
// Build Histogram.
hist, err := statistics.BuildColumnHist(e.ctx, int64(e.opts[ast.AnalyzeOptNumBuckets]), ID, collector, tp, rowCount, int64(ndv), collector.NullCount*int64(scaleRatio))
return hist, cmSketch, topN, fmSketch, err
}
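// buildIndexStats builds the histogram, CMSketch and TopN of one index. A CMSketch is built for every column
// prefix of the index and the sketches are merged into one.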
func (e *AnalyzeFastExec) buildIndexStats(idxInfo *model.IndexInfo, collector *statistics.SampleCollector, rowCount int64) (*statistics.Histogram, *statistics.CMSketch, *statistics.TopN, error) {
data := make([][][]byte, len(idxInfo.Columns))
for _, sample := range collector.Samples {
var preLen int
remained := sample.Value.GetBytes()
		// We need to insert each prefix value into the CM Sketch.
for i := 0; i < len(idxInfo.Columns); i++ {
var err error
var value []byte
value, remained, err = codec.CutOne(remained)
if err != nil {
return nil, nil, nil, err
}
preLen += len(value)
data[i] = append(data[i], sample.Value.GetBytes()[:preLen])
}
}
numTop := uint32(e.opts[ast.AnalyzeOptNumTopN])
cmSketch, topN, ndv, scaleRatio := statistics.NewCMSketchAndTopN(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]), data[0], numTop, uint64(rowCount))
// Build CM Sketch for each prefix and merge them into one.
for i := 1; i < len(idxInfo.Columns); i++ {
var curCMSketch *statistics.CMSketch
var curTopN *statistics.TopN
		// `ndv` should be the NDV of the full index, so just overwrite it here.
curCMSketch, curTopN, ndv, scaleRatio = statistics.NewCMSketchAndTopN(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]), data[i], numTop, uint64(rowCount))
err := cmSketch.MergeCMSketch(curCMSketch)
if err != nil {
return nil, nil, nil, err
}
statistics.MergeTopNAndUpdateCMSketch(topN, curTopN, cmSketch, numTop)
}
// Build Histogram.
hist, err := statistics.BuildColumnHist(e.ctx, int64(e.opts[ast.AnalyzeOptNumBuckets]), idxInfo.ID, collector, types.NewFieldType(mysql.TypeBlob), rowCount, int64(ndv), collector.NullCount*int64(scaleRatio))
return hist, cmSketch, topN, err
}
func (e *AnalyzeFastExec) runTasks() ([]*statistics.Histogram, []*statistics.CMSketch, []*statistics.TopN, []*statistics.FMSketch, error) {
errs := make([]error, e.concurrency)
pkColCount := pkColsCount(e.handleCols)
	// Collect the primary key samples, column samples and index samples.
length := len(e.colsInfo) + pkColCount + len(e.idxsInfo)
e.collectors = make([]*statistics.SampleCollector, length)
for i := range e.collectors {
e.collectors[i] = &statistics.SampleCollector{
MaxSampleSize: int64(e.opts[ast.AnalyzeOptNumSamples]),
Samples: make([]*statistics.SampleItem, e.opts[ast.AnalyzeOptNumSamples]),
}
}
e.wg.Add(e.concurrency)
bo := tikv.NewBackofferWithVars(context.Background(), 500, nil)
for i := 0; i < e.concurrency; i++ {
go e.handleSampTasks(i, e.estSampStep, &errs[i])
}
e.wg.Wait()
for _, err := range errs {
if err != nil {
return nil, nil, nil, nil, err
}
}
scanKeysSize, err := e.handleScanTasks(bo)
fastAnalyzeHistogramScanKeys.Observe(float64(scanKeysSize))
if err != nil {
return nil, nil, nil, nil, err
}
stats := domain.GetDomain(e.ctx).StatsHandle()
var rowCount int64 = 0
if stats.Lease() > 0 {
if t := stats.GetPartitionStats(e.tblInfo, e.tableID.GetStatisticsID()); !t.Pseudo {
rowCount = t.Count
}
}
hists, cms, topNs, fms := make([]*statistics.Histogram, length), make([]*statistics.CMSketch, length), make([]*statistics.TopN, length), make([]*statistics.FMSketch, length)
for i := 0; i < length; i++ {
// Build collector properties.
collector := e.collectors[i]
collector.Samples = collector.Samples[:e.sampCursor]
sort.Slice(collector.Samples, func(i, j int) bool { return collector.Samples[i].Handle.Compare(collector.Samples[j].Handle) < 0 })
collector.CalcTotalSize()
// Adjust the row count in case the count of `tblStats` is not accurate and too small.
rowCount = mathutil.MaxInt64(rowCount, int64(len(collector.Samples)))
// Scale the total column size.
if len(collector.Samples) > 0 {
collector.TotalSize *= rowCount / int64(len(collector.Samples))
}
if i < pkColCount {
pkCol := e.handleCols.GetCol(i)
hists[i], cms[i], topNs[i], fms[i], err = e.buildColumnStats(pkCol.ID, e.collectors[i], pkCol.RetType, rowCount)
} else if i < pkColCount+len(e.colsInfo) {
hists[i], cms[i], topNs[i], fms[i], err = e.buildColumnStats(e.colsInfo[i-pkColCount].ID, e.collectors[i], &e.colsInfo[i-pkColCount].FieldType, rowCount)
} else {
hists[i], cms[i], topNs[i], err = e.buildIndexStats(e.idxsInfo[i-pkColCount-len(e.colsInfo)], e.collectors[i], rowCount)
}
if err != nil {
return nil, nil, nil, nil, err
}
}
return hists, cms, topNs, fms, nil
}
func (e *AnalyzeFastExec) buildStats() (hists []*statistics.Histogram, cms []*statistics.CMSketch, topNs []*statistics.TopN, fms []*statistics.FMSketch, err error) {
	// Set the rand seed. RandSeed is only overridden by unit tests.
	// To ensure random sequences differ between runs in non-test environments, the seed is set to time.Now()
	// when RandSeed keeps its default value.
if RandSeed == 1 {
atomic.StoreInt64(&e.randSeed, time.Now().UnixNano())
} else {
atomic.StoreInt64(&e.randSeed, RandSeed)
}
err = e.buildSampTask()
if err != nil {
return nil, nil, nil, nil, err
}
return e.runTasks()
}
// AnalyzeTestFastExec is for fast sample in unit test.
type AnalyzeTestFastExec struct {
AnalyzeFastExec
Ctx sessionctx.Context
TableID core.AnalyzeTableID
HandleCols core.HandleCols
ColsInfo []*model.ColumnInfo
IdxsInfo []*model.IndexInfo
Concurrency int
Collectors []*statistics.SampleCollector
TblInfo *model.TableInfo
Opts map[ast.AnalyzeOptionType]uint64
}
// TestFastSample only tests fast sampling; it is used in unit tests.
func (e *AnalyzeTestFastExec) TestFastSample() error {
e.ctx = e.Ctx
e.handleCols = e.HandleCols
e.colsInfo = e.ColsInfo
e.idxsInfo = e.IdxsInfo
e.concurrency = e.Concurrency
e.tableID = e.TableID
e.wg = &sync.WaitGroup{}
e.job = &statistics.AnalyzeJob{}
e.tblInfo = e.TblInfo
e.opts = e.Opts
_, _, _, _, err := e.buildStats()
e.Collectors = e.collectors
return err
}
type analyzeIndexIncrementalExec struct {
AnalyzeIndexExec
oldHist *statistics.Histogram
oldCMS *statistics.CMSketch
oldTopN *statistics.TopN
}
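// analyzeIndexIncremental analyzes only the index range beyond the upper bound of the old histogram and merges
// the new statistics with the old ones.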
func analyzeIndexIncremental(idxExec *analyzeIndexIncrementalExec) analyzeResult {
var statsVer = statistics.Version1
if idxExec.analyzePB.IdxReq.Version != nil {
statsVer = int(*idxExec.analyzePB.IdxReq.Version)
}
pruneMode := variable.PartitionPruneMode(idxExec.ctx.GetSessionVars().PartitionPruneMode.Load())
if idxExec.tableID.IsPartitionTable() && pruneMode == variable.Dynamic {
err := errors.Errorf("[stats]: global statistics for partitioned tables unavailable in ANALYZE INCREMENTAL")
return analyzeResult{Err: err, job: idxExec.job}
}
startPos := idxExec.oldHist.GetUpper(idxExec.oldHist.Len() - 1)
values, _, err := codec.DecodeRange(startPos.GetBytes(), len(idxExec.idxInfo.Columns), nil, nil)
if err != nil {
return analyzeResult{Err: err, job: idxExec.job}
}
ran := ranger.Range{LowVal: values, HighVal: []types.Datum{types.MaxValueDatum()}}
hist, cms, fms, topN, err := idxExec.buildStats([]*ranger.Range{&ran}, false)
if err != nil {
return analyzeResult{Err: err, job: idxExec.job}
}
hist, err = statistics.MergeHistograms(idxExec.ctx.GetSessionVars().StmtCtx, idxExec.oldHist, hist, int(idxExec.opts[ast.AnalyzeOptNumBuckets]), statsVer)
if err != nil {
return analyzeResult{Err: err, job: idxExec.job}
}
if idxExec.oldCMS != nil && cms != nil {
err = cms.MergeCMSketch4IncrementalAnalyze(idxExec.oldCMS, uint32(idxExec.opts[ast.AnalyzeOptNumTopN]))
if err != nil {
return analyzeResult{Err: err, job: idxExec.job}
}
cms.CalcDefaultValForAnalyze(uint64(hist.NDV))
}
if statsVer == statistics.Version2 {
poped := statistics.MergeTopNAndUpdateCMSketch(topN, idxExec.oldTopN, cms, uint32(idxExec.opts[ast.AnalyzeOptNumTopN]))
hist.AddIdxVals(poped)
}
result := analyzeResult{
TableID: idxExec.tableID,
Hist: []*statistics.Histogram{hist},
Cms: []*statistics.CMSketch{cms},
TopNs: []*statistics.TopN{topN},
Fms: []*statistics.FMSketch{fms},
IsIndex: 1,
job: idxExec.job,
StatsVer: statsVer,
}
result.Count = hist.NullCount
if hist.Len() > 0 {
result.Count += hist.Buckets[hist.Len()-1].Count
}
return result
}
type analyzePKIncrementalExec struct {
AnalyzeColumnsExec
oldHist *statistics.Histogram
}
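// analyzePKIncremental analyzes only the primary key range beyond the upper bound of the old histogram and
// merges the new histogram with the old one.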
func analyzePKIncremental(colExec *analyzePKIncrementalExec) analyzeResult {
var maxVal types.Datum
pkInfo := colExec.handleCols.GetCol(0)
if mysql.HasUnsignedFlag(pkInfo.RetType.Flag) {
maxVal = types.NewUintDatum(math.MaxUint64)
} else {
maxVal = types.NewIntDatum(math.MaxInt64)
}
startPos := *colExec.oldHist.GetUpper(colExec.oldHist.Len() - 1)
ran := ranger.Range{LowVal: []types.Datum{startPos}, LowExclude: true, HighVal: []types.Datum{maxVal}}
hists, _, _, _, _, err := colExec.buildStats([]*ranger.Range{&ran}, false)
if err != nil {
return analyzeResult{Err: err, job: colExec.job}
}
hist := hists[0]
hist, err = statistics.MergeHistograms(colExec.ctx.GetSessionVars().StmtCtx, colExec.oldHist, hist, int(colExec.opts[ast.AnalyzeOptNumBuckets]), statistics.Version1)
if err != nil {
return analyzeResult{Err: err, job: colExec.job}
}
result := analyzeResult{
TableID: colExec.tableID,
Hist: []*statistics.Histogram{hist},
Cms: []*statistics.CMSketch{nil},
TopNs: []*statistics.TopN{nil},
Fms: []*statistics.FMSketch{nil},
job: colExec.job,
StatsVer: statistics.Version1,
}
if hist.Len() > 0 {
result.Count += hist.Buckets[hist.Len()-1].Count
}
return result
}
// analyzeResult represents the result of one analyze task.
type analyzeResult struct {
TableID core.AnalyzeTableID
Hist []*statistics.Histogram
Cms []*statistics.CMSketch
TopNs []*statistics.TopN
Fms []*statistics.FMSketch
ExtStats *statistics.ExtendedStatsColl
Count int64
IsIndex int
Err error
job *statistics.AnalyzeJob
StatsVer int
}
| executor/analyze.go | 1 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.9987285733222961,
0.006872005295008421,
0.00015912843809928745,
0.00016863984637893736,
0.08071721345186234
] |
{
"id": 0,
"code_window": [
"\t\thigh = kv.Key(high).PrefixNext()\n",
"\t}\n",
"\treturn low, high\n",
"}\n",
"\n",
"// SplitRangesBySign split the ranges into two parts:\n",
"// 1. signedRanges is less or equal than maxInt64\n",
"// 2. unsignedRanges is greater than maxInt64\n",
"// We do that because the encoding of tikv key takes every key as a int. As a result MaxUInt64 is indeed\n",
"// small than zero. So we must\n",
"// 1. pick the range that straddles the MaxInt64\n",
"// 2. split that range into two parts : smaller than max int64 and greater than it.\n",
"// 3. if the ascent order is required, return signed first, vice versa.\n",
"// 4. if no order is required, is better to return the unsigned one. That's because it's the normal order\n",
"// of tikv scan.\n",
"func SplitRangesBySign(ranges []*ranger.Range, keepOrder bool, desc bool, isCommonHandle bool) ([]*ranger.Range, []*ranger.Range) {\n",
"\tif isCommonHandle || len(ranges) == 0 || ranges[0].LowVal[0].Kind() == types.KindInt64 {\n",
"\t\treturn ranges, nil\n",
"\t}\n",
"\tidx := sort.Search(len(ranges), func(i int) bool { return ranges[i].HighVal[0].GetUint64() > math.MaxInt64 })\n",
"\tif idx == len(ranges) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// SplitRangesAcrossInt64Boundary split the ranges into two groups:\n",
"// 1. signedRanges is less or equal than MaxInt64\n",
"// 2. unsignedRanges is greater than MaxInt64\n",
"//\n",
"// We do this because every key of tikv is encoded as an int64. As a result, MaxUInt64 is small than zero when\n",
"// interpreted as an int64 variable.\n",
"//\n",
"// This function does the following:\n",
"// 1. split ranges into two groups as described above.\n",
"// 2. if there's a range that straddles the int64 boundary, split it into two ranges, which results in one smaller and\n",
"// one greater than MaxInt64.\n",
"//\n",
"// if `KeepOrder` is false, we merge the two groups of ranges into one group, to save an rpc call later\n",
"// if `desc` is false, return signed ranges first, vice versa.\n",
"func SplitRangesAcrossInt64Boundary(ranges []*ranger.Range, keepOrder bool, desc bool, isCommonHandle bool) ([]*ranger.Range, []*ranger.Range) {\n"
],
"file_path": "distsql/request_builder.go",
"type": "replace",
"edit_start_line_idx": 395
} | // Copyright 2019-present PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"bytes"
"encoding/binary"
"strconv"
"sync"
"sync/atomic"
"time"
"unsafe"
"github.com/pingcap/badger"
"github.com/pingcap/badger/y"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/errorpb"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/kvproto/pkg/pdpb"
"github.com/pingcap/log"
"github.com/pingcap/tidb/store/mockstore/unistore/metrics"
"github.com/pingcap/tidb/store/mockstore/unistore/pd"
"github.com/pingcap/tidb/store/mockstore/unistore/tikv/mvcc"
"github.com/pingcap/tidb/util/codec"
"go.uber.org/zap"
"golang.org/x/net/context"
)
// Internal keys and prefixes used by the store to persist its own metadata.
var (
InternalKeyPrefix = []byte{0xff}
InternalRegionMetaPrefix = append(InternalKeyPrefix, "region"...)
InternalStoreMetaKey = append(InternalKeyPrefix, "store"...)
InternalSafePointKey = append(InternalKeyPrefix, "safepoint"...)
)
// InternalRegionMetaKey returns internal region meta key with the given region id.
func InternalRegionMetaKey(regionID uint64) []byte {
return []byte(string(InternalRegionMetaPrefix) + strconv.FormatUint(regionID, 10))
}
// RegionCtx defines the region context interface.
type RegionCtx interface {
Meta() *metapb.Region
Diff() *int64
RawStart() []byte
RawEnd() []byte
AcquireLatches(hashes []uint64)
ReleaseLatches(hashes []uint64)
}
type regionCtx struct {
meta *metapb.Region
regionEpoch unsafe.Pointer // *metapb.RegionEpoch
rawStartKey []byte
rawEndKey []byte
approximateSize int64
diff int64
latches *latches
}
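// latches provides mutual exclusion on key hashes: a slot is selected by the highest byte of the hash, and
// callers block on the WaitGroup registered for that hash until it is released.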
type latches struct {
slots [256]map[uint64]*sync.WaitGroup
locks [256]sync.Mutex
}
func newLatches() *latches {
l := &latches{}
for i := 0; i < 256; i++ {
l.slots[i] = map[uint64]*sync.WaitGroup{}
}
return l
}
func (l *latches) acquire(keyHashes []uint64) (waitCnt int) {
wg := new(sync.WaitGroup)
wg.Add(1)
for _, hash := range keyHashes {
waitCnt += l.acquireOne(hash, wg)
}
return
}
func (l *latches) acquireOne(hash uint64, wg *sync.WaitGroup) (waitCnt int) {
slotID := hash >> 56
for {
m := l.slots[slotID]
l.locks[slotID].Lock()
w, ok := m[hash]
if !ok {
m[hash] = wg
}
l.locks[slotID].Unlock()
if ok {
w.Wait()
waitCnt++
continue
}
return
}
}
func (l *latches) release(keyHashes []uint64) {
var w *sync.WaitGroup
for _, hash := range keyHashes {
slotID := hash >> 56
l.locks[slotID].Lock()
m := l.slots[slotID]
if w == nil {
w = m[hash]
}
delete(m, hash)
l.locks[slotID].Unlock()
}
if w != nil {
w.Done()
}
}
func newRegionCtx(meta *metapb.Region, latches *latches, _ interface{}) *regionCtx {
regCtx := ®ionCtx{
meta: meta,
latches: latches,
regionEpoch: unsafe.Pointer(meta.GetRegionEpoch()),
}
regCtx.rawStartKey = regCtx.decodeRawStartKey()
regCtx.rawEndKey = regCtx.decodeRawEndKey()
if len(regCtx.rawEndKey) == 0 {
// Avoid reading internal meta data.
regCtx.rawEndKey = InternalKeyPrefix
}
return regCtx
}
func (ri *regionCtx) Meta() *metapb.Region {
return ri.meta
}
func (ri *regionCtx) Diff() *int64 {
return &ri.diff
}
func (ri *regionCtx) RawStart() []byte {
return ri.rawStartKey
}
func (ri *regionCtx) RawEnd() []byte {
return ri.rawEndKey
}
func (ri *regionCtx) getRegionEpoch() *metapb.RegionEpoch {
return (*metapb.RegionEpoch)(atomic.LoadPointer(&ri.regionEpoch))
}
func (ri *regionCtx) updateRegionEpoch(epoch *metapb.RegionEpoch) {
atomic.StorePointer(&ri.regionEpoch, (unsafe.Pointer)(epoch))
}
func (ri *regionCtx) decodeRawStartKey() []byte {
if len(ri.meta.StartKey) == 0 {
return nil
}
_, rawKey, err := codec.DecodeBytes(ri.meta.StartKey, nil)
if err != nil {
panic("invalid region start key")
}
return rawKey
}
func (ri *regionCtx) decodeRawEndKey() []byte {
if len(ri.meta.EndKey) == 0 {
return nil
}
_, rawKey, err := codec.DecodeBytes(ri.meta.EndKey, nil)
if err != nil {
panic("invalid region end key")
}
return rawKey
}
func (ri *regionCtx) lessThanStartKey(key []byte) bool {
return bytes.Compare(key, ri.rawStartKey) < 0
}
func (ri *regionCtx) greaterEqualEndKey(key []byte) bool {
return len(ri.rawEndKey) > 0 && bytes.Compare(key, ri.rawEndKey) >= 0
}
func (ri *regionCtx) greaterThanEndKey(key []byte) bool {
return len(ri.rawEndKey) > 0 && bytes.Compare(key, ri.rawEndKey) > 0
}
func newPeerMeta(peerID, storeID uint64) *metapb.Peer {
return &metapb.Peer{
Id: peerID,
StoreId: storeID,
}
}
func (ri *regionCtx) incConfVer() {
ri.meta.RegionEpoch = &metapb.RegionEpoch{
ConfVer: ri.meta.GetRegionEpoch().GetConfVer() + 1,
Version: ri.meta.GetRegionEpoch().GetVersion(),
}
ri.updateRegionEpoch(ri.meta.RegionEpoch)
}
func (ri *regionCtx) addPeer(peerID, storeID uint64) {
ri.meta.Peers = append(ri.meta.Peers, newPeerMeta(peerID, storeID))
ri.incConfVer()
}
func (ri *regionCtx) unmarshal(data []byte) error {
ri.approximateSize = int64(binary.LittleEndian.Uint64(data))
data = data[8:]
ri.meta = &metapb.Region{}
err := ri.meta.Unmarshal(data)
if err != nil {
return errors.Trace(err)
}
ri.rawStartKey = ri.decodeRawStartKey()
ri.rawEndKey = ri.decodeRawEndKey()
ri.regionEpoch = unsafe.Pointer(ri.meta.RegionEpoch)
return nil
}
func (ri *regionCtx) marshal() []byte {
data := make([]byte, 8+ri.meta.Size())
binary.LittleEndian.PutUint64(data, uint64(ri.approximateSize))
_, err := ri.meta.MarshalTo(data[8:])
if err != nil {
log.Error("region ctx marshal failed", zap.Error(err))
}
return data
}
// AcquireLatches acquires the latches for all input hashVals; the input hashVals should be
// sorted and have no duplicates.
func (ri *regionCtx) AcquireLatches(hashVals []uint64) {
start := time.Now()
waitCnt := ri.latches.acquire(hashVals)
dur := time.Since(start)
metrics.LatchWait.Observe(dur.Seconds())
if dur > time.Millisecond*50 {
log.S().Warnf("region %d acquire %d locks takes %v, waitCnt %d", ri.meta.Id, len(hashVals), dur, waitCnt)
}
}
func (ri *regionCtx) ReleaseLatches(hashVals []uint64) {
ri.latches.release(hashVals)
}
// RegionOptions represents the region options.
type RegionOptions struct {
StoreAddr string
PDAddr string
RegionSize int64
}
// RegionManager defines the region manager interface.
type RegionManager interface {
GetRegionFromCtx(ctx *kvrpcpb.Context) (RegionCtx, *errorpb.Error)
GetStoreInfoFromCtx(ctx *kvrpcpb.Context) (string, uint64, *errorpb.Error)
SplitRegion(req *kvrpcpb.SplitRegionRequest) *kvrpcpb.SplitRegionResponse
GetStoreIDByAddr(addr string) (uint64, error)
GetStoreAddrByStoreID(storeID uint64) (string, error)
Close() error
}
type regionManager struct {
storeMeta *metapb.Store
mu sync.RWMutex
regions map[uint64]*regionCtx
latches *latches
}
func (rm *regionManager) GetStoreIDByAddr(addr string) (uint64, error) {
if rm.storeMeta.Address != addr {
return 0, errors.New("store not match")
}
return rm.storeMeta.Id, nil
}
func (rm *regionManager) GetStoreAddrByStoreID(storeID uint64) (string, error) {
if rm.storeMeta.Id != storeID {
return "", errors.New("store not match")
}
return rm.storeMeta.Address, nil
}
func (rm *regionManager) GetStoreInfoFromCtx(ctx *kvrpcpb.Context) (string, uint64, *errorpb.Error) {
if ctx.GetPeer() != nil && ctx.GetPeer().GetStoreId() != rm.storeMeta.Id {
return "", 0, &errorpb.Error{
Message: "store not match",
StoreNotMatch: &errorpb.StoreNotMatch{},
}
}
return rm.storeMeta.Address, rm.storeMeta.Id, nil
}
func (rm *regionManager) GetRegionFromCtx(ctx *kvrpcpb.Context) (RegionCtx, *errorpb.Error) {
ctxPeer := ctx.GetPeer()
if ctxPeer != nil && ctxPeer.GetStoreId() != rm.storeMeta.Id {
return nil, &errorpb.Error{
Message: "store not match",
StoreNotMatch: &errorpb.StoreNotMatch{},
}
}
rm.mu.RLock()
ri := rm.regions[ctx.RegionId]
rm.mu.RUnlock()
if ri == nil {
return nil, &errorpb.Error{
Message: "region not found",
RegionNotFound: &errorpb.RegionNotFound{
RegionId: ctx.GetRegionId(),
},
}
}
// Region epoch does not match.
if rm.isEpochStale(ri.getRegionEpoch(), ctx.GetRegionEpoch()) {
return nil, &errorpb.Error{
Message: "stale epoch",
EpochNotMatch: &errorpb.EpochNotMatch{
CurrentRegions: []*metapb.Region{{
Id: ri.meta.Id,
StartKey: ri.meta.StartKey,
EndKey: ri.meta.EndKey,
RegionEpoch: ri.getRegionEpoch(),
Peers: ri.meta.Peers,
}},
},
}
}
return ri, nil
}
func (rm *regionManager) isEpochStale(lhs, rhs *metapb.RegionEpoch) bool {
return lhs.GetConfVer() != rhs.GetConfVer() || lhs.GetVersion() != rhs.GetVersion()
}
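// loadFromLocal restores the store meta and all region metas from the local badger DB and calls f for every
// loaded region.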
func (rm *regionManager) loadFromLocal(bundle *mvcc.DBBundle, f func(*regionCtx)) error {
err := bundle.DB.View(func(txn *badger.Txn) error {
item, err1 := txn.Get(InternalStoreMetaKey)
if err1 != nil {
return err1
}
val, err1 := item.Value()
if err1 != nil {
return err1
}
err1 = rm.storeMeta.Unmarshal(val)
if err1 != nil {
return err1
}
// load region meta
opts := badger.DefaultIteratorOptions
it := txn.NewIterator(opts)
defer it.Close()
prefix := InternalRegionMetaPrefix
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
item := it.Item()
val, err1 = item.Value()
if err1 != nil {
return err1
}
r := new(regionCtx)
err := r.unmarshal(val)
if err != nil {
return errors.Trace(err)
}
r.latches = rm.latches
rm.regions[r.meta.Id] = r
f(r)
}
return nil
})
if err == badger.ErrKeyNotFound {
err = nil
}
return err
}
// StandAloneRegionManager represents a standalone region manager.
type StandAloneRegionManager struct {
regionManager
bundle *mvcc.DBBundle
pdc pd.Client
clusterID uint64
regionSize int64
closeCh chan struct{}
wg sync.WaitGroup
}
// NewStandAloneRegionManager returns a new standalone region manager.
func NewStandAloneRegionManager(bundle *mvcc.DBBundle, opts RegionOptions, pdc pd.Client) *StandAloneRegionManager {
var err error
clusterID := pdc.GetClusterID(context.TODO())
log.S().Infof("cluster id %v", clusterID)
rm := &StandAloneRegionManager{
bundle: bundle,
pdc: pdc,
clusterID: clusterID,
regionSize: opts.RegionSize,
closeCh: make(chan struct{}),
regionManager: regionManager{
regions: make(map[uint64]*regionCtx),
storeMeta: new(metapb.Store),
latches: newLatches(),
},
}
err = rm.loadFromLocal(bundle, func(r *regionCtx) {
req := &pdpb.RegionHeartbeatRequest{
Region: r.meta,
Leader: r.meta.Peers[0],
ApproximateSize: uint64(r.approximateSize),
}
rm.pdc.ReportRegion(req)
})
if err != nil {
log.Fatal("load from local failed", zap.Error(err))
}
if rm.storeMeta.Id == 0 {
err = rm.initStore(opts.StoreAddr)
if err != nil {
log.Fatal("init store failed", zap.Error(err))
}
}
rm.storeMeta.Address = opts.StoreAddr
err = rm.pdc.PutStore(context.TODO(), rm.storeMeta)
if err != nil {
log.Fatal("put store failed", zap.Error(err))
}
rm.wg.Add(2)
go rm.runSplitWorker()
go rm.storeHeartBeatLoop()
return rm
}
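// initStore allocates the store, region and peer IDs, bootstraps the cluster through PD, performs the initial split
// and persists the store and region metas to the local DB.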
func (rm *StandAloneRegionManager) initStore(storeAddr string) error {
log.Info("initializing store")
ids, err := rm.allocIDs(3)
if err != nil {
return err
}
storeID, regionID, peerID := ids[0], ids[1], ids[2]
rm.storeMeta.Id = storeID
rm.storeMeta.Address = storeAddr
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
rootRegion := &metapb.Region{
Id: regionID,
RegionEpoch: &metapb.RegionEpoch{ConfVer: 1, Version: 1},
Peers: []*metapb.Peer{{Id: peerID, StoreId: storeID}},
}
rm.regions[rootRegion.Id] = newRegionCtx(rootRegion, rm.latches, nil)
_, err = rm.pdc.Bootstrap(ctx, rm.storeMeta, rootRegion)
cancel()
if err != nil {
log.Fatal("initialize failed", zap.Error(err))
}
rm.initialSplit(rootRegion)
storeBuf, err := rm.storeMeta.Marshal()
if err != nil {
log.Fatal("marshal store meta failed", zap.Error(err))
}
err = rm.bundle.DB.Update(func(txn *badger.Txn) error {
ts := atomic.AddUint64(&rm.bundle.StateTS, 1)
err = txn.SetEntry(&badger.Entry{
Key: y.KeyWithTs(InternalStoreMetaKey, ts),
Value: storeBuf,
})
if err != nil {
return err
}
for rid, region := range rm.regions {
regionBuf := region.marshal()
err = txn.SetEntry(&badger.Entry{
Key: y.KeyWithTs(InternalRegionMetaKey(rid), ts),
Value: regionBuf,
})
if err != nil {
log.Fatal("save region info failed", zap.Error(err))
}
}
return nil
})
for _, region := range rm.regions {
req := &pdpb.RegionHeartbeatRequest{
Region: region.meta,
Leader: region.meta.Peers[0],
ApproximateSize: uint64(region.approximateSize),
}
rm.pdc.ReportRegion(req)
}
log.Info("Initialize success")
return nil
}
// initialSplit splits the cluster into multiple regions.
func (rm *StandAloneRegionManager) initialSplit(root *metapb.Region) {
root.EndKey = codec.EncodeBytes(nil, []byte{'m'})
root.RegionEpoch.Version = 2
rm.regions[root.Id] = newRegionCtx(root, rm.latches, nil)
preSplitStartKeys := [][]byte{{'m'}, {'n'}, {'t'}, {'u'}}
ids, err := rm.allocIDs(len(preSplitStartKeys) * 2)
if err != nil {
log.Fatal("alloc ids failed", zap.Error(err))
}
for i, startKey := range preSplitStartKeys {
var endKey []byte
if i < len(preSplitStartKeys)-1 {
endKey = codec.EncodeBytes(nil, preSplitStartKeys[i+1])
}
newRegion := &metapb.Region{
Id: ids[i*2],
RegionEpoch: &metapb.RegionEpoch{ConfVer: 1, Version: 1},
Peers: []*metapb.Peer{{Id: ids[i*2+1], StoreId: rm.storeMeta.Id}},
StartKey: codec.EncodeBytes(nil, startKey),
EndKey: endKey,
}
rm.regions[newRegion.Id] = newRegionCtx(newRegion, rm.latches, nil)
}
}
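// allocIDs allocates n unique IDs from PD.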
func (rm *StandAloneRegionManager) allocIDs(n int) ([]uint64, error) {
ids := make([]uint64, n)
for i := 0; i < n; i++ {
id, err := rm.pdc.AllocID(context.Background())
if err != nil {
return nil, errors.Trace(err)
}
ids[i] = id
}
return ids, nil
}
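// storeHeartBeatLoop periodically reports store stats to PD until the manager is closed.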
func (rm *StandAloneRegionManager) storeHeartBeatLoop() {
defer rm.wg.Done()
ticker := time.Tick(time.Second * 3)
for {
select {
case <-rm.closeCh:
return
case <-ticker:
}
storeStats := new(pdpb.StoreStats)
storeStats.StoreId = rm.storeMeta.Id
storeStats.Available = 1024 * 1024 * 1024
rm.mu.RLock()
storeStats.RegionCount = uint32(len(rm.regions))
rm.mu.RUnlock()
storeStats.Capacity = 2048 * 1024 * 1024
if err := rm.pdc.StoreHeartbeat(context.Background(), storeStats); err != nil {
log.Warn("store heartbeat failed", zap.Error(err))
}
}
}
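// keySample records a sampled key together with the accumulated size of all keys scanned up to that point.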
type keySample struct {
key []byte
leftSize int64
}
// sampler samples keys in a region to later pick a split key.
type sampler struct {
samples [64]keySample
length int
step int
scanned int
totalSize int64
}
func newSampler() *sampler {
return &sampler{step: 1}
}
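// shrinkIfNeeded keeps every other sample and doubles the sampling step once the sample buffer is full.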
func (s *sampler) shrinkIfNeeded() {
if s.length < len(s.samples) {
return
}
for i := 0; i < len(s.samples)/2; i++ {
s.samples[i], s.samples[i*2] = s.samples[i*2], s.samples[i]
}
s.length /= 2
s.step *= 2
}
func (s *sampler) shouldSample() bool {
// It's an optimization for 's.scanned % s.step == 0'
return s.scanned&(s.step-1) == 0
}
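// scanKey accounts for a scanned key and records it as a sample when the current sampling step is hit.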
func (s *sampler) scanKey(key []byte, size int64) {
s.totalSize += size
s.scanned++
if s.shouldSample() {
sample := s.samples[s.length]
		// safely copy the key.
sample.key = append(sample.key[:0], key...)
sample.leftSize = s.totalSize
s.samples[s.length] = sample
s.length++
s.shrinkIfNeeded()
}
}
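// getSplitKeyAndSize returns the first sampled key whose accumulated size reaches two thirds of the total scanned size,
// together with that size.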
func (s *sampler) getSplitKeyAndSize() ([]byte, int64) {
targetSize := s.totalSize * 2 / 3
for _, sample := range s.samples[:s.length] {
if sample.leftSize >= targetSize {
return sample.key, sample.leftSize
}
}
return []byte{}, 0
}
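// runSplitWorker periodically checks oversized regions for splitting and persists regions whose size diff has grown large enough.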
func (rm *StandAloneRegionManager) runSplitWorker() {
defer rm.wg.Done()
ticker := time.NewTicker(time.Second * 5)
var regionsToCheck []*regionCtx
var regionsToSave []*regionCtx
for {
regionsToCheck = regionsToCheck[:0]
rm.mu.RLock()
for _, ri := range rm.regions {
if ri.approximateSize+atomic.LoadInt64(&ri.diff) > rm.regionSize*3/2 {
regionsToCheck = append(regionsToCheck, ri)
}
}
rm.mu.RUnlock()
for _, ri := range regionsToCheck {
err := rm.splitCheckRegion(ri)
if err != nil {
log.Error("split region failed", zap.Error(err))
}
}
regionsToSave = regionsToSave[:0]
rm.mu.RLock()
for _, ri := range rm.regions {
if atomic.LoadInt64(&ri.diff) > rm.regionSize/8 {
regionsToSave = append(regionsToSave, ri)
}
}
rm.mu.RUnlock()
rm.saveSize(regionsToSave)
select {
case <-rm.closeCh:
return
case <-ticker.C:
}
}
}
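// saveSize folds the accumulated size diff into approximateSize for the given regions and persists their metas to the local DB.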
func (rm *StandAloneRegionManager) saveSize(regionsToSave []*regionCtx) {
err1 := rm.bundle.DB.Update(func(txn *badger.Txn) error {
ts := atomic.AddUint64(&rm.bundle.StateTS, 1)
for _, ri := range regionsToSave {
ri.approximateSize += atomic.LoadInt64(&ri.diff)
err := txn.SetEntry(&badger.Entry{
Key: y.KeyWithTs(InternalRegionMetaKey(ri.meta.Id), ts),
Value: ri.marshal(),
})
if err != nil {
return err
}
}
return nil
})
if err1 != nil {
log.Error("region manager save size failed", zap.Error(err1))
}
}
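// splitCheckRegion samples the keys of the region and splits it at the sampled split key once its total size exceeds
// the configured region size.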
func (rm *StandAloneRegionManager) splitCheckRegion(region *regionCtx) error {
s := newSampler()
err := rm.bundle.DB.View(func(txn *badger.Txn) error {
iter := txn.NewIterator(badger.IteratorOptions{})
defer iter.Close()
for iter.Seek(region.rawStartKey); iter.Valid(); iter.Next() {
item := iter.Item()
if region.greaterEqualEndKey(item.Key()) {
break
}
s.scanKey(item.Key(), int64(len(item.Key())+item.ValueSize()))
}
return nil
})
if err != nil {
log.Error("sample region failed", zap.Error(err))
return errors.Trace(err)
}
	// Need to update the diff to avoid triggering the split check again right away.
atomic.StoreInt64(®ion.diff, s.totalSize-region.approximateSize)
if s.totalSize < rm.regionSize {
return nil
}
splitKey, leftSize := s.getSplitKeyAndSize()
log.Info("try to split region", zap.Uint64("id", region.meta.Id), zap.Binary("split key", splitKey),
zap.Int64("left size", leftSize), zap.Int64("right size", s.totalSize-leftSize))
err = rm.splitRegion(region, splitKey, s.totalSize, leftSize)
if err != nil {
log.Error("split region failed", zap.Error(err))
}
return errors.Trace(err)
}
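// splitRegion creates the left and right halves at splitKey, persists both metas, registers them in memory and reports them to PD.
// The right half keeps the old region ID with a bumped version, while the left half gets a newly allocated ID.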
func (rm *StandAloneRegionManager) splitRegion(oldRegionCtx *regionCtx, splitKey []byte, oldSize, leftSize int64) error {
oldRegion := oldRegionCtx.meta
rightMeta := &metapb.Region{
Id: oldRegion.Id,
StartKey: codec.EncodeBytes(nil, splitKey),
EndKey: oldRegion.EndKey,
RegionEpoch: &metapb.RegionEpoch{
ConfVer: oldRegion.RegionEpoch.ConfVer,
Version: oldRegion.RegionEpoch.Version + 1,
},
Peers: oldRegion.Peers,
}
right := newRegionCtx(rightMeta, rm.latches, nil)
right.approximateSize = oldSize - leftSize
id, err := rm.pdc.AllocID(context.Background())
if err != nil {
return errors.Trace(err)
}
leftMeta := &metapb.Region{
Id: id,
StartKey: oldRegion.StartKey,
EndKey: codec.EncodeBytes(nil, splitKey),
RegionEpoch: &metapb.RegionEpoch{
ConfVer: 1,
Version: 1,
},
Peers: oldRegion.Peers,
}
left := newRegionCtx(leftMeta, rm.latches, nil)
left.approximateSize = leftSize
err1 := rm.bundle.DB.Update(func(txn *badger.Txn) error {
ts := atomic.AddUint64(&rm.bundle.StateTS, 1)
err := txn.SetEntry(&badger.Entry{
Key: y.KeyWithTs(InternalRegionMetaKey(left.meta.Id), ts),
Value: left.marshal(),
})
if err != nil {
return errors.Trace(err)
}
err = txn.SetEntry(&badger.Entry{
Key: y.KeyWithTs(InternalRegionMetaKey(right.meta.Id), ts),
Value: right.marshal(),
})
return errors.Trace(err)
})
if err1 != nil {
return errors.Trace(err1)
}
rm.mu.Lock()
rm.regions[left.meta.Id] = left
rm.regions[right.meta.Id] = right
rm.mu.Unlock()
rm.pdc.ReportRegion(&pdpb.RegionHeartbeatRequest{
Region: right.meta,
Leader: right.meta.Peers[0],
ApproximateSize: uint64(right.approximateSize),
})
rm.pdc.ReportRegion(&pdpb.RegionHeartbeatRequest{
Region: left.meta,
Leader: left.meta.Peers[0],
ApproximateSize: uint64(left.approximateSize),
})
	log.Info("region split", zap.Uint64("old id", oldRegion.Id),
zap.Uint64("left id", left.meta.Id), zap.Int64("left size", left.approximateSize),
zap.Uint64("right id", right.meta.Id), zap.Int64("right size", right.approximateSize))
return nil
}
// SplitRegion splits a region.
func (rm *StandAloneRegionManager) SplitRegion(req *kvrpcpb.SplitRegionRequest) *kvrpcpb.SplitRegionResponse {
return &kvrpcpb.SplitRegionResponse{}
}
// Close closes the standalone region manager.
func (rm *StandAloneRegionManager) Close() error {
close(rm.closeCh)
rm.wg.Wait()
return nil
}
| store/mockstore/unistore/tikv/region.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.0005143144517205656,
0.00018675476894713938,
0.00016254828369710594,
0.00016895490989554673,
0.000051697163144126534
] |
{
"id": 0,
"code_window": [
"\t\thigh = kv.Key(high).PrefixNext()\n",
"\t}\n",
"\treturn low, high\n",
"}\n",
"\n",
"// SplitRangesBySign split the ranges into two parts:\n",
"// 1. signedRanges is less or equal than maxInt64\n",
"// 2. unsignedRanges is greater than maxInt64\n",
"// We do that because the encoding of tikv key takes every key as a int. As a result MaxUInt64 is indeed\n",
"// small than zero. So we must\n",
"// 1. pick the range that straddles the MaxInt64\n",
"// 2. split that range into two parts : smaller than max int64 and greater than it.\n",
"// 3. if the ascent order is required, return signed first, vice versa.\n",
"// 4. if no order is required, is better to return the unsigned one. That's because it's the normal order\n",
"// of tikv scan.\n",
"func SplitRangesBySign(ranges []*ranger.Range, keepOrder bool, desc bool, isCommonHandle bool) ([]*ranger.Range, []*ranger.Range) {\n",
"\tif isCommonHandle || len(ranges) == 0 || ranges[0].LowVal[0].Kind() == types.KindInt64 {\n",
"\t\treturn ranges, nil\n",
"\t}\n",
"\tidx := sort.Search(len(ranges), func(i int) bool { return ranges[i].HighVal[0].GetUint64() > math.MaxInt64 })\n",
"\tif idx == len(ranges) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// SplitRangesAcrossInt64Boundary split the ranges into two groups:\n",
"// 1. signedRanges is less or equal than MaxInt64\n",
"// 2. unsignedRanges is greater than MaxInt64\n",
"//\n",
"// We do this because every key of tikv is encoded as an int64. As a result, MaxUInt64 is small than zero when\n",
"// interpreted as an int64 variable.\n",
"//\n",
"// This function does the following:\n",
"// 1. split ranges into two groups as described above.\n",
"// 2. if there's a range that straddles the int64 boundary, split it into two ranges, which results in one smaller and\n",
"// one greater than MaxInt64.\n",
"//\n",
"// if `KeepOrder` is false, we merge the two groups of ranges into one group, to save an rpc call later\n",
"// if `desc` is false, return signed ranges first, vice versa.\n",
"func SplitRangesAcrossInt64Boundary(ranges []*ranger.Range, keepOrder bool, desc bool, isCommonHandle bool) ([]*ranger.Range, []*ranger.Range) {\n"
],
"file_path": "distsql/request_builder.go",
"type": "replace",
"edit_start_line_idx": 395
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"fmt"
. "github.com/pingcap/check"
"github.com/pingcap/parser"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/charset"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/testleak"
"github.com/pingcap/tidb/util/testutil"
)
var _ = Suite(&testExpressionSuite{})
type testExpressionSuite struct {
*parser.Parser
ctx sessionctx.Context
}
func (s *testExpressionSuite) SetUpSuite(c *C) {
s.Parser = parser.New()
s.ctx = mock.NewContext()
}
func (s *testExpressionSuite) TearDownSuite(c *C) {
}
func (s *testExpressionSuite) parseExpr(c *C, expr string) ast.ExprNode {
st, err := s.ParseOneStmt("select "+expr, "", "")
c.Assert(err, IsNil)
stmt := st.(*ast.SelectStmt)
return stmt.Fields.Fields[0].Expr
}
type testCase struct {
exprStr string
resultStr string
}
func (s *testExpressionSuite) runTests(c *C, tests []testCase) {
for _, tt := range tests {
expr := s.parseExpr(c, tt.exprStr)
val, err := evalAstExpr(s.ctx, expr)
c.Assert(err, IsNil)
valStr := fmt.Sprintf("%v", val.GetValue())
c.Assert(valStr, Equals, tt.resultStr, Commentf("for %s", tt.exprStr))
}
}
func (s *testExpressionSuite) TestBetween(c *C) {
defer testleak.AfterTest(c)()
tests := []testCase{
{exprStr: "1 between 2 and 3", resultStr: "0"},
{exprStr: "1 not between 2 and 3", resultStr: "1"},
{exprStr: "'2001-04-10 12:34:56' between cast('2001-01-01 01:01:01' as datetime) and '01-05-01'", resultStr: "1"},
{exprStr: "20010410123456 between cast('2001-01-01 01:01:01' as datetime) and 010501", resultStr: "0"},
{exprStr: "20010410123456 between cast('2001-01-01 01:01:01' as datetime) and 20010501123456", resultStr: "1"},
}
s.runTests(c, tests)
}
func (s *testExpressionSuite) TestCaseWhen(c *C) {
defer testleak.AfterTest(c)()
tests := []testCase{
{
exprStr: "case 1 when 1 then 'str1' when 2 then 'str2' end",
resultStr: "str1",
},
{
exprStr: "case 2 when 1 then 'str1' when 2 then 'str2' end",
resultStr: "str2",
},
{
exprStr: "case 3 when 1 then 'str1' when 2 then 'str2' end",
resultStr: "<nil>",
},
{
exprStr: "case 4 when 1 then 'str1' when 2 then 'str2' else 'str3' end",
resultStr: "str3",
},
}
s.runTests(c, tests)
	// When the expression value changes, the result is set back to null.
valExpr := ast.NewValueExpr(1, "", "")
whenClause := &ast.WhenClause{Expr: ast.NewValueExpr(1, "", ""), Result: ast.NewValueExpr(1, "", "")}
caseExpr := &ast.CaseExpr{
Value: valExpr,
WhenClauses: []*ast.WhenClause{whenClause},
}
v, err := evalAstExpr(s.ctx, caseExpr)
c.Assert(err, IsNil)
c.Assert(v, testutil.DatumEquals, types.NewDatum(int64(1)))
valExpr.SetValue(4)
v, err = evalAstExpr(s.ctx, caseExpr)
c.Assert(err, IsNil)
c.Assert(v.Kind(), Equals, types.KindNull)
}
func (s *testExpressionSuite) TestCast(c *C) {
defer testleak.AfterTest(c)()
f := types.NewFieldType(mysql.TypeLonglong)
expr := &ast.FuncCastExpr{
Expr: ast.NewValueExpr(1, "", ""),
Tp: f,
}
ast.SetFlag(expr)
v, err := evalAstExpr(s.ctx, expr)
c.Assert(err, IsNil)
c.Assert(v, testutil.DatumEquals, types.NewDatum(int64(1)))
f.Flag |= mysql.UnsignedFlag
v, err = evalAstExpr(s.ctx, expr)
c.Assert(err, IsNil)
c.Assert(v, testutil.DatumEquals, types.NewDatum(uint64(1)))
f.Tp = mysql.TypeString
f.Charset = charset.CharsetBin
v, err = evalAstExpr(s.ctx, expr)
c.Assert(err, IsNil)
c.Assert(v, testutil.DatumEquals, types.NewDatum([]byte("1")))
f.Tp = mysql.TypeString
f.Charset = "utf8"
v, err = evalAstExpr(s.ctx, expr)
c.Assert(err, IsNil)
c.Assert(v, testutil.DatumEquals, types.NewDatum("1"))
expr.Expr = ast.NewValueExpr(nil, "", "")
v, err = evalAstExpr(s.ctx, expr)
c.Assert(err, IsNil)
c.Assert(v.Kind(), Equals, types.KindNull)
}
func (s *testExpressionSuite) TestPatternIn(c *C) {
defer testleak.AfterTest(c)()
tests := []testCase{
{
exprStr: "1 not in (1, 2, 3)",
resultStr: "0",
},
{
exprStr: "1 in (1, 2, 3)",
resultStr: "1",
},
{
exprStr: "1 in (2, 3)",
resultStr: "0",
},
{
exprStr: "NULL in (2, 3)",
resultStr: "<nil>",
},
{
exprStr: "NULL not in (2, 3)",
resultStr: "<nil>",
},
{
exprStr: "NULL in (NULL, 3)",
resultStr: "<nil>",
},
{
exprStr: "1 in (1, NULL)",
resultStr: "1",
},
{
exprStr: "1 in (NULL, 1)",
resultStr: "1",
},
{
exprStr: "2 in (1, NULL)",
resultStr: "<nil>",
},
{
exprStr: "(-(23)++46/51*+51) in (+23)",
resultStr: "0",
},
}
s.runTests(c, tests)
}
func (s *testExpressionSuite) TestIsNull(c *C) {
defer testleak.AfterTest(c)()
tests := []testCase{
{
exprStr: "1 IS NULL",
resultStr: "0",
},
{
exprStr: "1 IS NOT NULL",
resultStr: "1",
},
{
exprStr: "NULL IS NULL",
resultStr: "1",
},
{
exprStr: "NULL IS NOT NULL",
resultStr: "0",
},
}
s.runTests(c, tests)
}
func (s *testExpressionSuite) TestCompareRow(c *C) {
defer testleak.AfterTest(c)()
tests := []testCase{
{
exprStr: "row(1,2,3)=row(1,2,3)",
resultStr: "1",
},
{
exprStr: "row(1,2,3)=row(1+3,2,3)",
resultStr: "0",
},
{
exprStr: "row(1,2,3)<>row(1,2,3)",
resultStr: "0",
},
{
exprStr: "row(1,2,3)<>row(1+3,2,3)",
resultStr: "1",
},
{
exprStr: "row(1+3,2,3)<>row(1+3,2,3)",
resultStr: "0",
},
{
exprStr: "row(1,2,3)<row(1,NULL,3)",
resultStr: "<nil>",
},
{
exprStr: "row(1,2,3)<row(2,NULL,3)",
resultStr: "1",
},
{
exprStr: "row(1,2,3)>=row(0,NULL,3)",
resultStr: "1",
},
{
exprStr: "row(1,2,3)<=row(2,NULL,3)",
resultStr: "1",
},
}
s.runTests(c, tests)
}
func (s *testExpressionSuite) TestIsTruth(c *C) {
defer testleak.AfterTest(c)()
tests := []testCase{
{
exprStr: "1 IS TRUE",
resultStr: "1",
},
{
exprStr: "2 IS TRUE",
resultStr: "1",
},
{
exprStr: "0 IS TRUE",
resultStr: "0",
},
{
exprStr: "NULL IS TRUE",
resultStr: "0",
},
{
exprStr: "1 IS FALSE",
resultStr: "0",
},
{
exprStr: "2 IS FALSE",
resultStr: "0",
},
{
exprStr: "0 IS FALSE",
resultStr: "1",
},
{
exprStr: "NULL IS NOT FALSE",
resultStr: "1",
},
{
exprStr: "1 IS NOT TRUE",
resultStr: "0",
},
{
exprStr: "2 IS NOT TRUE",
resultStr: "0",
},
{
exprStr: "0 IS NOT TRUE",
resultStr: "1",
},
{
exprStr: "NULL IS NOT TRUE",
resultStr: "1",
},
{
exprStr: "1 IS NOT FALSE",
resultStr: "1",
},
{
exprStr: "2 IS NOT FALSE",
resultStr: "1",
},
{
exprStr: "0 IS NOT FALSE",
resultStr: "0",
},
{
exprStr: "NULL IS NOT FALSE",
resultStr: "1",
},
}
s.runTests(c, tests)
}
| planner/core/expression_test.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.00017388717969879508,
0.00016614350897725672,
0.0001607216545380652,
0.00016572227468714118,
0.000001989343445529812
] |
{
"id": 0,
"code_window": [
"\t\thigh = kv.Key(high).PrefixNext()\n",
"\t}\n",
"\treturn low, high\n",
"}\n",
"\n",
"// SplitRangesBySign split the ranges into two parts:\n",
"// 1. signedRanges is less or equal than maxInt64\n",
"// 2. unsignedRanges is greater than maxInt64\n",
"// We do that because the encoding of tikv key takes every key as a int. As a result MaxUInt64 is indeed\n",
"// small than zero. So we must\n",
"// 1. pick the range that straddles the MaxInt64\n",
"// 2. split that range into two parts : smaller than max int64 and greater than it.\n",
"// 3. if the ascent order is required, return signed first, vice versa.\n",
"// 4. if no order is required, is better to return the unsigned one. That's because it's the normal order\n",
"// of tikv scan.\n",
"func SplitRangesBySign(ranges []*ranger.Range, keepOrder bool, desc bool, isCommonHandle bool) ([]*ranger.Range, []*ranger.Range) {\n",
"\tif isCommonHandle || len(ranges) == 0 || ranges[0].LowVal[0].Kind() == types.KindInt64 {\n",
"\t\treturn ranges, nil\n",
"\t}\n",
"\tidx := sort.Search(len(ranges), func(i int) bool { return ranges[i].HighVal[0].GetUint64() > math.MaxInt64 })\n",
"\tif idx == len(ranges) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// SplitRangesAcrossInt64Boundary split the ranges into two groups:\n",
"// 1. signedRanges is less or equal than MaxInt64\n",
"// 2. unsignedRanges is greater than MaxInt64\n",
"//\n",
"// We do this because every key of tikv is encoded as an int64. As a result, MaxUInt64 is small than zero when\n",
"// interpreted as an int64 variable.\n",
"//\n",
"// This function does the following:\n",
"// 1. split ranges into two groups as described above.\n",
"// 2. if there's a range that straddles the int64 boundary, split it into two ranges, which results in one smaller and\n",
"// one greater than MaxInt64.\n",
"//\n",
"// if `KeepOrder` is false, we merge the two groups of ranges into one group, to save an rpc call later\n",
"// if `desc` is false, return signed ranges first, vice versa.\n",
"func SplitRangesAcrossInt64Boundary(ranges []*ranger.Range, keepOrder bool, desc bool, isCommonHandle bool) ([]*ranger.Range, []*ranger.Range) {\n"
],
"file_path": "distsql/request_builder.go",
"type": "replace",
"edit_start_line_idx": 395
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package dbterror
import (
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/errno"
)
// ErrClass represents a class of errors.
type ErrClass struct{ terror.ErrClass }
// Error classes.
var (
ClassAutoid = ErrClass{terror.ClassAutoid}
ClassDDL = ErrClass{terror.ClassDDL}
ClassDomain = ErrClass{terror.ClassDomain}
ClassExecutor = ErrClass{terror.ClassExecutor}
ClassExpression = ErrClass{terror.ClassExpression}
ClassAdmin = ErrClass{terror.ClassAdmin}
ClassKV = ErrClass{terror.ClassKV}
ClassMeta = ErrClass{terror.ClassMeta}
ClassOptimizer = ErrClass{terror.ClassOptimizer}
ClassPrivilege = ErrClass{terror.ClassPrivilege}
ClassSchema = ErrClass{terror.ClassSchema}
ClassServer = ErrClass{terror.ClassServer}
ClassStructure = ErrClass{terror.ClassStructure}
ClassVariable = ErrClass{terror.ClassVariable}
ClassXEval = ErrClass{terror.ClassXEval}
ClassTable = ErrClass{terror.ClassTable}
ClassTypes = ErrClass{terror.ClassTypes}
ClassJSON = ErrClass{terror.ClassJSON}
ClassTiKV = ErrClass{terror.ClassTiKV}
ClassSession = ErrClass{terror.ClassSession}
ClassPlugin = ErrClass{terror.ClassPlugin}
ClassUtil = ErrClass{terror.ClassUtil}
)
// NewStd calls New using the standard message for the error code
// Attention:
// this method is not goroutine-safe and
// is usually used in global variable initializers
func (ec ErrClass) NewStd(code terror.ErrCode) *terror.Error {
return ec.NewStdErr(code, errno.MySQLErrName[uint16(code)])
}
| util/dbterror/terror.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.00017434325127396733,
0.00016786566993687302,
0.0001627073943382129,
0.0001680401328485459,
0.0000037145293845242122
] |
{
"id": 1,
"code_window": [
"\t\t\treturn unsignedRanges, signedRanges\n",
"\t\t}\n",
"\t\treturn signedRanges, unsignedRanges\n",
"\t}\n",
"\tsignedRanges := make([]*ranger.Range, 0, idx+1)\n",
"\tunsignedRanges := make([]*ranger.Range, 0, len(ranges)-idx)\n",
"\tsignedRanges = append(signedRanges, ranges[0:idx]...)\n",
"\tif !(ranges[idx].LowVal[0].GetUint64() == math.MaxInt64 && ranges[idx].LowExclude) {\n",
"\t\tsignedRanges = append(signedRanges, &ranger.Range{\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// need to split the range that straddles the int64 boundary\n"
],
"file_path": "distsql/request_builder.go",
"type": "add",
"edit_start_line_idx": 424
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"github.com/pingcap/errors"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/distsql"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/expression/aggregation"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tipb/go-tipb"
)
// ToPB implements PhysicalPlan ToPB interface.
func (p *basePhysicalPlan) ToPB(_ sessionctx.Context, _ kv.StoreType) (*tipb.Executor, error) {
	return nil, errors.Errorf("plan %s fails to convert to PB", p.basePlan.ExplainID())
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalHashAgg) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
sc := ctx.GetSessionVars().StmtCtx
client := ctx.GetClient()
groupByExprs, err := expression.ExpressionsToPBList(sc, p.GroupByItems, client)
if err != nil {
return nil, err
}
aggExec := &tipb.Aggregation{
GroupBy: groupByExprs,
}
for _, aggFunc := range p.AggFuncs {
aggExec.AggFunc = append(aggExec.AggFunc, aggregation.AggFuncToPBExpr(sc, client, aggFunc))
}
executorID := ""
if storeType == kv.TiFlash {
var err error
aggExec.Child, err = p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
executorID = p.ExplainID().String()
}
return &tipb.Executor{Tp: tipb.ExecType_TypeAggregation, Aggregation: aggExec, ExecutorId: &executorID}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalStreamAgg) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
sc := ctx.GetSessionVars().StmtCtx
client := ctx.GetClient()
groupByExprs, err := expression.ExpressionsToPBList(sc, p.GroupByItems, client)
if err != nil {
return nil, err
}
aggExec := &tipb.Aggregation{
GroupBy: groupByExprs,
}
for _, aggFunc := range p.AggFuncs {
aggExec.AggFunc = append(aggExec.AggFunc, aggregation.AggFuncToPBExpr(sc, client, aggFunc))
}
executorID := ""
if storeType == kv.TiFlash {
var err error
aggExec.Child, err = p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
executorID = p.ExplainID().String()
}
return &tipb.Executor{Tp: tipb.ExecType_TypeStreamAgg, Aggregation: aggExec, ExecutorId: &executorID}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalSelection) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
sc := ctx.GetSessionVars().StmtCtx
client := ctx.GetClient()
conditions, err := expression.ExpressionsToPBList(sc, p.Conditions, client)
if err != nil {
return nil, err
}
selExec := &tipb.Selection{
Conditions: conditions,
}
executorID := ""
if storeType == kv.TiFlash {
var err error
selExec.Child, err = p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
executorID = p.ExplainID().String()
}
return &tipb.Executor{Tp: tipb.ExecType_TypeSelection, Selection: selExec, ExecutorId: &executorID}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalProjection) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
sc := ctx.GetSessionVars().StmtCtx
client := ctx.GetClient()
exprs, err := expression.ExpressionsToPBList(sc, p.Exprs, client)
if err != nil {
return nil, err
}
projExec := &tipb.Projection{
Exprs: exprs,
}
executorID := ""
if storeType == kv.TiFlash {
var err error
projExec.Child, err = p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
executorID = p.ExplainID().String()
} else {
return nil, errors.Errorf("The projection can only be pushed down to TiFlash now, not %s.", storeType.Name())
}
return &tipb.Executor{Tp: tipb.ExecType_TypeProjection, Projection: projExec, ExecutorId: &executorID}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalTopN) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
sc := ctx.GetSessionVars().StmtCtx
client := ctx.GetClient()
topNExec := &tipb.TopN{
Limit: p.Count,
}
for _, item := range p.ByItems {
topNExec.OrderBy = append(topNExec.OrderBy, expression.SortByItemToPB(sc, client, item.Expr, item.Desc))
}
executorID := ""
if storeType == kv.TiFlash {
var err error
topNExec.Child, err = p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
executorID = p.ExplainID().String()
}
return &tipb.Executor{Tp: tipb.ExecType_TypeTopN, TopN: topNExec, ExecutorId: &executorID}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalLimit) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
limitExec := &tipb.Limit{
Limit: p.Count,
}
executorID := ""
if storeType == kv.TiFlash {
var err error
limitExec.Child, err = p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
executorID = p.ExplainID().String()
}
return &tipb.Executor{Tp: tipb.ExecType_TypeLimit, Limit: limitExec, ExecutorId: &executorID}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalTableScan) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
tsExec := tables.BuildTableScanFromInfos(p.Table, p.Columns)
tsExec.Desc = p.Desc
if p.isPartition {
tsExec.TableId = p.physicalTableID
}
executorID := ""
if storeType == kv.TiFlash && p.IsGlobalRead {
tsExec.NextReadEngine = tipb.EngineType_TiFlash
splitedRanges, _ := distsql.SplitRangesBySign(p.Ranges, false, false, p.Table.IsCommonHandle)
ranges, err := distsql.TableHandleRangesToKVRanges(ctx.GetSessionVars().StmtCtx, []int64{tsExec.TableId}, p.Table.IsCommonHandle, splitedRanges, nil)
if err != nil {
return nil, err
}
for _, keyRange := range ranges {
tsExec.Ranges = append(tsExec.Ranges, tipb.KeyRange{Low: keyRange.StartKey, High: keyRange.EndKey})
}
}
if storeType == kv.TiFlash {
executorID = p.ExplainID().String()
}
err := SetPBColumnsDefaultValue(ctx, tsExec.Columns, p.Columns)
return &tipb.Executor{Tp: tipb.ExecType_TypeTableScan, TblScan: tsExec, ExecutorId: &executorID}, err
}
// checkCoverIndex checks whether we can pass unique info to TiKV. We should push it if and only if the lengths of
// the range and the index are equal.
func checkCoverIndex(idx *model.IndexInfo, ranges []*ranger.Range) bool {
// If the index is (c1, c2) but the query range only contains c1, it is not a unique get.
if !idx.Unique {
return false
}
for _, rg := range ranges {
if len(rg.LowVal) != len(idx.Columns) {
return false
}
}
return true
}
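// findColumnInfoByID returns the column info with the given ID, or nil if it is not found.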
func findColumnInfoByID(infos []*model.ColumnInfo, id int64) *model.ColumnInfo {
for _, info := range infos {
if info.ID == id {
return info
}
}
return nil
}
// ToPB generates the pb structure.
func (e *PhysicalExchangeSender) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
child, err := e.Children()[0].ToPB(ctx, kv.TiFlash)
if err != nil {
return nil, errors.Trace(err)
}
encodedTask := make([][]byte, 0, len(e.TargetTasks))
for _, task := range e.TargetTasks {
encodedStr, err := task.ToPB().Marshal()
if err != nil {
return nil, errors.Trace(err)
}
encodedTask = append(encodedTask, encodedStr)
}
hashCols := make([]expression.Expression, 0, len(e.HashCols))
for _, col := range e.HashCols {
hashCols = append(hashCols, col)
}
hashColPb, err := expression.ExpressionsToPBList(ctx.GetSessionVars().StmtCtx, hashCols, ctx.GetClient())
if err != nil {
return nil, errors.Trace(err)
}
ecExec := &tipb.ExchangeSender{
Tp: e.ExchangeType,
EncodedTaskMeta: encodedTask,
PartitionKeys: hashColPb,
Child: child,
}
executorID := e.ExplainID().String()
return &tipb.Executor{
Tp: tipb.ExecType_TypeExchangeSender,
ExchangeSender: ecExec,
ExecutorId: &executorID,
}, nil
}
// ToPB generates the pb structure.
func (e *PhysicalExchangeReceiver) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
encodedTask := make([][]byte, 0, len(e.Tasks))
for _, task := range e.Tasks {
encodedStr, err := task.ToPB().Marshal()
if err != nil {
return nil, errors.Trace(err)
}
encodedTask = append(encodedTask, encodedStr)
}
fieldTypes := make([]*tipb.FieldType, 0, len(e.Schema().Columns))
for _, column := range e.Schema().Columns {
pbType := expression.ToPBFieldType(column.RetType)
if column.RetType.Tp == mysql.TypeEnum {
pbType.Elems = append(pbType.Elems, column.RetType.Elems...)
}
fieldTypes = append(fieldTypes, pbType)
}
ecExec := &tipb.ExchangeReceiver{
EncodedTaskMeta: encodedTask,
FieldTypes: fieldTypes,
}
executorID := e.ExplainID().String()
return &tipb.Executor{
Tp: tipb.ExecType_TypeExchangeReceiver,
ExchangeReceiver: ecExec,
ExecutorId: &executorID,
}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalIndexScan) ToPB(ctx sessionctx.Context, _ kv.StoreType) (*tipb.Executor, error) {
columns := make([]*model.ColumnInfo, 0, p.schema.Len())
tableColumns := p.Table.Cols()
for _, col := range p.schema.Columns {
if col.ID == model.ExtraHandleID {
columns = append(columns, model.NewExtraHandleColInfo())
} else if col.ID == model.ExtraPidColID {
columns = append(columns, model.NewExtraPartitionIDColInfo())
} else {
columns = append(columns, findColumnInfoByID(tableColumns, col.ID))
}
}
var pkColIds []int64
if p.NeedCommonHandle {
pkColIds = tables.TryGetCommonPkColumnIds(p.Table)
}
idxExec := &tipb.IndexScan{
TableId: p.Table.ID,
IndexId: p.Index.ID,
Columns: util.ColumnsToProto(columns, p.Table.PKIsHandle),
Desc: p.Desc,
PrimaryColumnIds: pkColIds,
}
if p.isPartition {
idxExec.TableId = p.physicalTableID
}
unique := checkCoverIndex(p.Index, p.Ranges)
idxExec.Unique = &unique
return &tipb.Executor{Tp: tipb.ExecType_TypeIndexScan, IdxScan: idxExec}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalHashJoin) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
sc := ctx.GetSessionVars().StmtCtx
client := ctx.GetClient()
leftJoinKeys := make([]expression.Expression, 0, len(p.LeftJoinKeys))
rightJoinKeys := make([]expression.Expression, 0, len(p.RightJoinKeys))
for _, leftKey := range p.LeftJoinKeys {
leftJoinKeys = append(leftJoinKeys, leftKey)
}
for _, rightKey := range p.RightJoinKeys {
rightJoinKeys = append(rightJoinKeys, rightKey)
}
lChildren, err := p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
rChildren, err := p.children[1].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
left, err := expression.ExpressionsToPBList(sc, leftJoinKeys, client)
if err != nil {
return nil, err
}
right, err := expression.ExpressionsToPBList(sc, rightJoinKeys, client)
if err != nil {
return nil, err
}
leftConditions, err := expression.ExpressionsToPBList(sc, p.LeftConditions, client)
if err != nil {
return nil, err
}
rightConditions, err := expression.ExpressionsToPBList(sc, p.RightConditions, client)
if err != nil {
return nil, err
}
otherConditions, err := expression.ExpressionsToPBList(sc, p.OtherConditions, client)
if err != nil {
return nil, err
}
pbJoinType := tipb.JoinType_TypeInnerJoin
switch p.JoinType {
case LeftOuterJoin:
pbJoinType = tipb.JoinType_TypeLeftOuterJoin
case RightOuterJoin:
pbJoinType = tipb.JoinType_TypeRightOuterJoin
case SemiJoin:
pbJoinType = tipb.JoinType_TypeSemiJoin
case AntiSemiJoin:
pbJoinType = tipb.JoinType_TypeAntiSemiJoin
case LeftOuterSemiJoin:
pbJoinType = tipb.JoinType_TypeLeftOuterSemiJoin
case AntiLeftOuterSemiJoin:
pbJoinType = tipb.JoinType_TypeAntiLeftOuterSemiJoin
}
probeFiledTypes := make([]*tipb.FieldType, 0, len(p.EqualConditions))
buildFiledTypes := make([]*tipb.FieldType, 0, len(p.EqualConditions))
for _, equalCondition := range p.EqualConditions {
retType := equalCondition.RetType.Clone()
chs, coll := equalCondition.CharsetAndCollation(ctx)
retType.Charset = chs
retType.Collate = coll
probeFiledTypes = append(probeFiledTypes, expression.ToPBFieldType(retType))
buildFiledTypes = append(buildFiledTypes, expression.ToPBFieldType(retType))
}
join := &tipb.Join{
JoinType: pbJoinType,
JoinExecType: tipb.JoinExecType_TypeHashJoin,
InnerIdx: int64(p.InnerChildIdx),
LeftJoinKeys: left,
RightJoinKeys: right,
ProbeTypes: probeFiledTypes,
BuildTypes: buildFiledTypes,
LeftConditions: leftConditions,
RightConditions: rightConditions,
OtherConditions: otherConditions,
Children: []*tipb.Executor{lChildren, rChildren},
}
executorID := p.ExplainID().String()
return &tipb.Executor{Tp: tipb.ExecType_TypeJoin, Join: join, ExecutorId: &executorID}, nil
}
// SetPBColumnsDefaultValue sets the default values of tipb.ColumnInfos.
func SetPBColumnsDefaultValue(ctx sessionctx.Context, pbColumns []*tipb.ColumnInfo, columns []*model.ColumnInfo) error {
for i, c := range columns {
		// For virtual columns, we set their default values to NULL so that TiKV will return NULL properly.
		// Their real values will be computed later.
if c.IsGenerated() && !c.GeneratedStored {
pbColumns[i].DefaultVal = []byte{codec.NilFlag}
}
if c.GetOriginDefaultValue() == nil {
continue
}
sessVars := ctx.GetSessionVars()
originStrict := sessVars.StrictSQLMode
sessVars.StrictSQLMode = false
d, err := table.GetColOriginDefaultValue(ctx, c)
sessVars.StrictSQLMode = originStrict
if err != nil {
return err
}
pbColumns[i].DefaultVal, err = tablecodec.EncodeValue(sessVars.StmtCtx, nil, d)
if err != nil {
return err
}
}
return nil
}
// SupportStreaming returns true if a pushed down operation supports using coprocessor streaming API.
// Note that this function handles pushed-down physical plans only! It's called in constructDAGReq.
// Some plans are difficult (if possible) to implement streaming, and some are pointless to do so.
// TODO: Support more kinds of physical plan.
func SupportStreaming(p PhysicalPlan) bool {
switch p.(type) {
case *PhysicalIndexScan, *PhysicalSelection, *PhysicalTableScan:
return true
}
return false
}
| planner/core/plan_to_pb.go | 1 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.013900868594646454,
0.0005677648005075753,
0.0001599710958544165,
0.0001672053331276402,
0.0020152973011136055
] |
{
"id": 1,
"code_window": [
"\t\t\treturn unsignedRanges, signedRanges\n",
"\t\t}\n",
"\t\treturn signedRanges, unsignedRanges\n",
"\t}\n",
"\tsignedRanges := make([]*ranger.Range, 0, idx+1)\n",
"\tunsignedRanges := make([]*ranger.Range, 0, len(ranges)-idx)\n",
"\tsignedRanges = append(signedRanges, ranges[0:idx]...)\n",
"\tif !(ranges[idx].LowVal[0].GetUint64() == math.MaxInt64 && ranges[idx].LowExclude) {\n",
"\t\tsignedRanges = append(signedRanges, &ranger.Range{\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// need to split the range that straddles the int64 boundary\n"
],
"file_path": "distsql/request_builder.go",
"type": "add",
"edit_start_line_idx": 424
} | package aggfuncs_test
import (
. "github.com/pingcap/check"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/mysql"
)
func (s *testSuite) TestMergePartialResult4Varsamp(c *C) {
tests := []aggTest{
buildAggTester(ast.AggFuncVarSamp, mysql.TypeDouble, 5, 2.5, 1, 1.9821428571428572),
}
for _, test := range tests {
s.testMergePartialResult(c, test)
}
}
func (s *testSuite) TestVarsamp(c *C) {
tests := []aggTest{
buildAggTester(ast.AggFuncVarSamp, mysql.TypeDouble, 5, nil, 2.5),
}
for _, test := range tests {
s.testAggFunc(c, test)
}
}
| executor/aggfuncs/func_varsamp_test.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.0001707887277007103,
0.00016829825472086668,
0.00016559955838602036,
0.00016850646352395415,
0.0000021235794065432856
] |
{
"id": 1,
"code_window": [
"\t\t\treturn unsignedRanges, signedRanges\n",
"\t\t}\n",
"\t\treturn signedRanges, unsignedRanges\n",
"\t}\n",
"\tsignedRanges := make([]*ranger.Range, 0, idx+1)\n",
"\tunsignedRanges := make([]*ranger.Range, 0, len(ranges)-idx)\n",
"\tsignedRanges = append(signedRanges, ranges[0:idx]...)\n",
"\tif !(ranges[idx].LowVal[0].GetUint64() == math.MaxInt64 && ranges[idx].LowExclude) {\n",
"\t\tsignedRanges = append(signedRanges, &ranger.Range{\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// need to split the range that straddles the int64 boundary\n"
],
"file_path": "distsql/request_builder.go",
"type": "add",
"edit_start_line_idx": 424
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"fmt"
"github.com/pingcap/parser/ast"
)
// AlterAlgorithm is used to store the supported alter algorithms.
// For now, TiDB only supports AlterAlgorithmInplace and AlterAlgorithmInstant.
// Most alter operations use the instant algorithm, and only add index uses inplace (not really in-place,
// because we never block the DML, but it takes some time to backfill the index data).
// See https://dev.mysql.com/doc/refman/8.0/en/alter-table.html#alter-table-performance.
type AlterAlgorithm struct {
// supported MUST store algorithms in the order 'INSTANT, INPLACE, COPY'
supported []ast.AlgorithmType
// If the alter algorithm is not given, the defAlgorithm will be used.
defAlgorithm ast.AlgorithmType
}
var (
instantAlgorithm = &AlterAlgorithm{
supported: []ast.AlgorithmType{ast.AlgorithmTypeInstant},
defAlgorithm: ast.AlgorithmTypeInstant,
}
inplaceAlgorithm = &AlterAlgorithm{
supported: []ast.AlgorithmType{ast.AlgorithmTypeInplace},
defAlgorithm: ast.AlgorithmTypeInplace,
}
)
func getProperAlgorithm(specify ast.AlgorithmType, algorithm *AlterAlgorithm) (ast.AlgorithmType, error) {
if specify == ast.AlgorithmTypeDefault {
return algorithm.defAlgorithm, nil
}
r := ast.AlgorithmTypeDefault
for _, a := range algorithm.supported {
if specify <= a {
r = a
break
}
}
var err error
if specify != r {
err = ErrAlterOperationNotSupported.GenWithStackByArgs(fmt.Sprintf("ALGORITHM=%s", specify), fmt.Sprintf("Cannot alter table by %s", specify), fmt.Sprintf("ALGORITHM=%s", algorithm.defAlgorithm))
}
return r, err
}
// ResolveAlterAlgorithm resolves the algorithm of the alterSpec.
// If specify is the ast.AlterAlgorithmDefault, then the default algorithm of the alter action will be returned.
// If the specified algorithm is not supported by the alter action, it will try to find a better algorithm in the order `INSTANT > INPLACE > COPY`; ErrAlterOperationNotSupported will be returned.
// E.g. INSTANT may be returned if specify=INPLACE.
// If it fails to choose any valid algorithm, AlgorithmTypeDefault and ErrAlterOperationNotSupported will be returned.
func ResolveAlterAlgorithm(alterSpec *ast.AlterTableSpec, specify ast.AlgorithmType) (ast.AlgorithmType, error) {
switch alterSpec.Tp {
	// For now, TiDB only supports the inplace and instant algorithms.
case ast.AlterTableAddConstraint:
return getProperAlgorithm(specify, inplaceAlgorithm)
default:
return getProperAlgorithm(specify, instantAlgorithm)
}
}
| ddl/ddl_algorithm.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.008805210702121258,
0.0011392351007089019,
0.00016517582116648555,
0.00017508740711491555,
0.002710409928113222
] |
{
"id": 1,
"code_window": [
"\t\t\treturn unsignedRanges, signedRanges\n",
"\t\t}\n",
"\t\treturn signedRanges, unsignedRanges\n",
"\t}\n",
"\tsignedRanges := make([]*ranger.Range, 0, idx+1)\n",
"\tunsignedRanges := make([]*ranger.Range, 0, len(ranges)-idx)\n",
"\tsignedRanges = append(signedRanges, ranges[0:idx]...)\n",
"\tif !(ranges[idx].LowVal[0].GetUint64() == math.MaxInt64 && ranges[idx].LowExclude) {\n",
"\t\tsignedRanges = append(signedRanges, &ranger.Range{\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// need to split the range that straddles the int64 boundary\n"
],
"file_path": "distsql/request_builder.go",
"type": "add",
"edit_start_line_idx": 424
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package variable
import (
"math"
)
// The following sysVars are noops.
// Some applications will depend on certain variables to be present or settable,
// for example query_cache_time. These are included for MySQL compatibility,
// but changing them has no effect on behavior.
var noopSysVars = []*SysVar{
// It is unsafe to pretend that any variation of "read only" is enabled when the server
// does not support it. It is possible that these features will be supported in future,
// but until then...
{Scope: ScopeGlobal | ScopeSession, Name: TxReadOnly, Value: BoolOff, Type: TypeBool, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
return checkReadOnly(vars, normalizedValue, originalValue, scope, false)
}},
{Scope: ScopeGlobal | ScopeSession, Name: TransactionReadOnly, Value: BoolOff, Type: TypeBool, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
return checkReadOnly(vars, normalizedValue, originalValue, scope, false)
}},
{Scope: ScopeGlobal, Name: OfflineMode, Value: BoolOff, Type: TypeBool, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
return checkReadOnly(vars, normalizedValue, originalValue, scope, true)
}},
{Scope: ScopeGlobal, Name: SuperReadOnly, Value: BoolOff, Type: TypeBool, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
return checkReadOnly(vars, normalizedValue, originalValue, scope, false)
}},
{Scope: ScopeGlobal, Name: serverReadOnly, Value: BoolOff, Type: TypeBool, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
return checkReadOnly(vars, normalizedValue, originalValue, scope, false)
}},
{Scope: ScopeGlobal, Name: ConnectTimeout, Value: "10", Type: TypeUnsigned, MinValue: 2, MaxValue: secondsPerYear, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal | ScopeSession, Name: QueryCacheWlockInvalidate, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "sql_buffer_result", Value: BoolOff, IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: MyISAMUseMmap, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: "gtid_mode", Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: FlushTime, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: secondsPerYear, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "performance_schema_max_mutex_classes", Value: "200"},
{Scope: ScopeGlobal | ScopeSession, Name: LowPriorityUpdates, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: SessionTrackGtids, Value: BoolOff, Type: TypeEnum, PossibleValues: []string{BoolOff, "OWN_GTID", "ALL_GTIDS"}},
{Scope: ScopeGlobal | ScopeSession, Name: "ndbinfo_max_rows", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_index_stat_option", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: OldPasswords, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 2, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "innodb_version", Value: "5.6.25"},
{Scope: ScopeGlobal | ScopeSession, Name: BigTables, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "skip_external_locking", Value: "1"},
{Scope: ScopeNone, Name: "innodb_sync_array_size", Value: "1"},
{Scope: ScopeSession, Name: "rand_seed2", Value: ""},
{Scope: ScopeGlobal, Name: ValidatePasswordCheckUserName, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: ValidatePasswordNumberCount, Value: "1", Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true},
{Scope: ScopeSession, Name: "gtid_next", Value: ""},
{Scope: ScopeGlobal, Name: "ndb_show_foreign_key_mock_tables", Value: ""},
{Scope: ScopeNone, Name: "multi_range_count", Value: "256"},
{Scope: ScopeGlobal | ScopeSession, Name: "binlog_error_action", Value: "IGNORE_ERROR"},
{Scope: ScopeGlobal | ScopeSession, Name: "default_storage_engine", Value: "InnoDB"},
{Scope: ScopeNone, Name: "ft_query_expansion_limit", Value: "20"},
{Scope: ScopeGlobal, Name: MaxConnectErrors, Value: "100", Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal, Name: SyncBinlog, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 4294967295, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "max_digest_length", Value: "1024"},
{Scope: ScopeNone, Name: "innodb_force_load_corrupted", Value: "0"},
{Scope: ScopeNone, Name: "performance_schema_max_table_handles", Value: "4000"},
{Scope: ScopeGlobal, Name: InnodbFastShutdown, Value: "1", Type: TypeUnsigned, MinValue: 0, MaxValue: 2, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "ft_max_word_len", Value: "84"},
{Scope: ScopeGlobal, Name: "log_backward_compatible_user_definitions", Value: ""},
{Scope: ScopeNone, Name: "lc_messages_dir", Value: "/usr/local/mysql-5.6.25-osx10.8-x86_64/share/"},
{Scope: ScopeGlobal, Name: "ft_boolean_syntax", Value: "+ -><()~*:\"\"&|"},
{Scope: ScopeGlobal, Name: TableDefinitionCache, Value: "-1", Type: TypeUnsigned, MinValue: 400, MaxValue: 524288, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: SkipNameResolve, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "performance_schema_max_file_handles", Value: "32768"},
{Scope: ScopeSession, Name: "transaction_allow_batching", Value: ""},
{Scope: ScopeNone, Name: "performance_schema_max_statement_classes", Value: "168"},
{Scope: ScopeGlobal, Name: "server_id", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_flushing_avg_loops", Value: "30"},
{Scope: ScopeGlobal | ScopeSession, Name: TmpTableSize, Value: "16777216", Type: TypeUnsigned, MinValue: 1024, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true, IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "innodb_max_purge_lag", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: "preload_buffer_size", Value: "32768"},
{Scope: ScopeGlobal, Name: CheckProxyUsers, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "have_query_cache", Value: "YES"},
{Scope: ScopeGlobal, Name: "innodb_flush_log_at_timeout", Value: "1"},
{Scope: ScopeGlobal, Name: "innodb_max_undo_log_size", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "range_alloc_block_size", Value: "4096", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "have_rtree_keys", Value: "YES"},
{Scope: ScopeGlobal, Name: "innodb_old_blocks_pct", Value: "37"},
{Scope: ScopeGlobal, Name: "innodb_file_format", Value: "Barracuda", Type: TypeEnum, PossibleValues: []string{"Antelope", "Barracuda"}},
{Scope: ScopeGlobal, Name: "innodb_default_row_format", Value: "dynamic", Type: TypeEnum, PossibleValues: []string{"redundant", "compact", "dynamic"}},
{Scope: ScopeGlobal, Name: "innodb_compression_failure_threshold_pct", Value: "5"},
{Scope: ScopeNone, Name: "performance_schema_events_waits_history_long_size", Value: "10000"},
{Scope: ScopeGlobal, Name: "innodb_checksum_algorithm", Value: "innodb"},
{Scope: ScopeNone, Name: "innodb_ft_sort_pll_degree", Value: "2"},
{Scope: ScopeNone, Name: "thread_stack", Value: "262144"},
{Scope: ScopeGlobal, Name: "relay_log_info_repository", Value: "FILE"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_delayed_threads", Value: "20"},
{Scope: ScopeNone, Name: "protocol_version", Value: "10"},
{Scope: ScopeGlobal | ScopeSession, Name: "new", Value: BoolOff},
{Scope: ScopeGlobal | ScopeSession, Name: "myisam_sort_buffer_size", Value: "8388608"},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace_offset", Value: "-1"},
{Scope: ScopeGlobal, Name: InnodbBufferPoolDumpAtShutdown, Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: SQLNotes, Value: "1"},
{Scope: ScopeGlobal, Name: InnodbCmpPerIndexEnabled, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: "innodb_ft_server_stopword_table", Value: ""},
{Scope: ScopeNone, Name: "performance_schema_max_file_instances", Value: "7693"},
{Scope: ScopeNone, Name: "log_output", Value: "FILE"},
{Scope: ScopeGlobal, Name: "binlog_group_commit_sync_delay", Value: ""},
{Scope: ScopeGlobal, Name: "binlog_group_commit_sync_no_delay_count", Value: ""},
{Scope: ScopeNone, Name: "have_crypt", Value: "YES"},
{Scope: ScopeGlobal, Name: "innodb_log_write_ahead_size", Value: ""},
{Scope: ScopeNone, Name: "innodb_log_group_home_dir", Value: "./"},
{Scope: ScopeNone, Name: "performance_schema_events_statements_history_size", Value: "10"},
{Scope: ScopeGlobal, Name: GeneralLog, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "validate_password_dictionary_file", Value: ""},
{Scope: ScopeGlobal, Name: BinlogOrderCommits, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal, Name: "key_cache_division_limit", Value: "100"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_insert_delayed_threads", Value: "20"},
{Scope: ScopeNone, Name: "performance_schema_session_connect_attrs_size", Value: "512"},
{Scope: ScopeGlobal, Name: "innodb_max_dirty_pages_pct", Value: "75"},
{Scope: ScopeGlobal, Name: InnodbFilePerTable, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: InnodbLogCompressedPages, Value: "1"},
{Scope: ScopeNone, Name: "skip_networking", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_monitor_reset", Value: ""},
{Scope: ScopeNone, Name: "ssl_cipher", Value: ""},
{Scope: ScopeNone, Name: "tls_version", Value: "TLSv1,TLSv1.1,TLSv1.2"},
{Scope: ScopeGlobal, Name: InnodbPrintAllDeadlocks, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeNone, Name: "innodb_autoinc_lock_mode", Value: "1"},
{Scope: ScopeGlobal, Name: "key_buffer_size", Value: "8388608"},
{Scope: ScopeGlobal, Name: "host_cache_size", Value: "279"},
{Scope: ScopeGlobal, Name: DelayKeyWrite, Value: BoolOn, Type: TypeEnum, PossibleValues: []string{BoolOff, BoolOn, "ALL"}},
{Scope: ScopeNone, Name: "metadata_locks_cache_size", Value: "1024"},
{Scope: ScopeNone, Name: "innodb_force_recovery", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_file_format_max", Value: "Antelope"},
{Scope: ScopeGlobal | ScopeSession, Name: "debug", Value: ""},
{Scope: ScopeGlobal, Name: "log_warnings", Value: "1"},
{Scope: ScopeGlobal | ScopeSession, Name: InnodbStrictMode, Value: "1", Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: "innodb_rollback_segments", Value: "128"},
{Scope: ScopeGlobal | ScopeSession, Name: "join_buffer_size", Value: "262144", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "innodb_mirrored_log_groups", Value: "1"},
{Scope: ScopeGlobal, Name: "max_binlog_size", Value: "1073741824"},
{Scope: ScopeGlobal, Name: "concurrent_insert", Value: "AUTO"},
{Scope: ScopeGlobal, Name: InnodbAdaptiveHashIndex, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: InnodbFtEnableStopword, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: "general_log_file", Value: "/usr/local/mysql/data/localhost.log"},
{Scope: ScopeGlobal | ScopeSession, Name: InnodbSupportXA, Value: "1"},
{Scope: ScopeGlobal, Name: "innodb_compression_level", Value: "6"},
{Scope: ScopeNone, Name: "innodb_file_format_check", Value: "1"},
{Scope: ScopeNone, Name: "myisam_mmap_size", Value: "18446744073709551615"},
{Scope: ScopeNone, Name: "innodb_buffer_pool_instances", Value: "8"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_length_for_sort_data", Value: "1024", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "character_set_system", Value: "utf8"},
{Scope: ScopeGlobal, Name: InnodbOptimizeFullTextOnly, Value: "0"},
{Scope: ScopeNone, Name: "character_sets_dir", Value: "/usr/local/mysql-5.6.25-osx10.8-x86_64/share/charsets/"},
{Scope: ScopeGlobal | ScopeSession, Name: QueryCacheType, Value: BoolOff, Type: TypeEnum, PossibleValues: []string{BoolOff, BoolOn, "DEMAND"}},
{Scope: ScopeNone, Name: "innodb_rollback_on_timeout", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: "query_alloc_block_size", Value: "8192"},
{Scope: ScopeNone, Name: "have_compress", Value: "YES"},
{Scope: ScopeNone, Name: "thread_concurrency", Value: "10"},
{Scope: ScopeGlobal | ScopeSession, Name: "query_prealloc_size", Value: "8192"},
{Scope: ScopeNone, Name: "relay_log_space_limit", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: MaxUserConnections, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 4294967295, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "performance_schema_max_thread_classes", Value: "50"},
{Scope: ScopeGlobal, Name: "innodb_api_trx_level", Value: "0"},
{Scope: ScopeNone, Name: "disconnect_on_expired_password", Value: "1"},
{Scope: ScopeNone, Name: "performance_schema_max_file_classes", Value: "50"},
{Scope: ScopeGlobal, Name: "expire_logs_days", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: BinlogRowQueryLogEvents, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "default_password_lifetime", Value: ""},
{Scope: ScopeNone, Name: "pid_file", Value: "/usr/local/mysql/data/localhost.pid"},
{Scope: ScopeNone, Name: "innodb_undo_tablespaces", Value: "0"},
{Scope: ScopeGlobal, Name: InnodbStatusOutputLocks, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeNone, Name: "performance_schema_accounts_size", Value: "100"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_error_count", Value: "64", IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "max_write_lock_count", Value: "18446744073709551615"},
{Scope: ScopeNone, Name: "performance_schema_max_socket_instances", Value: "322"},
{Scope: ScopeNone, Name: "performance_schema_max_table_instances", Value: "12500"},
{Scope: ScopeGlobal, Name: "innodb_stats_persistent_sample_pages", Value: "20"},
{Scope: ScopeGlobal, Name: "show_compatibility_56", Value: ""},
{Scope: ScopeNone, Name: "innodb_open_files", Value: "2000"},
{Scope: ScopeGlobal, Name: "innodb_spin_wait_delay", Value: "6"},
{Scope: ScopeGlobal, Name: "thread_cache_size", Value: "9"},
{Scope: ScopeGlobal, Name: LogSlowAdminStatements, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "innodb_checksums", Type: TypeBool, Value: BoolOn},
{Scope: ScopeNone, Name: "ft_stopword_file", Value: "(built-in)"},
{Scope: ScopeGlobal, Name: "innodb_max_dirty_pages_pct_lwm", Value: "0"},
{Scope: ScopeGlobal, Name: LogQueriesNotUsingIndexes, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "max_heap_table_size", Value: "16777216", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: "div_precision_increment", Value: "4", IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "innodb_lru_scan_depth", Value: "1024"},
{Scope: ScopeGlobal, Name: "innodb_purge_rseg_truncate_frequency", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: SQLAutoIsNull, Value: BoolOff, Type: TypeBool, IsHintUpdatable: true},
{Scope: ScopeNone, Name: "innodb_api_enable_binlog", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: "innodb_ft_user_stopword_table", Value: ""},
{Scope: ScopeNone, Name: "server_id_bits", Value: "32"},
{Scope: ScopeGlobal, Name: "innodb_log_checksum_algorithm", Value: ""},
{Scope: ScopeNone, Name: "innodb_buffer_pool_load_at_startup", Value: "1"},
{Scope: ScopeGlobal | ScopeSession, Name: "sort_buffer_size", Value: "262144", IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "innodb_flush_neighbors", Value: "1"},
{Scope: ScopeNone, Name: "innodb_use_sys_malloc", Value: "1"},
{Scope: ScopeSession, Name: PluginLoad, Value: ""},
{Scope: ScopeSession, Name: PluginDir, Value: "/data/deploy/plugin"},
{Scope: ScopeNone, Name: "performance_schema_max_socket_classes", Value: "10"},
{Scope: ScopeNone, Name: "performance_schema_max_stage_classes", Value: "150"},
{Scope: ScopeGlobal, Name: "innodb_purge_batch_size", Value: "300"},
{Scope: ScopeNone, Name: "have_profiling", Value: "NO"},
{Scope: ScopeGlobal, Name: InnodbBufferPoolDumpNow, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: RelayLogPurge, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal, Name: "ndb_distribution", Value: ""},
{Scope: ScopeGlobal, Name: "myisam_data_pointer_size", Value: "6"},
{Scope: ScopeGlobal, Name: "ndb_optimization_delay", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_ft_num_word_optimize", Value: "2000"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_join_size", Value: "18446744073709551615", IsHintUpdatable: true},
{Scope: ScopeNone, Name: CoreFile, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "max_seeks_for_key", Value: "18446744073709551615", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "innodb_log_buffer_size", Value: "8388608"},
{Scope: ScopeGlobal, Name: "delayed_insert_timeout", Value: "300"},
{Scope: ScopeGlobal, Name: "max_relay_log_size", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: MaxSortLength, Value: "1024", Type: TypeUnsigned, MinValue: 4, MaxValue: 8388608, AutoConvertOutOfRange: true, IsHintUpdatable: true},
{Scope: ScopeNone, Name: "metadata_locks_hash_instances", Value: "8"},
{Scope: ScopeGlobal, Name: "ndb_eventbuffer_free_percent", Value: ""},
{Scope: ScopeNone, Name: "large_files_support", Value: "1"},
{Scope: ScopeGlobal, Name: "binlog_max_flush_queue_time", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_fill_factor", Value: ""},
{Scope: ScopeGlobal, Name: "log_syslog_facility", Value: ""},
{Scope: ScopeNone, Name: "innodb_ft_min_token_size", Value: "3"},
{Scope: ScopeGlobal | ScopeSession, Name: "transaction_write_set_extraction", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_blob_write_batch_bytes", Value: ""},
{Scope: ScopeGlobal, Name: "automatic_sp_privileges", Value: "1"},
{Scope: ScopeGlobal, Name: "innodb_flush_sync", Value: ""},
{Scope: ScopeNone, Name: "performance_schema_events_statements_history_long_size", Value: "10000"},
{Scope: ScopeGlobal, Name: "innodb_monitor_disable", Value: ""},
{Scope: ScopeNone, Name: "innodb_doublewrite", Value: "1"},
{Scope: ScopeNone, Name: "log_bin_use_v1_row_events", Value: "0"},
{Scope: ScopeSession, Name: "innodb_optimize_point_storage", Value: ""},
{Scope: ScopeNone, Name: "innodb_api_disable_rowlock", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_adaptive_flushing_lwm", Value: "10"},
{Scope: ScopeNone, Name: "innodb_log_files_in_group", Value: "2"},
{Scope: ScopeGlobal, Name: InnodbBufferPoolLoadNow, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeNone, Name: "performance_schema_max_rwlock_classes", Value: "40"},
{Scope: ScopeNone, Name: "binlog_gtid_simple_recovery", Value: "1"},
{Scope: ScopeNone, Name: "performance_schema_digests_size", Value: "10000"},
{Scope: ScopeGlobal | ScopeSession, Name: Profiling, Value: BoolOff, Type: TypeBool},
{Scope: ScopeSession, Name: "rand_seed1", Value: ""},
{Scope: ScopeGlobal, Name: "sha256_password_proxy_users", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: SQLQuoteShowCreate, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "binlogging_impossible_mode", Value: "IGNORE_ERROR"},
{Scope: ScopeGlobal | ScopeSession, Name: QueryCacheSize, Value: "1048576"},
{Scope: ScopeGlobal, Name: "innodb_stats_transient_sample_pages", Value: "8"},
{Scope: ScopeGlobal, Name: InnodbStatsOnMetadata, Value: "0"},
{Scope: ScopeNone, Name: "server_uuid", Value: "00000000-0000-0000-0000-000000000000"},
{Scope: ScopeNone, Name: "open_files_limit", Value: "5000"},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_force_send", Value: ""},
{Scope: ScopeNone, Name: "skip_show_database", Value: "0"},
{Scope: ScopeGlobal, Name: "log_timestamps", Value: ""},
{Scope: ScopeNone, Name: "version_compile_machine", Value: "x86_64"},
{Scope: ScopeGlobal, Name: "event_scheduler", Value: BoolOff},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_deferred_constraints", Value: ""},
{Scope: ScopeGlobal, Name: "log_syslog_include_pid", Value: ""},
{Scope: ScopeNone, Name: "innodb_ft_cache_size", Value: "8000000"},
{Scope: ScopeGlobal, Name: InnodbDisableSortFileCache, Value: "0"},
{Scope: ScopeGlobal, Name: "log_error_verbosity", Value: ""},
{Scope: ScopeNone, Name: "performance_schema_hosts_size", Value: "100"},
{Scope: ScopeGlobal, Name: "innodb_replication_delay", Value: "0"},
{Scope: ScopeGlobal, Name: SlowQueryLog, Value: "0"},
{Scope: ScopeSession, Name: "debug_sync", Value: ""},
{Scope: ScopeGlobal, Name: InnodbStatsAutoRecalc, Value: "1"},
{Scope: ScopeGlobal | ScopeSession, Name: "lc_messages", Value: "en_US"},
{Scope: ScopeGlobal | ScopeSession, Name: "bulk_insert_buffer_size", Value: "8388608", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: BinlogDirectNonTransactionalUpdates, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "innodb_change_buffering", Value: "all"},
{Scope: ScopeGlobal | ScopeSession, Name: SQLBigSelects, Value: BoolOn, Type: TypeBool, IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "innodb_max_purge_lag_delay", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: "session_track_schema", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_io_capacity_max", Value: "2000"},
{Scope: ScopeGlobal, Name: "innodb_autoextend_increment", Value: "64"},
{Scope: ScopeGlobal | ScopeSession, Name: "binlog_format", Value: "STATEMENT"},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace", Value: "enabled=off,one_line=off"},
{Scope: ScopeGlobal | ScopeSession, Name: "read_rnd_buffer_size", Value: "262144", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: NetWriteTimeout, Value: "60"},
{Scope: ScopeGlobal, Name: InnodbBufferPoolLoadAbort, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal | ScopeSession, Name: "transaction_prealloc_size", Value: "4096"},
{Scope: ScopeNone, Name: "performance_schema_setup_objects_size", Value: "100"},
{Scope: ScopeGlobal, Name: "sync_relay_log", Value: "10000"},
{Scope: ScopeGlobal, Name: "innodb_ft_result_cache_limit", Value: "2000000000"},
{Scope: ScopeNone, Name: "innodb_sort_buffer_size", Value: "1048576"},
{Scope: ScopeGlobal, Name: "innodb_ft_enable_diag_print", Type: TypeBool, Value: BoolOff},
{Scope: ScopeNone, Name: "thread_handling", Value: "one-thread-per-connection"},
{Scope: ScopeGlobal, Name: "stored_program_cache", Value: "256"},
{Scope: ScopeNone, Name: "performance_schema_max_mutex_instances", Value: "15906"},
{Scope: ScopeGlobal, Name: "innodb_adaptive_max_sleep_delay", Value: "150000"},
{Scope: ScopeNone, Name: "large_pages", Value: BoolOff},
{Scope: ScopeGlobal | ScopeSession, Name: "session_track_system_variables", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_change_buffer_max_size", Value: "25"},
{Scope: ScopeGlobal, Name: LogBinTrustFunctionCreators, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "innodb_write_io_threads", Value: "4"},
{Scope: ScopeGlobal, Name: "mysql_native_password_proxy_users", Value: ""},
{Scope: ScopeNone, Name: "large_page_size", Value: "0"},
{Scope: ScopeNone, Name: "table_open_cache_instances", Value: "1"},
{Scope: ScopeGlobal, Name: InnodbStatsPersistent, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal | ScopeSession, Name: "session_track_state_change", Value: ""},
{Scope: ScopeNone, Name: OptimizerSwitch, Value: "index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,engine_condition_pushdown=on,index_condition_pushdown=on,mrr=on,mrr_cost_based=on,block_nested_loop=on,batched_key_access=off,materialization=on,semijoin=on,loosescan=on,firstmatch=on,subquery_materialization_cost_based=on,use_index_extensions=on", IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "delayed_queue_size", Value: "1000"},
{Scope: ScopeNone, Name: "innodb_read_only", Value: "0"},
{Scope: ScopeNone, Name: "datetime_format", Value: "%Y-%m-%d %H:%i:%s"},
{Scope: ScopeGlobal, Name: "log_syslog", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "transaction_alloc_block_size", Value: "8192"},
{Scope: ScopeGlobal, Name: "innodb_large_prefix", Type: TypeBool, Value: BoolOff},
{Scope: ScopeNone, Name: "performance_schema_max_cond_classes", Value: "80"},
{Scope: ScopeGlobal, Name: "innodb_io_capacity", Value: "200"},
{Scope: ScopeGlobal, Name: "max_binlog_cache_size", Value: "18446744073709547520"},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_index_stat_enable", Value: ""},
{Scope: ScopeGlobal, Name: "executed_gtids_compression_period", Value: ""},
{Scope: ScopeNone, Name: "time_format", Value: "%H:%i:%s"},
{Scope: ScopeGlobal | ScopeSession, Name: OldAlterTable, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "long_query_time", Value: "10.000000"},
{Scope: ScopeNone, Name: "innodb_use_native_aio", Value: "0"},
{Scope: ScopeGlobal, Name: "log_throttle_queries_not_using_indexes", Value: "0"},
{Scope: ScopeNone, Name: "locked_in_memory", Value: "0"},
{Scope: ScopeNone, Name: "innodb_api_enable_mdl", Value: "0"},
{Scope: ScopeGlobal, Name: "binlog_cache_size", Value: "32768"},
{Scope: ScopeGlobal, Name: "innodb_compression_pad_pct_max", Value: "50"},
{Scope: ScopeGlobal, Name: InnodbCommitConcurrency, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 1000, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "ft_min_word_len", Value: "4"},
{Scope: ScopeGlobal, Name: EnforceGtidConsistency, Value: BoolOff, Type: TypeEnum, PossibleValues: []string{BoolOff, BoolOn, "WARN"}},
{Scope: ScopeGlobal, Name: SecureAuth, Value: BoolOn, Type: TypeBool, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
if TiDBOptOn(normalizedValue) {
return BoolOn, nil
}
return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs(SecureAuth, originalValue)
}},
{Scope: ScopeNone, Name: "max_tmp_tables", Value: "32"},
{Scope: ScopeGlobal, Name: InnodbRandomReadAhead, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal | ScopeSession, Name: UniqueChecks, Value: BoolOn, Type: TypeBool, IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "internal_tmp_disk_storage_engine", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "myisam_repair_threads", Value: "1"},
{Scope: ScopeGlobal, Name: "ndb_eventbuffer_max_alloc", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_read_ahead_threshold", Value: "56"},
{Scope: ScopeGlobal, Name: "key_cache_block_size", Value: "1024"},
{Scope: ScopeNone, Name: "ndb_recv_thread_cpu_mask", Value: ""},
{Scope: ScopeGlobal, Name: "gtid_purged", Value: ""},
{Scope: ScopeGlobal, Name: "max_binlog_stmt_cache_size", Value: "18446744073709547520"},
{Scope: ScopeGlobal | ScopeSession, Name: "lock_wait_timeout", Value: "31536000"},
{Scope: ScopeGlobal | ScopeSession, Name: "read_buffer_size", Value: "131072", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "innodb_read_io_threads", Value: "4"},
{Scope: ScopeGlobal | ScopeSession, Name: MaxSpRecursionDepth, Value: "0", Type: TypeUnsigned, MinValue: 0, MaxValue: 255, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "ignore_builtin_innodb", Value: "0"},
{Scope: ScopeGlobal, Name: "slow_query_log_file", Value: "/usr/local/mysql/data/localhost-slow.log"},
{Scope: ScopeGlobal, Name: "innodb_thread_sleep_delay", Value: "10000"},
{Scope: ScopeGlobal, Name: "innodb_ft_aux_table", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: SQLWarnings, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: KeepFilesOnCreate, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "innodb_data_file_path", Value: "ibdata1:12M:autoextend"},
{Scope: ScopeNone, Name: "performance_schema_setup_actors_size", Value: "100"},
{Scope: ScopeNone, Name: "innodb_additional_mem_pool_size", Value: "8388608"},
{Scope: ScopeNone, Name: "log_error", Value: "/usr/local/mysql/data/localhost.err"},
{Scope: ScopeGlobal, Name: "binlog_stmt_cache_size", Value: "32768"},
{Scope: ScopeNone, Name: "relay_log_info_file", Value: "relay-log.info"},
{Scope: ScopeNone, Name: "innodb_ft_total_cache_size", Value: "640000000"},
{Scope: ScopeNone, Name: "performance_schema_max_rwlock_instances", Value: "9102"},
{Scope: ScopeGlobal, Name: "table_open_cache", Value: "2000"},
{Scope: ScopeNone, Name: "performance_schema_events_stages_history_long_size", Value: "10000"},
{Scope: ScopeSession, Name: "insert_id", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "default_tmp_storage_engine", Value: "InnoDB", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_search_depth", Value: "62", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: "max_points_in_geometry", Value: "65536", IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: "innodb_stats_sample_pages", Value: "8"},
{Scope: ScopeGlobal | ScopeSession, Name: "profiling_history_size", Value: "15"},
{Scope: ScopeNone, Name: "have_symlink", Value: "YES"},
{Scope: ScopeGlobal | ScopeSession, Name: "storage_engine", Value: "InnoDB"},
{Scope: ScopeGlobal | ScopeSession, Name: "sql_log_off", Value: "0"},
// In MySQL, the default value of `explicit_defaults_for_timestamp` is `0`.
	// But in TiDB, it's set to `1` to be consistent with TiDB's timestamp behavior.
// See: https://github.com/pingcap/tidb/pull/6068 for details
{Scope: ScopeNone, Name: "explicit_defaults_for_timestamp", Value: BoolOn, Type: TypeBool},
{Scope: ScopeNone, Name: "performance_schema_events_waits_history_size", Value: "10"},
{Scope: ScopeGlobal, Name: "log_syslog_tag", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_undo_log_truncate", Value: ""},
{Scope: ScopeSession, Name: "innodb_create_intrinsic", Value: ""},
{Scope: ScopeGlobal, Name: "gtid_executed_compression_period", Value: ""},
{Scope: ScopeGlobal, Name: "ndb_log_empty_epochs", Value: ""},
{Scope: ScopeNone, Name: "have_geometry", Value: "YES"},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace_max_mem_size", Value: "16384"},
{Scope: ScopeGlobal | ScopeSession, Name: "net_retry_count", Value: "10"},
{Scope: ScopeSession, Name: "ndb_table_no_logging", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace_features", Value: "greedy_search=on,range_optimizer=on,dynamic_range=on,repeated_subselect=on"},
{Scope: ScopeGlobal, Name: "innodb_flush_log_at_trx_commit", Value: "1"},
{Scope: ScopeGlobal, Name: "rewriter_enabled", Value: ""},
{Scope: ScopeGlobal, Name: "query_cache_min_res_unit", Value: "4096"},
{Scope: ScopeGlobal | ScopeSession, Name: "updatable_views_with_limit", Value: "YES", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_prune_level", Value: "1", IsHintUpdatable: true},
{Scope: ScopeGlobal | ScopeSession, Name: "completion_type", Value: "NO_CHAIN"},
{Scope: ScopeGlobal, Name: "binlog_checksum", Value: "CRC32"},
{Scope: ScopeNone, Name: "report_port", Value: "3306"},
{Scope: ScopeGlobal | ScopeSession, Name: ShowOldTemporals, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "query_cache_limit", Value: "1048576"},
{Scope: ScopeGlobal, Name: "innodb_buffer_pool_size", Value: "134217728"},
{Scope: ScopeGlobal, Name: InnodbAdaptiveFlushing, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeGlobal, Name: "innodb_monitor_enable", Value: ""},
{Scope: ScopeNone, Name: "date_format", Value: "%Y-%m-%d"},
{Scope: ScopeGlobal, Name: "innodb_buffer_pool_filename", Value: "ib_buffer_pool"},
{Scope: ScopeGlobal, Name: "slow_launch_time", Value: "2"},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_use_transactions", Value: ""},
{Scope: ScopeNone, Name: "innodb_purge_threads", Value: "1"},
{Scope: ScopeGlobal, Name: "innodb_concurrency_tickets", Value: "5000"},
{Scope: ScopeGlobal, Name: "innodb_monitor_reset_all", Value: ""},
{Scope: ScopeNone, Name: "performance_schema_users_size", Value: "100"},
{Scope: ScopeGlobal, Name: "ndb_log_updated_only", Value: ""},
{Scope: ScopeNone, Name: "basedir", Value: "/usr/local/mysql"},
{Scope: ScopeGlobal, Name: "innodb_old_blocks_time", Value: "1000"},
{Scope: ScopeGlobal, Name: "innodb_stats_method", Value: "nulls_equal"},
{Scope: ScopeGlobal, Name: LocalInFile, Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "myisam_stats_method", Value: "nulls_unequal"},
{Scope: ScopeNone, Name: "version_compile_os", Value: "osx10.8"},
{Scope: ScopeNone, Name: "relay_log_recovery", Value: "0"},
{Scope: ScopeNone, Name: "old", Value: "0"},
{Scope: ScopeGlobal | ScopeSession, Name: InnodbTableLocks, Value: BoolOn, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeNone, Name: PerformanceSchema, Value: BoolOff, Type: TypeBool},
{Scope: ScopeNone, Name: "myisam_recover_options", Value: BoolOff},
{Scope: ScopeGlobal | ScopeSession, Name: NetBufferLength, Value: "16384"},
{Scope: ScopeGlobal | ScopeSession, Name: "binlog_row_image", Value: "FULL"},
{Scope: ScopeNone, Name: "innodb_locks_unsafe_for_binlog", Value: "0"},
{Scope: ScopeSession, Name: "rbr_exec_mode", Value: ""},
{Scope: ScopeGlobal, Name: "myisam_max_sort_file_size", Value: "9223372036853727232"},
{Scope: ScopeNone, Name: "back_log", Value: "80"},
{Scope: ScopeSession, Name: "pseudo_thread_id", Value: ""},
{Scope: ScopeNone, Name: "have_dynamic_loading", Value: "YES"},
{Scope: ScopeGlobal, Name: "rewriter_verbose", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_undo_logs", Value: "128"},
{Scope: ScopeNone, Name: "performance_schema_max_cond_instances", Value: "3504"},
{Scope: ScopeGlobal, Name: "delayed_insert_limit", Value: "100"},
{Scope: ScopeGlobal, Name: Flush, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal | ScopeSession, Name: "eq_range_index_dive_limit", Value: "200", IsHintUpdatable: true},
{Scope: ScopeNone, Name: "performance_schema_events_stages_history_size", Value: "10"},
{Scope: ScopeGlobal | ScopeSession, Name: "ndb_join_pushdown", Value: ""},
{Scope: ScopeGlobal, Name: "validate_password_special_char_count", Value: "1"},
{Scope: ScopeNone, Name: "performance_schema_max_thread_instances", Value: "402"},
{Scope: ScopeGlobal | ScopeSession, Name: "ndbinfo_show_hidden", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "net_read_timeout", Value: "30"},
{Scope: ScopeNone, Name: "innodb_page_size", Value: "16384"},
{Scope: ScopeNone, Name: "innodb_log_file_size", Value: "50331648"},
{Scope: ScopeGlobal, Name: "sync_relay_log_info", Value: "10000"},
{Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace_limit", Value: "1"},
{Scope: ScopeNone, Name: "innodb_ft_max_token_size", Value: "84"},
{Scope: ScopeGlobal, Name: ValidatePasswordLength, Value: "8", Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true},
{Scope: ScopeGlobal, Name: "ndb_log_binlog_index", Value: ""},
{Scope: ScopeGlobal, Name: "innodb_api_bk_commit_interval", Value: "5"},
{Scope: ScopeNone, Name: "innodb_undo_directory", Value: "."},
{Scope: ScopeNone, Name: "bind_address", Value: "*"},
{Scope: ScopeGlobal, Name: "innodb_sync_spin_loops", Value: "30"},
{Scope: ScopeGlobal | ScopeSession, Name: SQLSafeUpdates, Value: BoolOff, Type: TypeBool, IsHintUpdatable: true},
{Scope: ScopeNone, Name: "tmpdir", Value: "/var/tmp/"},
{Scope: ScopeGlobal, Name: "innodb_thread_concurrency", Value: "0"},
{Scope: ScopeGlobal, Name: "innodb_buffer_pool_dump_pct", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "lc_time_names", Value: "en_US"},
{Scope: ScopeGlobal | ScopeSession, Name: "max_statement_time", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: EndMarkersInJSON, Value: BoolOff, Type: TypeBool, IsHintUpdatable: true},
{Scope: ScopeGlobal, Name: AvoidTemporalUpgrade, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "key_cache_age_threshold", Value: "300"},
{Scope: ScopeGlobal, Name: InnodbStatusOutput, Value: BoolOff, Type: TypeBool, AutoConvertNegativeBool: true},
{Scope: ScopeSession, Name: "identity", Value: ""},
{Scope: ScopeGlobal | ScopeSession, Name: "min_examined_row_limit", Value: "0"},
{Scope: ScopeGlobal, Name: "sync_frm", Type: TypeBool, Value: BoolOn},
{Scope: ScopeGlobal, Name: "innodb_online_alter_log_max_size", Value: "134217728"},
{Scope: ScopeGlobal | ScopeSession, Name: "information_schema_stats_expiry", Value: "86400"},
{Scope: ScopeGlobal, Name: ThreadPoolSize, Value: "16", Type: TypeUnsigned, MinValue: 1, MaxValue: 64, AutoConvertOutOfRange: true},
{Scope: ScopeNone, Name: "lower_case_file_system", Value: "1"},
	// For compatibility purposes, we should leave them alone.
	// TODO: Follow the Terminology Updates of MySQL after their changes arrive.
// https://mysqlhighavailability.com/mysql-terminology-updates/
{Scope: ScopeSession, Name: PseudoSlaveMode, Value: "", Type: TypeInt},
{Scope: ScopeGlobal, Name: "slave_pending_jobs_size_max", Value: "16777216"},
{Scope: ScopeGlobal, Name: "slave_transaction_retries", Value: "10"},
{Scope: ScopeGlobal, Name: "slave_checkpoint_period", Value: "300"},
{Scope: ScopeGlobal, Name: MasterVerifyChecksum, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_trace_level", Value: ""},
{Scope: ScopeGlobal, Name: "master_info_repository", Value: "FILE"},
{Scope: ScopeGlobal, Name: "rpl_stop_slave_timeout", Value: "31536000"},
{Scope: ScopeGlobal, Name: "slave_net_timeout", Value: "3600"},
{Scope: ScopeGlobal, Name: "sync_master_info", Value: "10000"},
{Scope: ScopeGlobal, Name: "init_slave", Value: ""},
{Scope: ScopeGlobal, Name: SlaveCompressedProtocol, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_slave_trace_level", Value: ""},
{Scope: ScopeGlobal, Name: LogSlowSlaveStatements, Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "slave_checkpoint_group", Value: "512"},
{Scope: ScopeNone, Name: "slave_load_tmpdir", Value: "/var/tmp/"},
{Scope: ScopeGlobal, Name: "slave_parallel_type", Value: ""},
{Scope: ScopeGlobal, Name: "slave_parallel_workers", Value: "0"},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_timeout", Value: "10000", Type: TypeInt},
{Scope: ScopeNone, Name: "slave_skip_errors", Value: BoolOff},
{Scope: ScopeGlobal, Name: "sql_slave_skip_counter", Value: "0"},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_slave_enabled", Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_enabled", Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "slave_preserve_commit_order", Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "slave_exec_mode", Value: "STRICT"},
{Scope: ScopeNone, Name: "log_slave_updates", Value: BoolOff, Type: TypeBool},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_wait_point", Value: "AFTER_SYNC", Type: TypeEnum, PossibleValues: []string{"AFTER_SYNC", "AFTER_COMMIT"}},
{Scope: ScopeGlobal, Name: "slave_sql_verify_checksum", Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal, Name: "slave_max_allowed_packet", Value: "1073741824"},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_wait_for_slave_count", Value: "1", Type: TypeInt, MinValue: 1, MaxValue: 65535},
{Scope: ScopeGlobal, Name: "rpl_semi_sync_master_wait_no_slave", Value: BoolOn, Type: TypeBool},
{Scope: ScopeGlobal, Name: "slave_rows_search_algorithms", Value: "TABLE_SCAN,INDEX_SCAN"},
{Scope: ScopeGlobal, Name: SlaveAllowBatching, Value: BoolOff, Type: TypeBool},
}
| sessionctx/variable/noop.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.0001875996676972136,
0.00017121274140663445,
0.00016458280151709914,
0.0001713034580461681,
0.0000030207879717636388
] |
{
"id": 2,
"code_window": [
"\n",
"func (e *AnalyzeColumnsExec) open(ranges []*ranger.Range) error {\n",
"\te.resultHandler = &tableResultHandler{}\n",
"\tfirstPartRanges, secondPartRanges := distsql.SplitRangesBySign(ranges, true, false, !hasPkHist(e.handleCols))\n",
"\tfirstResult, err := e.buildResp(firstPartRanges)\n",
"\tif err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfirstPartRanges, secondPartRanges := distsql.SplitRangesAcrossInt64Boundary(ranges, true, false, !hasPkHist(e.handleCols))\n"
],
"file_path": "executor/analyze.go",
"type": "replace",
"edit_start_line_idx": 600
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"bytes"
"context"
"fmt"
"math"
"math/rand"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/cznic/mathutil"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/distsql"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/store/tikv"
tikvstore "github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/pingcap/tipb/go-tipb"
"go.uber.org/zap"
)
var _ Executor = &AnalyzeExec{}
// AnalyzeExec represents Analyze executor.
type AnalyzeExec struct {
baseExecutor
tasks []*analyzeTask
wg *sync.WaitGroup
opts map[ast.AnalyzeOptionType]uint64
}
var (
// RandSeed is the seed for randing package.
// It's public for test.
RandSeed = int64(1)
)
const (
maxRegionSampleSize = 1000
maxSketchSize = 10000
)
// Next implements the Executor Next interface.
func (e *AnalyzeExec) Next(ctx context.Context, req *chunk.Chunk) error {
concurrency, err := getBuildStatsConcurrency(e.ctx)
if err != nil {
return err
}
taskCh := make(chan *analyzeTask, len(e.tasks))
resultCh := make(chan analyzeResult, len(e.tasks))
e.wg.Add(concurrency)
for i := 0; i < concurrency; i++ {
go e.analyzeWorker(taskCh, resultCh, i == 0)
}
for _, task := range e.tasks {
statistics.AddNewAnalyzeJob(task.job)
}
for _, task := range e.tasks {
taskCh <- task
}
close(taskCh)
statsHandle := domain.GetDomain(e.ctx).StatsHandle()
panicCnt := 0
pruneMode := variable.PartitionPruneMode(e.ctx.GetSessionVars().PartitionPruneMode.Load())
	// needGlobalStats indicates whether we should merge the partition-level stats into global-level stats.
needGlobalStats := pruneMode == variable.Dynamic
type globalStatsKey struct {
tableID int64
indexID int64
}
type globalStatsInfo struct {
isIndex int
		// When `isIndex == 0`, idxID is the column ID.
		// Otherwise, idxID is the index ID.
idxID int64
statsVersion int
}
	// globalStatsMap records which partitioned tables, and which of their columns and indexes, need global-level stats.
	// Each key stores the tableID and indexID of such an item.
	// Each value carries the additional information needed to build the global-level stats.
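	// For example (hypothetical IDs): the key {tableID: 100, indexID: -1} with value {isIndex: 0, idxID: <column ID>, statsVersion: 2}
	// marks the column stats of partitioned table 100 for a global-level rebuild, while a key with a real index ID
	// refers to one of that table's indexes.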
globalStatsMap := make(map[globalStatsKey]globalStatsInfo)
finishJobWithLogFn := func(ctx context.Context, job *statistics.AnalyzeJob, meetError bool) {
job.Finish(meetError)
if job != nil {
logutil.Logger(ctx).Info(fmt.Sprintf("analyze table `%s`.`%s` has %s", job.DBName, job.TableName, job.State),
zap.String("partition", job.PartitionName),
zap.String("job info", job.JobInfo),
zap.Time("start time", job.StartTime),
zap.Time("end time", job.EndTime),
zap.String("cost", job.EndTime.Sub(job.StartTime).String()))
}
}
for panicCnt < concurrency {
result, ok := <-resultCh
if !ok {
break
}
if result.Err != nil {
err = result.Err
if err == errAnalyzeWorkerPanic {
panicCnt++
} else {
logutil.Logger(ctx).Error("analyze failed", zap.Error(err))
}
finishJobWithLogFn(ctx, result.job, true)
continue
}
statisticsID := result.TableID.GetStatisticsID()
for i, hg := range result.Hist {
if result.TableID.IsPartitionTable() && needGlobalStats {
				// If the histogram does not belong to an index, set idxID to -1 to distinguish it from index statistics.
idxID := int64(-1)
if result.IsIndex != 0 {
idxID = hg.ID
}
globalStatsID := globalStatsKey{result.TableID.TableID, idxID}
if _, ok := globalStatsMap[globalStatsID]; !ok {
globalStatsMap[globalStatsID] = globalStatsInfo{result.IsIndex, hg.ID, result.StatsVer}
}
}
err1 := statsHandle.SaveStatsToStorage(statisticsID, result.Count, result.IsIndex, hg, result.Cms[i], result.TopNs[i], result.Fms[i], result.StatsVer, 1)
if err1 != nil {
err = err1
logutil.Logger(ctx).Error("save stats to storage failed", zap.Error(err))
finishJobWithLogFn(ctx, result.job, true)
continue
}
}
if err1 := statsHandle.SaveExtendedStatsToStorage(statisticsID, result.ExtStats, false); err1 != nil {
err = err1
logutil.Logger(ctx).Error("save extended stats to storage failed", zap.Error(err))
finishJobWithLogFn(ctx, result.job, true)
} else {
finishJobWithLogFn(ctx, result.job, false)
}
}
for _, task := range e.tasks {
statistics.MoveToHistory(task.job)
}
if err != nil {
return err
}
if needGlobalStats {
for globalStatsID, info := range globalStatsMap {
globalStats, err := statsHandle.MergePartitionStats2GlobalStatsByTableID(e.ctx, e.opts, infoschema.GetInfoSchema(e.ctx), globalStatsID.tableID, info.isIndex, info.idxID)
if err != nil {
if types.ErrPartitionStatsMissing.Equal(err) {
					// When some partition-level stats are missing, we need to report a warning.
e.ctx.GetSessionVars().StmtCtx.AppendWarning(err)
continue
}
return err
}
for i := 0; i < globalStats.Num; i++ {
hg, cms, topN, fms := globalStats.Hg[i], globalStats.Cms[i], globalStats.TopN[i], globalStats.Fms[i]
err = statsHandle.SaveStatsToStorage(globalStatsID.tableID, globalStats.Count, info.isIndex, hg, cms, topN, fms, info.statsVersion, 1)
if err != nil {
logutil.Logger(ctx).Error("save global-level stats to storage failed", zap.Error(err))
}
}
}
}
return statsHandle.Update(infoschema.GetInfoSchema(e.ctx))
}
func getBuildStatsConcurrency(ctx sessionctx.Context) (int, error) {
sessionVars := ctx.GetSessionVars()
concurrency, err := variable.GetSessionSystemVar(sessionVars, variable.TiDBBuildStatsConcurrency)
if err != nil {
return 0, err
}
c, err := strconv.ParseInt(concurrency, 10, 64)
return int(c), err
}
type taskType int
const (
colTask taskType = iota
idxTask
fastTask
pkIncrementalTask
idxIncrementalTask
)
type analyzeTask struct {
taskType taskType
idxExec *AnalyzeIndexExec
colExec *AnalyzeColumnsExec
fastExec *AnalyzeFastExec
idxIncrementalExec *analyzeIndexIncrementalExec
colIncrementalExec *analyzePKIncrementalExec
job *statistics.AnalyzeJob
}
var errAnalyzeWorkerPanic = errors.New("analyze worker panic")
func (e *AnalyzeExec) analyzeWorker(taskCh <-chan *analyzeTask, resultCh chan<- analyzeResult, isCloseChanThread bool) {
var task *analyzeTask
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
stackSize := runtime.Stack(buf, false)
buf = buf[:stackSize]
logutil.BgLogger().Error("analyze worker panicked", zap.String("stack", string(buf)))
metrics.PanicCounter.WithLabelValues(metrics.LabelAnalyze).Inc()
resultCh <- analyzeResult{
Err: errAnalyzeWorkerPanic,
job: task.job,
}
}
e.wg.Done()
if isCloseChanThread {
e.wg.Wait()
close(resultCh)
}
}()
for {
var ok bool
task, ok = <-taskCh
if !ok {
break
}
task.job.Start()
switch task.taskType {
case colTask:
task.colExec.job = task.job
for _, result := range analyzeColumnsPushdown(task.colExec) {
resultCh <- result
}
case idxTask:
task.idxExec.job = task.job
resultCh <- analyzeIndexPushdown(task.idxExec)
case fastTask:
task.fastExec.job = task.job
task.job.Start()
for _, result := range analyzeFastExec(task.fastExec) {
resultCh <- result
}
case pkIncrementalTask:
task.colIncrementalExec.job = task.job
resultCh <- analyzePKIncremental(task.colIncrementalExec)
case idxIncrementalTask:
task.idxIncrementalExec.job = task.job
resultCh <- analyzeIndexIncremental(task.idxIncrementalExec)
}
}
}
func analyzeIndexPushdown(idxExec *AnalyzeIndexExec) analyzeResult {
ranges := ranger.FullRange()
	// For a single-column index, we do not load null rows from TiKV, so the built histogram does not include
	// null values; its `NullCount` is set from the result of another distsql call that fetches the null rows.
	// For a multi-column index, we cannot define null for the rows, so we still use the full range, and rows
	// containing null fields remain in the built histograms. Note that the `NullCount` of histograms for
	// multi-column indexes is therefore always 0.
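	// For example, for an index on column a the null count comes from the extra request over the null range,
	// while for an index on (a, b) rows containing nulls stay in the histogram and NullCount remains 0.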
if len(idxExec.idxInfo.Columns) == 1 {
ranges = ranger.FullNotNullRange()
}
hist, cms, fms, topN, err := idxExec.buildStats(ranges, true)
if err != nil {
return analyzeResult{Err: err, job: idxExec.job}
}
var statsVer = statistics.Version1
if idxExec.analyzePB.IdxReq.Version != nil {
statsVer = int(*idxExec.analyzePB.IdxReq.Version)
}
result := analyzeResult{
TableID: idxExec.tableID,
Hist: []*statistics.Histogram{hist},
Cms: []*statistics.CMSketch{cms},
TopNs: []*statistics.TopN{topN},
Fms: []*statistics.FMSketch{fms},
IsIndex: 1,
job: idxExec.job,
StatsVer: statsVer,
}
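	// The reported count is the null count plus the cumulative count of the last histogram bucket,
	// plus the rows moved into the TopN (those rows were removed from the histogram when the TopN was extracted).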
result.Count = hist.NullCount
if hist.Len() > 0 {
result.Count += hist.Buckets[hist.Len()-1].Count
}
if topN.TotalCount() > 0 {
result.Count += int64(topN.TotalCount())
}
return result
}
// AnalyzeIndexExec represents analyze index push down executor.
type AnalyzeIndexExec struct {
ctx sessionctx.Context
tableID core.AnalyzeTableID
idxInfo *model.IndexInfo
isCommonHandle bool
concurrency int
analyzePB *tipb.AnalyzeReq
result distsql.SelectResult
countNullRes distsql.SelectResult
opts map[ast.AnalyzeOptionType]uint64
job *statistics.AnalyzeJob
}
// fetchAnalyzeResult builds and dispatches the `kv.Request` from the given ranges, and stores the `SelectResult`
// in the corresponding field based on the `isNullRange` argument, which indicates whether the range is the
// special null range used by a single-column index to get the null count.
func (e *AnalyzeIndexExec) fetchAnalyzeResult(ranges []*ranger.Range, isNullRange bool) error {
var builder distsql.RequestBuilder
var kvReqBuilder *distsql.RequestBuilder
if e.isCommonHandle && e.idxInfo.Primary {
kvReqBuilder = builder.SetHandleRangesForTables(e.ctx.GetSessionVars().StmtCtx, []int64{e.tableID.GetStatisticsID()}, true, ranges, nil)
} else {
kvReqBuilder = builder.SetIndexRangesForTables(e.ctx.GetSessionVars().StmtCtx, []int64{e.tableID.GetStatisticsID()}, e.idxInfo.ID, ranges)
}
kvReq, err := kvReqBuilder.
SetAnalyzeRequest(e.analyzePB).
SetStartTS(math.MaxUint64).
SetKeepOrder(true).
SetConcurrency(e.concurrency).
Build()
if err != nil {
return err
}
ctx := context.TODO()
result, err := distsql.Analyze(ctx, e.ctx.GetClient(), kvReq, e.ctx.GetSessionVars().KVVars, e.ctx.GetSessionVars().InRestrictedSQL, e.ctx.GetSessionVars().StmtCtx.MemTracker)
if err != nil {
return err
}
if isNullRange {
e.countNullRes = result
} else {
e.result = result
}
return nil
}
func (e *AnalyzeIndexExec) open(ranges []*ranger.Range, considerNull bool) error {
err := e.fetchAnalyzeResult(ranges, false)
if err != nil {
return err
}
if considerNull && len(e.idxInfo.Columns) == 1 {
ranges = ranger.NullRange()
err = e.fetchAnalyzeResult(ranges, true)
if err != nil {
return err
}
}
return nil
}
func updateIndexResult(
ctx *stmtctx.StatementContext,
resp *tipb.AnalyzeIndexResp,
job *statistics.AnalyzeJob,
hist *statistics.Histogram,
cms *statistics.CMSketch,
fms *statistics.FMSketch,
topn *statistics.TopN,
idxInfo *model.IndexInfo,
numBuckets int,
numTopN int,
statsVer int,
) (
*statistics.Histogram,
*statistics.CMSketch,
*statistics.FMSketch,
*statistics.TopN,
error,
) {
var err error
needCMS := cms != nil
respHist := statistics.HistogramFromProto(resp.Hist)
if job != nil {
job.Update(int64(respHist.TotalRowCount()))
}
hist, err = statistics.MergeHistograms(ctx, hist, respHist, numBuckets, statsVer)
if err != nil {
return nil, nil, nil, nil, err
}
if needCMS {
if resp.Cms == nil {
logutil.Logger(context.TODO()).Warn("nil CMS in response", zap.String("table", idxInfo.Table.O), zap.String("index", idxInfo.Name.O))
} else {
cm, tmpTopN := statistics.CMSketchAndTopNFromProto(resp.Cms)
if err := cms.MergeCMSketch(cm); err != nil {
return nil, nil, nil, nil, err
}
statistics.MergeTopNAndUpdateCMSketch(topn, tmpTopN, cms, uint32(numTopN))
}
}
if fms != nil && resp.Collector != nil && resp.Collector.FmSketch != nil {
fms.MergeFMSketch(statistics.FMSketchFromProto(resp.Collector.FmSketch))
}
return hist, cms, fms, topn, nil
}
func (e *AnalyzeIndexExec) buildStatsFromResult(result distsql.SelectResult, needCMS bool) (*statistics.Histogram, *statistics.CMSketch, *statistics.FMSketch, *statistics.TopN, error) {
failpoint.Inject("buildStatsFromResult", func(val failpoint.Value) {
if val.(bool) {
failpoint.Return(nil, nil, nil, nil, errors.New("mock buildStatsFromResult error"))
}
})
hist := &statistics.Histogram{}
var cms *statistics.CMSketch
var topn *statistics.TopN
if needCMS {
cms = statistics.NewCMSketch(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]))
topn = statistics.NewTopN(int(e.opts[ast.AnalyzeOptNumTopN]))
}
fms := statistics.NewFMSketch(maxSketchSize)
statsVer := statistics.Version1
if e.analyzePB.IdxReq.Version != nil {
statsVer = int(*e.analyzePB.IdxReq.Version)
}
for {
data, err := result.NextRaw(context.TODO())
if err != nil {
return nil, nil, nil, nil, err
}
if data == nil {
break
}
resp := &tipb.AnalyzeIndexResp{}
err = resp.Unmarshal(data)
if err != nil {
return nil, nil, nil, nil, err
}
hist, cms, fms, topn, err = updateIndexResult(e.ctx.GetSessionVars().StmtCtx, resp, e.job, hist, cms, fms, topn,
e.idxInfo, int(e.opts[ast.AnalyzeOptNumBuckets]), int(e.opts[ast.AnalyzeOptNumTopN]), statsVer)
if err != nil {
return nil, nil, nil, nil, err
}
}
if needCMS && topn.TotalCount() > 0 {
hist.RemoveVals(topn.TopN)
}
if needCMS && cms != nil {
cms.CalcDefaultValForAnalyze(uint64(hist.NDV))
}
return hist, cms, fms, topn, nil
}
func (e *AnalyzeIndexExec) buildStats(ranges []*ranger.Range, considerNull bool) (hist *statistics.Histogram, cms *statistics.CMSketch, fms *statistics.FMSketch, topN *statistics.TopN, err error) {
if err = e.open(ranges, considerNull); err != nil {
return nil, nil, nil, nil, err
}
defer func() {
err1 := closeAll(e.result, e.countNullRes)
if err == nil {
err = err1
}
}()
hist, cms, fms, topN, err = e.buildStatsFromResult(e.result, true)
if err != nil {
return nil, nil, nil, nil, err
}
if e.countNullRes != nil {
nullHist, _, _, _, err := e.buildStatsFromResult(e.countNullRes, false)
if err != nil {
return nil, nil, nil, nil, err
}
if l := nullHist.Len(); l > 0 {
hist.NullCount = nullHist.Buckets[l-1].Count
}
}
hist.ID = e.idxInfo.ID
return hist, cms, fms, topN, nil
}
func analyzeColumnsPushdown(colExec *AnalyzeColumnsExec) []analyzeResult {
var ranges []*ranger.Range
if hc := colExec.handleCols; hc != nil {
if hc.IsInt() {
ranges = ranger.FullIntRange(mysql.HasUnsignedFlag(hc.GetCol(0).RetType.Flag))
} else {
ranges = ranger.FullNotNullRange()
}
} else {
ranges = ranger.FullIntRange(false)
}
collExtStats := colExec.ctx.GetSessionVars().EnableExtendedStats
hists, cms, topNs, fms, extStats, err := colExec.buildStats(ranges, collExtStats)
if err != nil {
return []analyzeResult{{Err: err, job: colExec.job}}
}
if hasPkHist(colExec.handleCols) {
PKresult := analyzeResult{
TableID: colExec.tableID,
Hist: hists[:1],
Cms: cms[:1],
TopNs: topNs[:1],
Fms: fms[:1],
ExtStats: nil,
job: nil,
StatsVer: statistics.Version1,
}
PKresult.Count = int64(PKresult.Hist[0].TotalRowCount())
restResult := analyzeResult{
TableID: colExec.tableID,
Hist: hists[1:],
Cms: cms[1:],
TopNs: topNs[1:],
Fms: fms[1:],
ExtStats: extStats,
job: colExec.job,
StatsVer: colExec.analyzeVer,
}
restResult.Count = PKresult.Count
return []analyzeResult{PKresult, restResult}
}
var result []analyzeResult
if colExec.analyzePB.Tp == tipb.AnalyzeType_TypeMixed {
result = append(result, analyzeResult{
TableID: colExec.tableID,
Hist: []*statistics.Histogram{hists[0]},
Cms: []*statistics.CMSketch{cms[0]},
TopNs: []*statistics.TopN{topNs[0]},
Fms: []*statistics.FMSketch{nil},
IsIndex: 1,
job: colExec.job,
StatsVer: colExec.analyzeVer,
})
hists = hists[1:]
cms = cms[1:]
topNs = topNs[1:]
}
colResult := analyzeResult{
TableID: colExec.tableID,
Hist: hists,
Cms: cms,
TopNs: topNs,
Fms: fms,
ExtStats: extStats,
job: colExec.job,
StatsVer: colExec.analyzeVer,
}
colResult.Count = int64(colResult.Hist[0].TotalRowCount())
if colResult.StatsVer == statistics.Version2 {
colResult.Count += int64(topNs[0].TotalCount())
}
return append(result, colResult)
}
// AnalyzeColumnsExec represents Analyze columns push down executor.
type AnalyzeColumnsExec struct {
ctx sessionctx.Context
tableID core.AnalyzeTableID
colsInfo []*model.ColumnInfo
handleCols core.HandleCols
concurrency int
analyzePB *tipb.AnalyzeReq
commonHandle *model.IndexInfo
resultHandler *tableResultHandler
opts map[ast.AnalyzeOptionType]uint64
job *statistics.AnalyzeJob
analyzeVer int
}
func (e *AnalyzeColumnsExec) open(ranges []*ranger.Range) error {
e.resultHandler = &tableResultHandler{}
firstPartRanges, secondPartRanges := distsql.SplitRangesBySign(ranges, true, false, !hasPkHist(e.handleCols))
firstResult, err := e.buildResp(firstPartRanges)
if err != nil {
return err
}
if len(secondPartRanges) == 0 {
e.resultHandler.open(nil, firstResult)
return nil
}
var secondResult distsql.SelectResult
secondResult, err = e.buildResp(secondPartRanges)
if err != nil {
return err
}
e.resultHandler.open(firstResult, secondResult)
return nil
}
func (e *AnalyzeColumnsExec) buildResp(ranges []*ranger.Range) (distsql.SelectResult, error) {
var builder distsql.RequestBuilder
reqBuilder := builder.SetHandleRangesForTables(e.ctx.GetSessionVars().StmtCtx, []int64{e.tableID.GetStatisticsID()}, e.handleCols != nil && !e.handleCols.IsInt(), ranges, nil)
	// Always set KeepOrder of the request to true, so that the `correlation` of columns can be computed correctly.
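	// (Roughly speaking, the correlation statistic compares the physical order of rows with the order of the
	// column values, so it is only meaningful when the rows are scanned in handle order.)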
kvReq, err := reqBuilder.
SetAnalyzeRequest(e.analyzePB).
SetStartTS(math.MaxUint64).
SetKeepOrder(true).
SetConcurrency(e.concurrency).
Build()
if err != nil {
return nil, err
}
ctx := context.TODO()
result, err := distsql.Analyze(ctx, e.ctx.GetClient(), kvReq, e.ctx.GetSessionVars().KVVars, e.ctx.GetSessionVars().InRestrictedSQL, e.ctx.GetSessionVars().StmtCtx.MemTracker)
if err != nil {
return nil, err
}
return result, nil
}
func (e *AnalyzeColumnsExec) buildStats(ranges []*ranger.Range, needExtStats bool) (hists []*statistics.Histogram, cms []*statistics.CMSketch, topNs []*statistics.TopN, fms []*statistics.FMSketch, extStats *statistics.ExtendedStatsColl, err error) {
if err = e.open(ranges); err != nil {
return nil, nil, nil, nil, nil, err
}
defer func() {
if err1 := e.resultHandler.Close(); err1 != nil {
hists = nil
cms = nil
extStats = nil
err = err1
}
}()
var handleHist *statistics.Histogram
var handleCms *statistics.CMSketch
var handleFms *statistics.FMSketch
var handleTopn *statistics.TopN
statsVer := statistics.Version1
if e.analyzePB.Tp == tipb.AnalyzeType_TypeMixed {
handleHist = &statistics.Histogram{}
handleCms = statistics.NewCMSketch(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]))
handleTopn = statistics.NewTopN(int(e.opts[ast.AnalyzeOptNumTopN]))
handleFms = statistics.NewFMSketch(maxSketchSize)
if e.analyzePB.IdxReq.Version != nil {
statsVer = int(*e.analyzePB.IdxReq.Version)
}
}
pkHist := &statistics.Histogram{}
collectors := make([]*statistics.SampleCollector, len(e.colsInfo))
for i := range collectors {
collectors[i] = &statistics.SampleCollector{
IsMerger: true,
FMSketch: statistics.NewFMSketch(maxSketchSize),
MaxSampleSize: int64(e.opts[ast.AnalyzeOptNumSamples]),
CMSketch: statistics.NewCMSketch(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth])),
}
}
for {
data, err1 := e.resultHandler.nextRaw(context.TODO())
if err1 != nil {
return nil, nil, nil, nil, nil, err1
}
if data == nil {
break
}
sc := e.ctx.GetSessionVars().StmtCtx
var colResp *tipb.AnalyzeColumnsResp
if e.analyzePB.Tp == tipb.AnalyzeType_TypeMixed {
resp := &tipb.AnalyzeMixedResp{}
err = resp.Unmarshal(data)
if err != nil {
return nil, nil, nil, nil, nil, err
}
colResp = resp.ColumnsResp
handleHist, handleCms, handleFms, handleTopn, err = updateIndexResult(sc, resp.IndexResp, nil, handleHist,
handleCms, handleFms, handleTopn, e.commonHandle, int(e.opts[ast.AnalyzeOptNumBuckets]),
int(e.opts[ast.AnalyzeOptNumTopN]), statsVer)
if err != nil {
return nil, nil, nil, nil, nil, err
}
} else {
colResp = &tipb.AnalyzeColumnsResp{}
err = colResp.Unmarshal(data)
}
rowCount := int64(0)
if hasPkHist(e.handleCols) {
respHist := statistics.HistogramFromProto(colResp.PkHist)
rowCount = int64(respHist.TotalRowCount())
pkHist, err = statistics.MergeHistograms(sc, pkHist, respHist, int(e.opts[ast.AnalyzeOptNumBuckets]), statistics.Version1)
if err != nil {
return nil, nil, nil, nil, nil, err
}
}
for i, rc := range colResp.Collectors {
respSample := statistics.SampleCollectorFromProto(rc)
rowCount = respSample.Count + respSample.NullCount
collectors[i].MergeSampleCollector(sc, respSample)
}
e.job.Update(rowCount)
}
timeZone := e.ctx.GetSessionVars().Location()
if hasPkHist(e.handleCols) {
pkInfo := e.handleCols.GetCol(0)
pkHist.ID = pkInfo.ID
err = pkHist.DecodeTo(pkInfo.RetType, timeZone)
if err != nil {
return nil, nil, nil, nil, nil, err
}
hists = append(hists, pkHist)
cms = append(cms, nil)
topNs = append(topNs, nil)
fms = append(fms, nil)
}
for i, col := range e.colsInfo {
if e.analyzeVer < 2 {
// In analyze version 2, we don't collect TopN this way. We will collect TopN from samples in `BuildColumnHistAndTopN()` below.
err := collectors[i].ExtractTopN(uint32(e.opts[ast.AnalyzeOptNumTopN]), e.ctx.GetSessionVars().StmtCtx, &col.FieldType, timeZone)
if err != nil {
return nil, nil, nil, nil, nil, err
}
topNs = append(topNs, collectors[i].TopN)
}
for j, s := range collectors[i].Samples {
collectors[i].Samples[j].Ordinal = j
collectors[i].Samples[j].Value, err = tablecodec.DecodeColumnValue(s.Value.GetBytes(), &col.FieldType, timeZone)
if err != nil {
return nil, nil, nil, nil, nil, err
}
			// When collation is enabled, we store the Key representation of the sampled data, so we set the kind
			// to `Bytes` here to avoid converting it to its Key representation again.
if collectors[i].Samples[j].Value.Kind() == types.KindString {
collectors[i].Samples[j].Value.SetBytes(collectors[i].Samples[j].Value.GetBytes())
}
}
var hg *statistics.Histogram
var err error
var topn *statistics.TopN
if e.analyzeVer < 2 {
hg, err = statistics.BuildColumn(e.ctx, int64(e.opts[ast.AnalyzeOptNumBuckets]), col.ID, collectors[i], &col.FieldType)
} else {
hg, topn, err = statistics.BuildColumnHistAndTopN(e.ctx, int(e.opts[ast.AnalyzeOptNumBuckets]), int(e.opts[ast.AnalyzeOptNumTopN]), col.ID, collectors[i], &col.FieldType)
topNs = append(topNs, topn)
}
if err != nil {
return nil, nil, nil, nil, nil, err
}
hists = append(hists, hg)
collectors[i].CMSketch.CalcDefaultValForAnalyze(uint64(hg.NDV))
cms = append(cms, collectors[i].CMSketch)
fms = append(fms, collectors[i].FMSketch)
}
if needExtStats {
statsHandle := domain.GetDomain(e.ctx).StatsHandle()
extStats, err = statsHandle.BuildExtendedStats(e.tableID.GetStatisticsID(), e.colsInfo, collectors)
if err != nil {
return nil, nil, nil, nil, nil, err
}
}
if handleHist != nil {
handleHist.ID = e.commonHandle.ID
if handleTopn != nil && handleTopn.TotalCount() > 0 {
handleHist.RemoveVals(handleTopn.TopN)
}
if handleCms != nil {
handleCms.CalcDefaultValForAnalyze(uint64(handleHist.NDV))
}
hists = append([]*statistics.Histogram{handleHist}, hists...)
cms = append([]*statistics.CMSketch{handleCms}, cms...)
fms = append([]*statistics.FMSketch{handleFms}, fms...)
topNs = append([]*statistics.TopN{handleTopn}, topNs...)
}
return hists, cms, topNs, fms, extStats, nil
}
func hasPkHist(handleCols core.HandleCols) bool {
return handleCols != nil && handleCols.IsInt()
}
func pkColsCount(handleCols core.HandleCols) int {
if handleCols == nil {
return 0
}
return handleCols.NumCols()
}
var (
fastAnalyzeHistogramSample = metrics.FastAnalyzeHistogram.WithLabelValues(metrics.LblGeneral, "sample")
fastAnalyzeHistogramAccessRegions = metrics.FastAnalyzeHistogram.WithLabelValues(metrics.LblGeneral, "access_regions")
fastAnalyzeHistogramScanKeys = metrics.FastAnalyzeHistogram.WithLabelValues(metrics.LblGeneral, "scan_keys")
)
func analyzeFastExec(exec *AnalyzeFastExec) []analyzeResult {
hists, cms, topNs, fms, err := exec.buildStats()
if err != nil {
return []analyzeResult{{Err: err, job: exec.job}}
}
var results []analyzeResult
pkColCount := pkColsCount(exec.handleCols)
if len(exec.idxsInfo) > 0 {
for i := pkColCount + len(exec.colsInfo); i < len(hists); i++ {
idxResult := analyzeResult{
TableID: exec.tableID,
Hist: []*statistics.Histogram{hists[i]},
Cms: []*statistics.CMSketch{cms[i]},
TopNs: []*statistics.TopN{topNs[i]},
Fms: []*statistics.FMSketch{nil},
IsIndex: 1,
Count: hists[i].NullCount,
job: exec.job,
StatsVer: statistics.Version1,
}
if hists[i].Len() > 0 {
idxResult.Count += hists[i].Buckets[hists[i].Len()-1].Count
}
if exec.rowCount != 0 {
idxResult.Count = exec.rowCount
}
results = append(results, idxResult)
}
}
hist := hists[0]
colResult := analyzeResult{
TableID: exec.tableID,
Hist: hists[:pkColCount+len(exec.colsInfo)],
Cms: cms[:pkColCount+len(exec.colsInfo)],
TopNs: topNs[:pkColCount+len(exec.colsInfo)],
Fms: fms[:pkColCount+len(exec.colsInfo)],
Count: hist.NullCount,
job: exec.job,
StatsVer: statistics.Version1,
}
if hist.Len() > 0 {
colResult.Count += hist.Buckets[hist.Len()-1].Count
}
if exec.rowCount != 0 {
colResult.Count = exec.rowCount
}
results = append(results, colResult)
return results
}
// AnalyzeFastExec represents Fast Analyze executor.
type AnalyzeFastExec struct {
ctx sessionctx.Context
tableID core.AnalyzeTableID
handleCols core.HandleCols
colsInfo []*model.ColumnInfo
idxsInfo []*model.IndexInfo
concurrency int
opts map[ast.AnalyzeOptionType]uint64
tblInfo *model.TableInfo
cache *tikv.RegionCache
wg *sync.WaitGroup
rowCount int64
sampCursor int32
sampTasks []*tikv.KeyLocation
scanTasks []*tikv.KeyLocation
collectors []*statistics.SampleCollector
randSeed int64
job *statistics.AnalyzeJob
estSampStep uint32
}
func (e *AnalyzeFastExec) calculateEstimateSampleStep() (err error) {
exec := e.ctx.(sqlexec.RestrictedSQLExecutor)
var stmt ast.StmtNode
stmt, err = exec.ParseWithParams(context.TODO(), "select flag from mysql.stats_histograms where table_id = %?", e.tableID.GetStatisticsID())
if err != nil {
return
}
var rows []chunk.Row
rows, _, err = exec.ExecRestrictedStmt(context.TODO(), stmt)
if err != nil {
return
}
var historyRowCount uint64
hasBeenAnalyzed := len(rows) != 0 && rows[0].GetInt64(0) == statistics.AnalyzeFlag
if hasBeenAnalyzed {
historyRowCount = uint64(domain.GetDomain(e.ctx).StatsHandle().GetPartitionStats(e.tblInfo, e.tableID.GetStatisticsID()).Count)
} else {
dbInfo, ok := domain.GetDomain(e.ctx).InfoSchema().SchemaByTable(e.tblInfo)
if !ok {
err = errors.Errorf("database not found for table '%s'", e.tblInfo.Name)
return
}
var rollbackFn func() error
rollbackFn, err = e.activateTxnForRowCount()
if err != nil {
return
}
defer func() {
if rollbackFn != nil {
err = rollbackFn()
}
}()
sql := new(strings.Builder)
sqlexec.MustFormatSQL(sql, "select count(*) from %n.%n", dbInfo.Name.L, e.tblInfo.Name.L)
if e.tblInfo.ID != e.tableID.GetStatisticsID() {
for _, definition := range e.tblInfo.Partition.Definitions {
if definition.ID == e.tableID.GetStatisticsID() {
sqlexec.MustFormatSQL(sql, " partition(%n)", definition.Name.L)
break
}
}
}
var rs sqlexec.RecordSet
rs, err = e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql.String())
if err != nil {
return
}
if rs == nil {
err = errors.Trace(errors.Errorf("empty record set"))
return
}
defer terror.Call(rs.Close)
chk := rs.NewChunk()
err = rs.Next(context.TODO(), chk)
if err != nil {
return
}
e.rowCount = chk.GetRow(0).GetInt64(0)
historyRowCount = uint64(e.rowCount)
}
totalSampSize := e.opts[ast.AnalyzeOptNumSamples]
e.estSampStep = uint32(historyRowCount / totalSampSize)
return
}
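// activateTxnForRowCount ensures there is an active transaction with low priority, RC isolation
// level and NotFillCache set for the row-count query, returning a rollback function when a new
// transaction had to be started.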
func (e *AnalyzeFastExec) activateTxnForRowCount() (rollbackFn func() error, err error) {
txn, err := e.ctx.Txn(true)
if err != nil {
if kv.ErrInvalidTxn.Equal(err) {
_, err := e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "begin")
if err != nil {
return nil, errors.Trace(err)
}
rollbackFn = func() error {
_, err := e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "rollback")
return err
}
} else {
return nil, errors.Trace(err)
}
}
txn.SetOption(tikvstore.Priority, tikvstore.PriorityLow)
txn.SetOption(tikvstore.IsolationLevel, tikvstore.RC)
txn.SetOption(tikvstore.NotFillCache, true)
return rollbackFn, nil
}
// buildSampTask builds the sample tasks and scan tasks by locating the regions that cover the table's handle key range.
func (e *AnalyzeFastExec) buildSampTask() (err error) {
bo := tikv.NewBackofferWithVars(context.Background(), 500, nil)
store, _ := e.ctx.GetStore().(tikv.Storage)
e.cache = store.GetRegionCache()
accessRegionsCounter := 0
pid := e.tableID.GetStatisticsID()
startKey, endKey := tablecodec.GetTableHandleKeyRange(pid)
targetKey := startKey
for {
// Search for the region which contains the targetKey.
loc, err := e.cache.LocateKey(bo, targetKey)
if err != nil {
return err
}
if bytes.Compare(endKey, loc.StartKey) < 0 {
break
}
accessRegionsCounter++
// Set the next search key.
targetKey = loc.EndKey
		// If all the KV pairs in the region belong to the table, add the region to the sample tasks.
if bytes.Compare(startKey, loc.StartKey) <= 0 && len(loc.EndKey) != 0 && bytes.Compare(loc.EndKey, endKey) <= 0 {
e.sampTasks = append(e.sampTasks, loc)
continue
}
e.scanTasks = append(e.scanTasks, loc)
if bytes.Compare(loc.StartKey, startKey) < 0 {
loc.StartKey = startKey
}
if bytes.Compare(endKey, loc.EndKey) < 0 || len(loc.EndKey) == 0 {
loc.EndKey = endKey
break
}
}
fastAnalyzeHistogramAccessRegions.Observe(float64(accessRegionsCounter))
return nil
}
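// decodeValues decodes a sampled row value into a map from column ID to datum, and also decodes
// the handle columns when the table has them.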
func (e *AnalyzeFastExec) decodeValues(handle kv.Handle, sValue []byte, wantCols map[int64]*types.FieldType) (values map[int64]types.Datum, err error) {
loc := e.ctx.GetSessionVars().Location()
values, err = tablecodec.DecodeRowToDatumMap(sValue, wantCols, loc)
if err != nil || e.handleCols == nil {
return values, err
}
wantCols = make(map[int64]*types.FieldType, e.handleCols.NumCols())
handleColIDs := make([]int64, e.handleCols.NumCols())
for i := 0; i < e.handleCols.NumCols(); i++ {
c := e.handleCols.GetCol(i)
handleColIDs[i] = c.ID
wantCols[c.ID] = c.RetType
}
return tablecodec.DecodeHandleToDatumMap(handle, handleColIDs, wantCols, loc, values)
}
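// getValueByInfo returns the sampled value of the given column, falling back to the column's
// original default value when it is missing from the decoded row.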
func (e *AnalyzeFastExec) getValueByInfo(colInfo *model.ColumnInfo, values map[int64]types.Datum) (types.Datum, error) {
val, ok := values[colInfo.ID]
if !ok {
return table.GetColOriginDefaultValue(e.ctx, colInfo)
}
return val, nil
}
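// updateCollectorSamples decodes one sampled key-value pair and writes the decoded values into
// the primary key, column and index sample collectors at the given sample position.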
func (e *AnalyzeFastExec) updateCollectorSamples(sValue []byte, sKey kv.Key, samplePos int32) (err error) {
var handle kv.Handle
handle, err = tablecodec.DecodeRowKey(sKey)
if err != nil {
return err
}
	// Decode the columns needed for ANALYZE TABLE.
wantCols := make(map[int64]*types.FieldType, len(e.colsInfo))
for _, col := range e.colsInfo {
wantCols[col.ID] = &col.FieldType
}
	// Pre-build the index->columns relationship and add any missing index columns to wantCols (needed for analyzing indexes).
index2Cols := make([][]*model.ColumnInfo, len(e.idxsInfo))
for i, idxInfo := range e.idxsInfo {
for _, idxCol := range idxInfo.Columns {
colInfo := e.tblInfo.Columns[idxCol.Offset]
index2Cols[i] = append(index2Cols[i], colInfo)
wantCols[colInfo.ID] = &colInfo.FieldType
}
}
	// Decode the column values in order.
var values map[int64]types.Datum
values, err = e.decodeValues(handle, sValue, wantCols)
if err != nil {
return err
}
// Update the primary key collector.
pkColsCount := pkColsCount(e.handleCols)
for i := 0; i < pkColsCount; i++ {
col := e.handleCols.GetCol(i)
v, ok := values[col.ID]
if !ok {
return errors.Trace(errors.Errorf("Primary key column not found"))
}
if e.collectors[i].Samples[samplePos] == nil {
e.collectors[i].Samples[samplePos] = &statistics.SampleItem{}
}
e.collectors[i].Samples[samplePos].Handle = handle
e.collectors[i].Samples[samplePos].Value = v
}
// Update the columns' collectors.
for j, colInfo := range e.colsInfo {
v, err := e.getValueByInfo(colInfo, values)
if err != nil {
return err
}
if e.collectors[pkColsCount+j].Samples[samplePos] == nil {
e.collectors[pkColsCount+j].Samples[samplePos] = &statistics.SampleItem{}
}
e.collectors[pkColsCount+j].Samples[samplePos].Handle = handle
e.collectors[pkColsCount+j].Samples[samplePos].Value = v
}
// Update the indexes' collectors.
for j, idxInfo := range e.idxsInfo {
idxVals := make([]types.Datum, 0, len(idxInfo.Columns))
cols := index2Cols[j]
for _, colInfo := range cols {
v, err := e.getValueByInfo(colInfo, values)
if err != nil {
return err
}
idxVals = append(idxVals, v)
}
var bytes []byte
bytes, err = codec.EncodeKey(e.ctx.GetSessionVars().StmtCtx, bytes, idxVals...)
if err != nil {
return err
}
if e.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos] == nil {
e.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos] = &statistics.SampleItem{}
}
e.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos].Handle = handle
e.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos].Value = types.NewBytesDatum(bytes)
}
return nil
}
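// handleBatchSeekResponse feeds a batch of sampled key-value pairs into the collectors and stops
// once the requested sample size has been reached.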
func (e *AnalyzeFastExec) handleBatchSeekResponse(kvMap map[string][]byte) (err error) {
length := int32(len(kvMap))
newCursor := atomic.AddInt32(&e.sampCursor, length)
samplePos := newCursor - length
for sKey, sValue := range kvMap {
exceedNeededSampleCounts := uint64(samplePos) >= e.opts[ast.AnalyzeOptNumSamples]
if exceedNeededSampleCounts {
atomic.StoreInt32(&e.sampCursor, int32(e.opts[ast.AnalyzeOptNumSamples]))
break
}
err = e.updateCollectorSamples(sValue, kv.Key(sKey), samplePos)
if err != nil {
return err
}
samplePos++
}
return nil
}
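// handleScanIter scans every key-value pair returned by the iterator and uses reservoir sampling
// to decide which pairs replace existing samples.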
func (e *AnalyzeFastExec) handleScanIter(iter kv.Iterator) (scanKeysSize int, err error) {
rander := rand.New(rand.NewSource(e.randSeed))
sampleSize := int64(e.opts[ast.AnalyzeOptNumSamples])
for ; iter.Valid() && err == nil; err = iter.Next() {
// reservoir sampling
scanKeysSize++
randNum := rander.Int63n(int64(e.sampCursor) + int64(scanKeysSize))
if randNum > sampleSize && e.sampCursor == int32(sampleSize) {
continue
}
p := rander.Int31n(int32(sampleSize))
if e.sampCursor < int32(sampleSize) {
p = e.sampCursor
e.sampCursor++
}
err = e.updateCollectorSamples(iter.Value(), iter.Key(), p)
if err != nil {
return
}
}
return
}
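// handleScanTasks scans the regions that only partially belong to the table and samples their
// rows through a snapshot iterator.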
func (e *AnalyzeFastExec) handleScanTasks(bo *tikv.Backoffer) (keysSize int, err error) {
snapshot := e.ctx.GetStore().GetSnapshot(kv.MaxVersion)
if e.ctx.GetSessionVars().GetReplicaRead().IsFollowerRead() {
snapshot.SetOption(tikvstore.ReplicaRead, tikvstore.ReplicaReadFollower)
}
for _, t := range e.scanTasks {
iter, err := snapshot.Iter(kv.Key(t.StartKey), kv.Key(t.EndKey))
if err != nil {
return keysSize, err
}
size, err := e.handleScanIter(iter)
keysSize += size
if err != nil {
return keysSize, err
}
}
return keysSize, nil
}
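// handleSampTasks is executed by each worker goroutine: it samples the regions assigned to workID
// with a randomized sample step and feeds the sampled key-value pairs into the collectors.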
func (e *AnalyzeFastExec) handleSampTasks(workID int, step uint32, err *error) {
defer e.wg.Done()
snapshot := e.ctx.GetStore().GetSnapshot(kv.MaxVersion)
snapshot.SetOption(tikvstore.NotFillCache, true)
snapshot.SetOption(tikvstore.IsolationLevel, tikvstore.RC)
snapshot.SetOption(tikvstore.Priority, tikvstore.PriorityLow)
if e.ctx.GetSessionVars().GetReplicaRead().IsFollowerRead() {
snapshot.SetOption(tikvstore.ReplicaRead, tikvstore.ReplicaReadFollower)
}
rander := rand.New(rand.NewSource(e.randSeed))
for i := workID; i < len(e.sampTasks); i += e.concurrency {
task := e.sampTasks[i]
// randomize the estimate step in range [step - 2 * sqrt(step), step]
if step > 4 { // 2*sqrt(x) < x
lower, upper := step-uint32(2*math.Sqrt(float64(step))), step
step = uint32(rander.Intn(int(upper-lower))) + lower
}
snapshot.SetOption(tikvstore.SampleStep, step)
kvMap := make(map[string][]byte)
var iter kv.Iterator
iter, *err = snapshot.Iter(kv.Key(task.StartKey), kv.Key(task.EndKey))
if *err != nil {
return
}
for iter.Valid() {
kvMap[string(iter.Key())] = iter.Value()
*err = iter.Next()
if *err != nil {
return
}
}
fastAnalyzeHistogramSample.Observe(float64(len(kvMap)))
*err = e.handleBatchSeekResponse(kvMap)
if *err != nil {
return
}
}
}
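// buildColumnStats builds the histogram, CM sketch, TopN and FM sketch of a single column from
// its sample collector.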
func (e *AnalyzeFastExec) buildColumnStats(ID int64, collector *statistics.SampleCollector, tp *types.FieldType, rowCount int64) (*statistics.Histogram, *statistics.CMSketch, *statistics.TopN, *statistics.FMSketch, error) {
sc := e.ctx.GetSessionVars().StmtCtx
data := make([][]byte, 0, len(collector.Samples))
fmSketch := statistics.NewFMSketch(maxSketchSize)
for i, sample := range collector.Samples {
sample.Ordinal = i
if sample.Value.IsNull() {
collector.NullCount++
continue
}
err := fmSketch.InsertValue(sc, sample.Value)
if err != nil {
return nil, nil, nil, nil, err
}
bytes, err := tablecodec.EncodeValue(sc, nil, sample.Value)
if err != nil {
return nil, nil, nil, nil, err
}
data = append(data, bytes)
}
// Build CMSketch.
cmSketch, topN, ndv, scaleRatio := statistics.NewCMSketchAndTopN(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]), data, uint32(e.opts[ast.AnalyzeOptNumTopN]), uint64(rowCount))
// Build Histogram.
hist, err := statistics.BuildColumnHist(e.ctx, int64(e.opts[ast.AnalyzeOptNumBuckets]), ID, collector, tp, rowCount, int64(ndv), collector.NullCount*int64(scaleRatio))
return hist, cmSketch, topN, fmSketch, err
}
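// buildIndexStats builds the histogram, CM sketch and TopN of an index; the sketches built for
// each column prefix are merged into one.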
func (e *AnalyzeFastExec) buildIndexStats(idxInfo *model.IndexInfo, collector *statistics.SampleCollector, rowCount int64) (*statistics.Histogram, *statistics.CMSketch, *statistics.TopN, error) {
data := make([][][]byte, len(idxInfo.Columns))
for _, sample := range collector.Samples {
var preLen int
remained := sample.Value.GetBytes()
		// We need to insert the values of each column prefix into the CM Sketch.
for i := 0; i < len(idxInfo.Columns); i++ {
var err error
var value []byte
value, remained, err = codec.CutOne(remained)
if err != nil {
return nil, nil, nil, err
}
preLen += len(value)
data[i] = append(data[i], sample.Value.GetBytes()[:preLen])
}
}
numTop := uint32(e.opts[ast.AnalyzeOptNumTopN])
cmSketch, topN, ndv, scaleRatio := statistics.NewCMSketchAndTopN(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]), data[0], numTop, uint64(rowCount))
// Build CM Sketch for each prefix and merge them into one.
for i := 1; i < len(idxInfo.Columns); i++ {
var curCMSketch *statistics.CMSketch
var curTopN *statistics.TopN
		// `ndv` should be the NDV of the full index, so it is simply overwritten here.
curCMSketch, curTopN, ndv, scaleRatio = statistics.NewCMSketchAndTopN(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]), data[i], numTop, uint64(rowCount))
err := cmSketch.MergeCMSketch(curCMSketch)
if err != nil {
return nil, nil, nil, err
}
statistics.MergeTopNAndUpdateCMSketch(topN, curTopN, cmSketch, numTop)
}
// Build Histogram.
hist, err := statistics.BuildColumnHist(e.ctx, int64(e.opts[ast.AnalyzeOptNumBuckets]), idxInfo.ID, collector, types.NewFieldType(mysql.TypeBlob), rowCount, int64(ndv), collector.NullCount*int64(scaleRatio))
return hist, cmSketch, topN, err
}
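// runTasks starts the sampling workers, handles the remaining scan tasks, and then builds
// statistics for every handle column, normal column and index from the collected samples.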
func (e *AnalyzeFastExec) runTasks() ([]*statistics.Histogram, []*statistics.CMSketch, []*statistics.TopN, []*statistics.FMSketch, error) {
errs := make([]error, e.concurrency)
pkColCount := pkColsCount(e.handleCols)
	// Collect primary key samples, column samples and index samples.
length := len(e.colsInfo) + pkColCount + len(e.idxsInfo)
e.collectors = make([]*statistics.SampleCollector, length)
for i := range e.collectors {
e.collectors[i] = &statistics.SampleCollector{
MaxSampleSize: int64(e.opts[ast.AnalyzeOptNumSamples]),
Samples: make([]*statistics.SampleItem, e.opts[ast.AnalyzeOptNumSamples]),
}
}
e.wg.Add(e.concurrency)
bo := tikv.NewBackofferWithVars(context.Background(), 500, nil)
for i := 0; i < e.concurrency; i++ {
go e.handleSampTasks(i, e.estSampStep, &errs[i])
}
e.wg.Wait()
for _, err := range errs {
if err != nil {
return nil, nil, nil, nil, err
}
}
scanKeysSize, err := e.handleScanTasks(bo)
fastAnalyzeHistogramScanKeys.Observe(float64(scanKeysSize))
if err != nil {
return nil, nil, nil, nil, err
}
stats := domain.GetDomain(e.ctx).StatsHandle()
var rowCount int64 = 0
if stats.Lease() > 0 {
if t := stats.GetPartitionStats(e.tblInfo, e.tableID.GetStatisticsID()); !t.Pseudo {
rowCount = t.Count
}
}
hists, cms, topNs, fms := make([]*statistics.Histogram, length), make([]*statistics.CMSketch, length), make([]*statistics.TopN, length), make([]*statistics.FMSketch, length)
for i := 0; i < length; i++ {
// Build collector properties.
collector := e.collectors[i]
collector.Samples = collector.Samples[:e.sampCursor]
sort.Slice(collector.Samples, func(i, j int) bool { return collector.Samples[i].Handle.Compare(collector.Samples[j].Handle) < 0 })
collector.CalcTotalSize()
		// Adjust the row count in case the count of `tblStats` is inaccurate and too small.
rowCount = mathutil.MaxInt64(rowCount, int64(len(collector.Samples)))
// Scale the total column size.
if len(collector.Samples) > 0 {
collector.TotalSize *= rowCount / int64(len(collector.Samples))
}
if i < pkColCount {
pkCol := e.handleCols.GetCol(i)
hists[i], cms[i], topNs[i], fms[i], err = e.buildColumnStats(pkCol.ID, e.collectors[i], pkCol.RetType, rowCount)
} else if i < pkColCount+len(e.colsInfo) {
hists[i], cms[i], topNs[i], fms[i], err = e.buildColumnStats(e.colsInfo[i-pkColCount].ID, e.collectors[i], &e.colsInfo[i-pkColCount].FieldType, rowCount)
} else {
hists[i], cms[i], topNs[i], err = e.buildIndexStats(e.idxsInfo[i-pkColCount-len(e.colsInfo)], e.collectors[i], rowCount)
}
if err != nil {
return nil, nil, nil, nil, err
}
}
return hists, cms, topNs, fms, nil
}
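// buildStats seeds the random source, builds the sample and scan tasks, and runs them to produce
// the final statistics.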
func (e *AnalyzeFastExec) buildStats() (hists []*statistics.Histogram, cms []*statistics.CMSketch, topNs []*statistics.TopN, fms []*statistics.FMSketch, err error) {
	// RandSeed is overridden to a fixed value only in unit tests.
	// In non-test environments it keeps its default value 1, so the seed is taken from time.Now() to make the random sequences differ between runs.
if RandSeed == 1 {
atomic.StoreInt64(&e.randSeed, time.Now().UnixNano())
} else {
atomic.StoreInt64(&e.randSeed, RandSeed)
}
err = e.buildSampTask()
if err != nil {
return nil, nil, nil, nil, err
}
return e.runTasks()
}
// AnalyzeTestFastExec is used to test fast sampling in unit tests.
type AnalyzeTestFastExec struct {
AnalyzeFastExec
Ctx sessionctx.Context
TableID core.AnalyzeTableID
HandleCols core.HandleCols
ColsInfo []*model.ColumnInfo
IdxsInfo []*model.IndexInfo
Concurrency int
Collectors []*statistics.SampleCollector
TblInfo *model.TableInfo
Opts map[ast.AnalyzeOptionType]uint64
}
// TestFastSample only tests the fast sampling logic; it is used in unit tests.
func (e *AnalyzeTestFastExec) TestFastSample() error {
e.ctx = e.Ctx
e.handleCols = e.HandleCols
e.colsInfo = e.ColsInfo
e.idxsInfo = e.IdxsInfo
e.concurrency = e.Concurrency
e.tableID = e.TableID
e.wg = &sync.WaitGroup{}
e.job = &statistics.AnalyzeJob{}
e.tblInfo = e.TblInfo
e.opts = e.Opts
_, _, _, _, err := e.buildStats()
e.Collectors = e.collectors
return err
}
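// analyzeIndexIncrementalExec is the executor for ANALYZE INCREMENTAL on an index; it keeps the
// old histogram, CM sketch and TopN so that they can be merged with the newly built ones.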
type analyzeIndexIncrementalExec struct {
AnalyzeIndexExec
oldHist *statistics.Histogram
oldCMS *statistics.CMSketch
oldTopN *statistics.TopN
}
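// analyzeIndexIncremental analyzes only the index range beyond the upper bound of the old
// histogram and merges the new statistics with the existing ones.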
func analyzeIndexIncremental(idxExec *analyzeIndexIncrementalExec) analyzeResult {
var statsVer = statistics.Version1
if idxExec.analyzePB.IdxReq.Version != nil {
statsVer = int(*idxExec.analyzePB.IdxReq.Version)
}
pruneMode := variable.PartitionPruneMode(idxExec.ctx.GetSessionVars().PartitionPruneMode.Load())
if idxExec.tableID.IsPartitionTable() && pruneMode == variable.Dynamic {
err := errors.Errorf("[stats]: global statistics for partitioned tables unavailable in ANALYZE INCREMENTAL")
return analyzeResult{Err: err, job: idxExec.job}
}
startPos := idxExec.oldHist.GetUpper(idxExec.oldHist.Len() - 1)
values, _, err := codec.DecodeRange(startPos.GetBytes(), len(idxExec.idxInfo.Columns), nil, nil)
if err != nil {
return analyzeResult{Err: err, job: idxExec.job}
}
ran := ranger.Range{LowVal: values, HighVal: []types.Datum{types.MaxValueDatum()}}
hist, cms, fms, topN, err := idxExec.buildStats([]*ranger.Range{&ran}, false)
if err != nil {
return analyzeResult{Err: err, job: idxExec.job}
}
hist, err = statistics.MergeHistograms(idxExec.ctx.GetSessionVars().StmtCtx, idxExec.oldHist, hist, int(idxExec.opts[ast.AnalyzeOptNumBuckets]), statsVer)
if err != nil {
return analyzeResult{Err: err, job: idxExec.job}
}
if idxExec.oldCMS != nil && cms != nil {
err = cms.MergeCMSketch4IncrementalAnalyze(idxExec.oldCMS, uint32(idxExec.opts[ast.AnalyzeOptNumTopN]))
if err != nil {
return analyzeResult{Err: err, job: idxExec.job}
}
cms.CalcDefaultValForAnalyze(uint64(hist.NDV))
}
if statsVer == statistics.Version2 {
poped := statistics.MergeTopNAndUpdateCMSketch(topN, idxExec.oldTopN, cms, uint32(idxExec.opts[ast.AnalyzeOptNumTopN]))
hist.AddIdxVals(poped)
}
result := analyzeResult{
TableID: idxExec.tableID,
Hist: []*statistics.Histogram{hist},
Cms: []*statistics.CMSketch{cms},
TopNs: []*statistics.TopN{topN},
Fms: []*statistics.FMSketch{fms},
IsIndex: 1,
job: idxExec.job,
StatsVer: statsVer,
}
result.Count = hist.NullCount
if hist.Len() > 0 {
result.Count += hist.Buckets[hist.Len()-1].Count
}
return result
}
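// analyzePKIncrementalExec is the executor for ANALYZE INCREMENTAL on an integer primary key; it
// keeps the old histogram for merging.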
type analyzePKIncrementalExec struct {
AnalyzeColumnsExec
oldHist *statistics.Histogram
}
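// analyzePKIncremental analyzes only the primary key range beyond the upper bound of the old
// histogram and merges the new histogram with it.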
func analyzePKIncremental(colExec *analyzePKIncrementalExec) analyzeResult {
var maxVal types.Datum
pkInfo := colExec.handleCols.GetCol(0)
if mysql.HasUnsignedFlag(pkInfo.RetType.Flag) {
maxVal = types.NewUintDatum(math.MaxUint64)
} else {
maxVal = types.NewIntDatum(math.MaxInt64)
}
startPos := *colExec.oldHist.GetUpper(colExec.oldHist.Len() - 1)
ran := ranger.Range{LowVal: []types.Datum{startPos}, LowExclude: true, HighVal: []types.Datum{maxVal}}
hists, _, _, _, _, err := colExec.buildStats([]*ranger.Range{&ran}, false)
if err != nil {
return analyzeResult{Err: err, job: colExec.job}
}
hist := hists[0]
hist, err = statistics.MergeHistograms(colExec.ctx.GetSessionVars().StmtCtx, colExec.oldHist, hist, int(colExec.opts[ast.AnalyzeOptNumBuckets]), statistics.Version1)
if err != nil {
return analyzeResult{Err: err, job: colExec.job}
}
result := analyzeResult{
TableID: colExec.tableID,
Hist: []*statistics.Histogram{hist},
Cms: []*statistics.CMSketch{nil},
TopNs: []*statistics.TopN{nil},
Fms: []*statistics.FMSketch{nil},
job: colExec.job,
StatsVer: statistics.Version1,
}
if hist.Len() > 0 {
result.Count += hist.Buckets[hist.Len()-1].Count
}
return result
}
// analyzeResult is used to represent the result of an analyze task.
type analyzeResult struct {
TableID core.AnalyzeTableID
Hist []*statistics.Histogram
Cms []*statistics.CMSketch
TopNs []*statistics.TopN
Fms []*statistics.FMSketch
ExtStats *statistics.ExtendedStatsColl
Count int64
IsIndex int
Err error
job *statistics.AnalyzeJob
StatsVer int
}
| executor/analyze.go | 1 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.9981563687324524,
0.05017303675413132,
0.00015895464457571507,
0.00025608111172914505,
0.20259413123130798
] |
{
"id": 2,
"code_window": [
"\n",
"func (e *AnalyzeColumnsExec) open(ranges []*ranger.Range) error {\n",
"\te.resultHandler = &tableResultHandler{}\n",
"\tfirstPartRanges, secondPartRanges := distsql.SplitRangesBySign(ranges, true, false, !hasPkHist(e.handleCols))\n",
"\tfirstResult, err := e.buildResp(firstPartRanges)\n",
"\tif err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfirstPartRanges, secondPartRanges := distsql.SplitRangesAcrossInt64Boundary(ranges, true, false, !hasPkHist(e.handleCols))\n"
],
"file_path": "executor/analyze.go",
"type": "replace",
"edit_start_line_idx": 600
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor_test
import (
"context"
"fmt"
"math"
"strconv"
"strings"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/failpoint"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/ddl"
ddltestutil "github.com/pingcap/tidb/ddl/testutil"
ddlutil "github.com/pingcap/tidb/ddl/util"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/meta/autoid"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/testkit"
"github.com/pingcap/tidb/util/testutil"
)
func (s *testSuite6) TestTruncateTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists truncate_test;`)
tk.MustExec(`create table truncate_test (a int)`)
tk.MustExec(`insert truncate_test values (1),(2),(3)`)
result := tk.MustQuery("select * from truncate_test")
result.Check(testkit.Rows("1", "2", "3"))
tk.MustExec("truncate table truncate_test")
result = tk.MustQuery("select * from truncate_test")
result.Check(nil)
}
// TestInTxnExecDDLFail tests the following case:
// 1. Execute the SQL of "begin";
// 2. Execute a SQL statement that will fail;
// 3. Execute a DDL statement.
func (s *testSuite6) TestInTxnExecDDLFail(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (i int key);")
tk.MustExec("insert into t values (1);")
tk.MustExec("begin;")
tk.MustExec("insert into t values (1);")
_, err := tk.Exec("truncate table t;")
c.Assert(err.Error(), Equals, "[kv:1062]Duplicate entry '1' for key 'PRIMARY'")
result := tk.MustQuery("select count(*) from t")
result.Check(testkit.Rows("1"))
}
func (s *testSuite6) TestInTxnExecDDLInvalid(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (c_int int, c_str varchar(40));")
tk.MustExec("insert into t values (1, 'quizzical hofstadter');")
tk.MustExec("begin;")
_ = tk.MustQuery("select c_int from t where c_str is not null for update;")
tk.MustExec("alter table t add index idx_4 (c_str);")
}
func (s *testSuite6) TestCreateTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
	// Test creating a database that already exists.
_, err := tk.Exec("CREATE database test")
c.Assert(err, NotNil)
	// Test creating a table that already exists.
tk.MustExec("CREATE TABLE create_test (id INT NOT NULL DEFAULT 1, name varchar(255), PRIMARY KEY(id));")
_, err = tk.Exec("CREATE TABLE create_test (id INT NOT NULL DEFAULT 1, name varchar(255), PRIMARY KEY(id));")
c.Assert(err, NotNil)
// Test "if not exist"
tk.MustExec("CREATE TABLE if not exists test(id INT NOT NULL DEFAULT 1, name varchar(255), PRIMARY KEY(id));")
// Testcase for https://github.com/pingcap/tidb/issues/312
tk.MustExec(`create table issue312_1 (c float(24));`)
tk.MustExec(`create table issue312_2 (c float(25));`)
rs, err := tk.Exec(`desc issue312_1`)
c.Assert(err, IsNil)
ctx := context.Background()
req := rs.NewChunk()
it := chunk.NewIterator4Chunk(req)
for {
err1 := rs.Next(ctx, req)
c.Assert(err1, IsNil)
if req.NumRows() == 0 {
break
}
for row := it.Begin(); row != it.End(); row = it.Next() {
c.Assert(row.GetString(1), Equals, "float")
}
}
rs, err = tk.Exec(`desc issue312_2`)
c.Assert(err, IsNil)
req = rs.NewChunk()
it = chunk.NewIterator4Chunk(req)
for {
err1 := rs.Next(ctx, req)
c.Assert(err1, IsNil)
if req.NumRows() == 0 {
break
}
for row := it.Begin(); row != it.End(); row = it.Next() {
c.Assert(req.GetRow(0).GetString(1), Equals, "double")
}
}
c.Assert(rs.Close(), IsNil)
	// Test multiple collations specified on a column when creating a table.
tk.MustExec("drop table if exists test_multiple_column_collate;")
tk.MustExec("create table test_multiple_column_collate (a char(1) collate utf8_bin collate utf8_general_ci) charset utf8mb4 collate utf8mb4_bin")
t, err := domain.GetDomain(tk.Se).InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("test_multiple_column_collate"))
c.Assert(err, IsNil)
c.Assert(t.Cols()[0].Charset, Equals, "utf8")
c.Assert(t.Cols()[0].Collate, Equals, "utf8_general_ci")
c.Assert(t.Meta().Charset, Equals, "utf8mb4")
c.Assert(t.Meta().Collate, Equals, "utf8mb4_bin")
tk.MustExec("drop table if exists test_multiple_column_collate;")
tk.MustExec("create table test_multiple_column_collate (a char(1) charset utf8 collate utf8_bin collate utf8_general_ci) charset utf8mb4 collate utf8mb4_bin")
t, err = domain.GetDomain(tk.Se).InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("test_multiple_column_collate"))
c.Assert(err, IsNil)
c.Assert(t.Cols()[0].Charset, Equals, "utf8")
c.Assert(t.Cols()[0].Collate, Equals, "utf8_general_ci")
c.Assert(t.Meta().Charset, Equals, "utf8mb4")
c.Assert(t.Meta().Collate, Equals, "utf8mb4_bin")
	// Test error cases for multiple collations specified on a column when creating a table.
tk.MustExec("drop table if exists test_err_multiple_collate;")
_, err = tk.Exec("create table test_err_multiple_collate (a char(1) charset utf8mb4 collate utf8_unicode_ci collate utf8_general_ci) charset utf8mb4 collate utf8mb4_bin")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, ddl.ErrCollationCharsetMismatch.GenWithStackByArgs("utf8_unicode_ci", "utf8mb4").Error())
tk.MustExec("drop table if exists test_err_multiple_collate;")
_, err = tk.Exec("create table test_err_multiple_collate (a char(1) collate utf8_unicode_ci collate utf8mb4_general_ci) charset utf8mb4 collate utf8mb4_bin")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, ddl.ErrCollationCharsetMismatch.GenWithStackByArgs("utf8mb4_general_ci", "utf8").Error())
// table option is auto-increment
tk.MustExec("drop table if exists create_auto_increment_test;")
tk.MustExec("create table create_auto_increment_test (id int not null auto_increment, name varchar(255), primary key(id)) auto_increment = 999;")
tk.MustExec("insert into create_auto_increment_test (name) values ('aa')")
tk.MustExec("insert into create_auto_increment_test (name) values ('bb')")
tk.MustExec("insert into create_auto_increment_test (name) values ('cc')")
r := tk.MustQuery("select * from create_auto_increment_test;")
r.Check(testkit.Rows("999 aa", "1000 bb", "1001 cc"))
tk.MustExec("drop table create_auto_increment_test")
tk.MustExec("create table create_auto_increment_test (id int not null auto_increment, name varchar(255), primary key(id)) auto_increment = 1999;")
tk.MustExec("insert into create_auto_increment_test (name) values ('aa')")
tk.MustExec("insert into create_auto_increment_test (name) values ('bb')")
tk.MustExec("insert into create_auto_increment_test (name) values ('cc')")
r = tk.MustQuery("select * from create_auto_increment_test;")
r.Check(testkit.Rows("1999 aa", "2000 bb", "2001 cc"))
tk.MustExec("drop table create_auto_increment_test")
tk.MustExec("create table create_auto_increment_test (id int not null auto_increment, name varchar(255), key(id)) auto_increment = 1000;")
tk.MustExec("insert into create_auto_increment_test (name) values ('aa')")
r = tk.MustQuery("select * from create_auto_increment_test;")
r.Check(testkit.Rows("1000 aa"))
// Test for `drop table if exists`.
tk.MustExec("drop table if exists t_if_exists;")
tk.MustQuery("show warnings;").Check(testkit.Rows("Note 1051 Unknown table 'test.t_if_exists'"))
tk.MustExec("create table if not exists t1_if_exists(c int)")
tk.MustExec("drop table if exists t1_if_exists,t2_if_exists,t3_if_exists")
tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Note|1051|Unknown table 'test.t2_if_exists'", "Note|1051|Unknown table 'test.t3_if_exists'"))
}
func (s *testSuite6) TestCreateView(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
	// Create a source table.
tk.MustExec("CREATE TABLE source_table (id INT NOT NULL DEFAULT 1, name varchar(255), PRIMARY KEY(id));")
	// Test creating a view that already exists.
tk.MustExec("CREATE VIEW view_t AS select id , name from source_table")
defer tk.MustExec("DROP VIEW IF EXISTS view_t")
_, err := tk.Exec("CREATE VIEW view_t AS select id , name from source_table")
c.Assert(err.Error(), Equals, "[schema:1050]Table 'test.view_t' already exists")
// create view on nonexistent table
_, err = tk.Exec("create view v1 (c,d) as select a,b from t1")
c.Assert(err.Error(), Equals, "[schema:1146]Table 'test.t1' doesn't exist")
// simple view
tk.MustExec("create table t1 (a int ,b int)")
tk.MustExec("insert into t1 values (1,2), (1,3), (2,4), (2,5), (3,10)")
// view with colList and SelectFieldExpr
tk.MustExec("create view v1 (c) as select b+1 from t1")
// view with SelectFieldExpr
tk.MustExec("create view v2 as select b+1 from t1")
// view with SelectFieldExpr and AsName
tk.MustExec("create view v3 as select b+1 as c from t1")
// view with colList , SelectField and AsName
tk.MustExec("create view v4 (c) as select b+1 as d from t1")
// view with select wild card
tk.MustExec("create view v5 as select * from t1")
tk.MustExec("create view v6 (c,d) as select * from t1")
_, err = tk.Exec("create view v7 (c,d,e) as select * from t1")
c.Assert(err.Error(), Equals, ddl.ErrViewWrongList.Error())
// drop multiple views in a statement
tk.MustExec("drop view v1,v2,v3,v4,v5,v6")
// view with variable
tk.MustExec("create view v1 (c,d) as select a,b+@@global.max_user_connections from t1")
_, err = tk.Exec("create view v1 (c,d) as select a,b from t1 where a = @@global.max_user_connections")
c.Assert(err.Error(), Equals, "[schema:1050]Table 'test.v1' already exists")
tk.MustExec("drop view v1")
// view with different col counts
_, err = tk.Exec("create view v1 (c,d,e) as select a,b from t1 ")
c.Assert(err.Error(), Equals, ddl.ErrViewWrongList.Error())
_, err = tk.Exec("create view v1 (c) as select a,b from t1 ")
c.Assert(err.Error(), Equals, ddl.ErrViewWrongList.Error())
// view with or_replace flag
tk.MustExec("drop view if exists v1")
tk.MustExec("create view v1 (c,d) as select a,b from t1")
tk.MustExec("create or replace view v1 (c,d) as select a,b from t1 ")
tk.MustExec("create table if not exists t1 (a int ,b int)")
_, err = tk.Exec("create or replace view t1 as select * from t1")
c.Assert(err.Error(), Equals, ddl.ErrWrongObject.GenWithStackByArgs("test", "t1", "VIEW").Error())
// create view using prepare
tk.MustExec(`prepare stmt from "create view v10 (x) as select 1";`)
tk.MustExec("execute stmt")
// create view on union
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("drop view if exists v")
_, err = tk.Exec("create view v as select * from t1 union select * from t2")
c.Assert(terror.ErrorEqual(err, infoschema.ErrTableNotExists), IsTrue)
tk.MustExec("create table t1(a int, b int)")
tk.MustExec("create table t2(a int, b int)")
tk.MustExec("insert into t1 values(1,2), (1,1), (1,2)")
tk.MustExec("insert into t2 values(1,1),(1,3)")
tk.MustExec("create definer='root'@'localhost' view v as select * from t1 union select * from t2")
tk.MustQuery("select * from v").Sort().Check(testkit.Rows("1 1", "1 2", "1 3"))
tk.MustExec("alter table t1 drop column a")
_, err = tk.Exec("select * from v")
c.Assert(terror.ErrorEqual(err, plannercore.ErrViewInvalid), IsTrue)
tk.MustExec("alter table t1 add column a int")
tk.MustQuery("select * from v").Sort().Check(testkit.Rows("1 1", "1 3", "<nil> 1", "<nil> 2"))
tk.MustExec("alter table t1 drop column a")
tk.MustExec("alter table t2 drop column b")
_, err = tk.Exec("select * from v")
c.Assert(terror.ErrorEqual(err, plannercore.ErrViewInvalid), IsTrue)
tk.MustExec("drop view v")
tk.MustExec("create view v as (select * from t1)")
tk.MustExec("drop view v")
tk.MustExec("create view v as (select * from t1 union select * from t2)")
tk.MustExec("drop view v")
// Test for `drop view if exists`.
tk.MustExec("drop view if exists v_if_exists;")
tk.MustQuery("show warnings;").Check(testkit.Rows("Note 1051 Unknown table 'test.v_if_exists'"))
tk.MustExec("create view v1_if_exists as (select * from t1)")
tk.MustExec("drop view if exists v1_if_exists,v2_if_exists,v3_if_exists")
tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Note|1051|Unknown table 'test.v2_if_exists'", "Note|1051|Unknown table 'test.v3_if_exists'"))
	// Test for creating nested views.
tk.MustExec("create table test_v_nested(a int)")
tk.MustExec("create definer='root'@'localhost' view v_nested as select * from test_v_nested")
tk.MustExec("create definer='root'@'localhost' view v_nested2 as select * from v_nested")
_, err = tk.Exec("create or replace definer='root'@'localhost' view v_nested as select * from v_nested2")
c.Assert(terror.ErrorEqual(err, plannercore.ErrNoSuchTable), IsTrue)
tk.MustExec("drop table test_v_nested")
tk.MustExec("drop view v_nested, v_nested2")
}
func (s *testSuite6) TestViewRecursion(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table if not exists t(a int)")
tk.MustExec("create definer='root'@'localhost' view recursive_view1 as select * from t")
tk.MustExec("create definer='root'@'localhost' view recursive_view2 as select * from recursive_view1")
tk.MustExec("drop table t")
tk.MustExec("rename table recursive_view2 to t")
_, err := tk.Exec("select * from recursive_view1")
c.Assert(terror.ErrorEqual(err, plannercore.ErrViewRecursive), IsTrue)
tk.MustExec("drop view recursive_view1, t")
}
func (s *testSuite6) TestIssue16250(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table if not exists t(a int)")
tk.MustExec("create view view_issue16250 as select * from t")
_, err := tk.Exec("truncate table view_issue16250")
c.Assert(err.Error(), Equals, "[schema:1146]Table 'test.view_issue16250' doesn't exist")
}
func (s testSuite6) TestTruncateSequence(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create sequence if not exists seq")
_, err := tk.Exec("truncate table seq")
c.Assert(err.Error(), Equals, "[schema:1146]Table 'test.seq' doesn't exist")
tk.MustExec("create sequence if not exists seq1 start 10 increment 2 maxvalue 10000 cycle")
_, err = tk.Exec("truncate table seq1")
c.Assert(err.Error(), Equals, "[schema:1146]Table 'test.seq1' doesn't exist")
tk.MustExec("drop sequence if exists seq")
tk.MustExec("drop sequence if exists seq1")
}
func (s *testSuite6) TestCreateViewWithOverlongColName(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t(a int)")
defer tk.MustExec("drop table t")
tk.MustExec("create view v as select distinct'" + strings.Repeat("a", 65) + "', " +
"max('" + strings.Repeat("b", 65) + "'), " +
"'cccccccccc', '" + strings.Repeat("d", 65) + "';")
resultCreateStmt := "CREATE ALGORITHM=UNDEFINED DEFINER=``@`` SQL SECURITY DEFINER VIEW `v` (`name_exp_1`, `name_exp_2`, `cccccccccc`, `name_exp_4`) AS SELECT DISTINCT _UTF8MB4'" + strings.Repeat("a", 65) + "',MAX(_UTF8MB4'" + strings.Repeat("b", 65) + "'),_UTF8MB4'cccccccccc',_UTF8MB4'" + strings.Repeat("d", 65) + "'"
tk.MustQuery("select * from v")
tk.MustQuery("select name_exp_1, name_exp_2, cccccccccc, name_exp_4 from v")
tk.MustQuery("show create view v").Check(testkit.Rows("v " + resultCreateStmt + " "))
tk.MustExec("drop view v;")
tk.MustExec(resultCreateStmt)
tk.MustExec("drop view v ")
tk.MustExec("create definer='root'@'localhost' view v as select 'a', '" + strings.Repeat("b", 65) + "' from t " +
"union select '" + strings.Repeat("c", 65) + "', " +
"count(distinct '" + strings.Repeat("b", 65) + "', " +
"'c');")
resultCreateStmt = "CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` (`a`, `name_exp_2`) AS SELECT _UTF8MB4'a',_UTF8MB4'" + strings.Repeat("b", 65) + "' FROM `test`.`t` UNION SELECT _UTF8MB4'" + strings.Repeat("c", 65) + "',COUNT(DISTINCT _UTF8MB4'" + strings.Repeat("b", 65) + "', _UTF8MB4'c')"
tk.MustQuery("select * from v")
tk.MustQuery("select a, name_exp_2 from v")
tk.MustQuery("show create view v").Check(testkit.Rows("v " + resultCreateStmt + " "))
tk.MustExec("drop view v;")
tk.MustExec(resultCreateStmt)
tk.MustExec("drop view v ")
tk.MustExec("create definer='root'@'localhost' view v as select 'a' as '" + strings.Repeat("b", 65) + "' from t;")
tk.MustQuery("select * from v")
tk.MustQuery("select name_exp_1 from v")
resultCreateStmt = "CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` (`name_exp_1`) AS SELECT _UTF8MB4'a' AS `" + strings.Repeat("b", 65) + "` FROM `test`.`t`"
tk.MustQuery("show create view v").Check(testkit.Rows("v " + resultCreateStmt + " "))
tk.MustExec("drop view v;")
tk.MustExec(resultCreateStmt)
tk.MustExec("drop view v ")
err := tk.ExecToErr("create view v(`" + strings.Repeat("b", 65) + "`) as select a from t;")
c.Assert(err.Error(), Equals, "[ddl:1059]Identifier name 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' is too long")
}
func (s *testSuite6) TestCreateDropDatabase(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists drop_test;")
tk.MustExec("drop database if exists drop_test;")
tk.MustExec("create database drop_test;")
tk.MustExec("use drop_test;")
tk.MustExec("drop database drop_test;")
_, err := tk.Exec("drop table t;")
c.Assert(err.Error(), Equals, plannercore.ErrNoDB.Error())
err = tk.ExecToErr("select * from t;")
c.Assert(err.Error(), Equals, plannercore.ErrNoDB.Error())
_, err = tk.Exec("drop database mysql")
c.Assert(err, NotNil)
tk.MustExec("create database charset_test charset ascii;")
tk.MustQuery("show create database charset_test;").Check(testutil.RowsWithSep("|",
"charset_test|CREATE DATABASE `charset_test` /*!40100 DEFAULT CHARACTER SET ascii */",
))
tk.MustExec("drop database charset_test;")
tk.MustExec("create database charset_test charset binary;")
tk.MustQuery("show create database charset_test;").Check(testutil.RowsWithSep("|",
"charset_test|CREATE DATABASE `charset_test` /*!40100 DEFAULT CHARACTER SET binary */",
))
tk.MustExec("drop database charset_test;")
tk.MustExec("create database charset_test collate utf8_general_ci;")
tk.MustQuery("show create database charset_test;").Check(testutil.RowsWithSep("|",
"charset_test|CREATE DATABASE `charset_test` /*!40100 DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci */",
))
tk.MustExec("drop database charset_test;")
tk.MustExec("create database charset_test charset utf8 collate utf8_general_ci;")
tk.MustQuery("show create database charset_test;").Check(testutil.RowsWithSep("|",
"charset_test|CREATE DATABASE `charset_test` /*!40100 DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci */",
))
tk.MustGetErrMsg("create database charset_test charset utf8 collate utf8mb4_unicode_ci;", "[ddl:1253]COLLATION 'utf8mb4_unicode_ci' is not valid for CHARACTER SET 'utf8'")
}
func (s *testSuite6) TestCreateDropTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table if not exists drop_test (a int)")
tk.MustExec("drop table if exists drop_test")
tk.MustExec("create table drop_test (a int)")
tk.MustExec("drop table drop_test")
_, err := tk.Exec("drop table mysql.gc_delete_range")
c.Assert(err, NotNil)
}
func (s *testSuite6) TestCreateDropView(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create or replace view drop_test as select 1,2")
_, err := tk.Exec("drop table drop_test")
c.Assert(err.Error(), Equals, "[schema:1051]Unknown table 'test.drop_test'")
_, err = tk.Exec("drop view if exists drop_test")
c.Assert(err, IsNil)
_, err = tk.Exec("drop view mysql.gc_delete_range")
c.Assert(err.Error(), Equals, "Drop tidb system table 'mysql.gc_delete_range' is forbidden")
_, err = tk.Exec("drop view drop_test")
c.Assert(err.Error(), Equals, "[schema:1051]Unknown table 'test.drop_test'")
tk.MustExec("create table t_v(a int)")
_, err = tk.Exec("drop view t_v")
c.Assert(err.Error(), Equals, "[ddl:1347]'test.t_v' is not VIEW")
tk.MustExec("create table t_v1(a int, b int);")
tk.MustExec("create table t_v2(a int, b int);")
tk.MustExec("create view v as select * from t_v1;")
tk.MustExec("create or replace view v as select * from t_v2;")
tk.MustQuery("select * from information_schema.views where table_name ='v';").Check(
testkit.Rows("def test v SELECT `test`.`t_v2`.`a`,`test`.`t_v2`.`b` FROM `test`.`t_v2` CASCADED NO @ DEFINER utf8mb4 utf8mb4_bin"))
}
func (s *testSuite6) TestCreateDropIndex(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table if not exists drop_test (a int)")
tk.MustExec("create index idx_a on drop_test (a)")
tk.MustExec("drop index idx_a on drop_test")
tk.MustExec("drop table drop_test")
}
func (s *testSuite6) TestAlterTableAddColumn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table if not exists alter_test (c1 int)")
tk.MustExec("insert into alter_test values(1)")
tk.MustExec("alter table alter_test add column c2 timestamp default current_timestamp")
time.Sleep(1 * time.Millisecond)
now := time.Now().Add(-1 * time.Millisecond).Format(types.TimeFormat)
r, err := tk.Exec("select c2 from alter_test")
c.Assert(err, IsNil)
req := r.NewChunk()
err = r.Next(context.Background(), req)
c.Assert(err, IsNil)
row := req.GetRow(0)
c.Assert(row.Len(), Equals, 1)
c.Assert(now, GreaterEqual, row.GetTime(0).String())
c.Assert(r.Close(), IsNil)
tk.MustExec("alter table alter_test add column c3 varchar(50) default 'CURRENT_TIMESTAMP'")
tk.MustQuery("select c3 from alter_test").Check(testkit.Rows("CURRENT_TIMESTAMP"))
tk.MustExec("create or replace view alter_view as select c1,c2 from alter_test")
_, err = tk.Exec("alter table alter_view add column c4 varchar(50)")
c.Assert(err.Error(), Equals, ddl.ErrWrongObject.GenWithStackByArgs("test", "alter_view", "BASE TABLE").Error())
tk.MustExec("drop view alter_view")
tk.MustExec("create sequence alter_seq")
_, err = tk.Exec("alter table alter_seq add column c int")
c.Assert(err.Error(), Equals, ddl.ErrWrongObject.GenWithStackByArgs("test", "alter_seq", "BASE TABLE").Error())
tk.MustExec("drop sequence alter_seq")
}
func (s *testSuite6) TestAlterTableAddColumns(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table if not exists alter_test (c1 int)")
tk.MustExec("insert into alter_test values(1)")
tk.MustExec("alter table alter_test add column c2 timestamp default current_timestamp, add column c8 varchar(50) default 'CURRENT_TIMESTAMP'")
tk.MustExec("alter table alter_test add column (c7 timestamp default current_timestamp, c3 varchar(50) default 'CURRENT_TIMESTAMP')")
r, err := tk.Exec("select c2 from alter_test")
c.Assert(err, IsNil)
req := r.NewChunk()
err = r.Next(context.Background(), req)
c.Assert(err, IsNil)
row := req.GetRow(0)
c.Assert(row.Len(), Equals, 1)
c.Assert(r.Close(), IsNil)
tk.MustQuery("select c3 from alter_test").Check(testkit.Rows("CURRENT_TIMESTAMP"))
tk.MustExec("create or replace view alter_view as select c1,c2 from alter_test")
_, err = tk.Exec("alter table alter_view add column (c4 varchar(50), c5 varchar(50))")
c.Assert(err.Error(), Equals, ddl.ErrWrongObject.GenWithStackByArgs("test", "alter_view", "BASE TABLE").Error())
tk.MustExec("drop view alter_view")
tk.MustExec("create sequence alter_seq")
_, err = tk.Exec("alter table alter_seq add column (c1 int, c2 varchar(10))")
c.Assert(err.Error(), Equals, ddl.ErrWrongObject.GenWithStackByArgs("test", "alter_seq", "BASE TABLE").Error())
tk.MustExec("drop sequence alter_seq")
}
func (s *testSuite6) TestAddNotNullColumnNoDefault(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table nn (c1 int)")
tk.MustExec("insert nn values (1), (2)")
tk.MustExec("alter table nn add column c2 int not null")
tbl, err := domain.GetDomain(tk.Se).InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("nn"))
c.Assert(err, IsNil)
col2 := tbl.Meta().Columns[1]
c.Assert(col2.DefaultValue, IsNil)
c.Assert(col2.OriginDefaultValue, Equals, "0")
tk.MustQuery("select * from nn").Check(testkit.Rows("1 0", "2 0"))
_, err = tk.Exec("insert nn (c1) values (3)")
c.Check(err, NotNil)
tk.MustExec("set sql_mode=''")
tk.MustExec("insert nn (c1) values (3)")
tk.MustQuery("select * from nn").Check(testkit.Rows("1 0", "2 0", "3 0"))
}
func (s *testSuite6) TestAlterTableModifyColumn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists mc")
tk.MustExec("create table mc(c1 int, c2 varchar(10), c3 bit)")
_, err := tk.Exec("alter table mc modify column c1 short")
c.Assert(err, NotNil)
tk.MustExec("alter table mc modify column c1 bigint")
_, err = tk.Exec("alter table mc modify column c2 blob")
c.Assert(err, NotNil)
_, err = tk.Exec("alter table mc modify column c2 varchar(8)")
c.Assert(err, NotNil)
tk.MustExec("alter table mc modify column c2 varchar(11)")
tk.MustGetErrCode("alter table mc modify column c2 text(13)", errno.ErrUnsupportedDDLOperation)
tk.MustGetErrCode("alter table mc modify column c2 text", errno.ErrUnsupportedDDLOperation)
tk.MustExec("alter table mc modify column c3 bit")
result := tk.MustQuery("show create table mc")
createSQL := result.Rows()[0][1]
expected := "CREATE TABLE `mc` (\n `c1` bigint(20) DEFAULT NULL,\n `c2` varchar(11) DEFAULT NULL,\n `c3` bit(1) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"
c.Assert(createSQL, Equals, expected)
tk.MustExec("create or replace view alter_view as select c1,c2 from mc")
_, err = tk.Exec("alter table alter_view modify column c2 text")
c.Assert(err.Error(), Equals, ddl.ErrWrongObject.GenWithStackByArgs("test", "alter_view", "BASE TABLE").Error())
tk.MustExec("drop view alter_view")
tk.MustExec("create sequence alter_seq")
_, err = tk.Exec("alter table alter_seq modify column c int")
c.Assert(err.Error(), Equals, ddl.ErrWrongObject.GenWithStackByArgs("test", "alter_seq", "BASE TABLE").Error())
tk.MustExec("drop sequence alter_seq")
	// Test multiple collation modifications on a column.
tk.MustExec("drop table if exists modify_column_multiple_collate")
tk.MustExec("create table modify_column_multiple_collate (a char(1) collate utf8_bin collate utf8_general_ci) charset utf8mb4 collate utf8mb4_bin")
_, err = tk.Exec("alter table modify_column_multiple_collate modify column a char(1) collate utf8mb4_bin;")
c.Assert(err, IsNil)
t, err := domain.GetDomain(tk.Se).InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("modify_column_multiple_collate"))
c.Assert(err, IsNil)
c.Assert(t.Cols()[0].Charset, Equals, "utf8mb4")
c.Assert(t.Cols()[0].Collate, Equals, "utf8mb4_bin")
c.Assert(t.Meta().Charset, Equals, "utf8mb4")
c.Assert(t.Meta().Collate, Equals, "utf8mb4_bin")
tk.MustExec("drop table if exists modify_column_multiple_collate;")
tk.MustExec("create table modify_column_multiple_collate (a char(1) collate utf8_bin collate utf8_general_ci) charset utf8mb4 collate utf8mb4_bin")
_, err = tk.Exec("alter table modify_column_multiple_collate modify column a char(1) charset utf8mb4 collate utf8mb4_bin;")
c.Assert(err, IsNil)
t, err = domain.GetDomain(tk.Se).InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("modify_column_multiple_collate"))
c.Assert(err, IsNil)
c.Assert(t.Cols()[0].Charset, Equals, "utf8mb4")
c.Assert(t.Cols()[0].Collate, Equals, "utf8mb4_bin")
c.Assert(t.Meta().Charset, Equals, "utf8mb4")
c.Assert(t.Meta().Collate, Equals, "utf8mb4_bin")
	// Test error cases for multiple collation modifications on a column.
tk.MustExec("drop table if exists err_modify_multiple_collate;")
tk.MustExec("create table err_modify_multiple_collate (a char(1) collate utf8_bin collate utf8_general_ci) charset utf8mb4 collate utf8mb4_bin")
_, err = tk.Exec("alter table err_modify_multiple_collate modify column a char(1) charset utf8mb4 collate utf8_bin;")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, ddl.ErrCollationCharsetMismatch.GenWithStackByArgs("utf8_bin", "utf8mb4").Error())
tk.MustExec("drop table if exists err_modify_multiple_collate;")
tk.MustExec("create table err_modify_multiple_collate (a char(1) collate utf8_bin collate utf8_general_ci) charset utf8mb4 collate utf8mb4_bin")
_, err = tk.Exec("alter table err_modify_multiple_collate modify column a char(1) collate utf8_bin collate utf8mb4_bin;")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, ddl.ErrCollationCharsetMismatch.GenWithStackByArgs("utf8mb4_bin", "utf8").Error())
}
func (s *testSuite6) TestDefaultDBAfterDropCurDB(c *C) {
tk := testkit.NewTestKit(c, s.store)
testSQL := `create database if not exists test_db CHARACTER SET latin1 COLLATE latin1_swedish_ci;`
tk.MustExec(testSQL)
testSQL = `use test_db;`
tk.MustExec(testSQL)
tk.MustQuery(`select database();`).Check(testkit.Rows("test_db"))
tk.MustQuery(`select @@character_set_database;`).Check(testkit.Rows("latin1"))
tk.MustQuery(`select @@collation_database;`).Check(testkit.Rows("latin1_swedish_ci"))
testSQL = `drop database test_db;`
tk.MustExec(testSQL)
tk.MustQuery(`select database();`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select @@character_set_database;`).Check(testkit.Rows(mysql.DefaultCharset))
tk.MustQuery(`select @@collation_database;`).Check(testkit.Rows(mysql.DefaultCollationName))
}
func (s *testSuite6) TestColumnCharsetAndCollate(c *C) {
tk := testkit.NewTestKit(c, s.store)
dbName := "col_charset_collate"
tk.MustExec("create database " + dbName)
tk.MustExec("use " + dbName)
tests := []struct {
colType string
charset string
collates string
exptCharset string
exptCollate string
errMsg string
}{
{
colType: "varchar(10)",
charset: "charset utf8",
collates: "collate utf8_bin",
exptCharset: "utf8",
exptCollate: "utf8_bin",
errMsg: "",
},
{
colType: "varchar(10)",
charset: "charset utf8mb4",
collates: "",
exptCharset: "utf8mb4",
exptCollate: "utf8mb4_bin",
errMsg: "",
},
{
colType: "varchar(10)",
charset: "charset utf16",
collates: "",
exptCharset: "",
exptCollate: "",
errMsg: "Unknown charset utf16",
},
{
colType: "varchar(10)",
charset: "charset latin1",
collates: "",
exptCharset: "latin1",
exptCollate: "latin1_bin",
errMsg: "",
},
{
colType: "varchar(10)",
charset: "charset binary",
collates: "",
exptCharset: "binary",
exptCollate: "binary",
errMsg: "",
},
{
colType: "varchar(10)",
charset: "charset ascii",
collates: "",
exptCharset: "ascii",
exptCollate: "ascii_bin",
errMsg: "",
},
}
sctx := tk.Se.(sessionctx.Context)
dm := domain.GetDomain(sctx)
for i, tt := range tests {
tblName := fmt.Sprintf("t%d", i)
sql := fmt.Sprintf("create table %s (a %s %s %s)", tblName, tt.colType, tt.charset, tt.collates)
if tt.errMsg == "" {
tk.MustExec(sql)
is := dm.InfoSchema()
c.Assert(is, NotNil)
tb, err := is.TableByName(model.NewCIStr(dbName), model.NewCIStr(tblName))
c.Assert(err, IsNil)
c.Assert(tb.Meta().Columns[0].Charset, Equals, tt.exptCharset, Commentf(sql))
c.Assert(tb.Meta().Columns[0].Collate, Equals, tt.exptCollate, Commentf(sql))
} else {
_, err := tk.Exec(sql)
c.Assert(err, NotNil, Commentf(sql))
}
}
tk.MustExec("drop database " + dbName)
}
func (s *testSuite6) TestTooLargeIdentifierLength(c *C) {
tk := testkit.NewTestKit(c, s.store)
// for database.
dbName1, dbName2 := strings.Repeat("a", mysql.MaxDatabaseNameLength), strings.Repeat("a", mysql.MaxDatabaseNameLength+1)
tk.MustExec(fmt.Sprintf("create database %s", dbName1))
tk.MustExec(fmt.Sprintf("drop database %s", dbName1))
_, err := tk.Exec(fmt.Sprintf("create database %s", dbName2))
c.Assert(err.Error(), Equals, fmt.Sprintf("[ddl:1059]Identifier name '%s' is too long", dbName2))
// for table.
tk.MustExec("use test")
tableName1, tableName2 := strings.Repeat("b", mysql.MaxTableNameLength), strings.Repeat("b", mysql.MaxTableNameLength+1)
tk.MustExec(fmt.Sprintf("create table %s(c int)", tableName1))
tk.MustExec(fmt.Sprintf("drop table %s", tableName1))
_, err = tk.Exec(fmt.Sprintf("create table %s(c int)", tableName2))
c.Assert(err.Error(), Equals, fmt.Sprintf("[ddl:1059]Identifier name '%s' is too long", tableName2))
// for column.
tk.MustExec("drop table if exists t;")
columnName1, columnName2 := strings.Repeat("c", mysql.MaxColumnNameLength), strings.Repeat("c", mysql.MaxColumnNameLength+1)
tk.MustExec(fmt.Sprintf("create table t(%s int)", columnName1))
tk.MustExec("drop table t")
_, err = tk.Exec(fmt.Sprintf("create table t(%s int)", columnName2))
c.Assert(err.Error(), Equals, fmt.Sprintf("[ddl:1059]Identifier name '%s' is too long", columnName2))
// for index.
tk.MustExec("create table t(c int);")
indexName1, indexName2 := strings.Repeat("d", mysql.MaxIndexIdentifierLen), strings.Repeat("d", mysql.MaxIndexIdentifierLen+1)
tk.MustExec(fmt.Sprintf("create index %s on t(c)", indexName1))
tk.MustExec(fmt.Sprintf("drop index %s on t", indexName1))
_, err = tk.Exec(fmt.Sprintf("create index %s on t(c)", indexName2))
c.Assert(err.Error(), Equals, fmt.Sprintf("[ddl:1059]Identifier name '%s' is too long", indexName2))
// for create table with index.
tk.MustExec("drop table t;")
_, err = tk.Exec(fmt.Sprintf("create table t(c int, index %s(c));", indexName2))
c.Assert(err.Error(), Equals, fmt.Sprintf("[ddl:1059]Identifier name '%s' is too long", indexName2))
}
func (s *testSuite8) TestShardRowIDBits(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (a int) shard_row_id_bits = 15")
for i := 0; i < 100; i++ {
tk.MustExec("insert into t values (?)", i)
}
dom := domain.GetDomain(tk.Se)
tbl, err := dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
assertCountAndShard := func(t table.Table, expectCount int) {
var hasShardedID bool
var count int
c.Assert(tk.Se.NewTxn(context.Background()), IsNil)
err = tables.IterRecords(t, tk.Se, nil, func(h kv.Handle, rec []types.Datum, cols []*table.Column) (more bool, err error) {
c.Assert(h.IntValue(), GreaterEqual, int64(0))
first8bits := h.IntValue() >> 56
if first8bits > 0 {
hasShardedID = true
}
count++
return true, nil
})
c.Assert(err, IsNil)
c.Assert(count, Equals, expectCount)
c.Assert(hasShardedID, IsTrue)
}
assertCountAndShard(tbl, 100)
	// After PR 10759, shard_row_id_bits is supported on tables with an auto_increment column.
tk.MustExec("create table auto (id int not null auto_increment unique) shard_row_id_bits = 4")
tk.MustExec("alter table auto shard_row_id_bits = 5")
tk.MustExec("drop table auto")
tk.MustExec("create table auto (id int not null auto_increment unique) shard_row_id_bits = 0")
tk.MustExec("alter table auto shard_row_id_bits = 5")
tk.MustExec("drop table auto")
tk.MustExec("create table auto (id int not null auto_increment unique)")
tk.MustExec("alter table auto shard_row_id_bits = 5")
tk.MustExec("drop table auto")
tk.MustExec("create table auto (id int not null auto_increment unique) shard_row_id_bits = 4")
tk.MustExec("alter table auto shard_row_id_bits = 0")
tk.MustExec("drop table auto")
errMsg := "[ddl:8200]Unsupported shard_row_id_bits for table with primary key as row id"
tk.MustGetErrMsg("create table auto (id varchar(255) primary key clustered, b int) shard_row_id_bits = 4;", errMsg)
tk.MustExec("create table auto (id varchar(255) primary key clustered, b int) shard_row_id_bits = 0;")
tk.MustGetErrMsg("alter table auto shard_row_id_bits = 5;", errMsg)
tk.MustExec("alter table auto shard_row_id_bits = 0;")
tk.MustExec("drop table if exists auto;")
// After PR 10759, shard_row_id_bits is not supported with pk_is_handle tables.
tk.MustGetErrMsg("create table auto (id int not null auto_increment primary key, b int) shard_row_id_bits = 4", errMsg)
tk.MustExec("create table auto (id int not null auto_increment primary key, b int) shard_row_id_bits = 0")
tk.MustGetErrMsg("alter table auto shard_row_id_bits = 5", errMsg)
tk.MustExec("alter table auto shard_row_id_bits = 0")
// Hack an existing table with shard_row_id_bits and primary key as handle
db, ok := dom.InfoSchema().SchemaByName(model.NewCIStr("test"))
c.Assert(ok, IsTrue)
tbl, err = dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("auto"))
tblInfo := tbl.Meta()
tblInfo.ShardRowIDBits = 5
tblInfo.MaxShardRowIDBits = 5
err = kv.RunInNewTxn(context.Background(), s.store, false, func(ctx context.Context, txn kv.Transaction) error {
m := meta.NewMeta(txn)
_, err = m.GenSchemaVersion()
c.Assert(err, IsNil)
c.Assert(m.UpdateTable(db.ID, tblInfo), IsNil)
return nil
})
c.Assert(err, IsNil)
err = dom.Reload()
c.Assert(err, IsNil)
tk.MustExec("insert auto(b) values (1), (3), (5)")
tk.MustQuery("select id from auto order by id").Check(testkit.Rows("1", "2", "3"))
tk.MustExec("alter table auto shard_row_id_bits = 0")
tk.MustExec("drop table auto")
// Test shard_row_id_bits with auto_increment column
tk.MustExec("create table auto (a int, b int auto_increment unique) shard_row_id_bits = 15")
for i := 0; i < 100; i++ {
tk.MustExec("insert into auto(a) values (?)", i)
}
tbl, err = dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("auto"))
assertCountAndShard(tbl, 100)
prevB, err := strconv.Atoi(tk.MustQuery("select b from auto where a=0").Rows()[0][0].(string))
c.Assert(err, IsNil)
for i := 1; i < 100; i++ {
b, err := strconv.Atoi(tk.MustQuery(fmt.Sprintf("select b from auto where a=%d", i)).Rows()[0][0].(string))
c.Assert(err, IsNil)
c.Assert(b, Greater, prevB)
prevB = b
}
// Test overflow
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (a int) shard_row_id_bits = 15")
defer tk.MustExec("drop table if exists t1")
tbl, err = dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t1"))
c.Assert(err, IsNil)
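	// With 15 shard bits and 1 sign bit, the incremental part has 64-15-1 = 48 bits; rebase the
	// allocator close to that maximum so the following inserts overflow quickly.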
maxID := 1<<(64-15-1) - 1
err = tbl.RebaseAutoID(tk.Se, int64(maxID)-1, false, autoid.RowIDAllocType)
c.Assert(err, IsNil)
tk.MustExec("insert into t1 values(1)")
	// Continuing to insert will fail.
_, err = tk.Exec("insert into t1 values(2)")
c.Assert(autoid.ErrAutoincReadFailed.Equal(err), IsTrue, Commentf("err:%v", err))
_, err = tk.Exec("insert into t1 values(3)")
c.Assert(autoid.ErrAutoincReadFailed.Equal(err), IsTrue, Commentf("err:%v", err))
}
type testAutoRandomSuite struct {
*baseTestSuite
}
func (s *testAutoRandomSuite) TestAutoRandomBitsData(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_auto_random_bits")
defer tk.MustExec("drop database if exists test_auto_random_bits")
tk.MustExec("use test_auto_random_bits")
tk.MustExec("drop table if exists t")
extractAllHandles := func() []int64 {
allHds, err := ddltestutil.ExtractAllTableHandles(tk.Se, "test_auto_random_bits", "t")
c.Assert(err, IsNil)
return allHds
}
tk.MustExec("set @@allow_auto_random_explicit_insert = true")
tk.MustExec("create table t (a bigint primary key clustered auto_random(15), b int)")
for i := 0; i < 100; i++ {
tk.MustExec("insert into t(b) values (?)", i)
}
allHandles := extractAllHandles()
tk.MustExec("drop table t")
	// Test the number of generated auto random IDs.
c.Assert(len(allHandles), Equals, 100)
// Test the handles are not all zero.
allZero := true
for _, h := range allHandles {
allZero = allZero && (h>>(64-16)) == 0
}
c.Assert(allZero, IsFalse)
	// Test that the non-shard-bits part of the auto random ID is monotonically increasing and continuous.
orderedHandles := testutil.MaskSortHandles(allHandles, 15, mysql.TypeLonglong)
size := int64(len(allHandles))
for i := int64(1); i <= size; i++ {
c.Assert(i, Equals, orderedHandles[i-1])
}
// Test explicit insert.
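	// With auto_random(15), the incremental part has 64-15-1 = 48 bits, so its upper bound is 2^48 - 1.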
autoRandBitsUpperBound := 2<<47 - 1
tk.MustExec("create table t (a bigint primary key clustered auto_random(15), b int)")
for i := -10; i < 10; i++ {
tk.MustExec(fmt.Sprintf("insert into t values(%d, %d)", i+autoRandBitsUpperBound, i))
}
_, err := tk.Exec("insert into t (b) values (0)")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, autoid.ErrAutoRandReadFailed.GenWithStackByArgs().Error())
tk.MustExec("drop table t")
// Test overflow.
tk.MustExec("create table t (a bigint primary key auto_random(15), b int)")
	// Here we cannot fill in all the values for a `bigint` column,
	// so we first rebase auto_rand to the position just before overflow.
tk.MustExec(fmt.Sprintf("insert into t values (%d, %d)", autoRandBitsUpperBound, 1))
_, err = tk.Exec("insert into t (b) values (0)")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, autoid.ErrAutoRandReadFailed.GenWithStackByArgs().Error())
tk.MustExec("drop table t")
tk.MustExec("create table t (a bigint primary key auto_random(15), b int)")
tk.MustExec("insert into t values (1, 2)")
tk.MustExec(fmt.Sprintf("update t set a = %d where a = 1", autoRandBitsUpperBound))
_, err = tk.Exec("insert into t (b) values (0)")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, autoid.ErrAutoRandReadFailed.GenWithStackByArgs().Error())
tk.MustExec("drop table t")
	// Test that explicitly inserting negative integers won't trigger a rebase.
tk.MustExec("create table t (a bigint primary key auto_random(15), b int)")
for i := 1; i <= 100; i++ {
tk.MustExec("insert into t(b) values (?)", i)
tk.MustExec("insert into t(a, b) values (?, ?)", -i, i)
}
// orderedHandles should be [-100, -99, ..., -2, -1, 1, 2, ..., 99, 100]
orderedHandles = testutil.MaskSortHandles(extractAllHandles(), 15, mysql.TypeLonglong)
size = int64(len(allHandles))
for i := int64(0); i < 100; i++ {
c.Assert(orderedHandles[i], Equals, i-100)
}
for i := int64(100); i < size; i++ {
c.Assert(orderedHandles[i], Equals, i-99)
}
tk.MustExec("drop table t")
// Test signed/unsigned types.
tk.MustExec("create table t (a bigint primary key auto_random(10), b int)")
for i := 0; i < 100; i++ {
tk.MustExec("insert into t (b) values(?)", i)
}
for _, h := range extractAllHandles() {
// Sign bit should be reserved.
c.Assert(h > 0, IsTrue)
}
tk.MustExec("drop table t")
tk.MustExec("create table t (a bigint unsigned primary key auto_random(10), b int)")
for i := 0; i < 100; i++ {
tk.MustExec("insert into t (b) values(?)", i)
}
signBitUnused := true
for _, h := range extractAllHandles() {
signBitUnused = signBitUnused && (h > 0)
}
	// The sign bit should be used for the shard bits.
c.Assert(signBitUnused, IsFalse)
tk.MustExec("drop table t;")
	// Test that renaming a table does not affect the incremental part of the auto_random ID.
tk.MustExec("create database test_auto_random_bits_rename;")
tk.MustExec("create table t (a bigint auto_random primary key);")
for i := 0; i < 10; i++ {
tk.MustExec("insert into t values ();")
}
tk.MustExec("alter table t rename to test_auto_random_bits_rename.t1;")
for i := 0; i < 10; i++ {
tk.MustExec("insert into test_auto_random_bits_rename.t1 values ();")
}
tk.MustExec("alter table test_auto_random_bits_rename.t1 rename to t;")
for i := 0; i < 10; i++ {
tk.MustExec("insert into t values ();")
}
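	// Mask off the shard bits (the default auto_random uses 5 bits after the sign bit) and check
	// that all 30 incremental parts stay distinct across the renames.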
uniqueHandles := make(map[int64]struct{})
for _, h := range extractAllHandles() {
uniqueHandles[h&((1<<(63-5))-1)] = struct{}{}
}
c.Assert(len(uniqueHandles), Equals, 30)
tk.MustExec("drop database test_auto_random_bits_rename;")
tk.MustExec("drop table t;")
}
func (s *testAutoRandomSuite) TestAutoRandomTableOption(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
	// Test the auto_random table option with an explicit auto_random_base.
tk.MustExec("drop table if exists auto_random_table_option")
tk.MustExec("create table auto_random_table_option (a bigint auto_random(5) key) auto_random_base = 1000")
t, err := domain.GetDomain(tk.Se).InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("auto_random_table_option"))
c.Assert(err, IsNil)
c.Assert(t.Meta().AutoRandID, Equals, int64(1000))
tk.MustExec("insert into auto_random_table_option values (),(),(),(),()")
allHandles, err := ddltestutil.ExtractAllTableHandles(tk.Se, "test", "auto_random_table_option")
c.Assert(err, IsNil)
c.Assert(len(allHandles), Equals, 5)
	// Test that the non-shard-bits part of the auto random ID is monotonically increasing and continuous.
orderedHandles := testutil.MaskSortHandles(allHandles, 5, mysql.TypeLonglong)
size := int64(len(allHandles))
for i := int64(0); i < size; i++ {
c.Assert(i+1000, Equals, orderedHandles[i])
}
tk.MustExec("drop table if exists alter_table_auto_random_option")
tk.MustExec("create table alter_table_auto_random_option (a bigint primary key auto_random(4), b int)")
t, err = domain.GetDomain(tk.Se).InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("alter_table_auto_random_option"))
c.Assert(err, IsNil)
c.Assert(t.Meta().AutoRandID, Equals, int64(0))
tk.MustExec("insert into alter_table_auto_random_option values(),(),(),(),()")
allHandles, err = ddltestutil.ExtractAllTableHandles(tk.Se, "test", "alter_table_auto_random_option")
c.Assert(err, IsNil)
orderedHandles = testutil.MaskSortHandles(allHandles, 5, mysql.TypeLonglong)
size = int64(len(allHandles))
for i := int64(0); i < size; i++ {
c.Assert(orderedHandles[i], Equals, i+1)
}
tk.MustExec("delete from alter_table_auto_random_option")
	// Alter the table to change the auto_random option (this discards the local allocator cache).
	// We choose a base far beyond the current values so the new base cannot fall inside the old
	// local cache range; otherwise the next allocated value might not reflect the rebase.
tk.MustExec("alter table alter_table_auto_random_option auto_random_base = 3000000")
t, err = domain.GetDomain(tk.Se).InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("alter_table_auto_random_option"))
c.Assert(err, IsNil)
c.Assert(t.Meta().AutoRandID, Equals, int64(3000000))
tk.MustExec("insert into alter_table_auto_random_option values(),(),(),(),()")
allHandles, err = ddltestutil.ExtractAllTableHandles(tk.Se, "test", "alter_table_auto_random_option")
c.Assert(err, IsNil)
orderedHandles = testutil.MaskSortHandles(allHandles, 5, mysql.TypeLonglong)
size = int64(len(allHandles))
for i := int64(0); i < size; i++ {
c.Assert(orderedHandles[i], Equals, i+3000000)
}
tk.MustExec("drop table alter_table_auto_random_option")
	// Alter auto_random_base on a table without the auto_random attribute.
tk.MustExec("create table alter_auto_random_normal (a int)")
_, err = tk.Exec("alter table alter_auto_random_normal auto_random_base = 100")
c.Assert(err, NotNil)
c.Assert(strings.Contains(err.Error(), autoid.AutoRandomRebaseNotApplicable), IsTrue, Commentf(err.Error()))
}
// Test filtering different kinds of allocators under special DDL types, for example:
// 1. ActionRenameTable: it abandons all the old allocators.
// 2. ActionRebaseAutoID: it drops the row-id-type allocator.
// 3. ActionModifyTableAutoIdCache: it drops the row-id-type allocator.
// 4. ActionRebaseAutoRandomBase: it drops the auto-rand-type allocator.
func (s *testAutoRandomSuite) TestFilterDifferentAllocators(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t(a bigint auto_random(5) key, b int auto_increment unique)")
tk.MustExec("insert into t values()")
tk.MustQuery("select b from t").Check(testkit.Rows("1"))
allHandles, err := ddltestutil.ExtractAllTableHandles(tk.Se, "test", "t")
c.Assert(err, IsNil)
c.Assert(len(allHandles), Equals, 1)
orderedHandles := testutil.MaskSortHandles(allHandles, 5, mysql.TypeLonglong)
c.Assert(orderedHandles[0], Equals, int64(1))
tk.MustExec("delete from t")
// Test rebase auto_increment.
tk.MustExec("alter table t auto_increment 3000000")
tk.MustExec("insert into t values()")
tk.MustQuery("select b from t").Check(testkit.Rows("3000000"))
allHandles, err = ddltestutil.ExtractAllTableHandles(tk.Se, "test", "t")
c.Assert(err, IsNil)
c.Assert(len(allHandles), Equals, 1)
orderedHandles = testutil.MaskSortHandles(allHandles, 5, mysql.TypeLonglong)
c.Assert(orderedHandles[0], Equals, int64(2))
tk.MustExec("delete from t")
// Test rebase auto_random.
tk.MustExec("alter table t auto_random_base 3000000")
tk.MustExec("insert into t values()")
tk.MustQuery("select b from t").Check(testkit.Rows("3000001"))
allHandles, err = ddltestutil.ExtractAllTableHandles(tk.Se, "test", "t")
c.Assert(err, IsNil)
c.Assert(len(allHandles), Equals, 1)
orderedHandles = testutil.MaskSortHandles(allHandles, 5, mysql.TypeLonglong)
c.Assert(orderedHandles[0], Equals, int64(3000000))
tk.MustExec("delete from t")
// Test rename table.
tk.MustExec("rename table t to t1")
tk.MustExec("insert into t1 values()")
res := tk.MustQuery("select b from t1")
strInt64, err := strconv.ParseInt(res.Rows()[0][0].(string), 10, 64)
c.Assert(err, IsNil)
c.Assert(strInt64, Greater, int64(3000002))
allHandles, err = ddltestutil.ExtractAllTableHandles(tk.Se, "test", "t1")
c.Assert(err, IsNil)
c.Assert(len(allHandles), Equals, 1)
orderedHandles = testutil.MaskSortHandles(allHandles, 5, mysql.TypeLonglong)
c.Assert(orderedHandles[0], Greater, int64(3000001))
}
func (s *testSuite6) TestMaxHandleAddIndex(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t(a bigint PRIMARY KEY, b int)")
tk.MustExec(fmt.Sprintf("insert into t values(%v, 1)", math.MaxInt64))
tk.MustExec(fmt.Sprintf("insert into t values(%v, 1)", math.MinInt64))
tk.MustExec("alter table t add index idx_b(b)")
tk.MustExec("admin check table t")
tk.MustExec("create table t1(a bigint UNSIGNED PRIMARY KEY, b int)")
tk.MustExec(fmt.Sprintf("insert into t1 values(%v, 1)", uint64(math.MaxUint64)))
tk.MustExec(fmt.Sprintf("insert into t1 values(%v, 1)", 0))
tk.MustExec("alter table t1 add index idx_b(b)")
tk.MustExec("admin check table t1")
}
func (s *testSuite6) TestSetDDLReorgWorkerCnt(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
err := ddlutil.LoadDDLReorgVars(tk.Se)
c.Assert(err, IsNil)
c.Assert(variable.GetDDLReorgWorkerCounter(), Equals, int32(variable.DefTiDBDDLReorgWorkerCount))
tk.MustExec("set @@global.tidb_ddl_reorg_worker_cnt = 1")
err = ddlutil.LoadDDLReorgVars(tk.Se)
c.Assert(err, IsNil)
c.Assert(variable.GetDDLReorgWorkerCounter(), Equals, int32(1))
tk.MustExec("set @@global.tidb_ddl_reorg_worker_cnt = 100")
err = ddlutil.LoadDDLReorgVars(tk.Se)
c.Assert(err, IsNil)
c.Assert(variable.GetDDLReorgWorkerCounter(), Equals, int32(100))
_, err = tk.Exec("set @@global.tidb_ddl_reorg_worker_cnt = invalid_val")
c.Assert(terror.ErrorEqual(err, variable.ErrWrongTypeForVar), IsTrue, Commentf("err %v", err))
tk.MustExec("set @@global.tidb_ddl_reorg_worker_cnt = 100")
err = ddlutil.LoadDDLReorgVars(tk.Se)
c.Assert(err, IsNil)
c.Assert(variable.GetDDLReorgWorkerCounter(), Equals, int32(100))
_, err = tk.Exec("set @@global.tidb_ddl_reorg_worker_cnt = -1")
c.Assert(terror.ErrorEqual(err, variable.ErrWrongValueForVar), IsTrue, Commentf("err %v", err))
tk.MustExec("set @@global.tidb_ddl_reorg_worker_cnt = 100")
res := tk.MustQuery("select @@global.tidb_ddl_reorg_worker_cnt")
res.Check(testkit.Rows("100"))
res = tk.MustQuery("select @@global.tidb_ddl_reorg_worker_cnt")
res.Check(testkit.Rows("100"))
tk.MustExec("set @@global.tidb_ddl_reorg_worker_cnt = 100")
res = tk.MustQuery("select @@global.tidb_ddl_reorg_worker_cnt")
res.Check(testkit.Rows("100"))
_, err = tk.Exec("set @@global.tidb_ddl_reorg_worker_cnt = 129")
c.Assert(terror.ErrorEqual(err, variable.ErrWrongValueForVar), IsTrue, Commentf("err %v", err))
}
func (s *testSuite6) TestSetDDLReorgBatchSize(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
err := ddlutil.LoadDDLReorgVars(tk.Se)
c.Assert(err, IsNil)
c.Assert(variable.GetDDLReorgBatchSize(), Equals, int32(variable.DefTiDBDDLReorgBatchSize))
tk.MustExec("set @@global.tidb_ddl_reorg_batch_size = 1")
tk.MustQuery("show warnings;").Check(testkit.Rows("Warning 1292 Truncated incorrect tidb_ddl_reorg_batch_size value: '1'"))
err = ddlutil.LoadDDLReorgVars(tk.Se)
c.Assert(err, IsNil)
c.Assert(variable.GetDDLReorgBatchSize(), Equals, variable.MinDDLReorgBatchSize)
tk.MustExec(fmt.Sprintf("set @@global.tidb_ddl_reorg_batch_size = %v", variable.MaxDDLReorgBatchSize+1))
tk.MustQuery("show warnings;").Check(testkit.Rows(fmt.Sprintf("Warning 1292 Truncated incorrect tidb_ddl_reorg_batch_size value: '%d'", variable.MaxDDLReorgBatchSize+1)))
err = ddlutil.LoadDDLReorgVars(tk.Se)
c.Assert(err, IsNil)
c.Assert(variable.GetDDLReorgBatchSize(), Equals, variable.MaxDDLReorgBatchSize)
_, err = tk.Exec("set @@global.tidb_ddl_reorg_batch_size = invalid_val")
c.Assert(terror.ErrorEqual(err, variable.ErrWrongTypeForVar), IsTrue, Commentf("err %v", err))
tk.MustExec("set @@global.tidb_ddl_reorg_batch_size = 100")
err = ddlutil.LoadDDLReorgVars(tk.Se)
c.Assert(err, IsNil)
c.Assert(variable.GetDDLReorgBatchSize(), Equals, int32(100))
tk.MustExec("set @@global.tidb_ddl_reorg_batch_size = -1")
tk.MustQuery("show warnings;").Check(testkit.Rows("Warning 1292 Truncated incorrect tidb_ddl_reorg_batch_size value: '-1'"))
tk.MustExec("set @@global.tidb_ddl_reorg_batch_size = 100")
res := tk.MustQuery("select @@global.tidb_ddl_reorg_batch_size")
res.Check(testkit.Rows("100"))
res = tk.MustQuery("select @@global.tidb_ddl_reorg_batch_size")
res.Check(testkit.Rows(fmt.Sprintf("%v", 100)))
tk.MustExec("set @@global.tidb_ddl_reorg_batch_size = 1000")
res = tk.MustQuery("select @@global.tidb_ddl_reorg_batch_size")
res.Check(testkit.Rows("1000"))
}
func (s *testSuite6) TestIllegalFunctionCall4GeneratedColumns(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
	// Test creating a database that already exists.
_, err := tk.Exec("CREATE database test")
c.Assert(err, NotNil)
_, err = tk.Exec("create table t1 (b double generated always as (rand()) virtual);")
c.Assert(err.Error(), Equals, ddl.ErrGeneratedColumnFunctionIsNotAllowed.GenWithStackByArgs("b").Error())
_, err = tk.Exec("create table t1 (a varchar(64), b varchar(1024) generated always as (load_file(a)) virtual);")
c.Assert(err.Error(), Equals, ddl.ErrGeneratedColumnFunctionIsNotAllowed.GenWithStackByArgs("b").Error())
_, err = tk.Exec("create table t1 (a datetime generated always as (curdate()) virtual);")
c.Assert(err.Error(), Equals, ddl.ErrGeneratedColumnFunctionIsNotAllowed.GenWithStackByArgs("a").Error())
_, err = tk.Exec("create table t1 (a datetime generated always as (current_time()) virtual);")
c.Assert(err.Error(), Equals, ddl.ErrGeneratedColumnFunctionIsNotAllowed.GenWithStackByArgs("a").Error())
_, err = tk.Exec("create table t1 (a datetime generated always as (current_timestamp()) virtual);")
c.Assert(err.Error(), Equals, ddl.ErrGeneratedColumnFunctionIsNotAllowed.GenWithStackByArgs("a").Error())
_, err = tk.Exec("create table t1 (a datetime, b varchar(10) generated always as (localtime()) virtual);")
c.Assert(err.Error(), Equals, ddl.ErrGeneratedColumnFunctionIsNotAllowed.GenWithStackByArgs("b").Error())
_, err = tk.Exec("create table t1 (a varchar(1024) generated always as (uuid()) virtual);")
c.Assert(err.Error(), Equals, ddl.ErrGeneratedColumnFunctionIsNotAllowed.GenWithStackByArgs("a").Error())
_, err = tk.Exec("create table t1 (a varchar(1024), b varchar(1024) generated always as (is_free_lock(a)) virtual);")
c.Assert(err.Error(), Equals, ddl.ErrGeneratedColumnFunctionIsNotAllowed.GenWithStackByArgs("b").Error())
tk.MustExec("create table t1 (a bigint not null primary key auto_increment, b bigint, c bigint as (b + 1));")
_, err = tk.Exec("alter table t1 add column d varchar(1024) generated always as (database());")
c.Assert(err.Error(), Equals, ddl.ErrGeneratedColumnFunctionIsNotAllowed.GenWithStackByArgs("d").Error())
tk.MustExec("alter table t1 add column d bigint generated always as (b + 1); ")
_, err = tk.Exec("alter table t1 modify column d bigint generated always as (connection_id());")
c.Assert(err.Error(), Equals, ddl.ErrGeneratedColumnFunctionIsNotAllowed.GenWithStackByArgs("d").Error())
_, err = tk.Exec("alter table t1 change column c cc bigint generated always as (connection_id());")
c.Assert(err.Error(), Equals, ddl.ErrGeneratedColumnFunctionIsNotAllowed.GenWithStackByArgs("cc").Error())
}
func (s *testSuite6) TestGeneratedColumnRelatedDDL(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
	// Test creating a database that already exists.
_, err := tk.Exec("CREATE database test")
c.Assert(err, NotNil)
_, err = tk.Exec("create table t1 (a bigint not null primary key auto_increment, b bigint as (a + 1));")
c.Assert(err.Error(), Equals, ddl.ErrGeneratedColumnRefAutoInc.GenWithStackByArgs("b").Error())
tk.MustExec("create table t1 (a bigint not null primary key auto_increment, b bigint, c bigint as (b + 1));")
_, err = tk.Exec("alter table t1 add column d bigint generated always as (a + 1);")
c.Assert(err.Error(), Equals, ddl.ErrGeneratedColumnRefAutoInc.GenWithStackByArgs("d").Error())
tk.MustExec("alter table t1 add column d bigint generated always as (b + 1);")
_, err = tk.Exec("alter table t1 modify column d bigint generated always as (a + 1);")
c.Assert(err.Error(), Equals, ddl.ErrGeneratedColumnRefAutoInc.GenWithStackByArgs("d").Error())
_, err = tk.Exec("alter table t1 add column e bigint as (z + 1);")
c.Assert(err.Error(), Equals, ddl.ErrBadField.GenWithStackByArgs("z", "generated column function").Error())
tk.MustExec("drop table t1;")
tk.MustExec("create table t1(a int, b int as (a+1), c int as (b+1));")
tk.MustExec("insert into t1 (a) values (1);")
tk.MustGetErrCode("alter table t1 modify column c int as (b+1) first;", mysql.ErrGeneratedColumnNonPrior)
tk.MustGetErrCode("alter table t1 modify column b int as (a+1) after c;", mysql.ErrGeneratedColumnNonPrior)
tk.MustQuery("select * from t1").Check(testkit.Rows("1 2 3"))
}
func (s *testSuite6) TestSetDDLErrorCountLimit(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
err := ddlutil.LoadDDLVars(tk.Se)
c.Assert(err, IsNil)
c.Assert(variable.GetDDLErrorCountLimit(), Equals, int64(variable.DefTiDBDDLErrorCountLimit))
tk.MustExec("set @@global.tidb_ddl_error_count_limit = -1")
tk.MustQuery("show warnings;").Check(testkit.Rows("Warning 1292 Truncated incorrect tidb_ddl_error_count_limit value: '-1'"))
err = ddlutil.LoadDDLVars(tk.Se)
c.Assert(err, IsNil)
c.Assert(variable.GetDDLErrorCountLimit(), Equals, int64(0))
tk.MustExec(fmt.Sprintf("set @@global.tidb_ddl_error_count_limit = %v", uint64(math.MaxInt64)+1))
tk.MustQuery("show warnings;").Check(testkit.Rows(fmt.Sprintf("Warning 1292 Truncated incorrect tidb_ddl_error_count_limit value: '%d'", uint64(math.MaxInt64)+1)))
err = ddlutil.LoadDDLVars(tk.Se)
c.Assert(err, IsNil)
c.Assert(variable.GetDDLErrorCountLimit(), Equals, int64(math.MaxInt64))
_, err = tk.Exec("set @@global.tidb_ddl_error_count_limit = invalid_val")
c.Assert(terror.ErrorEqual(err, variable.ErrWrongTypeForVar), IsTrue, Commentf("err %v", err))
tk.MustExec("set @@global.tidb_ddl_error_count_limit = 100")
err = ddlutil.LoadDDLVars(tk.Se)
c.Assert(err, IsNil)
c.Assert(variable.GetDDLErrorCountLimit(), Equals, int64(100))
res := tk.MustQuery("select @@global.tidb_ddl_error_count_limit")
res.Check(testkit.Rows("100"))
}
// Test issue #9205, fix the precision problem for time type default values
// See https://github.com/pingcap/tidb/issues/9205 for details
func (s *testSuite6) TestIssue9205(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t;`)
tk.MustExec(`create table t(c time DEFAULT '12:12:12.8');`)
tk.MustQuery("show create table `t`").Check(testutil.RowsWithSep("|",
""+
"t CREATE TABLE `t` (\n"+
" `c` time DEFAULT '12:12:13'\n"+
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin",
))
tk.MustExec(`alter table t add column c1 time default '12:12:12.000000';`)
tk.MustQuery("show create table `t`").Check(testutil.RowsWithSep("|",
""+
"t CREATE TABLE `t` (\n"+
" `c` time DEFAULT '12:12:13',\n"+
" `c1` time DEFAULT '12:12:12'\n"+
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin",
))
tk.MustExec(`alter table t alter column c1 set default '2019-02-01 12:12:10.4';`)
tk.MustQuery("show create table `t`").Check(testutil.RowsWithSep("|",
""+
"t CREATE TABLE `t` (\n"+
" `c` time DEFAULT '12:12:13',\n"+
" `c1` time DEFAULT '12:12:10'\n"+
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin",
))
tk.MustExec(`alter table t modify c1 time DEFAULT '770:12:12.000000';`)
tk.MustQuery("show create table `t`").Check(testutil.RowsWithSep("|",
""+
"t CREATE TABLE `t` (\n"+
" `c` time DEFAULT '12:12:13',\n"+
" `c1` time DEFAULT '770:12:12'\n"+
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin",
))
}
func (s *testSuite6) TestCheckDefaultFsp(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t;`)
_, err := tk.Exec("create table t ( tt timestamp default now(1));")
c.Assert(err.Error(), Equals, "[ddl:1067]Invalid default value for 'tt'")
_, err = tk.Exec("create table t ( tt timestamp(1) default current_timestamp);")
c.Assert(err.Error(), Equals, "[ddl:1067]Invalid default value for 'tt'")
_, err = tk.Exec("create table t ( tt timestamp(1) default now(2));")
c.Assert(err.Error(), Equals, "[ddl:1067]Invalid default value for 'tt'")
tk.MustExec("create table t ( tt timestamp(1) default now(1));")
tk.MustExec("create table t2 ( tt timestamp default current_timestamp());")
tk.MustExec("create table t3 ( tt timestamp default current_timestamp(0));")
_, err = tk.Exec("alter table t add column ttt timestamp default now(2);")
c.Assert(err.Error(), Equals, "[ddl:1067]Invalid default value for 'ttt'")
_, err = tk.Exec("alter table t add column ttt timestamp(5) default current_timestamp;")
c.Assert(err.Error(), Equals, "[ddl:1067]Invalid default value for 'ttt'")
_, err = tk.Exec("alter table t add column ttt timestamp(5) default now(2);")
c.Assert(err.Error(), Equals, "[ddl:1067]Invalid default value for 'ttt'")
_, err = tk.Exec("alter table t modify column tt timestamp(1) default now();")
c.Assert(err.Error(), Equals, "[ddl:1067]Invalid default value for 'tt'")
_, err = tk.Exec("alter table t modify column tt timestamp(4) default now(5);")
c.Assert(err.Error(), Equals, "[ddl:1067]Invalid default value for 'tt'")
_, err = tk.Exec("alter table t change column tt tttt timestamp(4) default now(5);")
c.Assert(err.Error(), Equals, "[ddl:1067]Invalid default value for 'tttt'")
_, err = tk.Exec("alter table t change column tt tttt timestamp(1) default now();")
c.Assert(err.Error(), Equals, "[ddl:1067]Invalid default value for 'tttt'")
}
func (s *testSuite6) TestTimestampMinDefaultValue(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists tdv;")
tk.MustExec("create table tdv(a int);")
tk.MustExec("ALTER TABLE tdv ADD COLUMN ts timestamp DEFAULT '1970-01-01 08:00:01';")
}
// This test changes the fail-point `mockAutoIDChange`, so it is placed in the `testRecoverTable` suite.
func (s *testRecoverTable) TestRenameTable(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange"), IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database rename1")
tk.MustExec("create database rename2")
tk.MustExec("create database rename3")
tk.MustExec("create table rename1.t (a int primary key auto_increment)")
tk.MustExec("insert rename1.t values ()")
tk.MustExec("rename table rename1.t to rename2.t")
	// Make sure dropping the old database doesn't affect the renamed table's operations.
tk.MustExec("drop database rename1")
tk.MustExec("insert rename2.t values ()")
tk.MustExec("rename table rename2.t to rename3.t")
tk.MustExec("insert rename3.t values ()")
tk.MustQuery("select * from rename3.t").Check(testkit.Rows("1", "5001", "10001"))
	// Make sure dropping the old database doesn't affect rename3.t's operations.
tk.MustExec("drop database rename2")
tk.MustExec("insert rename3.t values ()")
tk.MustQuery("select * from rename3.t").Check(testkit.Rows("1", "5001", "10001", "10002"))
tk.MustExec("drop database rename3")
tk.MustExec("create database rename1")
tk.MustExec("create database rename2")
tk.MustExec("create table rename1.t (a int primary key auto_increment)")
tk.MustExec("rename table rename1.t to rename2.t1")
tk.MustExec("insert rename2.t1 values ()")
result := tk.MustQuery("select * from rename2.t1")
result.Check(testkit.Rows("1"))
	// Make sure dropping the old database doesn't affect t1's operations.
tk.MustExec("drop database rename1")
tk.MustExec("insert rename2.t1 values ()")
result = tk.MustQuery("select * from rename2.t1")
result.Check(testkit.Rows("1", "2"))
// Rename a table to another table in the same database.
tk.MustExec("rename table rename2.t1 to rename2.t2")
tk.MustExec("insert rename2.t2 values ()")
result = tk.MustQuery("select * from rename2.t2")
result.Check(testkit.Rows("1", "2", "5001"))
tk.MustExec("drop database rename2")
tk.MustExec("create database rename1")
tk.MustExec("create database rename2")
tk.MustExec("create table rename1.t (a int primary key auto_increment)")
tk.MustExec("insert rename1.t values ()")
tk.MustExec("rename table rename1.t to rename2.t1")
// Make sure the value is greater than autoid.step.
tk.MustExec("insert rename2.t1 values (100000)")
tk.MustExec("insert rename2.t1 values ()")
result = tk.MustQuery("select * from rename2.t1")
result.Check(testkit.Rows("1", "100000", "100001"))
_, err := tk.Exec("insert rename1.t values ()")
c.Assert(err, NotNil)
tk.MustExec("drop database rename1")
tk.MustExec("drop database rename2")
}
func (s *testSuite6) TestAutoIncrementColumnErrorMessage(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
	// Test creating a database that already exists.
_, err := tk.Exec("CREATE database test")
c.Assert(err, NotNil)
tk.MustExec("CREATE TABLE t1 (t1_id INT NOT NULL AUTO_INCREMENT PRIMARY KEY);")
_, err = tk.Exec("CREATE INDEX idx1 ON t1 ((t1_id + t1_id));")
c.Assert(err.Error(), Equals, ddl.ErrExpressionIndexCanNotRefer.GenWithStackByArgs("idx1").Error())
}
func (s *testRecoverTable) TestRenameMultiTables(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange"), IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database rename1")
tk.MustExec("create database rename2")
tk.MustExec("create database rename3")
tk.MustExec("create database rename4")
tk.MustExec("create table rename1.t1 (a int primary key auto_increment)")
tk.MustExec("create table rename3.t3 (a int primary key auto_increment)")
tk.MustExec("insert rename1.t1 values ()")
tk.MustExec("insert rename3.t3 values ()")
tk.MustExec("rename table rename1.t1 to rename2.t2, rename3.t3 to rename4.t4")
	// Make sure dropping the old databases doesn't affect t2's and t4's operations.
tk.MustExec("drop database rename1")
tk.MustExec("insert rename2.t2 values ()")
tk.MustExec("drop database rename3")
tk.MustExec("insert rename4.t4 values ()")
tk.MustQuery("select * from rename2.t2").Check(testkit.Rows("1", "2"))
tk.MustQuery("select * from rename4.t4").Check(testkit.Rows("1", "2"))
// Rename a table to another table in the same database.
tk.MustExec("rename table rename2.t2 to rename2.t1, rename4.t4 to rename4.t3")
tk.MustExec("insert rename2.t1 values ()")
tk.MustQuery("select * from rename2.t1").Check(testkit.Rows("1", "2", "3"))
tk.MustExec("insert rename4.t3 values ()")
tk.MustQuery("select * from rename4.t3").Check(testkit.Rows("1", "2", "3"))
tk.MustExec("drop database rename2")
tk.MustExec("drop database rename4")
tk.MustExec("create database rename1")
tk.MustExec("create database rename2")
tk.MustExec("create database rename3")
tk.MustExec("create table rename1.t1 (a int primary key auto_increment)")
tk.MustExec("create table rename3.t3 (a int primary key auto_increment)")
tk.MustGetErrCode("rename table rename1.t1 to rename2.t2, rename3.t3 to rename2.t2", errno.ErrTableExists)
tk.MustExec("rename table rename1.t1 to rename2.t2, rename2.t2 to rename1.t1")
tk.MustExec("rename table rename1.t1 to rename2.t2, rename3.t3 to rename1.t1")
tk.MustExec("use rename1")
tk.MustQuery("show tables").Check(testkit.Rows("t1"))
tk.MustExec("use rename2")
tk.MustQuery("show tables").Check(testkit.Rows("t2"))
tk.MustExec("use rename3")
tk.MustExec("create table rename3.t3 (a int primary key auto_increment)")
tk.MustGetErrCode("rename table rename1.t1 to rename1.t2, rename1.t1 to rename3.t3", errno.ErrTableExists)
tk.MustGetErrCode("rename table rename1.t1 to rename1.t2, rename1.t1 to rename3.t4", errno.ErrFileNotFound)
tk.MustExec("drop database rename1")
tk.MustExec("drop database rename2")
tk.MustExec("drop database rename3")
}
| executor/ddl_test.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.0002730288833845407,
0.0001716346450848505,
0.00015731665189377964,
0.00017149744962807745,
0.000010141273378394544
] |
{
"id": 2,
"code_window": [
"\n",
"func (e *AnalyzeColumnsExec) open(ranges []*ranger.Range) error {\n",
"\te.resultHandler = &tableResultHandler{}\n",
"\tfirstPartRanges, secondPartRanges := distsql.SplitRangesBySign(ranges, true, false, !hasPkHist(e.handleCols))\n",
"\tfirstResult, err := e.buildResp(firstPartRanges)\n",
"\tif err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfirstPartRanges, secondPartRanges := distsql.SplitRangesAcrossInt64Boundary(ranges, true, false, !hasPkHist(e.handleCols))\n"
],
"file_path": "executor/analyze.go",
"type": "replace",
"edit_start_line_idx": 600
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"context"
. "github.com/pingcap/check"
"github.com/pingcap/failpoint"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/types"
)
var _ = SerialSuites(&testPartitionSuite{})
type testPartitionSuite struct {
store kv.Storage
}
func (s *testPartitionSuite) SetUpSuite(c *C) {
s.store = testCreateStore(c, "test_store")
}
func (s *testPartitionSuite) TearDownSuite(c *C) {
err := s.store.Close()
c.Assert(err, IsNil)
}
func (s *testPartitionSuite) TestDropAndTruncatePartition(c *C) {
d := testNewDDLAndStart(
context.Background(),
c,
WithStore(s.store),
WithLease(testLease),
)
defer func() {
err := d.Stop()
c.Assert(err, IsNil)
}()
dbInfo := testSchemaInfo(c, d, "test_partition")
testCreateSchema(c, testNewContext(d), d, dbInfo)
	// Generate 5 partitions in the tableInfo.
tblInfo, partIDs := buildTableInfoWithPartition(c, d)
ctx := testNewContext(d)
testCreateTable(c, ctx, d, dbInfo, tblInfo)
testDropPartition(c, ctx, d, dbInfo, tblInfo, []string{"p0", "p1"})
testTruncatePartition(c, ctx, d, dbInfo, tblInfo, []int64{partIDs[3], partIDs[4]})
}
func buildTableInfoWithPartition(c *C, d *ddl) (*model.TableInfo, []int64) {
tbl := &model.TableInfo{
Name: model.NewCIStr("t"),
}
col := &model.ColumnInfo{
Name: model.NewCIStr("c"),
Offset: 0,
State: model.StatePublic,
FieldType: *types.NewFieldType(mysql.TypeLong),
ID: allocateColumnID(tbl),
}
genIDs, err := d.genGlobalIDs(1)
c.Assert(err, IsNil)
tbl.ID = genIDs[0]
tbl.Columns = []*model.ColumnInfo{col}
tbl.Charset = "utf8"
tbl.Collate = "utf8_bin"
partIDs, err := d.genGlobalIDs(5)
c.Assert(err, IsNil)
partInfo := &model.PartitionInfo{
Type: model.PartitionTypeRange,
Expr: tbl.Columns[0].Name.L,
Enable: true,
Definitions: []model.PartitionDefinition{
{
ID: partIDs[0],
Name: model.NewCIStr("p0"),
LessThan: []string{"100"},
},
{
ID: partIDs[1],
Name: model.NewCIStr("p1"),
LessThan: []string{"200"},
},
{
ID: partIDs[2],
Name: model.NewCIStr("p2"),
LessThan: []string{"300"},
},
{
ID: partIDs[3],
Name: model.NewCIStr("p3"),
LessThan: []string{"400"},
},
{
ID: partIDs[4],
Name: model.NewCIStr("p4"),
LessThan: []string{"500"},
},
},
}
tbl.Partition = partInfo
return tbl, partIDs
}
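// buildDropPartitionJob constructs a DDL job that drops the named partitions from the table.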
func buildDropPartitionJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, partNames []string) *model.Job {
return &model.Job{
SchemaID: dbInfo.ID,
TableID: tblInfo.ID,
Type: model.ActionDropTablePartition,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{partNames},
}
}
func testDropPartition(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, partNames []string) *model.Job {
job := buildDropPartitionJob(dbInfo, tblInfo, partNames)
err := d.doDDLJob(ctx, job)
c.Assert(err, IsNil)
v := getSchemaVer(c, ctx)
checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo})
return job
}
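// buildTruncatePartitionJob constructs a DDL job that truncates the partitions with the given IDs.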
func buildTruncatePartitionJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, pids []int64) *model.Job {
return &model.Job{
SchemaID: dbInfo.ID,
TableID: tblInfo.ID,
Type: model.ActionTruncateTablePartition,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{pids},
}
}
func testTruncatePartition(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, pids []int64) *model.Job {
job := buildTruncatePartitionJob(dbInfo, tblInfo, pids)
err := d.doDDLJob(ctx, job)
c.Assert(err, IsNil)
v := getSchemaVer(c, ctx)
checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo})
return job
}
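// testAddPartition submits a DDL job that adds a new range partition p2 (values less than 300)
// and returns any error from running the job.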
func testAddPartition(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo) error {
ids, err := d.genGlobalIDs(1)
c.Assert(err, IsNil)
partitionInfo := &model.PartitionInfo{
Type: model.PartitionTypeRange,
Expr: tblInfo.Columns[0].Name.L,
Enable: true,
Definitions: []model.PartitionDefinition{
{
ID: ids[0],
Name: model.NewCIStr("p2"),
LessThan: []string{"300"},
},
},
}
addPartitionJob := &model.Job{
SchemaID: dbInfo.ID,
TableID: tblInfo.ID,
Type: model.ActionAddTablePartition,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{partitionInfo},
}
return d.doDDLJob(ctx, addPartitionJob)
}
func (s *testPartitionSuite) TestAddPartitionReplicaBiggerThanTiFlashStores(c *C) {
d := testNewDDLAndStart(
context.Background(),
c,
WithStore(s.store),
WithLease(testLease),
)
defer func() {
err := d.Stop()
c.Assert(err, IsNil)
}()
dbInfo := testSchemaInfo(c, d, "test_partition2")
testCreateSchema(c, testNewContext(d), d, dbInfo)
	// Build a tableInfo with replica count = 1 while there is no real TiFlash store.
tblInfo := buildTableInfoWithReplicaInfo(c, d)
ctx := testNewContext(d)
testCreateTable(c, ctx, d, dbInfo, tblInfo)
err := testAddPartition(c, ctx, d, dbInfo, tblInfo)
// Since there is no real TiFlash store (less than replica count), adding a partition will error here.
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[ddl:-1][ddl] the tiflash replica count: 1 should be less than the total tiflash server count: 0")
	// Test that `add partition`, while waiting for the TiFlash replica, can exit once its retry count exceeds the limit.
originErrCountLimit := variable.GetDDLErrorCountLimit()
variable.SetDDLErrorCountLimit(3)
defer func() {
variable.SetDDLErrorCountLimit(originErrCountLimit)
}()
c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/mockWaitTiFlashReplica", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/mockWaitTiFlashReplica"), IsNil)
}()
err = testAddPartition(c, ctx, d, dbInfo, tblInfo)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[ddl:-1]DDL job rollback, error msg: [ddl] add partition wait for tiflash replica to complete")
}
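// buildTableInfoWithReplicaInfo builds a range-partitioned table info that declares one available TiFlash replica.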
func buildTableInfoWithReplicaInfo(c *C, d *ddl) *model.TableInfo {
tbl := &model.TableInfo{
Name: model.NewCIStr("t1"),
}
col := &model.ColumnInfo{
Name: model.NewCIStr("c"),
Offset: 0,
State: model.StatePublic,
FieldType: *types.NewFieldType(mysql.TypeLong),
ID: allocateColumnID(tbl),
}
genIDs, err := d.genGlobalIDs(1)
c.Assert(err, IsNil)
tbl.ID = genIDs[0]
tbl.Columns = []*model.ColumnInfo{col}
tbl.Charset = "utf8"
tbl.Collate = "utf8_bin"
tbl.TiFlashReplica = &model.TiFlashReplicaInfo{
Count: 1,
Available: true,
}
partIDs, err := d.genGlobalIDs(2)
c.Assert(err, IsNil)
partInfo := &model.PartitionInfo{
Type: model.PartitionTypeRange,
Expr: tbl.Columns[0].Name.L,
Enable: true,
Definitions: []model.PartitionDefinition{
{
ID: partIDs[0],
Name: model.NewCIStr("p0"),
LessThan: []string{"100"},
},
{
ID: partIDs[1],
Name: model.NewCIStr("p1"),
LessThan: []string{"200"},
},
},
}
tbl.Partition = partInfo
return tbl
}
| ddl/partition_test.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.000267816853011027,
0.00017798178305383772,
0.00016522921214345843,
0.0001721102889860049,
0.00002142425364581868
] |
{
"id": 2,
"code_window": [
"\n",
"func (e *AnalyzeColumnsExec) open(ranges []*ranger.Range) error {\n",
"\te.resultHandler = &tableResultHandler{}\n",
"\tfirstPartRanges, secondPartRanges := distsql.SplitRangesBySign(ranges, true, false, !hasPkHist(e.handleCols))\n",
"\tfirstResult, err := e.buildResp(firstPartRanges)\n",
"\tif err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfirstPartRanges, secondPartRanges := distsql.SplitRangesAcrossInt64Boundary(ranges, true, false, !hasPkHist(e.handleCols))\n"
],
"file_path": "executor/analyze.go",
"type": "replace",
"edit_start_line_idx": 600
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package mocktikv
import (
"github.com/gogo/protobuf/proto"
"github.com/pingcap/kvproto/pkg/errorpb"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/tidb/ddl/placement"
)
// Session stores session scope rpc data.
type Session struct {
cluster *Cluster
mvccStore MVCCStore
	// storeID stores the store ID for the current request.
storeID uint64
// startKey is used for handling normal request.
startKey []byte
endKey []byte
// rawStartKey is used for handling coprocessor request.
rawStartKey []byte
rawEndKey []byte
// isolationLevel is used for current request.
isolationLevel kvrpcpb.IsolationLevel
resolvedLocks []uint64
}
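// checkRequestContext validates the request's region, peer, leader and region epoch against the
// mock cluster state, and records the region boundaries and isolation settings for later use.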
func (s *Session) checkRequestContext(ctx *kvrpcpb.Context) *errorpb.Error {
ctxPeer := ctx.GetPeer()
if ctxPeer != nil && ctxPeer.GetStoreId() != s.storeID {
return &errorpb.Error{
Message: *proto.String("store not match"),
StoreNotMatch: &errorpb.StoreNotMatch{},
}
}
region, leaderID := s.cluster.GetRegion(ctx.GetRegionId())
// No region found.
if region == nil {
return &errorpb.Error{
Message: *proto.String("region not found"),
RegionNotFound: &errorpb.RegionNotFound{
RegionId: *proto.Uint64(ctx.GetRegionId()),
},
}
}
var storePeer, leaderPeer *metapb.Peer
for _, p := range region.Peers {
if p.GetStoreId() == s.storeID {
storePeer = p
}
if p.GetId() == leaderID {
leaderPeer = p
}
}
// The Store does not contain a Peer of the Region.
if storePeer == nil {
return &errorpb.Error{
Message: *proto.String("region not found"),
RegionNotFound: &errorpb.RegionNotFound{
RegionId: *proto.Uint64(ctx.GetRegionId()),
},
}
}
// No leader.
if leaderPeer == nil {
return &errorpb.Error{
Message: *proto.String("no leader"),
NotLeader: &errorpb.NotLeader{
RegionId: *proto.Uint64(ctx.GetRegionId()),
},
}
}
	// The Peer on the Store is not the leader. If it's a TiFlash store, we skip this check.
if storePeer.GetId() != leaderPeer.GetId() && !isTiFlashStore(s.cluster.GetStore(storePeer.GetStoreId())) {
return &errorpb.Error{
Message: *proto.String("not leader"),
NotLeader: &errorpb.NotLeader{
RegionId: *proto.Uint64(ctx.GetRegionId()),
Leader: leaderPeer,
},
}
}
// Region epoch does not match.
if !proto.Equal(region.GetRegionEpoch(), ctx.GetRegionEpoch()) {
nextRegion, _ := s.cluster.GetRegionByKey(region.GetEndKey())
currentRegions := []*metapb.Region{region}
if nextRegion != nil {
currentRegions = append(currentRegions, nextRegion)
}
return &errorpb.Error{
Message: *proto.String("epoch not match"),
EpochNotMatch: &errorpb.EpochNotMatch{
CurrentRegions: currentRegions,
},
}
}
s.startKey, s.endKey = region.StartKey, region.EndKey
s.isolationLevel = ctx.IsolationLevel
s.resolvedLocks = ctx.ResolvedLocks
return nil
}
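// checkRequestSize mimics TiKV's raft entry size limit by rejecting requests that are too large.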
func (s *Session) checkRequestSize(size int) *errorpb.Error {
// TiKV has a limitation on raft log size.
// mocktikv has no raft inside, so we check the request's size instead.
if size >= requestMaxSize {
return &errorpb.Error{
RaftEntryTooLarge: &errorpb.RaftEntryTooLarge{},
}
}
return nil
}
func (s *Session) checkRequest(ctx *kvrpcpb.Context, size int) *errorpb.Error {
if err := s.checkRequestContext(ctx); err != nil {
return err
}
return s.checkRequestSize(size)
}
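// checkKeyInRegion reports whether the key falls inside the region recorded by checkRequestContext.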
func (s *Session) checkKeyInRegion(key []byte) bool {
return regionContains(s.startKey, s.endKey, NewMvccKey(key))
}
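// isTiFlashStore reports whether the store carries the TiFlash engine label.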
func isTiFlashStore(store *metapb.Store) bool {
for _, l := range store.GetLabels() {
if l.GetKey() == placement.EngineLabelKey && l.GetValue() == placement.EngineLabelTiFlash {
return true
}
}
return false
}
| store/mockstore/mocktikv/session.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.000179607595782727,
0.0001727012131595984,
0.0001640690170461312,
0.0001738357823342085,
0.0000039273008951568045
] |
{
"id": 3,
"code_window": [
"\t\t\t}\n",
"\t\t\tidxVals = append(idxVals, v)\n",
"\t\t}\n",
"\t\tvar bytes []byte\n",
"\t\tbytes, err = codec.EncodeKey(e.ctx.GetSessionVars().StmtCtx, bytes, idxVals...)\n",
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tvar keyBytes []byte\n",
"\t\tkeyBytes, err = codec.EncodeKey(e.ctx.GetSessionVars().StmtCtx, keyBytes, idxVals...)\n"
],
"file_path": "executor/analyze.go",
"type": "replace",
"edit_start_line_idx": 1106
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"bytes"
"context"
"fmt"
"math"
"math/rand"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/cznic/mathutil"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/distsql"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/store/tikv"
tikvstore "github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/pingcap/tipb/go-tipb"
"go.uber.org/zap"
)
var _ Executor = &AnalyzeExec{}
// AnalyzeExec represents Analyze executor.
type AnalyzeExec struct {
baseExecutor
tasks []*analyzeTask
wg *sync.WaitGroup
opts map[ast.AnalyzeOptionType]uint64
}
var (
	// RandSeed is the seed for the rand package.
	// It's public for testing.
RandSeed = int64(1)
)
const (
maxRegionSampleSize = 1000
maxSketchSize = 10000
)
// Next implements the Executor Next interface.
func (e *AnalyzeExec) Next(ctx context.Context, req *chunk.Chunk) error {
concurrency, err := getBuildStatsConcurrency(e.ctx)
if err != nil {
return err
}
taskCh := make(chan *analyzeTask, len(e.tasks))
resultCh := make(chan analyzeResult, len(e.tasks))
e.wg.Add(concurrency)
for i := 0; i < concurrency; i++ {
go e.analyzeWorker(taskCh, resultCh, i == 0)
}
for _, task := range e.tasks {
statistics.AddNewAnalyzeJob(task.job)
}
for _, task := range e.tasks {
taskCh <- task
}
close(taskCh)
statsHandle := domain.GetDomain(e.ctx).StatsHandle()
panicCnt := 0
pruneMode := variable.PartitionPruneMode(e.ctx.GetSessionVars().PartitionPruneMode.Load())
	// needGlobalStats indicates whether we should merge the partition-level stats into global-level stats.
needGlobalStats := pruneMode == variable.Dynamic
type globalStatsKey struct {
tableID int64
indexID int64
}
type globalStatsInfo struct {
isIndex int
		// When `isIndex == 0`, idxID is the column ID.
		// Otherwise, idxID is the index ID.
idxID int64
statsVersion int
}
	// globalStatsMap records which partition tables and corresponding indexes need global-level stats.
	// The key identifies the table ID and index ID; the value carries the additional information
	// needed to build the global-level stats.
globalStatsMap := make(map[globalStatsKey]globalStatsInfo)
finishJobWithLogFn := func(ctx context.Context, job *statistics.AnalyzeJob, meetError bool) {
job.Finish(meetError)
if job != nil {
logutil.Logger(ctx).Info(fmt.Sprintf("analyze table `%s`.`%s` has %s", job.DBName, job.TableName, job.State),
zap.String("partition", job.PartitionName),
zap.String("job info", job.JobInfo),
zap.Time("start time", job.StartTime),
zap.Time("end time", job.EndTime),
zap.String("cost", job.EndTime.Sub(job.StartTime).String()))
}
}
for panicCnt < concurrency {
result, ok := <-resultCh
if !ok {
break
}
if result.Err != nil {
err = result.Err
if err == errAnalyzeWorkerPanic {
panicCnt++
} else {
logutil.Logger(ctx).Error("analyze failed", zap.Error(err))
}
finishJobWithLogFn(ctx, result.job, true)
continue
}
statisticsID := result.TableID.GetStatisticsID()
for i, hg := range result.Hist {
if result.TableID.IsPartitionTable() && needGlobalStats {
				// If the result does not belong to an index's statistics, set idxID to -1 to distinguish it.
idxID := int64(-1)
if result.IsIndex != 0 {
idxID = hg.ID
}
globalStatsID := globalStatsKey{result.TableID.TableID, idxID}
if _, ok := globalStatsMap[globalStatsID]; !ok {
globalStatsMap[globalStatsID] = globalStatsInfo{result.IsIndex, hg.ID, result.StatsVer}
}
}
err1 := statsHandle.SaveStatsToStorage(statisticsID, result.Count, result.IsIndex, hg, result.Cms[i], result.TopNs[i], result.Fms[i], result.StatsVer, 1)
if err1 != nil {
err = err1
logutil.Logger(ctx).Error("save stats to storage failed", zap.Error(err))
finishJobWithLogFn(ctx, result.job, true)
continue
}
}
if err1 := statsHandle.SaveExtendedStatsToStorage(statisticsID, result.ExtStats, false); err1 != nil {
err = err1
logutil.Logger(ctx).Error("save extended stats to storage failed", zap.Error(err))
finishJobWithLogFn(ctx, result.job, true)
} else {
finishJobWithLogFn(ctx, result.job, false)
}
}
for _, task := range e.tasks {
statistics.MoveToHistory(task.job)
}
if err != nil {
return err
}
if needGlobalStats {
for globalStatsID, info := range globalStatsMap {
globalStats, err := statsHandle.MergePartitionStats2GlobalStatsByTableID(e.ctx, e.opts, infoschema.GetInfoSchema(e.ctx), globalStatsID.tableID, info.isIndex, info.idxID)
if err != nil {
if types.ErrPartitionStatsMissing.Equal(err) {
					// When some partition-level stats are missing, report a warning.
e.ctx.GetSessionVars().StmtCtx.AppendWarning(err)
continue
}
return err
}
for i := 0; i < globalStats.Num; i++ {
hg, cms, topN, fms := globalStats.Hg[i], globalStats.Cms[i], globalStats.TopN[i], globalStats.Fms[i]
err = statsHandle.SaveStatsToStorage(globalStatsID.tableID, globalStats.Count, info.isIndex, hg, cms, topN, fms, info.statsVersion, 1)
if err != nil {
logutil.Logger(ctx).Error("save global-level stats to storage failed", zap.Error(err))
}
}
}
}
return statsHandle.Update(infoschema.GetInfoSchema(e.ctx))
}
func getBuildStatsConcurrency(ctx sessionctx.Context) (int, error) {
sessionVars := ctx.GetSessionVars()
concurrency, err := variable.GetSessionSystemVar(sessionVars, variable.TiDBBuildStatsConcurrency)
if err != nil {
return 0, err
}
c, err := strconv.ParseInt(concurrency, 10, 64)
return int(c), err
}
type taskType int
const (
colTask taskType = iota
idxTask
fastTask
pkIncrementalTask
idxIncrementalTask
)
type analyzeTask struct {
taskType taskType
idxExec *AnalyzeIndexExec
colExec *AnalyzeColumnsExec
fastExec *AnalyzeFastExec
idxIncrementalExec *analyzeIndexIncrementalExec
colIncrementalExec *analyzePKIncrementalExec
job *statistics.AnalyzeJob
}
var errAnalyzeWorkerPanic = errors.New("analyze worker panic")
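// analyzeWorker consumes analyze tasks from taskCh, executes them, and sends the results to resultCh.
// The worker started with isCloseChanThread waits for all workers to finish and then closes resultCh.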
func (e *AnalyzeExec) analyzeWorker(taskCh <-chan *analyzeTask, resultCh chan<- analyzeResult, isCloseChanThread bool) {
var task *analyzeTask
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
stackSize := runtime.Stack(buf, false)
buf = buf[:stackSize]
logutil.BgLogger().Error("analyze worker panicked", zap.String("stack", string(buf)))
metrics.PanicCounter.WithLabelValues(metrics.LabelAnalyze).Inc()
resultCh <- analyzeResult{
Err: errAnalyzeWorkerPanic,
job: task.job,
}
}
e.wg.Done()
if isCloseChanThread {
e.wg.Wait()
close(resultCh)
}
}()
for {
var ok bool
task, ok = <-taskCh
if !ok {
break
}
task.job.Start()
switch task.taskType {
case colTask:
task.colExec.job = task.job
for _, result := range analyzeColumnsPushdown(task.colExec) {
resultCh <- result
}
case idxTask:
task.idxExec.job = task.job
resultCh <- analyzeIndexPushdown(task.idxExec)
case fastTask:
task.fastExec.job = task.job
task.job.Start()
for _, result := range analyzeFastExec(task.fastExec) {
resultCh <- result
}
case pkIncrementalTask:
task.colIncrementalExec.job = task.job
resultCh <- analyzePKIncremental(task.colIncrementalExec)
case idxIncrementalTask:
task.idxIncrementalExec.job = task.job
resultCh <- analyzeIndexIncremental(task.idxIncrementalExec)
}
}
}
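// analyzeIndexPushdown pushes the analyze request for one index down to TiKV and builds its
// histogram, CMSketch, TopN and FMSketch from the responses.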
func analyzeIndexPushdown(idxExec *AnalyzeIndexExec) analyzeResult {
ranges := ranger.FullRange()
	// For a single-column index, we do not load null rows from TiKV, so the built histogram does not include
	// null values, and its `NullCount` is set from the result of another distsql call that fetches the null rows.
	// For a multi-column index, we cannot define null for the rows, so we still use the full range, and rows
	// containing null fields may exist in the built histograms. Note that the `NullCount` of histograms for
	// multi-column indexes is always 0 in that case.
if len(idxExec.idxInfo.Columns) == 1 {
ranges = ranger.FullNotNullRange()
}
hist, cms, fms, topN, err := idxExec.buildStats(ranges, true)
if err != nil {
return analyzeResult{Err: err, job: idxExec.job}
}
var statsVer = statistics.Version1
if idxExec.analyzePB.IdxReq.Version != nil {
statsVer = int(*idxExec.analyzePB.IdxReq.Version)
}
result := analyzeResult{
TableID: idxExec.tableID,
Hist: []*statistics.Histogram{hist},
Cms: []*statistics.CMSketch{cms},
TopNs: []*statistics.TopN{topN},
Fms: []*statistics.FMSketch{fms},
IsIndex: 1,
job: idxExec.job,
StatsVer: statsVer,
}
result.Count = hist.NullCount
if hist.Len() > 0 {
result.Count += hist.Buckets[hist.Len()-1].Count
}
if topN.TotalCount() > 0 {
result.Count += int64(topN.TotalCount())
}
return result
}
// AnalyzeIndexExec represents analyze index push down executor.
type AnalyzeIndexExec struct {
ctx sessionctx.Context
tableID core.AnalyzeTableID
idxInfo *model.IndexInfo
isCommonHandle bool
concurrency int
analyzePB *tipb.AnalyzeReq
result distsql.SelectResult
countNullRes distsql.SelectResult
opts map[ast.AnalyzeOptionType]uint64
job *statistics.AnalyzeJob
}
// fetchAnalyzeResult builds and dispatches the `kv.Request` from the given ranges, and stores the `SelectResult`
// in the corresponding field based on the `isNullRange` argument, which indicates whether the range is the
// special null range used to get the null count for a single-column index.
func (e *AnalyzeIndexExec) fetchAnalyzeResult(ranges []*ranger.Range, isNullRange bool) error {
var builder distsql.RequestBuilder
var kvReqBuilder *distsql.RequestBuilder
if e.isCommonHandle && e.idxInfo.Primary {
kvReqBuilder = builder.SetHandleRangesForTables(e.ctx.GetSessionVars().StmtCtx, []int64{e.tableID.GetStatisticsID()}, true, ranges, nil)
} else {
kvReqBuilder = builder.SetIndexRangesForTables(e.ctx.GetSessionVars().StmtCtx, []int64{e.tableID.GetStatisticsID()}, e.idxInfo.ID, ranges)
}
kvReq, err := kvReqBuilder.
SetAnalyzeRequest(e.analyzePB).
SetStartTS(math.MaxUint64).
SetKeepOrder(true).
SetConcurrency(e.concurrency).
Build()
if err != nil {
return err
}
ctx := context.TODO()
result, err := distsql.Analyze(ctx, e.ctx.GetClient(), kvReq, e.ctx.GetSessionVars().KVVars, e.ctx.GetSessionVars().InRestrictedSQL, e.ctx.GetSessionVars().StmtCtx.MemTracker)
if err != nil {
return err
}
if isNullRange {
e.countNullRes = result
} else {
e.result = result
}
return nil
}
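// open fetches the analyze result for the given ranges, and additionally for the null range when considerNull is set and the index has a single column.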
func (e *AnalyzeIndexExec) open(ranges []*ranger.Range, considerNull bool) error {
err := e.fetchAnalyzeResult(ranges, false)
if err != nil {
return err
}
if considerNull && len(e.idxInfo.Columns) == 1 {
ranges = ranger.NullRange()
err = e.fetchAnalyzeResult(ranges, true)
if err != nil {
return err
}
}
return nil
}
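// updateIndexResult merges one AnalyzeIndexResp into the histogram, CMSketch, FMSketch and TopN that are being built.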
func updateIndexResult(
ctx *stmtctx.StatementContext,
resp *tipb.AnalyzeIndexResp,
job *statistics.AnalyzeJob,
hist *statistics.Histogram,
cms *statistics.CMSketch,
fms *statistics.FMSketch,
topn *statistics.TopN,
idxInfo *model.IndexInfo,
numBuckets int,
numTopN int,
statsVer int,
) (
*statistics.Histogram,
*statistics.CMSketch,
*statistics.FMSketch,
*statistics.TopN,
error,
) {
var err error
needCMS := cms != nil
respHist := statistics.HistogramFromProto(resp.Hist)
if job != nil {
job.Update(int64(respHist.TotalRowCount()))
}
hist, err = statistics.MergeHistograms(ctx, hist, respHist, numBuckets, statsVer)
if err != nil {
return nil, nil, nil, nil, err
}
if needCMS {
if resp.Cms == nil {
logutil.Logger(context.TODO()).Warn("nil CMS in response", zap.String("table", idxInfo.Table.O), zap.String("index", idxInfo.Name.O))
} else {
cm, tmpTopN := statistics.CMSketchAndTopNFromProto(resp.Cms)
if err := cms.MergeCMSketch(cm); err != nil {
return nil, nil, nil, nil, err
}
statistics.MergeTopNAndUpdateCMSketch(topn, tmpTopN, cms, uint32(numTopN))
}
}
if fms != nil && resp.Collector != nil && resp.Collector.FmSketch != nil {
fms.MergeFMSketch(statistics.FMSketchFromProto(resp.Collector.FmSketch))
}
return hist, cms, fms, topn, nil
}
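// buildStatsFromResult reads all responses from the SelectResult and merges them into the index histogram, CMSketch, FMSketch and TopN.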
func (e *AnalyzeIndexExec) buildStatsFromResult(result distsql.SelectResult, needCMS bool) (*statistics.Histogram, *statistics.CMSketch, *statistics.FMSketch, *statistics.TopN, error) {
failpoint.Inject("buildStatsFromResult", func(val failpoint.Value) {
if val.(bool) {
failpoint.Return(nil, nil, nil, nil, errors.New("mock buildStatsFromResult error"))
}
})
hist := &statistics.Histogram{}
var cms *statistics.CMSketch
var topn *statistics.TopN
if needCMS {
cms = statistics.NewCMSketch(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]))
topn = statistics.NewTopN(int(e.opts[ast.AnalyzeOptNumTopN]))
}
fms := statistics.NewFMSketch(maxSketchSize)
statsVer := statistics.Version1
if e.analyzePB.IdxReq.Version != nil {
statsVer = int(*e.analyzePB.IdxReq.Version)
}
for {
data, err := result.NextRaw(context.TODO())
if err != nil {
return nil, nil, nil, nil, err
}
if data == nil {
break
}
resp := &tipb.AnalyzeIndexResp{}
err = resp.Unmarshal(data)
if err != nil {
return nil, nil, nil, nil, err
}
hist, cms, fms, topn, err = updateIndexResult(e.ctx.GetSessionVars().StmtCtx, resp, e.job, hist, cms, fms, topn,
e.idxInfo, int(e.opts[ast.AnalyzeOptNumBuckets]), int(e.opts[ast.AnalyzeOptNumTopN]), statsVer)
if err != nil {
return nil, nil, nil, nil, err
}
}
if needCMS && topn.TotalCount() > 0 {
hist.RemoveVals(topn.TopN)
}
if needCMS && cms != nil {
cms.CalcDefaultValForAnalyze(uint64(hist.NDV))
}
return hist, cms, fms, topn, nil
}
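// buildStats opens the results for the given ranges and builds the index statistics, filling NullCount from the separate null-range result.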
func (e *AnalyzeIndexExec) buildStats(ranges []*ranger.Range, considerNull bool) (hist *statistics.Histogram, cms *statistics.CMSketch, fms *statistics.FMSketch, topN *statistics.TopN, err error) {
if err = e.open(ranges, considerNull); err != nil {
return nil, nil, nil, nil, err
}
defer func() {
err1 := closeAll(e.result, e.countNullRes)
if err == nil {
err = err1
}
}()
hist, cms, fms, topN, err = e.buildStatsFromResult(e.result, true)
if err != nil {
return nil, nil, nil, nil, err
}
if e.countNullRes != nil {
nullHist, _, _, _, err := e.buildStatsFromResult(e.countNullRes, false)
if err != nil {
return nil, nil, nil, nil, err
}
if l := nullHist.Len(); l > 0 {
hist.NullCount = nullHist.Buckets[l-1].Count
}
}
hist.ID = e.idxInfo.ID
return hist, cms, fms, topN, nil
}
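// analyzeColumnsPushdown builds the column statistics from the pushed-down analyze request and wraps them into analyzeResults.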
func analyzeColumnsPushdown(colExec *AnalyzeColumnsExec) []analyzeResult {
var ranges []*ranger.Range
if hc := colExec.handleCols; hc != nil {
if hc.IsInt() {
ranges = ranger.FullIntRange(mysql.HasUnsignedFlag(hc.GetCol(0).RetType.Flag))
} else {
ranges = ranger.FullNotNullRange()
}
} else {
ranges = ranger.FullIntRange(false)
}
collExtStats := colExec.ctx.GetSessionVars().EnableExtendedStats
hists, cms, topNs, fms, extStats, err := colExec.buildStats(ranges, collExtStats)
if err != nil {
return []analyzeResult{{Err: err, job: colExec.job}}
}
if hasPkHist(colExec.handleCols) {
PKresult := analyzeResult{
TableID: colExec.tableID,
Hist: hists[:1],
Cms: cms[:1],
TopNs: topNs[:1],
Fms: fms[:1],
ExtStats: nil,
job: nil,
StatsVer: statistics.Version1,
}
PKresult.Count = int64(PKresult.Hist[0].TotalRowCount())
restResult := analyzeResult{
TableID: colExec.tableID,
Hist: hists[1:],
Cms: cms[1:],
TopNs: topNs[1:],
Fms: fms[1:],
ExtStats: extStats,
job: colExec.job,
StatsVer: colExec.analyzeVer,
}
restResult.Count = PKresult.Count
return []analyzeResult{PKresult, restResult}
}
var result []analyzeResult
if colExec.analyzePB.Tp == tipb.AnalyzeType_TypeMixed {
result = append(result, analyzeResult{
TableID: colExec.tableID,
Hist: []*statistics.Histogram{hists[0]},
Cms: []*statistics.CMSketch{cms[0]},
TopNs: []*statistics.TopN{topNs[0]},
Fms: []*statistics.FMSketch{nil},
IsIndex: 1,
job: colExec.job,
StatsVer: colExec.analyzeVer,
})
hists = hists[1:]
cms = cms[1:]
topNs = topNs[1:]
}
colResult := analyzeResult{
TableID: colExec.tableID,
Hist: hists,
Cms: cms,
TopNs: topNs,
Fms: fms,
ExtStats: extStats,
job: colExec.job,
StatsVer: colExec.analyzeVer,
}
colResult.Count = int64(colResult.Hist[0].TotalRowCount())
if colResult.StatsVer == statistics.Version2 {
colResult.Count += int64(topNs[0].TotalCount())
}
return append(result, colResult)
}
// AnalyzeColumnsExec represents Analyze columns push down executor.
type AnalyzeColumnsExec struct {
ctx sessionctx.Context
tableID core.AnalyzeTableID
colsInfo []*model.ColumnInfo
handleCols core.HandleCols
concurrency int
analyzePB *tipb.AnalyzeReq
commonHandle *model.IndexInfo
resultHandler *tableResultHandler
opts map[ast.AnalyzeOptionType]uint64
job *statistics.AnalyzeJob
analyzeVer int
}
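// open splits the ranges by sign, builds the distsql results for both parts and opens the table result handler.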
func (e *AnalyzeColumnsExec) open(ranges []*ranger.Range) error {
e.resultHandler = &tableResultHandler{}
firstPartRanges, secondPartRanges := distsql.SplitRangesBySign(ranges, true, false, !hasPkHist(e.handleCols))
firstResult, err := e.buildResp(firstPartRanges)
if err != nil {
return err
}
if len(secondPartRanges) == 0 {
e.resultHandler.open(nil, firstResult)
return nil
}
var secondResult distsql.SelectResult
secondResult, err = e.buildResp(secondPartRanges)
if err != nil {
return err
}
e.resultHandler.open(firstResult, secondResult)
return nil
}
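// buildResp builds the kv.Request for the given ranges, sends the analyze request and returns the SelectResult.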
func (e *AnalyzeColumnsExec) buildResp(ranges []*ranger.Range) (distsql.SelectResult, error) {
var builder distsql.RequestBuilder
reqBuilder := builder.SetHandleRangesForTables(e.ctx.GetSessionVars().StmtCtx, []int64{e.tableID.GetStatisticsID()}, e.handleCols != nil && !e.handleCols.IsInt(), ranges, nil)
// Always set KeepOrder of the request to true, in order to compute
// the correct `correlation` of the columns.
kvReq, err := reqBuilder.
SetAnalyzeRequest(e.analyzePB).
SetStartTS(math.MaxUint64).
SetKeepOrder(true).
SetConcurrency(e.concurrency).
Build()
if err != nil {
return nil, err
}
ctx := context.TODO()
result, err := distsql.Analyze(ctx, e.ctx.GetClient(), kvReq, e.ctx.GetSessionVars().KVVars, e.ctx.GetSessionVars().InRestrictedSQL, e.ctx.GetSessionVars().StmtCtx.MemTracker)
if err != nil {
return nil, err
}
return result, nil
}
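// buildStats builds the histograms, CMSketches, TopNs, FMSketches and, if requested, the extended statistics of the columns from the pushed-down results.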
func (e *AnalyzeColumnsExec) buildStats(ranges []*ranger.Range, needExtStats bool) (hists []*statistics.Histogram, cms []*statistics.CMSketch, topNs []*statistics.TopN, fms []*statistics.FMSketch, extStats *statistics.ExtendedStatsColl, err error) {
if err = e.open(ranges); err != nil {
return nil, nil, nil, nil, nil, err
}
defer func() {
if err1 := e.resultHandler.Close(); err1 != nil {
hists = nil
cms = nil
extStats = nil
err = err1
}
}()
var handleHist *statistics.Histogram
var handleCms *statistics.CMSketch
var handleFms *statistics.FMSketch
var handleTopn *statistics.TopN
statsVer := statistics.Version1
if e.analyzePB.Tp == tipb.AnalyzeType_TypeMixed {
handleHist = &statistics.Histogram{}
handleCms = statistics.NewCMSketch(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]))
handleTopn = statistics.NewTopN(int(e.opts[ast.AnalyzeOptNumTopN]))
handleFms = statistics.NewFMSketch(maxSketchSize)
if e.analyzePB.IdxReq.Version != nil {
statsVer = int(*e.analyzePB.IdxReq.Version)
}
}
pkHist := &statistics.Histogram{}
collectors := make([]*statistics.SampleCollector, len(e.colsInfo))
for i := range collectors {
collectors[i] = &statistics.SampleCollector{
IsMerger: true,
FMSketch: statistics.NewFMSketch(maxSketchSize),
MaxSampleSize: int64(e.opts[ast.AnalyzeOptNumSamples]),
CMSketch: statistics.NewCMSketch(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth])),
}
}
for {
data, err1 := e.resultHandler.nextRaw(context.TODO())
if err1 != nil {
return nil, nil, nil, nil, nil, err1
}
if data == nil {
break
}
sc := e.ctx.GetSessionVars().StmtCtx
var colResp *tipb.AnalyzeColumnsResp
if e.analyzePB.Tp == tipb.AnalyzeType_TypeMixed {
resp := &tipb.AnalyzeMixedResp{}
err = resp.Unmarshal(data)
if err != nil {
return nil, nil, nil, nil, nil, err
}
colResp = resp.ColumnsResp
handleHist, handleCms, handleFms, handleTopn, err = updateIndexResult(sc, resp.IndexResp, nil, handleHist,
handleCms, handleFms, handleTopn, e.commonHandle, int(e.opts[ast.AnalyzeOptNumBuckets]),
int(e.opts[ast.AnalyzeOptNumTopN]), statsVer)
if err != nil {
return nil, nil, nil, nil, nil, err
}
} else {
colResp = &tipb.AnalyzeColumnsResp{}
err = colResp.Unmarshal(data)
}
rowCount := int64(0)
if hasPkHist(e.handleCols) {
respHist := statistics.HistogramFromProto(colResp.PkHist)
rowCount = int64(respHist.TotalRowCount())
pkHist, err = statistics.MergeHistograms(sc, pkHist, respHist, int(e.opts[ast.AnalyzeOptNumBuckets]), statistics.Version1)
if err != nil {
return nil, nil, nil, nil, nil, err
}
}
for i, rc := range colResp.Collectors {
respSample := statistics.SampleCollectorFromProto(rc)
rowCount = respSample.Count + respSample.NullCount
collectors[i].MergeSampleCollector(sc, respSample)
}
e.job.Update(rowCount)
}
timeZone := e.ctx.GetSessionVars().Location()
if hasPkHist(e.handleCols) {
pkInfo := e.handleCols.GetCol(0)
pkHist.ID = pkInfo.ID
err = pkHist.DecodeTo(pkInfo.RetType, timeZone)
if err != nil {
return nil, nil, nil, nil, nil, err
}
hists = append(hists, pkHist)
cms = append(cms, nil)
topNs = append(topNs, nil)
fms = append(fms, nil)
}
for i, col := range e.colsInfo {
if e.analyzeVer < 2 {
// In analyze version 2, we don't collect TopN this way. We will collect TopN from samples in `BuildColumnHistAndTopN()` below.
err := collectors[i].ExtractTopN(uint32(e.opts[ast.AnalyzeOptNumTopN]), e.ctx.GetSessionVars().StmtCtx, &col.FieldType, timeZone)
if err != nil {
return nil, nil, nil, nil, nil, err
}
topNs = append(topNs, collectors[i].TopN)
}
for j, s := range collectors[i].Samples {
collectors[i].Samples[j].Ordinal = j
collectors[i].Samples[j].Value, err = tablecodec.DecodeColumnValue(s.Value.GetBytes(), &col.FieldType, timeZone)
if err != nil {
return nil, nil, nil, nil, nil, err
}
// When collation is enabled, we store the Key representation of the sampling data. So we set its kind to `Bytes` here
// to avoid converting it to its Key representation once more.
if collectors[i].Samples[j].Value.Kind() == types.KindString {
collectors[i].Samples[j].Value.SetBytes(collectors[i].Samples[j].Value.GetBytes())
}
}
var hg *statistics.Histogram
var err error
var topn *statistics.TopN
if e.analyzeVer < 2 {
hg, err = statistics.BuildColumn(e.ctx, int64(e.opts[ast.AnalyzeOptNumBuckets]), col.ID, collectors[i], &col.FieldType)
} else {
hg, topn, err = statistics.BuildColumnHistAndTopN(e.ctx, int(e.opts[ast.AnalyzeOptNumBuckets]), int(e.opts[ast.AnalyzeOptNumTopN]), col.ID, collectors[i], &col.FieldType)
topNs = append(topNs, topn)
}
if err != nil {
return nil, nil, nil, nil, nil, err
}
hists = append(hists, hg)
collectors[i].CMSketch.CalcDefaultValForAnalyze(uint64(hg.NDV))
cms = append(cms, collectors[i].CMSketch)
fms = append(fms, collectors[i].FMSketch)
}
if needExtStats {
statsHandle := domain.GetDomain(e.ctx).StatsHandle()
extStats, err = statsHandle.BuildExtendedStats(e.tableID.GetStatisticsID(), e.colsInfo, collectors)
if err != nil {
return nil, nil, nil, nil, nil, err
}
}
if handleHist != nil {
handleHist.ID = e.commonHandle.ID
if handleTopn != nil && handleTopn.TotalCount() > 0 {
handleHist.RemoveVals(handleTopn.TopN)
}
if handleCms != nil {
handleCms.CalcDefaultValForAnalyze(uint64(handleHist.NDV))
}
hists = append([]*statistics.Histogram{handleHist}, hists...)
cms = append([]*statistics.CMSketch{handleCms}, cms...)
fms = append([]*statistics.FMSketch{handleFms}, fms...)
topNs = append([]*statistics.TopN{handleTopn}, topNs...)
}
return hists, cms, topNs, fms, extStats, nil
}
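// hasPkHist reports whether the handle is an int handle, in which case its histogram is collected together with the columns.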
func hasPkHist(handleCols core.HandleCols) bool {
return handleCols != nil && handleCols.IsInt()
}
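// pkColsCount returns the number of columns in the handle, or 0 if there is no handle.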
func pkColsCount(handleCols core.HandleCols) int {
if handleCols == nil {
return 0
}
return handleCols.NumCols()
}
var (
fastAnalyzeHistogramSample = metrics.FastAnalyzeHistogram.WithLabelValues(metrics.LblGeneral, "sample")
fastAnalyzeHistogramAccessRegions = metrics.FastAnalyzeHistogram.WithLabelValues(metrics.LblGeneral, "access_regions")
fastAnalyzeHistogramScanKeys = metrics.FastAnalyzeHistogram.WithLabelValues(metrics.LblGeneral, "scan_keys")
)
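// analyzeFastExec builds the statistics by fast sampling and wraps them into analyzeResults for the columns and indexes.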
func analyzeFastExec(exec *AnalyzeFastExec) []analyzeResult {
hists, cms, topNs, fms, err := exec.buildStats()
if err != nil {
return []analyzeResult{{Err: err, job: exec.job}}
}
var results []analyzeResult
pkColCount := pkColsCount(exec.handleCols)
if len(exec.idxsInfo) > 0 {
for i := pkColCount + len(exec.colsInfo); i < len(hists); i++ {
idxResult := analyzeResult{
TableID: exec.tableID,
Hist: []*statistics.Histogram{hists[i]},
Cms: []*statistics.CMSketch{cms[i]},
TopNs: []*statistics.TopN{topNs[i]},
Fms: []*statistics.FMSketch{nil},
IsIndex: 1,
Count: hists[i].NullCount,
job: exec.job,
StatsVer: statistics.Version1,
}
if hists[i].Len() > 0 {
idxResult.Count += hists[i].Buckets[hists[i].Len()-1].Count
}
if exec.rowCount != 0 {
idxResult.Count = exec.rowCount
}
results = append(results, idxResult)
}
}
hist := hists[0]
colResult := analyzeResult{
TableID: exec.tableID,
Hist: hists[:pkColCount+len(exec.colsInfo)],
Cms: cms[:pkColCount+len(exec.colsInfo)],
TopNs: topNs[:pkColCount+len(exec.colsInfo)],
Fms: fms[:pkColCount+len(exec.colsInfo)],
Count: hist.NullCount,
job: exec.job,
StatsVer: statistics.Version1,
}
if hist.Len() > 0 {
colResult.Count += hist.Buckets[hist.Len()-1].Count
}
if exec.rowCount != 0 {
colResult.Count = exec.rowCount
}
results = append(results, colResult)
return results
}
// AnalyzeFastExec represents Fast Analyze executor.
type AnalyzeFastExec struct {
ctx sessionctx.Context
tableID core.AnalyzeTableID
handleCols core.HandleCols
colsInfo []*model.ColumnInfo
idxsInfo []*model.IndexInfo
concurrency int
opts map[ast.AnalyzeOptionType]uint64
tblInfo *model.TableInfo
cache *tikv.RegionCache
wg *sync.WaitGroup
rowCount int64
sampCursor int32
sampTasks []*tikv.KeyLocation
scanTasks []*tikv.KeyLocation
collectors []*statistics.SampleCollector
randSeed int64
job *statistics.AnalyzeJob
estSampStep uint32
}
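// calculateEstimateSampleStep estimates the sampling step from the historical row count, falling back to a count(*) query when the table has not been analyzed before.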
func (e *AnalyzeFastExec) calculateEstimateSampleStep() (err error) {
exec := e.ctx.(sqlexec.RestrictedSQLExecutor)
var stmt ast.StmtNode
stmt, err = exec.ParseWithParams(context.TODO(), "select flag from mysql.stats_histograms where table_id = %?", e.tableID.GetStatisticsID())
if err != nil {
return
}
var rows []chunk.Row
rows, _, err = exec.ExecRestrictedStmt(context.TODO(), stmt)
if err != nil {
return
}
var historyRowCount uint64
hasBeenAnalyzed := len(rows) != 0 && rows[0].GetInt64(0) == statistics.AnalyzeFlag
if hasBeenAnalyzed {
historyRowCount = uint64(domain.GetDomain(e.ctx).StatsHandle().GetPartitionStats(e.tblInfo, e.tableID.GetStatisticsID()).Count)
} else {
dbInfo, ok := domain.GetDomain(e.ctx).InfoSchema().SchemaByTable(e.tblInfo)
if !ok {
err = errors.Errorf("database not found for table '%s'", e.tblInfo.Name)
return
}
var rollbackFn func() error
rollbackFn, err = e.activateTxnForRowCount()
if err != nil {
return
}
defer func() {
if rollbackFn != nil {
err = rollbackFn()
}
}()
sql := new(strings.Builder)
sqlexec.MustFormatSQL(sql, "select count(*) from %n.%n", dbInfo.Name.L, e.tblInfo.Name.L)
if e.tblInfo.ID != e.tableID.GetStatisticsID() {
for _, definition := range e.tblInfo.Partition.Definitions {
if definition.ID == e.tableID.GetStatisticsID() {
sqlexec.MustFormatSQL(sql, " partition(%n)", definition.Name.L)
break
}
}
}
var rs sqlexec.RecordSet
rs, err = e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql.String())
if err != nil {
return
}
if rs == nil {
err = errors.Trace(errors.Errorf("empty record set"))
return
}
defer terror.Call(rs.Close)
chk := rs.NewChunk()
err = rs.Next(context.TODO(), chk)
if err != nil {
return
}
e.rowCount = chk.GetRow(0).GetInt64(0)
historyRowCount = uint64(e.rowCount)
}
totalSampSize := e.opts[ast.AnalyzeOptNumSamples]
e.estSampStep = uint32(historyRowCount / totalSampSize)
return
}
func (e *AnalyzeFastExec) activateTxnForRowCount() (rollbackFn func() error, err error) {
txn, err := e.ctx.Txn(true)
if err != nil {
if kv.ErrInvalidTxn.Equal(err) {
_, err := e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "begin")
if err != nil {
return nil, errors.Trace(err)
}
rollbackFn = func() error {
_, err := e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "rollback")
return err
}
} else {
return nil, errors.Trace(err)
}
}
txn.SetOption(tikvstore.Priority, tikvstore.PriorityLow)
txn.SetOption(tikvstore.IsolationLevel, tikvstore.RC)
txn.SetOption(tikvstore.NotFillCache, true)
return rollbackFn, nil
}
// buildSampTask builds the sample tasks and scan tasks by locating the regions that cover the table's handle key range.
func (e *AnalyzeFastExec) buildSampTask() (err error) {
bo := tikv.NewBackofferWithVars(context.Background(), 500, nil)
store, _ := e.ctx.GetStore().(tikv.Storage)
e.cache = store.GetRegionCache()
accessRegionsCounter := 0
pid := e.tableID.GetStatisticsID()
startKey, endKey := tablecodec.GetTableHandleKeyRange(pid)
targetKey := startKey
for {
// Search for the region which contains the targetKey.
loc, err := e.cache.LocateKey(bo, targetKey)
if err != nil {
return err
}
if bytes.Compare(endKey, loc.StartKey) < 0 {
break
}
accessRegionsCounter++
// Set the next search key.
targetKey = loc.EndKey
// If all the KV pairs in the region belong to the table, add the region to the sample tasks.
if bytes.Compare(startKey, loc.StartKey) <= 0 && len(loc.EndKey) != 0 && bytes.Compare(loc.EndKey, endKey) <= 0 {
e.sampTasks = append(e.sampTasks, loc)
continue
}
e.scanTasks = append(e.scanTasks, loc)
if bytes.Compare(loc.StartKey, startKey) < 0 {
loc.StartKey = startKey
}
if bytes.Compare(endKey, loc.EndKey) < 0 || len(loc.EndKey) == 0 {
loc.EndKey = endKey
break
}
}
fastAnalyzeHistogramAccessRegions.Observe(float64(accessRegionsCounter))
return nil
}
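// decodeValues decodes the sampled row value and its handle into a map from column ID to datum.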
func (e *AnalyzeFastExec) decodeValues(handle kv.Handle, sValue []byte, wantCols map[int64]*types.FieldType) (values map[int64]types.Datum, err error) {
loc := e.ctx.GetSessionVars().Location()
values, err = tablecodec.DecodeRowToDatumMap(sValue, wantCols, loc)
if err != nil || e.handleCols == nil {
return values, err
}
wantCols = make(map[int64]*types.FieldType, e.handleCols.NumCols())
handleColIDs := make([]int64, e.handleCols.NumCols())
for i := 0; i < e.handleCols.NumCols(); i++ {
c := e.handleCols.GetCol(i)
handleColIDs[i] = c.ID
wantCols[c.ID] = c.RetType
}
return tablecodec.DecodeHandleToDatumMap(handle, handleColIDs, wantCols, loc, values)
}
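// getValueByInfo returns the decoded value of the column, or the column's original default value when it is absent from the row.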
func (e *AnalyzeFastExec) getValueByInfo(colInfo *model.ColumnInfo, values map[int64]types.Datum) (types.Datum, error) {
val, ok := values[colInfo.ID]
if !ok {
return table.GetColOriginDefaultValue(e.ctx, colInfo)
}
return val, nil
}
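// updateCollectorSamples decodes one sampled key-value pair and fills the sample at samplePos of the primary key, column and index collectors.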
func (e *AnalyzeFastExec) updateCollectorSamples(sValue []byte, sKey kv.Key, samplePos int32) (err error) {
var handle kv.Handle
handle, err = tablecodec.DecodeRowKey(sKey)
if err != nil {
return err
}
// Decode cols for analyze table
wantCols := make(map[int64]*types.FieldType, len(e.colsInfo))
for _, col := range e.colsInfo {
wantCols[col.ID] = &col.FieldType
}
// Pre-build the index->columns relationship and add any index columns that are not yet in wantCols (for analyze index).
index2Cols := make([][]*model.ColumnInfo, len(e.idxsInfo))
for i, idxInfo := range e.idxsInfo {
for _, idxCol := range idxInfo.Columns {
colInfo := e.tblInfo.Columns[idxCol.Offset]
index2Cols[i] = append(index2Cols[i], colInfo)
wantCols[colInfo.ID] = &colInfo.FieldType
}
}
// Decode the cols value in order.
var values map[int64]types.Datum
values, err = e.decodeValues(handle, sValue, wantCols)
if err != nil {
return err
}
// Update the primary key collector.
pkColsCount := pkColsCount(e.handleCols)
for i := 0; i < pkColsCount; i++ {
col := e.handleCols.GetCol(i)
v, ok := values[col.ID]
if !ok {
return errors.Trace(errors.Errorf("Primary key column not found"))
}
if e.collectors[i].Samples[samplePos] == nil {
e.collectors[i].Samples[samplePos] = &statistics.SampleItem{}
}
e.collectors[i].Samples[samplePos].Handle = handle
e.collectors[i].Samples[samplePos].Value = v
}
// Update the columns' collectors.
for j, colInfo := range e.colsInfo {
v, err := e.getValueByInfo(colInfo, values)
if err != nil {
return err
}
if e.collectors[pkColsCount+j].Samples[samplePos] == nil {
e.collectors[pkColsCount+j].Samples[samplePos] = &statistics.SampleItem{}
}
e.collectors[pkColsCount+j].Samples[samplePos].Handle = handle
e.collectors[pkColsCount+j].Samples[samplePos].Value = v
}
// Update the indexes' collectors.
for j, idxInfo := range e.idxsInfo {
idxVals := make([]types.Datum, 0, len(idxInfo.Columns))
cols := index2Cols[j]
for _, colInfo := range cols {
v, err := e.getValueByInfo(colInfo, values)
if err != nil {
return err
}
idxVals = append(idxVals, v)
}
var bytes []byte
bytes, err = codec.EncodeKey(e.ctx.GetSessionVars().StmtCtx, bytes, idxVals...)
if err != nil {
return err
}
if e.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos] == nil {
e.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos] = &statistics.SampleItem{}
}
e.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos].Handle = handle
e.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos].Value = types.NewBytesDatum(bytes)
}
return nil
}
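// handleBatchSeekResponse fills the collectors with the key-value pairs sampled from one region, up to the needed sample count.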
func (e *AnalyzeFastExec) handleBatchSeekResponse(kvMap map[string][]byte) (err error) {
length := int32(len(kvMap))
newCursor := atomic.AddInt32(&e.sampCursor, length)
samplePos := newCursor - length
for sKey, sValue := range kvMap {
exceedNeededSampleCounts := uint64(samplePos) >= e.opts[ast.AnalyzeOptNumSamples]
if exceedNeededSampleCounts {
atomic.StoreInt32(&e.sampCursor, int32(e.opts[ast.AnalyzeOptNumSamples]))
break
}
err = e.updateCollectorSamples(sValue, kv.Key(sKey), samplePos)
if err != nil {
return err
}
samplePos++
}
return nil
}
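// handleScanIter scans all key-value pairs from the iterator and keeps some of them as samples by reservoir sampling.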
func (e *AnalyzeFastExec) handleScanIter(iter kv.Iterator) (scanKeysSize int, err error) {
rander := rand.New(rand.NewSource(e.randSeed))
sampleSize := int64(e.opts[ast.AnalyzeOptNumSamples])
for ; iter.Valid() && err == nil; err = iter.Next() {
// reservoir sampling
scanKeysSize++
randNum := rander.Int63n(int64(e.sampCursor) + int64(scanKeysSize))
if randNum > sampleSize && e.sampCursor == int32(sampleSize) {
continue
}
p := rander.Int31n(int32(sampleSize))
if e.sampCursor < int32(sampleSize) {
p = e.sampCursor
e.sampCursor++
}
err = e.updateCollectorSamples(iter.Value(), iter.Key(), p)
if err != nil {
return
}
}
return
}
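// handleScanTasks iterates over the scan tasks on a snapshot and returns the total number of scanned keys.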
func (e *AnalyzeFastExec) handleScanTasks(bo *tikv.Backoffer) (keysSize int, err error) {
snapshot := e.ctx.GetStore().GetSnapshot(kv.MaxVersion)
if e.ctx.GetSessionVars().GetReplicaRead().IsFollowerRead() {
snapshot.SetOption(tikvstore.ReplicaRead, tikvstore.ReplicaReadFollower)
}
for _, t := range e.scanTasks {
iter, err := snapshot.Iter(kv.Key(t.StartKey), kv.Key(t.EndKey))
if err != nil {
return keysSize, err
}
size, err := e.handleScanIter(iter)
keysSize += size
if err != nil {
return keysSize, err
}
}
return keysSize, nil
}
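// handleSampTasks is run by each worker goroutine; it samples the regions assigned to workID with a randomized sample step.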
func (e *AnalyzeFastExec) handleSampTasks(workID int, step uint32, err *error) {
defer e.wg.Done()
snapshot := e.ctx.GetStore().GetSnapshot(kv.MaxVersion)
snapshot.SetOption(tikvstore.NotFillCache, true)
snapshot.SetOption(tikvstore.IsolationLevel, tikvstore.RC)
snapshot.SetOption(tikvstore.Priority, tikvstore.PriorityLow)
if e.ctx.GetSessionVars().GetReplicaRead().IsFollowerRead() {
snapshot.SetOption(tikvstore.ReplicaRead, tikvstore.ReplicaReadFollower)
}
rander := rand.New(rand.NewSource(e.randSeed))
for i := workID; i < len(e.sampTasks); i += e.concurrency {
task := e.sampTasks[i]
// randomize the estimate step in range [step - 2 * sqrt(step), step]
if step > 4 { // 2*sqrt(x) < x
lower, upper := step-uint32(2*math.Sqrt(float64(step))), step
step = uint32(rander.Intn(int(upper-lower))) + lower
}
snapshot.SetOption(tikvstore.SampleStep, step)
kvMap := make(map[string][]byte)
var iter kv.Iterator
iter, *err = snapshot.Iter(kv.Key(task.StartKey), kv.Key(task.EndKey))
if *err != nil {
return
}
for iter.Valid() {
kvMap[string(iter.Key())] = iter.Value()
*err = iter.Next()
if *err != nil {
return
}
}
fastAnalyzeHistogramSample.Observe(float64(len(kvMap)))
*err = e.handleBatchSeekResponse(kvMap)
if *err != nil {
return
}
}
}
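// buildColumnStats builds the histogram, CMSketch, TopN and FMSketch of one column from its sample collector.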
func (e *AnalyzeFastExec) buildColumnStats(ID int64, collector *statistics.SampleCollector, tp *types.FieldType, rowCount int64) (*statistics.Histogram, *statistics.CMSketch, *statistics.TopN, *statistics.FMSketch, error) {
sc := e.ctx.GetSessionVars().StmtCtx
data := make([][]byte, 0, len(collector.Samples))
fmSketch := statistics.NewFMSketch(maxSketchSize)
for i, sample := range collector.Samples {
sample.Ordinal = i
if sample.Value.IsNull() {
collector.NullCount++
continue
}
err := fmSketch.InsertValue(sc, sample.Value)
if err != nil {
return nil, nil, nil, nil, err
}
bytes, err := tablecodec.EncodeValue(sc, nil, sample.Value)
if err != nil {
return nil, nil, nil, nil, err
}
data = append(data, bytes)
}
// Build CMSketch.
cmSketch, topN, ndv, scaleRatio := statistics.NewCMSketchAndTopN(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]), data, uint32(e.opts[ast.AnalyzeOptNumTopN]), uint64(rowCount))
// Build Histogram.
hist, err := statistics.BuildColumnHist(e.ctx, int64(e.opts[ast.AnalyzeOptNumBuckets]), ID, collector, tp, rowCount, int64(ndv), collector.NullCount*int64(scaleRatio))
return hist, cmSketch, topN, fmSketch, err
}
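// buildIndexStats builds the histogram, CMSketch and TopN of one index; a CM Sketch is built for every column prefix and merged into one.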
func (e *AnalyzeFastExec) buildIndexStats(idxInfo *model.IndexInfo, collector *statistics.SampleCollector, rowCount int64) (*statistics.Histogram, *statistics.CMSketch, *statistics.TopN, error) {
data := make([][][]byte, len(idxInfo.Columns))
for _, sample := range collector.Samples {
var preLen int
remained := sample.Value.GetBytes()
// We need to insert every column prefix of the index value into the CM Sketch.
for i := 0; i < len(idxInfo.Columns); i++ {
var err error
var value []byte
value, remained, err = codec.CutOne(remained)
if err != nil {
return nil, nil, nil, err
}
preLen += len(value)
data[i] = append(data[i], sample.Value.GetBytes()[:preLen])
}
}
numTop := uint32(e.opts[ast.AnalyzeOptNumTopN])
cmSketch, topN, ndv, scaleRatio := statistics.NewCMSketchAndTopN(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]), data[0], numTop, uint64(rowCount))
// Build CM Sketch for each prefix and merge them into one.
for i := 1; i < len(idxInfo.Columns); i++ {
var curCMSketch *statistics.CMSketch
var curTopN *statistics.TopN
// `ndv` should be the NDV of the full index, so just overwrite it on every iteration.
curCMSketch, curTopN, ndv, scaleRatio = statistics.NewCMSketchAndTopN(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]), data[i], numTop, uint64(rowCount))
err := cmSketch.MergeCMSketch(curCMSketch)
if err != nil {
return nil, nil, nil, err
}
statistics.MergeTopNAndUpdateCMSketch(topN, curTopN, cmSketch, numTop)
}
// Build Histogram.
hist, err := statistics.BuildColumnHist(e.ctx, int64(e.opts[ast.AnalyzeOptNumBuckets]), idxInfo.ID, collector, types.NewFieldType(mysql.TypeBlob), rowCount, int64(ndv), collector.NullCount*int64(scaleRatio))
return hist, cmSketch, topN, err
}
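// runTasks runs the sample tasks concurrently, handles the scan tasks, and builds the statistics from the collected samples.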
func (e *AnalyzeFastExec) runTasks() ([]*statistics.Histogram, []*statistics.CMSketch, []*statistics.TopN, []*statistics.FMSketch, error) {
errs := make([]error, e.concurrency)
pkColCount := pkColsCount(e.handleCols)
// Collect column samples, primary key samples and index samples.
length := len(e.colsInfo) + pkColCount + len(e.idxsInfo)
e.collectors = make([]*statistics.SampleCollector, length)
for i := range e.collectors {
e.collectors[i] = &statistics.SampleCollector{
MaxSampleSize: int64(e.opts[ast.AnalyzeOptNumSamples]),
Samples: make([]*statistics.SampleItem, e.opts[ast.AnalyzeOptNumSamples]),
}
}
e.wg.Add(e.concurrency)
bo := tikv.NewBackofferWithVars(context.Background(), 500, nil)
for i := 0; i < e.concurrency; i++ {
go e.handleSampTasks(i, e.estSampStep, &errs[i])
}
e.wg.Wait()
for _, err := range errs {
if err != nil {
return nil, nil, nil, nil, err
}
}
scanKeysSize, err := e.handleScanTasks(bo)
fastAnalyzeHistogramScanKeys.Observe(float64(scanKeysSize))
if err != nil {
return nil, nil, nil, nil, err
}
stats := domain.GetDomain(e.ctx).StatsHandle()
var rowCount int64 = 0
if stats.Lease() > 0 {
if t := stats.GetPartitionStats(e.tblInfo, e.tableID.GetStatisticsID()); !t.Pseudo {
rowCount = t.Count
}
}
hists, cms, topNs, fms := make([]*statistics.Histogram, length), make([]*statistics.CMSketch, length), make([]*statistics.TopN, length), make([]*statistics.FMSketch, length)
for i := 0; i < length; i++ {
// Build collector properties.
collector := e.collectors[i]
collector.Samples = collector.Samples[:e.sampCursor]
sort.Slice(collector.Samples, func(i, j int) bool { return collector.Samples[i].Handle.Compare(collector.Samples[j].Handle) < 0 })
collector.CalcTotalSize()
// Adjust the row count in case the count of `tblStats` is not accurate and too small.
rowCount = mathutil.MaxInt64(rowCount, int64(len(collector.Samples)))
// Scale the total column size.
if len(collector.Samples) > 0 {
collector.TotalSize *= rowCount / int64(len(collector.Samples))
}
if i < pkColCount {
pkCol := e.handleCols.GetCol(i)
hists[i], cms[i], topNs[i], fms[i], err = e.buildColumnStats(pkCol.ID, e.collectors[i], pkCol.RetType, rowCount)
} else if i < pkColCount+len(e.colsInfo) {
hists[i], cms[i], topNs[i], fms[i], err = e.buildColumnStats(e.colsInfo[i-pkColCount].ID, e.collectors[i], &e.colsInfo[i-pkColCount].FieldType, rowCount)
} else {
hists[i], cms[i], topNs[i], err = e.buildIndexStats(e.idxsInfo[i-pkColCount-len(e.colsInfo)], e.collectors[i], rowCount)
}
if err != nil {
return nil, nil, nil, nil, err
}
}
return hists, cms, topNs, fms, nil
}
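// buildStats sets the random seed, builds the sample tasks and runs them to build the statistics.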
func (e *AnalyzeFastExec) buildStats() (hists []*statistics.Histogram, cms []*statistics.CMSketch, topNs []*statistics.TopN, fms []*statistics.FMSketch, err error) {
// Set the random seed. RandSeed is only overridden in unit tests; in non-test
// environments it keeps its default value 1, so the seed is set to time.Now().UnixNano()
// to make sure the random sequences differ between runs.
if RandSeed == 1 {
atomic.StoreInt64(&e.randSeed, time.Now().UnixNano())
} else {
atomic.StoreInt64(&e.randSeed, RandSeed)
}
err = e.buildSampTask()
if err != nil {
return nil, nil, nil, nil, err
}
return e.runTasks()
}
// AnalyzeTestFastExec is used to test fast sampling in unit tests.
type AnalyzeTestFastExec struct {
AnalyzeFastExec
Ctx sessionctx.Context
TableID core.AnalyzeTableID
HandleCols core.HandleCols
ColsInfo []*model.ColumnInfo
IdxsInfo []*model.IndexInfo
Concurrency int
Collectors []*statistics.SampleCollector
TblInfo *model.TableInfo
Opts map[ast.AnalyzeOptionType]uint64
}
// TestFastSample only tests fast sampling in unit tests.
func (e *AnalyzeTestFastExec) TestFastSample() error {
e.ctx = e.Ctx
e.handleCols = e.HandleCols
e.colsInfo = e.ColsInfo
e.idxsInfo = e.IdxsInfo
e.concurrency = e.Concurrency
e.tableID = e.TableID
e.wg = &sync.WaitGroup{}
e.job = &statistics.AnalyzeJob{}
e.tblInfo = e.TblInfo
e.opts = e.Opts
_, _, _, _, err := e.buildStats()
e.Collectors = e.collectors
return err
}
type analyzeIndexIncrementalExec struct {
AnalyzeIndexExec
oldHist *statistics.Histogram
oldCMS *statistics.CMSketch
oldTopN *statistics.TopN
}
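// analyzeIndexIncremental analyzes only the index range beyond the old histogram's upper bound and merges the new statistics into the old ones.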
func analyzeIndexIncremental(idxExec *analyzeIndexIncrementalExec) analyzeResult {
var statsVer = statistics.Version1
if idxExec.analyzePB.IdxReq.Version != nil {
statsVer = int(*idxExec.analyzePB.IdxReq.Version)
}
pruneMode := variable.PartitionPruneMode(idxExec.ctx.GetSessionVars().PartitionPruneMode.Load())
if idxExec.tableID.IsPartitionTable() && pruneMode == variable.Dynamic {
err := errors.Errorf("[stats]: global statistics for partitioned tables unavailable in ANALYZE INCREMENTAL")
return analyzeResult{Err: err, job: idxExec.job}
}
startPos := idxExec.oldHist.GetUpper(idxExec.oldHist.Len() - 1)
values, _, err := codec.DecodeRange(startPos.GetBytes(), len(idxExec.idxInfo.Columns), nil, nil)
if err != nil {
return analyzeResult{Err: err, job: idxExec.job}
}
ran := ranger.Range{LowVal: values, HighVal: []types.Datum{types.MaxValueDatum()}}
hist, cms, fms, topN, err := idxExec.buildStats([]*ranger.Range{&ran}, false)
if err != nil {
return analyzeResult{Err: err, job: idxExec.job}
}
hist, err = statistics.MergeHistograms(idxExec.ctx.GetSessionVars().StmtCtx, idxExec.oldHist, hist, int(idxExec.opts[ast.AnalyzeOptNumBuckets]), statsVer)
if err != nil {
return analyzeResult{Err: err, job: idxExec.job}
}
if idxExec.oldCMS != nil && cms != nil {
err = cms.MergeCMSketch4IncrementalAnalyze(idxExec.oldCMS, uint32(idxExec.opts[ast.AnalyzeOptNumTopN]))
if err != nil {
return analyzeResult{Err: err, job: idxExec.job}
}
cms.CalcDefaultValForAnalyze(uint64(hist.NDV))
}
if statsVer == statistics.Version2 {
poped := statistics.MergeTopNAndUpdateCMSketch(topN, idxExec.oldTopN, cms, uint32(idxExec.opts[ast.AnalyzeOptNumTopN]))
hist.AddIdxVals(poped)
}
result := analyzeResult{
TableID: idxExec.tableID,
Hist: []*statistics.Histogram{hist},
Cms: []*statistics.CMSketch{cms},
TopNs: []*statistics.TopN{topN},
Fms: []*statistics.FMSketch{fms},
IsIndex: 1,
job: idxExec.job,
StatsVer: statsVer,
}
result.Count = hist.NullCount
if hist.Len() > 0 {
result.Count += hist.Buckets[hist.Len()-1].Count
}
return result
}
type analyzePKIncrementalExec struct {
AnalyzeColumnsExec
oldHist *statistics.Histogram
}
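// analyzePKIncremental analyzes only the primary key range beyond the old histogram's upper bound and merges the result into the old histogram.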
func analyzePKIncremental(colExec *analyzePKIncrementalExec) analyzeResult {
var maxVal types.Datum
pkInfo := colExec.handleCols.GetCol(0)
if mysql.HasUnsignedFlag(pkInfo.RetType.Flag) {
maxVal = types.NewUintDatum(math.MaxUint64)
} else {
maxVal = types.NewIntDatum(math.MaxInt64)
}
startPos := *colExec.oldHist.GetUpper(colExec.oldHist.Len() - 1)
ran := ranger.Range{LowVal: []types.Datum{startPos}, LowExclude: true, HighVal: []types.Datum{maxVal}}
hists, _, _, _, _, err := colExec.buildStats([]*ranger.Range{&ran}, false)
if err != nil {
return analyzeResult{Err: err, job: colExec.job}
}
hist := hists[0]
hist, err = statistics.MergeHistograms(colExec.ctx.GetSessionVars().StmtCtx, colExec.oldHist, hist, int(colExec.opts[ast.AnalyzeOptNumBuckets]), statistics.Version1)
if err != nil {
return analyzeResult{Err: err, job: colExec.job}
}
result := analyzeResult{
TableID: colExec.tableID,
Hist: []*statistics.Histogram{hist},
Cms: []*statistics.CMSketch{nil},
TopNs: []*statistics.TopN{nil},
Fms: []*statistics.FMSketch{nil},
job: colExec.job,
StatsVer: statistics.Version1,
}
if hist.Len() > 0 {
result.Count += hist.Buckets[hist.Len()-1].Count
}
return result
}
// analyzeResult is used to represent the analyze result.
type analyzeResult struct {
TableID core.AnalyzeTableID
Hist []*statistics.Histogram
Cms []*statistics.CMSketch
TopNs []*statistics.TopN
Fms []*statistics.FMSketch
ExtStats *statistics.ExtendedStatsColl
Count int64
IsIndex int
Err error
job *statistics.AnalyzeJob
StatsVer int
}
| executor/analyze.go | 1 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.9981964230537415,
0.03979484736919403,
0.00016038531612139195,
0.00017481941904406995,
0.19401544332504272
] |
{
"id": 3,
"code_window": [
"\t\t\t}\n",
"\t\t\tidxVals = append(idxVals, v)\n",
"\t\t}\n",
"\t\tvar bytes []byte\n",
"\t\tbytes, err = codec.EncodeKey(e.ctx.GetSessionVars().StmtCtx, bytes, idxVals...)\n",
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tvar keyBytes []byte\n",
"\t\tkeyBytes, err = codec.EncodeKey(e.ctx.GetSessionVars().StmtCtx, keyBytes, idxVals...)\n"
],
"file_path": "executor/analyze.go",
"type": "replace",
"edit_start_line_idx": 1106
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor_test
import (
"sync"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/util/testkit"
)
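// TestDeleteLockKey checks that DELETE in a pessimistic transaction locks the touched keys, so the conflicting concurrent inserts can only proceed after the delete commits.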
func (s *testSuite8) TestDeleteLockKey(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t1, t2, t3, t4, t5, t6;`)
cases := []struct {
ddl string
pre string
tk1Stmt string
tk2Stmt string
}{
{
"create table t1(k int, kk int, val int, primary key(k, kk), unique key(val))",
"insert into t1 values(1, 2, 3)",
"delete from t1 where val = 3",
"insert into t1 values(1, 3, 3)",
},
{
"create table t2(k int, kk int, val int, primary key(k, kk))",
"insert into t2 values(1, 1, 1)",
"delete from t2 where k = 1",
"insert into t2 values(1, 1, 2)",
},
{
"create table t3(k int, kk int, val int, vv int, primary key(k, kk), unique key(val))",
"insert into t3 values(1, 2, 3, 4)",
"delete from t3 where vv = 4",
"insert into t3 values(1, 2, 3, 5)",
},
{
"create table t4(k int, kk int, val int, vv int, primary key(k, kk), unique key(val))",
"insert into t4 values(1, 2, 3, 4)",
"delete from t4 where 1",
"insert into t4 values(1, 2, 3, 5)",
},
{
"create table t5(k int, kk int, val int, vv int, primary key(k, kk), unique key(val))",
"insert into t5 values(1, 2, 3, 4), (2, 3, 4, 5)",
"delete from t5 where k in (1, 2, 3, 4)",
"insert into t5 values(1, 2, 3, 5)",
},
{
"create table t6(k int, kk int, val int, vv int, primary key(k, kk), unique key(val))",
"insert into t6 values(1, 2, 3, 4), (2, 3, 4, 5)",
"delete from t6 where kk between 0 and 10",
"insert into t6 values(1, 2, 3, 5), (2, 3, 4, 6)",
},
}
var wg sync.WaitGroup
for _, t := range cases {
wg.Add(1)
go func(t struct {
ddl string
pre string
tk1Stmt string
tk2Stmt string
}) {
tk1, tk2 := testkit.NewTestKit(c, s.store), testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk2.MustExec("use test")
tk1.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly
tk1.MustExec(t.ddl)
tk1.MustExec(t.pre)
tk1.MustExec("begin pessimistic")
tk2.MustExec("begin pessimistic")
tk1.MustExec(t.tk1Stmt)
doneCh := make(chan struct{}, 1)
go func() {
tk2.MustExec(t.tk2Stmt)
doneCh <- struct{}{}
}()
time.Sleep(50 * time.Millisecond)
tk1.MustExec("commit")
<-doneCh
tk2.MustExec("commit")
wg.Done()
}(t)
}
wg.Wait()
}
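// TestIssue21200 tests DELETE from an aliased table with a correlated EXISTS subquery, both directly and through a prepared statement.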
func (s *testSuite8) TestIssue21200(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("drop database if exists TEST1")
tk.MustExec("create database TEST1")
tk.MustExec("use TEST1")
tk.MustExec("create table t(a int)")
tk.MustExec("create table t1(a int)")
tk.MustExec("insert into t values(1)")
tk.MustExec("insert into t1 values(1)")
tk.MustExec("delete a from t a where exists (select 1 from t1 where t1.a=a.a)")
tk.MustQuery("select * from t").Check(testkit.Rows())
tk.MustExec("insert into t values(1), (2)")
tk.MustExec("insert into t1 values(2)")
tk.MustExec("prepare stmt from 'delete a from t a where exists (select 1 from t1 where a.a=t1.a and t1.a=?)'")
tk.MustExec("set @a=1")
tk.MustExec("execute stmt using @a")
tk.MustQuery("select * from t").Check(testkit.Rows("2"))
}
| executor/delete_test.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.00017885664419736713,
0.00016986881382763386,
0.0001630363258300349,
0.0001709774078335613,
0.0000049860664148582146
] |
{
"id": 3,
"code_window": [
"\t\t\t}\n",
"\t\t\tidxVals = append(idxVals, v)\n",
"\t\t}\n",
"\t\tvar bytes []byte\n",
"\t\tbytes, err = codec.EncodeKey(e.ctx.GetSessionVars().StmtCtx, bytes, idxVals...)\n",
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tvar keyBytes []byte\n",
"\t\tkeyBytes, err = codec.EncodeKey(e.ctx.GetSessionVars().StmtCtx, keyBytes, idxVals...)\n"
],
"file_path": "executor/analyze.go",
"type": "replace",
"edit_start_line_idx": 1106
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package bindinfo
import (
"strings"
"time"
"github.com/pingcap/parser"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/types"
)
// SessionHandle is used to handle all session sql bind operations.
type SessionHandle struct {
ch cache
parser *parser.Parser
}
// NewSessionBindHandle creates a new SessionBindHandle.
func NewSessionBindHandle(parser *parser.Parser) *SessionHandle {
sessionHandle := &SessionHandle{parser: parser}
sessionHandle.ch = make(cache)
return sessionHandle
}
// appendBindRecord adds the BindRecord to the cache; all the stale bindMetas are
// removed from the cache after this operation.
func (h *SessionHandle) appendBindRecord(hash string, meta *BindRecord) {
oldRecord := h.ch.getBindRecord(hash, meta.OriginalSQL, meta.Db)
h.ch.setBindRecord(hash, meta)
updateMetrics(metrics.ScopeSession, oldRecord, meta, false)
}
// CreateBindRecord creates a BindRecord in the cache.
// It replaces all the existing bindings for the same normalized SQL.
func (h *SessionHandle) CreateBindRecord(sctx sessionctx.Context, record *BindRecord) (err error) {
err = record.prepareHints(sctx)
if err != nil {
return err
}
record.Db = strings.ToLower(record.Db)
now := types.NewTime(types.FromGoTime(time.Now().In(sctx.GetSessionVars().StmtCtx.TimeZone)), mysql.TypeTimestamp, 3)
for i := range record.Bindings {
record.Bindings[i].CreateTime = now
record.Bindings[i].UpdateTime = now
}
// Update the BindMeta in the cache.
h.appendBindRecord(parser.DigestNormalized(record.OriginalSQL), record)
return nil
}
// DropBindRecord drops a BindRecord in the cache.
func (h *SessionHandle) DropBindRecord(originalSQL, db string, binding *Binding) error {
db = strings.ToLower(db)
oldRecord := h.GetBindRecord(originalSQL, db)
var newRecord *BindRecord
record := &BindRecord{OriginalSQL: originalSQL, Db: db}
if binding != nil {
record.Bindings = append(record.Bindings, *binding)
}
if oldRecord != nil {
newRecord = oldRecord.remove(record)
} else {
newRecord = record
}
h.ch.setBindRecord(parser.DigestNormalized(record.OriginalSQL), newRecord)
updateMetrics(metrics.ScopeSession, oldRecord, newRecord, false)
return nil
}
// GetBindRecord returns the BindMeta of (normdOrigSQL, db) if it exists.
func (h *SessionHandle) GetBindRecord(normdOrigSQL, db string) *BindRecord {
hash := parser.DigestNormalized(normdOrigSQL)
bindRecords := h.ch[hash]
for _, bindRecord := range bindRecords {
if bindRecord.OriginalSQL == normdOrigSQL {
return bindRecord
}
}
return nil
}
// GetAllBindRecord returns all session bind info.
func (h *SessionHandle) GetAllBindRecord() (bindRecords []*BindRecord) {
for _, bindRecord := range h.ch {
bindRecords = append(bindRecords, bindRecord...)
}
return bindRecords
}
// Close closes the session handle.
func (h *SessionHandle) Close() {
for _, bindRecords := range h.ch {
for _, bindRecord := range bindRecords {
updateMetrics(metrics.ScopeSession, bindRecord, nil, false)
}
}
}
// sessionBindInfoKeyType is a dummy type to avoid naming collision in context.
type sessionBindInfoKeyType int
// String defines a Stringer function for debugging and pretty printing.
func (k sessionBindInfoKeyType) String() string {
return "session_bindinfo"
}
// SessionBindInfoKeyType is a variable key for store session bind info.
const SessionBindInfoKeyType sessionBindInfoKeyType = 0
| bindinfo/session_handle.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.004510627593845129,
0.0007164233829826117,
0.00016783091996330768,
0.00017974824004340917,
0.0012243869714438915
] |
{
"id": 3,
"code_window": [
"\t\t\t}\n",
"\t\t\tidxVals = append(idxVals, v)\n",
"\t\t}\n",
"\t\tvar bytes []byte\n",
"\t\tbytes, err = codec.EncodeKey(e.ctx.GetSessionVars().StmtCtx, bytes, idxVals...)\n",
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tvar keyBytes []byte\n",
"\t\tkeyBytes, err = codec.EncodeKey(e.ctx.GetSessionVars().StmtCtx, keyBytes, idxVals...)\n"
],
"file_path": "executor/analyze.go",
"type": "replace",
"edit_start_line_idx": 1106
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package chunk
import "github.com/pingcap/errors"
// CopySelectedJoinRowsDirect directly copies the selected joined rows from the source Chunk
// to the destination Chunk.
// Return true if at least one joined row was selected.
func CopySelectedJoinRowsDirect(src *Chunk, selected []bool, dst *Chunk) (bool, error) {
if src.NumRows() == 0 {
return false, nil
}
if src.sel != nil || dst.sel != nil {
return false, errors.New(msgErrSelNotNil)
}
if len(src.columns) == 0 {
numSelected := 0
for _, s := range selected {
if s {
numSelected++
}
}
dst.numVirtualRows += numSelected
return numSelected > 0, nil
}
oldLen := dst.columns[0].length
for j, srcCol := range src.columns {
dstCol := dst.columns[j]
if srcCol.isFixed() {
for i := 0; i < len(selected); i++ {
if !selected[i] {
continue
}
dstCol.appendNullBitmap(!srcCol.IsNull(i))
dstCol.length++
elemLen := len(srcCol.elemBuf)
offset := i * elemLen
dstCol.data = append(dstCol.data, srcCol.data[offset:offset+elemLen]...)
}
} else {
for i := 0; i < len(selected); i++ {
if !selected[i] {
continue
}
dstCol.appendNullBitmap(!srcCol.IsNull(i))
dstCol.length++
start, end := srcCol.offsets[i], srcCol.offsets[i+1]
dstCol.data = append(dstCol.data, srcCol.data[start:end]...)
dstCol.offsets = append(dstCol.offsets, int64(len(dstCol.data)))
}
}
}
numSelected := dst.columns[0].length - oldLen
dst.numVirtualRows += numSelected
return numSelected > 0, nil
}
// CopySelectedJoinRowsWithSameOuterRows copies the selected joined rows from the source Chunk
// to the destination Chunk.
// Return true if at least one joined row was selected.
//
// NOTE: All the outer rows in the source Chunk should be the same.
func CopySelectedJoinRowsWithSameOuterRows(src *Chunk, innerColOffset, innerColLen, outerColOffset, outerColLen int, selected []bool, dst *Chunk) (bool, error) {
if src.NumRows() == 0 {
return false, nil
}
if src.sel != nil || dst.sel != nil {
return false, errors.New(msgErrSelNotNil)
}
numSelected := copySelectedInnerRows(innerColOffset, innerColLen, src, selected, dst)
copySameOuterRows(outerColOffset, outerColLen, src, numSelected, dst)
dst.numVirtualRows += numSelected
return numSelected > 0, nil
}
// copySelectedInnerRows copies the selected inner rows from the source Chunk
// to the destination Chunk.
// It returns the number of selected rows.
func copySelectedInnerRows(innerColOffset, innerColLen int, src *Chunk, selected []bool, dst *Chunk) int {
srcCols := src.columns[innerColOffset : innerColOffset+innerColLen]
if len(srcCols) == 0 {
numSelected := 0
for _, s := range selected {
if s {
numSelected++
}
}
return numSelected
}
oldLen := dst.columns[innerColOffset].length
for j, srcCol := range srcCols {
dstCol := dst.columns[innerColOffset+j]
if srcCol.isFixed() {
for i := 0; i < len(selected); i++ {
if !selected[i] {
continue
}
dstCol.appendNullBitmap(!srcCol.IsNull(i))
dstCol.length++
elemLen := len(srcCol.elemBuf)
offset := i * elemLen
dstCol.data = append(dstCol.data, srcCol.data[offset:offset+elemLen]...)
}
} else {
for i := 0; i < len(selected); i++ {
if !selected[i] {
continue
}
dstCol.appendNullBitmap(!srcCol.IsNull(i))
dstCol.length++
start, end := srcCol.offsets[i], srcCol.offsets[i+1]
dstCol.data = append(dstCol.data, srcCol.data[start:end]...)
dstCol.offsets = append(dstCol.offsets, int64(len(dstCol.data)))
}
}
}
return dst.columns[innerColOffset].length - oldLen
}
// copySameOuterRows copies the continuous 'numRows' outer rows in the source Chunk
// to the destination Chunk.
func copySameOuterRows(outerColOffset, outerColLen int, src *Chunk, numRows int, dst *Chunk) {
if numRows <= 0 || outerColLen <= 0 {
return
}
row := src.GetRow(0)
srcCols := src.columns[outerColOffset : outerColOffset+outerColLen]
for i, srcCol := range srcCols {
dstCol := dst.columns[outerColOffset+i]
dstCol.appendMultiSameNullBitmap(!srcCol.IsNull(row.idx), numRows)
dstCol.length += numRows
if srcCol.isFixed() {
elemLen := len(srcCol.elemBuf)
start := row.idx * elemLen
end := start + numRows*elemLen
dstCol.data = append(dstCol.data, srcCol.data[start:end]...)
} else {
start, end := srcCol.offsets[row.idx], srcCol.offsets[row.idx+numRows]
dstCol.data = append(dstCol.data, srcCol.data[start:end]...)
offsets := dstCol.offsets
elemLen := srcCol.offsets[row.idx+1] - srcCol.offsets[row.idx]
for j := 0; j < numRows; j++ {
offsets = append(offsets, offsets[len(offsets)-1]+elemLen)
}
dstCol.offsets = offsets
}
}
}
| util/chunk/chunk_util.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.00040554499719291925,
0.00018615166482049972,
0.00016584573313593864,
0.00017249736993107945,
0.00005496154335560277
] |
{
"id": 4,
"code_window": [
"\t\tif e.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos] == nil {\n",
"\t\t\te.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos] = &statistics.SampleItem{}\n",
"\t\t}\n",
"\t\te.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos].Handle = handle\n",
"\t\te.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos].Value = types.NewBytesDatum(bytes)\n",
"\t}\n",
"\treturn nil\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\te.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos].Value = types.NewBytesDatum(keyBytes)\n"
],
"file_path": "executor/analyze.go",
"type": "replace",
"edit_start_line_idx": 1115
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package distsql
import (
"fmt"
"math"
"sort"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/ddl/placement"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
tikvstore "github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tipb/go-tipb"
)
// RequestBuilder is used to build a "kv.Request".
// It is called before we issue a kv request by "Select".
type RequestBuilder struct {
kv.Request
// txnScope indicates the value of txn_scope
txnScope string
is infoschema.InfoSchema
err error
}
// Build builds a "kv.Request".
func (builder *RequestBuilder) Build() (*kv.Request, error) {
err := builder.verifyTxnScope()
if err != nil {
builder.err = err
}
return &builder.Request, builder.err
}
// SetMemTracker sets a memTracker for this request.
func (builder *RequestBuilder) SetMemTracker(tracker *memory.Tracker) *RequestBuilder {
builder.Request.MemTracker = tracker
return builder
}
// SetTableRanges sets "KeyRanges" for "kv.Request" by converting "tableRanges"
// to "KeyRanges" firstly.
// Note this function should be deleted or at least not exported, but currently
// br refers to it, so we have to keep it.
func (builder *RequestBuilder) SetTableRanges(tid int64, tableRanges []*ranger.Range, fb *statistics.QueryFeedback) *RequestBuilder {
if builder.err == nil {
builder.Request.KeyRanges = TableRangesToKVRanges(tid, tableRanges, fb)
}
return builder
}
// SetIndexRanges sets "KeyRanges" for "kv.Request" by converting index range
// "ranges" to "KeyRanges" firstly.
func (builder *RequestBuilder) SetIndexRanges(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range) *RequestBuilder {
if builder.err == nil {
builder.Request.KeyRanges, builder.err = IndexRangesToKVRanges(sc, tid, idxID, ranges, nil)
}
return builder
}
// SetIndexRangesForTables sets "KeyRanges" for "kv.Request" by converting multiple indexes range
// "ranges" to "KeyRanges" firstly.
func (builder *RequestBuilder) SetIndexRangesForTables(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range) *RequestBuilder {
if builder.err == nil {
builder.Request.KeyRanges, builder.err = IndexRangesToKVRangesForTables(sc, tids, idxID, ranges, nil)
}
return builder
}
// SetHandleRanges sets "KeyRanges" for "kv.Request" by converting table handle range
// "ranges" to "KeyRanges" firstly.
func (builder *RequestBuilder) SetHandleRanges(sc *stmtctx.StatementContext, tid int64, isCommonHandle bool, ranges []*ranger.Range, fb *statistics.QueryFeedback) *RequestBuilder {
return builder.SetHandleRangesForTables(sc, []int64{tid}, isCommonHandle, ranges, fb)
}
// SetHandleRangesForTables sets "KeyRanges" for "kv.Request" by converting table handle range
// "ranges" to "KeyRanges" firstly for multiple tables.
func (builder *RequestBuilder) SetHandleRangesForTables(sc *stmtctx.StatementContext, tid []int64, isCommonHandle bool, ranges []*ranger.Range, fb *statistics.QueryFeedback) *RequestBuilder {
if builder.err == nil {
builder.Request.KeyRanges, builder.err = TableHandleRangesToKVRanges(sc, tid, isCommonHandle, ranges, fb)
}
return builder
}
// SetTableHandles sets "KeyRanges" for "kv.Request" by converting table handles
// "handles" to "KeyRanges" firstly.
func (builder *RequestBuilder) SetTableHandles(tid int64, handles []kv.Handle) *RequestBuilder {
builder.Request.KeyRanges = TableHandlesToKVRanges(tid, handles)
return builder
}
// SetPartitionsAndHandles sets "KeyRanges" for "kv.Request" by converting ParitionHandles to KeyRanges.
// handles in slice must be kv.PartitionHandle.
func (builder *RequestBuilder) SetPartitionsAndHandles(handles []kv.Handle) *RequestBuilder {
builder.Request.KeyRanges = PartitionHandlesToKVRanges(handles)
return builder
}
const estimatedRegionRowCount = 100000
// SetDAGRequest sets the request type to "ReqTypeDAG" and construct request data.
func (builder *RequestBuilder) SetDAGRequest(dag *tipb.DAGRequest) *RequestBuilder {
if builder.err == nil {
builder.Request.Tp = kv.ReqTypeDAG
builder.Request.Cacheable = true
builder.Request.Data, builder.err = dag.Marshal()
}
	// When the DAG is just a simple scan with a small limit, a concurrency of 1 is sufficient.
if len(dag.Executors) == 2 && dag.Executors[1].GetLimit() != nil {
limit := dag.Executors[1].GetLimit()
if limit != nil && limit.Limit < estimatedRegionRowCount {
builder.Request.Concurrency = 1
}
}
return builder
}
// SetAnalyzeRequest sets the request type to "ReqTypeAnalyze" and construct request data.
func (builder *RequestBuilder) SetAnalyzeRequest(ana *tipb.AnalyzeReq) *RequestBuilder {
if builder.err == nil {
builder.Request.Tp = kv.ReqTypeAnalyze
builder.Request.Data, builder.err = ana.Marshal()
builder.Request.NotFillCache = true
builder.Request.IsolationLevel = tikvstore.RC
builder.Request.Priority = tikvstore.PriorityLow
}
return builder
}
// SetChecksumRequest sets the request type to "ReqTypeChecksum" and construct request data.
func (builder *RequestBuilder) SetChecksumRequest(checksum *tipb.ChecksumRequest) *RequestBuilder {
if builder.err == nil {
builder.Request.Tp = kv.ReqTypeChecksum
builder.Request.Data, builder.err = checksum.Marshal()
builder.Request.NotFillCache = true
}
return builder
}
// SetKeyRanges sets "KeyRanges" for "kv.Request".
func (builder *RequestBuilder) SetKeyRanges(keyRanges []kv.KeyRange) *RequestBuilder {
builder.Request.KeyRanges = keyRanges
return builder
}
// SetStartTS sets "StartTS" for "kv.Request".
func (builder *RequestBuilder) SetStartTS(startTS uint64) *RequestBuilder {
builder.Request.StartTs = startTS
return builder
}
// SetDesc sets "Desc" for "kv.Request".
func (builder *RequestBuilder) SetDesc(desc bool) *RequestBuilder {
builder.Request.Desc = desc
return builder
}
// SetKeepOrder sets "KeepOrder" for "kv.Request".
func (builder *RequestBuilder) SetKeepOrder(order bool) *RequestBuilder {
builder.Request.KeepOrder = order
return builder
}
// SetStoreType sets "StoreType" for "kv.Request".
func (builder *RequestBuilder) SetStoreType(storeType kv.StoreType) *RequestBuilder {
builder.Request.StoreType = storeType
return builder
}
// SetAllowBatchCop sets `BatchCop` property.
func (builder *RequestBuilder) SetAllowBatchCop(batchCop bool) *RequestBuilder {
builder.Request.BatchCop = batchCop
return builder
}
func (builder *RequestBuilder) getIsolationLevel() tikvstore.IsoLevel {
switch builder.Tp {
case kv.ReqTypeAnalyze:
return tikvstore.RC
}
return tikvstore.SI
}
func (builder *RequestBuilder) getKVPriority(sv *variable.SessionVars) int {
switch sv.StmtCtx.Priority {
case mysql.NoPriority, mysql.DelayedPriority:
return tikvstore.PriorityNormal
case mysql.LowPriority:
return tikvstore.PriorityLow
case mysql.HighPriority:
return tikvstore.PriorityHigh
}
return tikvstore.PriorityNormal
}
// SetFromSessionVars sets the following fields for "kv.Request" from session variables:
// "Concurrency", "IsolationLevel", "NotFillCache", "TaskID", "Priority", "ReplicaRead",
// "SchemaVar", "IsStaleness" and "MatchStoreLabels".
func (builder *RequestBuilder) SetFromSessionVars(sv *variable.SessionVars) *RequestBuilder {
if builder.Request.Concurrency == 0 {
// Concurrency may be set to 1 by SetDAGRequest
builder.Request.Concurrency = sv.DistSQLScanConcurrency()
}
builder.Request.IsolationLevel = builder.getIsolationLevel()
builder.Request.NotFillCache = sv.StmtCtx.NotFillCache
builder.Request.TaskID = sv.StmtCtx.TaskID
builder.Request.Priority = builder.getKVPriority(sv)
builder.Request.ReplicaRead = sv.GetReplicaRead()
if sv.SnapshotInfoschema != nil {
builder.Request.SchemaVar = infoschema.GetInfoSchemaBySessionVars(sv).SchemaMetaVersion()
} else {
builder.Request.SchemaVar = sv.TxnCtx.SchemaVersion
}
builder.txnScope = sv.TxnCtx.TxnScope
builder.IsStaleness = sv.TxnCtx.IsStaleness
if builder.IsStaleness && builder.txnScope != oracle.GlobalTxnScope {
builder.MatchStoreLabels = []*metapb.StoreLabel{
{
Key: placement.DCLabelKey,
Value: builder.txnScope,
},
}
}
return builder
}
// SetStreaming sets "Streaming" flag for "kv.Request".
func (builder *RequestBuilder) SetStreaming(streaming bool) *RequestBuilder {
builder.Request.Streaming = streaming
return builder
}
// SetConcurrency sets "Concurrency" for "kv.Request".
func (builder *RequestBuilder) SetConcurrency(concurrency int) *RequestBuilder {
builder.Request.Concurrency = concurrency
return builder
}
// SetTiDBServerID sets "TiDBServerID" for "kv.Request"
// ServerID is a unique id of TiDB instance among the cluster.
// See https://github.com/pingcap/tidb/blob/master/docs/design/2020-06-01-global-kill.md
func (builder *RequestBuilder) SetTiDBServerID(serverID uint64) *RequestBuilder {
builder.Request.TiDBServerID = serverID
return builder
}
// SetFromInfoSchema stores the infoschema in the builder so that later steps,
// such as txn-scope verification against placement bundles, can use it.
func (builder *RequestBuilder) SetFromInfoSchema(is infoschema.InfoSchema) *RequestBuilder {
if is == nil {
return builder
}
builder.is = is
return builder
}
func (builder *RequestBuilder) verifyTxnScope() error {
if builder.txnScope == "" {
builder.txnScope = oracle.GlobalTxnScope
}
if builder.txnScope == oracle.GlobalTxnScope || builder.is == nil {
return nil
}
visitPhysicalTableID := make(map[int64]struct{})
for _, keyRange := range builder.Request.KeyRanges {
tableID := tablecodec.DecodeTableID(keyRange.StartKey)
if tableID > 0 {
visitPhysicalTableID[tableID] = struct{}{}
} else {
return errors.New("requestBuilder can't decode tableID from keyRange")
}
}
for phyTableID := range visitPhysicalTableID {
valid := VerifyTxnScope(builder.txnScope, phyTableID, builder.is)
if !valid {
var tblName string
var partName string
tblInfo, _, partInfo := builder.is.FindTableByPartitionID(phyTableID)
if tblInfo != nil && partInfo != nil {
tblName = tblInfo.Meta().Name.String()
partName = partInfo.Name.String()
} else {
tblInfo, _ = builder.is.TableByID(phyTableID)
tblName = tblInfo.Meta().Name.String()
}
err := fmt.Errorf("table %v can not be read by %v txn_scope", tblName, builder.txnScope)
if len(partName) > 0 {
err = fmt.Errorf("table %v's partition %v can not be read by %v txn_scope",
tblName, partName, builder.txnScope)
}
return err
}
}
return nil
}
// TableHandleRangesToKVRanges convert table handle ranges to "KeyRanges" for multiple tables.
func TableHandleRangesToKVRanges(sc *stmtctx.StatementContext, tid []int64, isCommonHandle bool, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) {
if !isCommonHandle {
return tablesRangesToKVRanges(tid, ranges, fb), nil
}
return CommonHandleRangesToKVRanges(sc, tid, ranges)
}
// TableRangesToKVRanges converts table ranges to "KeyRange".
// Note this function should not be exported, but currently
// br refers to it, so we have to keep it.
func TableRangesToKVRanges(tid int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) []kv.KeyRange {
return tablesRangesToKVRanges([]int64{tid}, ranges, fb)
}
// tablesRangesToKVRanges converts table ranges to "KeyRange".
func tablesRangesToKVRanges(tids []int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) []kv.KeyRange {
if fb == nil || fb.Hist == nil {
return tableRangesToKVRangesWithoutSplit(tids, ranges)
}
krs := make([]kv.KeyRange, 0, len(ranges))
feedbackRanges := make([]*ranger.Range, 0, len(ranges))
for _, ran := range ranges {
low := codec.EncodeInt(nil, ran.LowVal[0].GetInt64())
high := codec.EncodeInt(nil, ran.HighVal[0].GetInt64())
if ran.LowExclude {
low = kv.Key(low).PrefixNext()
}
		// If this range is split by the histogram, the high value equals one bucket's upper bound.
		// Since we need to guarantee that each range falls inside exactly one bucket, and `PrefixNext`
		// would make the high value greater than that upper bound, we store the range here first.
r := &ranger.Range{LowVal: []types.Datum{types.NewBytesDatum(low)},
HighVal: []types.Datum{types.NewBytesDatum(high)}}
feedbackRanges = append(feedbackRanges, r)
if !ran.HighExclude {
high = kv.Key(high).PrefixNext()
}
for _, tid := range tids {
startKey := tablecodec.EncodeRowKey(tid, low)
endKey := tablecodec.EncodeRowKey(tid, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
}
}
fb.StoreRanges(feedbackRanges)
return krs
}
func tableRangesToKVRangesWithoutSplit(tids []int64, ranges []*ranger.Range) []kv.KeyRange {
krs := make([]kv.KeyRange, 0, len(ranges)*len(tids))
for _, ran := range ranges {
low, high := encodeHandleKey(ran)
for _, tid := range tids {
startKey := tablecodec.EncodeRowKey(tid, low)
endKey := tablecodec.EncodeRowKey(tid, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
}
}
return krs
}
func encodeHandleKey(ran *ranger.Range) ([]byte, []byte) {
low := codec.EncodeInt(nil, ran.LowVal[0].GetInt64())
high := codec.EncodeInt(nil, ran.HighVal[0].GetInt64())
if ran.LowExclude {
low = kv.Key(low).PrefixNext()
}
if !ran.HighExclude {
high = kv.Key(high).PrefixNext()
}
return low, high
}
// SplitRangesBySign splits the ranges into two parts:
// 1. signedRanges: ranges whose values are less than or equal to MaxInt64
// 2. unsignedRanges: ranges whose values are greater than MaxInt64
// We do that because the TiKV key encoding treats every key as a signed int, so MaxUint64 actually
// sorts before zero. Therefore we must
// 1. pick the range that straddles MaxInt64,
// 2. split that range into two parts: smaller than MaxInt64 and greater than it,
// 3. if ascending order is required, return the signed part first, and vice versa,
// 4. if no order is required, prefer returning the unsigned part first, because that is the normal
// order of a TiKV scan.
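// For example (illustrative): an unsigned-handle range [100, MaxUint64] is cut into a signed
// part [100, MaxInt64] and an unsigned part [MaxInt64+1, MaxUint64]; with keepOrder == false
// both parts come back in the first return value with the unsigned part leading.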
func SplitRangesBySign(ranges []*ranger.Range, keepOrder bool, desc bool, isCommonHandle bool) ([]*ranger.Range, []*ranger.Range) {
if isCommonHandle || len(ranges) == 0 || ranges[0].LowVal[0].Kind() == types.KindInt64 {
return ranges, nil
}
idx := sort.Search(len(ranges), func(i int) bool { return ranges[i].HighVal[0].GetUint64() > math.MaxInt64 })
if idx == len(ranges) {
return ranges, nil
}
if ranges[idx].LowVal[0].GetUint64() > math.MaxInt64 {
signedRanges := ranges[0:idx]
unsignedRanges := ranges[idx:]
if !keepOrder {
return append(unsignedRanges, signedRanges...), nil
}
if desc {
return unsignedRanges, signedRanges
}
return signedRanges, unsignedRanges
}
signedRanges := make([]*ranger.Range, 0, idx+1)
unsignedRanges := make([]*ranger.Range, 0, len(ranges)-idx)
signedRanges = append(signedRanges, ranges[0:idx]...)
if !(ranges[idx].LowVal[0].GetUint64() == math.MaxInt64 && ranges[idx].LowExclude) {
signedRanges = append(signedRanges, &ranger.Range{
LowVal: ranges[idx].LowVal,
LowExclude: ranges[idx].LowExclude,
HighVal: []types.Datum{types.NewUintDatum(math.MaxInt64)},
})
}
if !(ranges[idx].HighVal[0].GetUint64() == math.MaxInt64+1 && ranges[idx].HighExclude) {
unsignedRanges = append(unsignedRanges, &ranger.Range{
LowVal: []types.Datum{types.NewUintDatum(math.MaxInt64 + 1)},
HighVal: ranges[idx].HighVal,
HighExclude: ranges[idx].HighExclude,
})
}
if idx < len(ranges) {
unsignedRanges = append(unsignedRanges, ranges[idx+1:]...)
}
if !keepOrder {
return append(unsignedRanges, signedRanges...), nil
}
if desc {
return unsignedRanges, signedRanges
}
return signedRanges, unsignedRanges
}
// TableHandlesToKVRanges converts sorted handles to kv ranges.
// Continuous handles are merged into a single key range.
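// For example (illustrative), int handles [1, 2, 3, 7] produce two key ranges: one covering
// rows 1 through 3 and another covering row 7.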
func TableHandlesToKVRanges(tid int64, handles []kv.Handle) []kv.KeyRange {
krs := make([]kv.KeyRange, 0, len(handles))
i := 0
for i < len(handles) {
if commonHandle, ok := handles[i].(*kv.CommonHandle); ok {
ran := kv.KeyRange{
StartKey: tablecodec.EncodeRowKey(tid, commonHandle.Encoded()),
EndKey: tablecodec.EncodeRowKey(tid, kv.Key(commonHandle.Encoded()).Next()),
}
krs = append(krs, ran)
i++
continue
}
j := i + 1
for ; j < len(handles) && handles[j-1].IntValue() != math.MaxInt64; j++ {
if handles[j].IntValue() != handles[j-1].IntValue()+1 {
break
}
}
low := codec.EncodeInt(nil, handles[i].IntValue())
high := codec.EncodeInt(nil, handles[j-1].IntValue())
high = kv.Key(high).PrefixNext()
startKey := tablecodec.EncodeRowKey(tid, low)
endKey := tablecodec.EncodeRowKey(tid, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
i = j
}
return krs
}
// PartitionHandlesToKVRanges converts PartitionHandles to kv ranges.
// Handles in the slice must be kv.PartitionHandle.
func PartitionHandlesToKVRanges(handles []kv.Handle) []kv.KeyRange {
krs := make([]kv.KeyRange, 0, len(handles))
i := 0
for i < len(handles) {
ph := handles[i].(kv.PartitionHandle)
h := ph.Handle
pid := ph.PartitionID
if commonHandle, ok := h.(*kv.CommonHandle); ok {
ran := kv.KeyRange{
StartKey: tablecodec.EncodeRowKey(pid, commonHandle.Encoded()),
EndKey: tablecodec.EncodeRowKey(pid, append(commonHandle.Encoded(), 0)),
}
krs = append(krs, ran)
i++
continue
}
j := i + 1
for ; j < len(handles) && handles[j-1].IntValue() != math.MaxInt64; j++ {
if handles[j].IntValue() != handles[j-1].IntValue()+1 {
break
}
if handles[j].(kv.PartitionHandle).PartitionID != pid {
break
}
}
low := codec.EncodeInt(nil, handles[i].IntValue())
high := codec.EncodeInt(nil, handles[j-1].IntValue())
high = kv.Key(high).PrefixNext()
startKey := tablecodec.EncodeRowKey(pid, low)
endKey := tablecodec.EncodeRowKey(pid, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
i = j
}
return krs
}
// IndexRangesToKVRanges converts index ranges to "KeyRange".
func IndexRangesToKVRanges(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) {
return IndexRangesToKVRangesForTables(sc, []int64{tid}, idxID, ranges, fb)
}
// IndexRangesToKVRangesForTables converts indexes ranges to "KeyRange".
func IndexRangesToKVRangesForTables(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) {
if fb == nil || fb.Hist == nil {
return indexRangesToKVWithoutSplit(sc, tids, idxID, ranges)
}
feedbackRanges := make([]*ranger.Range, 0, len(ranges))
for _, ran := range ranges {
low, high, err := encodeIndexKey(sc, ran)
if err != nil {
return nil, err
}
feedbackRanges = append(feedbackRanges, &ranger.Range{LowVal: []types.Datum{types.NewBytesDatum(low)},
HighVal: []types.Datum{types.NewBytesDatum(high)}, LowExclude: false, HighExclude: true})
}
feedbackRanges, ok := fb.Hist.SplitRange(sc, feedbackRanges, true)
if !ok {
fb.Invalidate()
}
krs := make([]kv.KeyRange, 0, len(feedbackRanges))
for _, ran := range feedbackRanges {
low, high := ran.LowVal[0].GetBytes(), ran.HighVal[0].GetBytes()
if ran.LowExclude {
low = kv.Key(low).PrefixNext()
}
ran.LowVal[0].SetBytes(low)
		// If this range is split by the histogram, the high value equals one bucket's upper bound.
		// Since we need to guarantee that each range falls inside exactly one bucket, and `PrefixNext`
		// would make the high value greater than that upper bound, we store the high value here first.
ran.HighVal[0].SetBytes(high)
if !ran.HighExclude {
high = kv.Key(high).PrefixNext()
}
for _, tid := range tids {
startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low)
endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
}
}
fb.StoreRanges(feedbackRanges)
return krs, nil
}
// CommonHandleRangesToKVRanges converts common handle ranges to "KeyRange".
func CommonHandleRangesToKVRanges(sc *stmtctx.StatementContext, tids []int64, ranges []*ranger.Range) ([]kv.KeyRange, error) {
rans := make([]*ranger.Range, 0, len(ranges))
for _, ran := range ranges {
low, high, err := encodeIndexKey(sc, ran)
if err != nil {
return nil, err
}
rans = append(rans, &ranger.Range{LowVal: []types.Datum{types.NewBytesDatum(low)},
HighVal: []types.Datum{types.NewBytesDatum(high)}, LowExclude: false, HighExclude: true})
}
krs := make([]kv.KeyRange, 0, len(rans))
for _, ran := range rans {
low, high := ran.LowVal[0].GetBytes(), ran.HighVal[0].GetBytes()
if ran.LowExclude {
low = kv.Key(low).PrefixNext()
}
ran.LowVal[0].SetBytes(low)
for _, tid := range tids {
startKey := tablecodec.EncodeRowKey(tid, low)
endKey := tablecodec.EncodeRowKey(tid, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
}
}
return krs, nil
}
// VerifyTxnScope verifies whether the txnScope and the visited physical table break the leader rule's dcLocation.
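// For example (illustrative DC names), with txnScope "bj" a table whose placement bundle puts
// the leader in DC "sh" fails the check, while tables without a bundle or without a leader rule pass.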
func VerifyTxnScope(txnScope string, physicalTableID int64, is infoschema.InfoSchema) bool {
if txnScope == "" || txnScope == oracle.GlobalTxnScope {
return true
}
bundle, ok := is.BundleByName(placement.GroupID(physicalTableID))
if !ok {
return true
}
leaderDC, ok := placement.GetLeaderDCByBundle(bundle, placement.DCLabelKey)
if !ok {
return true
}
if leaderDC != txnScope {
return false
}
return true
}
func indexRangesToKVWithoutSplit(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range) ([]kv.KeyRange, error) {
krs := make([]kv.KeyRange, 0, len(ranges))
for _, ran := range ranges {
low, high, err := encodeIndexKey(sc, ran)
if err != nil {
return nil, err
}
for _, tid := range tids {
startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low)
endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
}
}
return krs, nil
}
func encodeIndexKey(sc *stmtctx.StatementContext, ran *ranger.Range) ([]byte, []byte, error) {
low, err := codec.EncodeKey(sc, nil, ran.LowVal...)
if err != nil {
return nil, nil, err
}
if ran.LowExclude {
low = kv.Key(low).PrefixNext()
}
high, err := codec.EncodeKey(sc, nil, ran.HighVal...)
if err != nil {
return nil, nil, err
}
if !ran.HighExclude {
high = kv.Key(high).PrefixNext()
}
var hasNull bool
for _, highVal := range ran.HighVal {
if highVal.IsNull() {
hasNull = true
break
}
}
if hasNull {
		// Append 0 to make the unique-key range [null, null] a scan rather than a point-get.
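		// A unique index may contain multiple NULL entries, so this range must remain a scan
		// in order to return all of them.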
high = kv.Key(high).Next()
}
return low, high, nil
}
| distsql/request_builder.go | 1 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.0011682364856824279,
0.00018889145576395094,
0.00016061318456195295,
0.0001724851899780333,
0.00012120794417569414
] |
{
"id": 4,
"code_window": [
"\t\tif e.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos] == nil {\n",
"\t\t\te.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos] = &statistics.SampleItem{}\n",
"\t\t}\n",
"\t\te.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos].Handle = handle\n",
"\t\te.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos].Value = types.NewBytesDatum(bytes)\n",
"\t}\n",
"\treturn nil\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\te.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos].Value = types.NewBytesDatum(keyBytes)\n"
],
"file_path": "executor/analyze.go",
"type": "replace",
"edit_start_line_idx": 1115
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
. "github.com/pingcap/check"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/charset"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/types"
)
type tidbResultSetTestSuite struct{}
var _ = Suite(tidbResultSetTestSuite{})
func createColumnByTypeAndLen(tp byte, len uint32) *ColumnInfo {
return &ColumnInfo{
Schema: "test",
Table: "dual",
OrgTable: "",
Name: "a",
OrgName: "a",
ColumnLength: len,
Charset: uint16(mysql.CharsetNameToID(charset.CharsetUTF8)),
Flag: uint16(mysql.UnsignedFlag),
Decimal: uint8(0),
Type: tp,
DefaultValueLength: uint64(0),
DefaultValue: nil,
}
}
func (ts tidbResultSetTestSuite) TestConvertColumnInfo(c *C) {
// Test "mysql.TypeBit", for: https://github.com/pingcap/tidb/issues/5405.
resultField := ast.ResultField{
Column: &model.ColumnInfo{
Name: model.NewCIStr("a"),
ID: 0,
Offset: 0,
FieldType: types.FieldType{
Tp: mysql.TypeBit,
Flag: mysql.UnsignedFlag,
Flen: 1,
Decimal: 0,
Charset: charset.CharsetUTF8,
Collate: charset.CollationUTF8,
},
Comment: "column a is the first column in table dual",
},
ColumnAsName: model.NewCIStr("a"),
TableAsName: model.NewCIStr("dual"),
DBName: model.NewCIStr("test"),
}
colInfo := convertColumnInfo(&resultField)
c.Assert(colInfo, DeepEquals, createColumnByTypeAndLen(mysql.TypeBit, 1))
// Test "mysql.TypeTiny", for: https://github.com/pingcap/tidb/issues/5405.
resultField = ast.ResultField{
Column: &model.ColumnInfo{
Name: model.NewCIStr("a"),
ID: 0,
Offset: 0,
FieldType: types.FieldType{
Tp: mysql.TypeTiny,
Flag: mysql.UnsignedFlag,
Flen: 1,
Decimal: 0,
Charset: charset.CharsetUTF8,
Collate: charset.CollationUTF8,
},
Comment: "column a is the first column in table dual",
},
ColumnAsName: model.NewCIStr("a"),
TableAsName: model.NewCIStr("dual"),
DBName: model.NewCIStr("test"),
}
colInfo = convertColumnInfo(&resultField)
c.Assert(colInfo, DeepEquals, createColumnByTypeAndLen(mysql.TypeTiny, 1))
resultField = ast.ResultField{
Column: &model.ColumnInfo{
Name: model.NewCIStr("a"),
ID: 0,
Offset: 0,
FieldType: types.FieldType{
Tp: mysql.TypeYear,
Flag: mysql.ZerofillFlag,
Flen: 4,
Decimal: 0,
Charset: charset.CharsetBin,
Collate: charset.CollationBin,
},
Comment: "column a is the first column in table dual",
},
ColumnAsName: model.NewCIStr("a"),
TableAsName: model.NewCIStr("dual"),
DBName: model.NewCIStr("test"),
}
colInfo = convertColumnInfo(&resultField)
c.Assert(colInfo.ColumnLength, Equals, uint32(4))
}
| server/driver_tidb_test.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.0001788916124496609,
0.00017621013103052974,
0.0001715950929792598,
0.00017668999498710036,
0.0000020712523109978065
] |
{
"id": 4,
"code_window": [
"\t\tif e.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos] == nil {\n",
"\t\t\te.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos] = &statistics.SampleItem{}\n",
"\t\t}\n",
"\t\te.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos].Handle = handle\n",
"\t\te.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos].Value = types.NewBytesDatum(bytes)\n",
"\t}\n",
"\treturn nil\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\te.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos].Value = types.NewBytesDatum(keyBytes)\n"
],
"file_path": "executor/analyze.go",
"type": "replace",
"edit_start_line_idx": 1115
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package memo
import (
plannercore "github.com/pingcap/tidb/planner/core"
)
// Operand is a node of a pattern tree; it represents a logical expression operator.
// Different from a logical plan operator, which holds the full information about an expression
// operator, an Operand only stores the type information.
// An Operand may correspond to a concrete logical plan operator, or it can have a special meaning,
// e.g., a placeholder for any logical plan operator.
type Operand int
const (
// OperandAny is a placeholder for any Operand.
OperandAny Operand = iota
// OperandJoin is the operand for LogicalJoin.
OperandJoin
// OperandAggregation is the operand for LogicalAggregation.
OperandAggregation
// OperandProjection is the operand for LogicalProjection.
OperandProjection
// OperandSelection is the operand for LogicalSelection.
OperandSelection
// OperandApply is the operand for LogicalApply.
OperandApply
// OperandMaxOneRow is the operand for LogicalMaxOneRow.
OperandMaxOneRow
// OperandTableDual is the operand for LogicalTableDual.
OperandTableDual
// OperandDataSource is the operand for DataSource.
OperandDataSource
// OperandUnionScan is the operand for LogicalUnionScan.
OperandUnionScan
// OperandUnionAll is the operand for LogicalUnionAll.
OperandUnionAll
// OperandSort is the operand for LogicalSort.
OperandSort
// OperandTopN is the operand for LogicalTopN.
OperandTopN
// OperandLock is the operand for LogicalLock.
OperandLock
// OperandLimit is the operand for LogicalLimit.
OperandLimit
// OperandTiKVSingleGather is the operand for TiKVSingleGather.
OperandTiKVSingleGather
// OperandMemTableScan is the operand for MemTableScan.
OperandMemTableScan
// OperandTableScan is the operand for TableScan.
OperandTableScan
// OperandIndexScan is the operand for IndexScan.
OperandIndexScan
// OperandShow is the operand for Show.
OperandShow
// OperandWindow is the operand for window function.
OperandWindow
// OperandUnsupported is the operand for unsupported operators.
OperandUnsupported
)
// GetOperand maps logical plan operator to Operand.
func GetOperand(p plannercore.LogicalPlan) Operand {
switch p.(type) {
case *plannercore.LogicalApply:
return OperandApply
case *plannercore.LogicalJoin:
return OperandJoin
case *plannercore.LogicalAggregation:
return OperandAggregation
case *plannercore.LogicalProjection:
return OperandProjection
case *plannercore.LogicalSelection:
return OperandSelection
case *plannercore.LogicalMaxOneRow:
return OperandMaxOneRow
case *plannercore.LogicalTableDual:
return OperandTableDual
case *plannercore.DataSource:
return OperandDataSource
case *plannercore.LogicalUnionScan:
return OperandUnionScan
case *plannercore.LogicalUnionAll:
return OperandUnionAll
case *plannercore.LogicalSort:
return OperandSort
case *plannercore.LogicalTopN:
return OperandTopN
case *plannercore.LogicalLock:
return OperandLock
case *plannercore.LogicalLimit:
return OperandLimit
case *plannercore.TiKVSingleGather:
return OperandTiKVSingleGather
case *plannercore.LogicalTableScan:
return OperandTableScan
case *plannercore.LogicalMemTable:
return OperandMemTableScan
case *plannercore.LogicalIndexScan:
return OperandIndexScan
case *plannercore.LogicalShow:
return OperandShow
case *plannercore.LogicalWindow:
return OperandWindow
default:
return OperandUnsupported
}
}
// Match checks if current Operand matches specified one.
func (o Operand) Match(t Operand) bool {
if o == OperandAny || t == OperandAny {
return true
}
if o == t {
return true
}
return false
}
// Pattern defines the match pattern for a rule. It's a tree-like structure
// which is a piece of a logical expression. Each node in the Pattern tree is
// defined by an Operand and EngineType pair.
type Pattern struct {
Operand
EngineTypeSet
Children []*Pattern
}
// Match checks whether the EngineTypeSet contains the given EngineType
// and whether the two Operands match.
func (p *Pattern) Match(o Operand, e EngineType) bool {
return p.EngineTypeSet.Contains(e) && p.Operand.Match(o)
}
// MatchOperandAny checks whether the pattern's Operand is OperandAny
// and the EngineTypeSet contains the given EngineType.
func (p *Pattern) MatchOperandAny(e EngineType) bool {
return p.EngineTypeSet.Contains(e) && p.Operand == OperandAny
}
// NewPattern creates a pattern node according to the Operand and EngineType.
func NewPattern(operand Operand, engineTypeSet EngineTypeSet) *Pattern {
return &Pattern{Operand: operand, EngineTypeSet: engineTypeSet}
}
// SetChildren sets the Children information for a pattern node.
func (p *Pattern) SetChildren(children ...*Pattern) {
p.Children = children
}
// BuildPattern builds a Pattern from Operand, EngineType and child Patterns.
// Used in GetPattern() of Transformation interface to generate a Pattern.
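// For example, a rule that matches a Selection whose child is a DataSource could build its
// pattern roughly as follows (illustrative; the EngineTypeSet values are assumed to be the
// predefined ones such as EngineTiDBOnly):
//
//	p := BuildPattern(OperandSelection, EngineTiDBOnly,
//		NewPattern(OperandDataSource, EngineTiDBOnly))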
func BuildPattern(operand Operand, engineTypeSet EngineTypeSet, children ...*Pattern) *Pattern {
p := &Pattern{Operand: operand, EngineTypeSet: engineTypeSet}
p.Children = children
return p
}
| planner/memo/pattern.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.00017902010586112738,
0.00017202639719471335,
0.00015945255290716887,
0.00017350021516904235,
0.000005339773451851215
] |
{
"id": 4,
"code_window": [
"\t\tif e.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos] == nil {\n",
"\t\t\te.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos] = &statistics.SampleItem{}\n",
"\t\t}\n",
"\t\te.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos].Handle = handle\n",
"\t\te.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos].Value = types.NewBytesDatum(bytes)\n",
"\t}\n",
"\treturn nil\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\te.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos].Value = types.NewBytesDatum(keyBytes)\n"
],
"file_path": "executor/analyze.go",
"type": "replace",
"edit_start_line_idx": 1115
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package telemetry
import (
"crypto/sha1"
"fmt"
"sort"
"strconv"
"strings"
)
// hashString returns the SHA1 checksum in hex of the string.
func hashString(text string) (string, error) {
hash := sha1.New()
_, err := hash.Write([]byte(text))
if err != nil {
return "", err
}
hashed := hash.Sum(nil)
return fmt.Sprintf("%x", hashed), nil
}
// parseAddressAndHash parses an address in HOST:PORT format, returns the hashed host and the port.
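// For example, "10.0.0.1:4000" yields (sha1-hex of "10.0.0.1", "4000"), while an address whose
// last segment is not numeric is hashed as a whole and returned with an empty port.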
func parseAddressAndHash(address string) (string, string, error) {
var host, port string
if !strings.Contains(address, ":") {
host = address
port = ""
} else {
parts := strings.Split(address, ":")
lastPart := parts[len(parts)-1]
if _, err := strconv.Atoi(lastPart); err != nil {
			// Ensure that the plaintext part (i.e. the port) never contains sensitive data.
			// The last part is not an int, so treat the whole address as the host.
host = address
port = ""
} else {
host = strings.Join(parts[:len(parts)-1], ":")
port = lastPart
}
}
res, err := hashString(host)
if err != nil {
return "", "", err
}
return res, port, err
}
// See https://stackoverflow.com/a/58026884
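// The input slice must already be sorted in ascending order, e.g.
// sortedStringContains([]string{"a", "b", "c"}, "b") returns true.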
func sortedStringContains(s []string, searchTerm string) bool {
i := sort.SearchStrings(s, searchTerm)
return i < len(s) && s[i] == searchTerm
}
| telemetry/util.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.00017921207472682,
0.00017289535026066005,
0.00016430260438937694,
0.000172482876223512,
0.000005114806754136225
] |
{
"id": 5,
"code_window": [
"\t\t}\n",
"\t\terr := fmSketch.InsertValue(sc, sample.Value)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, nil, nil, nil, err\n",
"\t\t}\n",
"\t\tbytes, err := tablecodec.EncodeValue(sc, nil, sample.Value)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, nil, nil, nil, err\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tvalBytes, err := tablecodec.EncodeValue(sc, nil, sample.Value)\n"
],
"file_path": "executor/analyze.go",
"type": "replace",
"edit_start_line_idx": 1238
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"bytes"
"context"
"fmt"
"math"
"math/rand"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/cznic/mathutil"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/distsql"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/store/tikv"
tikvstore "github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/pingcap/tipb/go-tipb"
"go.uber.org/zap"
)
var _ Executor = &AnalyzeExec{}
// AnalyzeExec represents Analyze executor.
type AnalyzeExec struct {
baseExecutor
tasks []*analyzeTask
wg *sync.WaitGroup
opts map[ast.AnalyzeOptionType]uint64
}
var (
// RandSeed is the seed for randing package.
// It's public for test.
RandSeed = int64(1)
)
const (
maxRegionSampleSize = 1000
maxSketchSize = 10000
)
// Next implements the Executor Next interface.
func (e *AnalyzeExec) Next(ctx context.Context, req *chunk.Chunk) error {
concurrency, err := getBuildStatsConcurrency(e.ctx)
if err != nil {
return err
}
taskCh := make(chan *analyzeTask, len(e.tasks))
resultCh := make(chan analyzeResult, len(e.tasks))
e.wg.Add(concurrency)
for i := 0; i < concurrency; i++ {
go e.analyzeWorker(taskCh, resultCh, i == 0)
}
for _, task := range e.tasks {
statistics.AddNewAnalyzeJob(task.job)
}
for _, task := range e.tasks {
taskCh <- task
}
close(taskCh)
statsHandle := domain.GetDomain(e.ctx).StatsHandle()
panicCnt := 0
pruneMode := variable.PartitionPruneMode(e.ctx.GetSessionVars().PartitionPruneMode.Load())
	// needGlobalStats indicates whether we should merge the partition-level stats into global-level stats.
needGlobalStats := pruneMode == variable.Dynamic
type globalStatsKey struct {
tableID int64
indexID int64
}
type globalStatsInfo struct {
isIndex int
		// When `isIndex == 0`, idxID is the column ID.
		// Otherwise, idxID is the index ID.
idxID int64
statsVersion int
}
	// globalStatsMap records which partition tables and corresponding indexes need global-level stats.
	// The key is the structure used to store the tableID and indexID.
	// The value is the additional information needed to build global-level stats.
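	// For example (illustrative IDs): analyzing an index with ID 5 of a partitioned table with
	// table ID 42 records the key {tableID: 42, indexID: 5}, while the table's column stats are
	// recorded under {tableID: 42, indexID: -1}.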
globalStatsMap := make(map[globalStatsKey]globalStatsInfo)
finishJobWithLogFn := func(ctx context.Context, job *statistics.AnalyzeJob, meetError bool) {
		if job != nil {
			job.Finish(meetError)
logutil.Logger(ctx).Info(fmt.Sprintf("analyze table `%s`.`%s` has %s", job.DBName, job.TableName, job.State),
zap.String("partition", job.PartitionName),
zap.String("job info", job.JobInfo),
zap.Time("start time", job.StartTime),
zap.Time("end time", job.EndTime),
zap.String("cost", job.EndTime.Sub(job.StartTime).String()))
}
}
for panicCnt < concurrency {
result, ok := <-resultCh
if !ok {
break
}
if result.Err != nil {
err = result.Err
if err == errAnalyzeWorkerPanic {
panicCnt++
} else {
logutil.Logger(ctx).Error("analyze failed", zap.Error(err))
}
finishJobWithLogFn(ctx, result.job, true)
continue
}
statisticsID := result.TableID.GetStatisticsID()
for i, hg := range result.Hist {
if result.TableID.IsPartitionTable() && needGlobalStats {
				// If it does not belong to index statistics, set idxID to -1 to distinguish it from a real index ID.
idxID := int64(-1)
if result.IsIndex != 0 {
idxID = hg.ID
}
globalStatsID := globalStatsKey{result.TableID.TableID, idxID}
if _, ok := globalStatsMap[globalStatsID]; !ok {
globalStatsMap[globalStatsID] = globalStatsInfo{result.IsIndex, hg.ID, result.StatsVer}
}
}
err1 := statsHandle.SaveStatsToStorage(statisticsID, result.Count, result.IsIndex, hg, result.Cms[i], result.TopNs[i], result.Fms[i], result.StatsVer, 1)
if err1 != nil {
err = err1
logutil.Logger(ctx).Error("save stats to storage failed", zap.Error(err))
finishJobWithLogFn(ctx, result.job, true)
continue
}
}
if err1 := statsHandle.SaveExtendedStatsToStorage(statisticsID, result.ExtStats, false); err1 != nil {
err = err1
logutil.Logger(ctx).Error("save extended stats to storage failed", zap.Error(err))
finishJobWithLogFn(ctx, result.job, true)
} else {
finishJobWithLogFn(ctx, result.job, false)
}
}
for _, task := range e.tasks {
statistics.MoveToHistory(task.job)
}
if err != nil {
return err
}
if needGlobalStats {
for globalStatsID, info := range globalStatsMap {
globalStats, err := statsHandle.MergePartitionStats2GlobalStatsByTableID(e.ctx, e.opts, infoschema.GetInfoSchema(e.ctx), globalStatsID.tableID, info.isIndex, info.idxID)
if err != nil {
if types.ErrPartitionStatsMissing.Equal(err) {
					// When we find that some partition-level stats are missing, we report a warning.
e.ctx.GetSessionVars().StmtCtx.AppendWarning(err)
continue
}
return err
}
for i := 0; i < globalStats.Num; i++ {
hg, cms, topN, fms := globalStats.Hg[i], globalStats.Cms[i], globalStats.TopN[i], globalStats.Fms[i]
err = statsHandle.SaveStatsToStorage(globalStatsID.tableID, globalStats.Count, info.isIndex, hg, cms, topN, fms, info.statsVersion, 1)
if err != nil {
logutil.Logger(ctx).Error("save global-level stats to storage failed", zap.Error(err))
}
}
}
}
return statsHandle.Update(infoschema.GetInfoSchema(e.ctx))
}
func getBuildStatsConcurrency(ctx sessionctx.Context) (int, error) {
sessionVars := ctx.GetSessionVars()
concurrency, err := variable.GetSessionSystemVar(sessionVars, variable.TiDBBuildStatsConcurrency)
if err != nil {
return 0, err
}
c, err := strconv.ParseInt(concurrency, 10, 64)
return int(c), err
}
type taskType int
const (
colTask taskType = iota
idxTask
fastTask
pkIncrementalTask
idxIncrementalTask
)
type analyzeTask struct {
taskType taskType
idxExec *AnalyzeIndexExec
colExec *AnalyzeColumnsExec
fastExec *AnalyzeFastExec
idxIncrementalExec *analyzeIndexIncrementalExec
colIncrementalExec *analyzePKIncrementalExec
job *statistics.AnalyzeJob
}
var errAnalyzeWorkerPanic = errors.New("analyze worker panic")
func (e *AnalyzeExec) analyzeWorker(taskCh <-chan *analyzeTask, resultCh chan<- analyzeResult, isCloseChanThread bool) {
var task *analyzeTask
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
stackSize := runtime.Stack(buf, false)
buf = buf[:stackSize]
logutil.BgLogger().Error("analyze worker panicked", zap.String("stack", string(buf)))
metrics.PanicCounter.WithLabelValues(metrics.LabelAnalyze).Inc()
resultCh <- analyzeResult{
Err: errAnalyzeWorkerPanic,
job: task.job,
}
}
e.wg.Done()
if isCloseChanThread {
e.wg.Wait()
close(resultCh)
}
}()
for {
var ok bool
task, ok = <-taskCh
if !ok {
break
}
task.job.Start()
switch task.taskType {
case colTask:
task.colExec.job = task.job
for _, result := range analyzeColumnsPushdown(task.colExec) {
resultCh <- result
}
case idxTask:
task.idxExec.job = task.job
resultCh <- analyzeIndexPushdown(task.idxExec)
case fastTask:
task.fastExec.job = task.job
task.job.Start()
for _, result := range analyzeFastExec(task.fastExec) {
resultCh <- result
}
case pkIncrementalTask:
task.colIncrementalExec.job = task.job
resultCh <- analyzePKIncremental(task.colIncrementalExec)
case idxIncrementalTask:
task.idxIncrementalExec.job = task.job
resultCh <- analyzeIndexIncremental(task.idxIncrementalExec)
}
}
}
func analyzeIndexPushdown(idxExec *AnalyzeIndexExec) analyzeResult {
ranges := ranger.FullRange()
	// For a single-column index, we do not load null rows from TiKV, so the built histogram does not include
	// null values, and its `NullCount` is set from the result of another distsql call that fetches the null rows.
	// For a multi-column index, we cannot define null for the rows, so we still use the full range, and rows
	// containing null fields exist in the built histograms. Note that the `NullCount` of histograms for a
	// multi-column index is therefore always 0.
if len(idxExec.idxInfo.Columns) == 1 {
ranges = ranger.FullNotNullRange()
}
hist, cms, fms, topN, err := idxExec.buildStats(ranges, true)
if err != nil {
return analyzeResult{Err: err, job: idxExec.job}
}
var statsVer = statistics.Version1
if idxExec.analyzePB.IdxReq.Version != nil {
statsVer = int(*idxExec.analyzePB.IdxReq.Version)
}
result := analyzeResult{
TableID: idxExec.tableID,
Hist: []*statistics.Histogram{hist},
Cms: []*statistics.CMSketch{cms},
TopNs: []*statistics.TopN{topN},
Fms: []*statistics.FMSketch{fms},
IsIndex: 1,
job: idxExec.job,
StatsVer: statsVer,
}
result.Count = hist.NullCount
if hist.Len() > 0 {
result.Count += hist.Buckets[hist.Len()-1].Count
}
if topN.TotalCount() > 0 {
result.Count += int64(topN.TotalCount())
}
return result
}
// AnalyzeIndexExec represents analyze index push down executor.
type AnalyzeIndexExec struct {
ctx sessionctx.Context
tableID core.AnalyzeTableID
idxInfo *model.IndexInfo
isCommonHandle bool
concurrency int
analyzePB *tipb.AnalyzeReq
result distsql.SelectResult
countNullRes distsql.SelectResult
opts map[ast.AnalyzeOptionType]uint64
job *statistics.AnalyzeJob
}
// fetchAnalyzeResult builds and dispatches the `kv.Request` from given ranges, and stores the `SelectResult`
// in corresponding fields based on the input `isNullRange` argument, which indicates if the range is the
// special null range for single-column index to get the null count.
func (e *AnalyzeIndexExec) fetchAnalyzeResult(ranges []*ranger.Range, isNullRange bool) error {
var builder distsql.RequestBuilder
var kvReqBuilder *distsql.RequestBuilder
if e.isCommonHandle && e.idxInfo.Primary {
kvReqBuilder = builder.SetHandleRangesForTables(e.ctx.GetSessionVars().StmtCtx, []int64{e.tableID.GetStatisticsID()}, true, ranges, nil)
} else {
kvReqBuilder = builder.SetIndexRangesForTables(e.ctx.GetSessionVars().StmtCtx, []int64{e.tableID.GetStatisticsID()}, e.idxInfo.ID, ranges)
}
kvReq, err := kvReqBuilder.
SetAnalyzeRequest(e.analyzePB).
SetStartTS(math.MaxUint64).
SetKeepOrder(true).
SetConcurrency(e.concurrency).
Build()
if err != nil {
return err
}
ctx := context.TODO()
result, err := distsql.Analyze(ctx, e.ctx.GetClient(), kvReq, e.ctx.GetSessionVars().KVVars, e.ctx.GetSessionVars().InRestrictedSQL, e.ctx.GetSessionVars().StmtCtx.MemTracker)
if err != nil {
return err
}
if isNullRange {
e.countNullRes = result
} else {
e.result = result
}
return nil
}
func (e *AnalyzeIndexExec) open(ranges []*ranger.Range, considerNull bool) error {
err := e.fetchAnalyzeResult(ranges, false)
if err != nil {
return err
}
if considerNull && len(e.idxInfo.Columns) == 1 {
ranges = ranger.NullRange()
err = e.fetchAnalyzeResult(ranges, true)
if err != nil {
return err
}
}
return nil
}
func updateIndexResult(
ctx *stmtctx.StatementContext,
resp *tipb.AnalyzeIndexResp,
job *statistics.AnalyzeJob,
hist *statistics.Histogram,
cms *statistics.CMSketch,
fms *statistics.FMSketch,
topn *statistics.TopN,
idxInfo *model.IndexInfo,
numBuckets int,
numTopN int,
statsVer int,
) (
*statistics.Histogram,
*statistics.CMSketch,
*statistics.FMSketch,
*statistics.TopN,
error,
) {
var err error
needCMS := cms != nil
respHist := statistics.HistogramFromProto(resp.Hist)
if job != nil {
job.Update(int64(respHist.TotalRowCount()))
}
hist, err = statistics.MergeHistograms(ctx, hist, respHist, numBuckets, statsVer)
if err != nil {
return nil, nil, nil, nil, err
}
if needCMS {
if resp.Cms == nil {
logutil.Logger(context.TODO()).Warn("nil CMS in response", zap.String("table", idxInfo.Table.O), zap.String("index", idxInfo.Name.O))
} else {
cm, tmpTopN := statistics.CMSketchAndTopNFromProto(resp.Cms)
if err := cms.MergeCMSketch(cm); err != nil {
return nil, nil, nil, nil, err
}
statistics.MergeTopNAndUpdateCMSketch(topn, tmpTopN, cms, uint32(numTopN))
}
}
if fms != nil && resp.Collector != nil && resp.Collector.FmSketch != nil {
fms.MergeFMSketch(statistics.FMSketchFromProto(resp.Collector.FmSketch))
}
return hist, cms, fms, topn, nil
}
func (e *AnalyzeIndexExec) buildStatsFromResult(result distsql.SelectResult, needCMS bool) (*statistics.Histogram, *statistics.CMSketch, *statistics.FMSketch, *statistics.TopN, error) {
failpoint.Inject("buildStatsFromResult", func(val failpoint.Value) {
if val.(bool) {
failpoint.Return(nil, nil, nil, nil, errors.New("mock buildStatsFromResult error"))
}
})
hist := &statistics.Histogram{}
var cms *statistics.CMSketch
var topn *statistics.TopN
if needCMS {
cms = statistics.NewCMSketch(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]))
topn = statistics.NewTopN(int(e.opts[ast.AnalyzeOptNumTopN]))
}
fms := statistics.NewFMSketch(maxSketchSize)
statsVer := statistics.Version1
if e.analyzePB.IdxReq.Version != nil {
statsVer = int(*e.analyzePB.IdxReq.Version)
}
for {
data, err := result.NextRaw(context.TODO())
if err != nil {
return nil, nil, nil, nil, err
}
if data == nil {
break
}
resp := &tipb.AnalyzeIndexResp{}
err = resp.Unmarshal(data)
if err != nil {
return nil, nil, nil, nil, err
}
hist, cms, fms, topn, err = updateIndexResult(e.ctx.GetSessionVars().StmtCtx, resp, e.job, hist, cms, fms, topn,
e.idxInfo, int(e.opts[ast.AnalyzeOptNumBuckets]), int(e.opts[ast.AnalyzeOptNumTopN]), statsVer)
if err != nil {
return nil, nil, nil, nil, err
}
}
if needCMS && topn.TotalCount() > 0 {
hist.RemoveVals(topn.TopN)
}
if needCMS && cms != nil {
cms.CalcDefaultValForAnalyze(uint64(hist.NDV))
}
return hist, cms, fms, topn, nil
}
func (e *AnalyzeIndexExec) buildStats(ranges []*ranger.Range, considerNull bool) (hist *statistics.Histogram, cms *statistics.CMSketch, fms *statistics.FMSketch, topN *statistics.TopN, err error) {
if err = e.open(ranges, considerNull); err != nil {
return nil, nil, nil, nil, err
}
defer func() {
err1 := closeAll(e.result, e.countNullRes)
if err == nil {
err = err1
}
}()
hist, cms, fms, topN, err = e.buildStatsFromResult(e.result, true)
if err != nil {
return nil, nil, nil, nil, err
}
if e.countNullRes != nil {
nullHist, _, _, _, err := e.buildStatsFromResult(e.countNullRes, false)
if err != nil {
return nil, nil, nil, nil, err
}
if l := nullHist.Len(); l > 0 {
hist.NullCount = nullHist.Buckets[l-1].Count
}
}
hist.ID = e.idxInfo.ID
return hist, cms, fms, topN, nil
}
func analyzeColumnsPushdown(colExec *AnalyzeColumnsExec) []analyzeResult {
var ranges []*ranger.Range
if hc := colExec.handleCols; hc != nil {
if hc.IsInt() {
ranges = ranger.FullIntRange(mysql.HasUnsignedFlag(hc.GetCol(0).RetType.Flag))
} else {
ranges = ranger.FullNotNullRange()
}
} else {
ranges = ranger.FullIntRange(false)
}
collExtStats := colExec.ctx.GetSessionVars().EnableExtendedStats
hists, cms, topNs, fms, extStats, err := colExec.buildStats(ranges, collExtStats)
if err != nil {
return []analyzeResult{{Err: err, job: colExec.job}}
}
if hasPkHist(colExec.handleCols) {
PKresult := analyzeResult{
TableID: colExec.tableID,
Hist: hists[:1],
Cms: cms[:1],
TopNs: topNs[:1],
Fms: fms[:1],
ExtStats: nil,
job: nil,
StatsVer: statistics.Version1,
}
PKresult.Count = int64(PKresult.Hist[0].TotalRowCount())
restResult := analyzeResult{
TableID: colExec.tableID,
Hist: hists[1:],
Cms: cms[1:],
TopNs: topNs[1:],
Fms: fms[1:],
ExtStats: extStats,
job: colExec.job,
StatsVer: colExec.analyzeVer,
}
restResult.Count = PKresult.Count
return []analyzeResult{PKresult, restResult}
}
var result []analyzeResult
if colExec.analyzePB.Tp == tipb.AnalyzeType_TypeMixed {
result = append(result, analyzeResult{
TableID: colExec.tableID,
Hist: []*statistics.Histogram{hists[0]},
Cms: []*statistics.CMSketch{cms[0]},
TopNs: []*statistics.TopN{topNs[0]},
Fms: []*statistics.FMSketch{nil},
IsIndex: 1,
job: colExec.job,
StatsVer: colExec.analyzeVer,
})
hists = hists[1:]
cms = cms[1:]
topNs = topNs[1:]
}
colResult := analyzeResult{
TableID: colExec.tableID,
Hist: hists,
Cms: cms,
TopNs: topNs,
Fms: fms,
ExtStats: extStats,
job: colExec.job,
StatsVer: colExec.analyzeVer,
}
colResult.Count = int64(colResult.Hist[0].TotalRowCount())
if colResult.StatsVer == statistics.Version2 {
colResult.Count += int64(topNs[0].TotalCount())
}
return append(result, colResult)
}
// AnalyzeColumnsExec represents Analyze columns push down executor.
type AnalyzeColumnsExec struct {
ctx sessionctx.Context
tableID core.AnalyzeTableID
colsInfo []*model.ColumnInfo
handleCols core.HandleCols
concurrency int
analyzePB *tipb.AnalyzeReq
commonHandle *model.IndexInfo
resultHandler *tableResultHandler
opts map[ast.AnalyzeOptionType]uint64
job *statistics.AnalyzeJob
analyzeVer int
}
func (e *AnalyzeColumnsExec) open(ranges []*ranger.Range) error {
e.resultHandler = &tableResultHandler{}
firstPartRanges, secondPartRanges := distsql.SplitRangesBySign(ranges, true, false, !hasPkHist(e.handleCols))
firstResult, err := e.buildResp(firstPartRanges)
if err != nil {
return err
}
if len(secondPartRanges) == 0 {
e.resultHandler.open(nil, firstResult)
return nil
}
var secondResult distsql.SelectResult
secondResult, err = e.buildResp(secondPartRanges)
if err != nil {
return err
}
e.resultHandler.open(firstResult, secondResult)
return nil
}
func (e *AnalyzeColumnsExec) buildResp(ranges []*ranger.Range) (distsql.SelectResult, error) {
var builder distsql.RequestBuilder
reqBuilder := builder.SetHandleRangesForTables(e.ctx.GetSessionVars().StmtCtx, []int64{e.tableID.GetStatisticsID()}, e.handleCols != nil && !e.handleCols.IsInt(), ranges, nil)
// Always set KeepOrder of the request to be true, in order to compute
// correct `correlation` of columns.
kvReq, err := reqBuilder.
SetAnalyzeRequest(e.analyzePB).
SetStartTS(math.MaxUint64).
SetKeepOrder(true).
SetConcurrency(e.concurrency).
Build()
if err != nil {
return nil, err
}
ctx := context.TODO()
result, err := distsql.Analyze(ctx, e.ctx.GetClient(), kvReq, e.ctx.GetSessionVars().KVVars, e.ctx.GetSessionVars().InRestrictedSQL, e.ctx.GetSessionVars().StmtCtx.MemTracker)
if err != nil {
return nil, err
}
return result, nil
}
func (e *AnalyzeColumnsExec) buildStats(ranges []*ranger.Range, needExtStats bool) (hists []*statistics.Histogram, cms []*statistics.CMSketch, topNs []*statistics.TopN, fms []*statistics.FMSketch, extStats *statistics.ExtendedStatsColl, err error) {
if err = e.open(ranges); err != nil {
return nil, nil, nil, nil, nil, err
}
defer func() {
if err1 := e.resultHandler.Close(); err1 != nil {
hists = nil
cms = nil
extStats = nil
err = err1
}
}()
var handleHist *statistics.Histogram
var handleCms *statistics.CMSketch
var handleFms *statistics.FMSketch
var handleTopn *statistics.TopN
statsVer := statistics.Version1
if e.analyzePB.Tp == tipb.AnalyzeType_TypeMixed {
handleHist = &statistics.Histogram{}
handleCms = statistics.NewCMSketch(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]))
handleTopn = statistics.NewTopN(int(e.opts[ast.AnalyzeOptNumTopN]))
handleFms = statistics.NewFMSketch(maxSketchSize)
if e.analyzePB.IdxReq.Version != nil {
statsVer = int(*e.analyzePB.IdxReq.Version)
}
}
pkHist := &statistics.Histogram{}
collectors := make([]*statistics.SampleCollector, len(e.colsInfo))
for i := range collectors {
collectors[i] = &statistics.SampleCollector{
IsMerger: true,
FMSketch: statistics.NewFMSketch(maxSketchSize),
MaxSampleSize: int64(e.opts[ast.AnalyzeOptNumSamples]),
CMSketch: statistics.NewCMSketch(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth])),
}
}
for {
data, err1 := e.resultHandler.nextRaw(context.TODO())
if err1 != nil {
return nil, nil, nil, nil, nil, err1
}
if data == nil {
break
}
sc := e.ctx.GetSessionVars().StmtCtx
var colResp *tipb.AnalyzeColumnsResp
if e.analyzePB.Tp == tipb.AnalyzeType_TypeMixed {
resp := &tipb.AnalyzeMixedResp{}
err = resp.Unmarshal(data)
if err != nil {
return nil, nil, nil, nil, nil, err
}
colResp = resp.ColumnsResp
handleHist, handleCms, handleFms, handleTopn, err = updateIndexResult(sc, resp.IndexResp, nil, handleHist,
handleCms, handleFms, handleTopn, e.commonHandle, int(e.opts[ast.AnalyzeOptNumBuckets]),
int(e.opts[ast.AnalyzeOptNumTopN]), statsVer)
if err != nil {
return nil, nil, nil, nil, nil, err
}
} else {
colResp = &tipb.AnalyzeColumnsResp{}
err = colResp.Unmarshal(data)
}
rowCount := int64(0)
if hasPkHist(e.handleCols) {
respHist := statistics.HistogramFromProto(colResp.PkHist)
rowCount = int64(respHist.TotalRowCount())
pkHist, err = statistics.MergeHistograms(sc, pkHist, respHist, int(e.opts[ast.AnalyzeOptNumBuckets]), statistics.Version1)
if err != nil {
return nil, nil, nil, nil, nil, err
}
}
for i, rc := range colResp.Collectors {
respSample := statistics.SampleCollectorFromProto(rc)
rowCount = respSample.Count + respSample.NullCount
collectors[i].MergeSampleCollector(sc, respSample)
}
e.job.Update(rowCount)
}
timeZone := e.ctx.GetSessionVars().Location()
if hasPkHist(e.handleCols) {
pkInfo := e.handleCols.GetCol(0)
pkHist.ID = pkInfo.ID
err = pkHist.DecodeTo(pkInfo.RetType, timeZone)
if err != nil {
return nil, nil, nil, nil, nil, err
}
hists = append(hists, pkHist)
cms = append(cms, nil)
topNs = append(topNs, nil)
fms = append(fms, nil)
}
for i, col := range e.colsInfo {
if e.analyzeVer < 2 {
// In analyze version 2, we don't collect TopN this way. We will collect TopN from samples in `BuildColumnHistAndTopN()` below.
err := collectors[i].ExtractTopN(uint32(e.opts[ast.AnalyzeOptNumTopN]), e.ctx.GetSessionVars().StmtCtx, &col.FieldType, timeZone)
if err != nil {
return nil, nil, nil, nil, nil, err
}
topNs = append(topNs, collectors[i].TopN)
}
for j, s := range collectors[i].Samples {
collectors[i].Samples[j].Ordinal = j
collectors[i].Samples[j].Value, err = tablecodec.DecodeColumnValue(s.Value.GetBytes(), &col.FieldType, timeZone)
if err != nil {
return nil, nil, nil, nil, nil, err
}
			// When collation is enabled, we store the Key representation of the sampling data. So we set it to kind `Bytes` here
			// to avoid converting it to its Key representation once more.
if collectors[i].Samples[j].Value.Kind() == types.KindString {
collectors[i].Samples[j].Value.SetBytes(collectors[i].Samples[j].Value.GetBytes())
}
}
var hg *statistics.Histogram
var err error
var topn *statistics.TopN
if e.analyzeVer < 2 {
hg, err = statistics.BuildColumn(e.ctx, int64(e.opts[ast.AnalyzeOptNumBuckets]), col.ID, collectors[i], &col.FieldType)
} else {
hg, topn, err = statistics.BuildColumnHistAndTopN(e.ctx, int(e.opts[ast.AnalyzeOptNumBuckets]), int(e.opts[ast.AnalyzeOptNumTopN]), col.ID, collectors[i], &col.FieldType)
topNs = append(topNs, topn)
}
if err != nil {
return nil, nil, nil, nil, nil, err
}
hists = append(hists, hg)
collectors[i].CMSketch.CalcDefaultValForAnalyze(uint64(hg.NDV))
cms = append(cms, collectors[i].CMSketch)
fms = append(fms, collectors[i].FMSketch)
}
if needExtStats {
statsHandle := domain.GetDomain(e.ctx).StatsHandle()
extStats, err = statsHandle.BuildExtendedStats(e.tableID.GetStatisticsID(), e.colsInfo, collectors)
if err != nil {
return nil, nil, nil, nil, nil, err
}
}
if handleHist != nil {
handleHist.ID = e.commonHandle.ID
if handleTopn != nil && handleTopn.TotalCount() > 0 {
handleHist.RemoveVals(handleTopn.TopN)
}
if handleCms != nil {
handleCms.CalcDefaultValForAnalyze(uint64(handleHist.NDV))
}
hists = append([]*statistics.Histogram{handleHist}, hists...)
cms = append([]*statistics.CMSketch{handleCms}, cms...)
fms = append([]*statistics.FMSketch{handleFms}, fms...)
topNs = append([]*statistics.TopN{handleTopn}, topNs...)
}
return hists, cms, topNs, fms, extStats, nil
}
func hasPkHist(handleCols core.HandleCols) bool {
return handleCols != nil && handleCols.IsInt()
}
func pkColsCount(handleCols core.HandleCols) int {
if handleCols == nil {
return 0
}
return handleCols.NumCols()
}
var (
fastAnalyzeHistogramSample = metrics.FastAnalyzeHistogram.WithLabelValues(metrics.LblGeneral, "sample")
fastAnalyzeHistogramAccessRegions = metrics.FastAnalyzeHistogram.WithLabelValues(metrics.LblGeneral, "access_regions")
fastAnalyzeHistogramScanKeys = metrics.FastAnalyzeHistogram.WithLabelValues(metrics.LblGeneral, "scan_keys")
)
func analyzeFastExec(exec *AnalyzeFastExec) []analyzeResult {
hists, cms, topNs, fms, err := exec.buildStats()
if err != nil {
return []analyzeResult{{Err: err, job: exec.job}}
}
var results []analyzeResult
pkColCount := pkColsCount(exec.handleCols)
if len(exec.idxsInfo) > 0 {
for i := pkColCount + len(exec.colsInfo); i < len(hists); i++ {
idxResult := analyzeResult{
TableID: exec.tableID,
Hist: []*statistics.Histogram{hists[i]},
Cms: []*statistics.CMSketch{cms[i]},
TopNs: []*statistics.TopN{topNs[i]},
Fms: []*statistics.FMSketch{nil},
IsIndex: 1,
Count: hists[i].NullCount,
job: exec.job,
StatsVer: statistics.Version1,
}
if hists[i].Len() > 0 {
idxResult.Count += hists[i].Buckets[hists[i].Len()-1].Count
}
if exec.rowCount != 0 {
idxResult.Count = exec.rowCount
}
results = append(results, idxResult)
}
}
hist := hists[0]
colResult := analyzeResult{
TableID: exec.tableID,
Hist: hists[:pkColCount+len(exec.colsInfo)],
Cms: cms[:pkColCount+len(exec.colsInfo)],
TopNs: topNs[:pkColCount+len(exec.colsInfo)],
Fms: fms[:pkColCount+len(exec.colsInfo)],
Count: hist.NullCount,
job: exec.job,
StatsVer: statistics.Version1,
}
if hist.Len() > 0 {
colResult.Count += hist.Buckets[hist.Len()-1].Count
}
if exec.rowCount != 0 {
colResult.Count = exec.rowCount
}
results = append(results, colResult)
return results
}
// AnalyzeFastExec represents Fast Analyze executor.
type AnalyzeFastExec struct {
ctx sessionctx.Context
tableID core.AnalyzeTableID
handleCols core.HandleCols
colsInfo []*model.ColumnInfo
idxsInfo []*model.IndexInfo
concurrency int
opts map[ast.AnalyzeOptionType]uint64
tblInfo *model.TableInfo
cache *tikv.RegionCache
wg *sync.WaitGroup
rowCount int64
sampCursor int32
sampTasks []*tikv.KeyLocation
scanTasks []*tikv.KeyLocation
collectors []*statistics.SampleCollector
randSeed int64
job *statistics.AnalyzeJob
estSampStep uint32
}
func (e *AnalyzeFastExec) calculateEstimateSampleStep() (err error) {
exec := e.ctx.(sqlexec.RestrictedSQLExecutor)
var stmt ast.StmtNode
stmt, err = exec.ParseWithParams(context.TODO(), "select flag from mysql.stats_histograms where table_id = %?", e.tableID.GetStatisticsID())
if err != nil {
return
}
var rows []chunk.Row
rows, _, err = exec.ExecRestrictedStmt(context.TODO(), stmt)
if err != nil {
return
}
var historyRowCount uint64
hasBeenAnalyzed := len(rows) != 0 && rows[0].GetInt64(0) == statistics.AnalyzeFlag
if hasBeenAnalyzed {
historyRowCount = uint64(domain.GetDomain(e.ctx).StatsHandle().GetPartitionStats(e.tblInfo, e.tableID.GetStatisticsID()).Count)
} else {
dbInfo, ok := domain.GetDomain(e.ctx).InfoSchema().SchemaByTable(e.tblInfo)
if !ok {
err = errors.Errorf("database not found for table '%s'", e.tblInfo.Name)
return
}
var rollbackFn func() error
rollbackFn, err = e.activateTxnForRowCount()
if err != nil {
return
}
defer func() {
if rollbackFn != nil {
err = rollbackFn()
}
}()
sql := new(strings.Builder)
sqlexec.MustFormatSQL(sql, "select count(*) from %n.%n", dbInfo.Name.L, e.tblInfo.Name.L)
if e.tblInfo.ID != e.tableID.GetStatisticsID() {
for _, definition := range e.tblInfo.Partition.Definitions {
if definition.ID == e.tableID.GetStatisticsID() {
sqlexec.MustFormatSQL(sql, " partition(%n)", definition.Name.L)
break
}
}
}
var rs sqlexec.RecordSet
rs, err = e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql.String())
if err != nil {
return
}
if rs == nil {
err = errors.Trace(errors.Errorf("empty record set"))
return
}
defer terror.Call(rs.Close)
chk := rs.NewChunk()
err = rs.Next(context.TODO(), chk)
if err != nil {
return
}
e.rowCount = chk.GetRow(0).GetInt64(0)
historyRowCount = uint64(e.rowCount)
}
totalSampSize := e.opts[ast.AnalyzeOptNumSamples]
e.estSampStep = uint32(historyRowCount / totalSampSize)
return
}
func (e *AnalyzeFastExec) activateTxnForRowCount() (rollbackFn func() error, err error) {
txn, err := e.ctx.Txn(true)
if err != nil {
if kv.ErrInvalidTxn.Equal(err) {
_, err := e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "begin")
if err != nil {
return nil, errors.Trace(err)
}
rollbackFn = func() error {
_, err := e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "rollback")
return err
}
} else {
return nil, errors.Trace(err)
}
}
txn.SetOption(tikvstore.Priority, tikvstore.PriorityLow)
txn.SetOption(tikvstore.IsolationLevel, tikvstore.RC)
txn.SetOption(tikvstore.NotFillCache, true)
return rollbackFn, nil
}
// buildSampTask builds the sample tasks.
func (e *AnalyzeFastExec) buildSampTask() (err error) {
bo := tikv.NewBackofferWithVars(context.Background(), 500, nil)
store, _ := e.ctx.GetStore().(tikv.Storage)
e.cache = store.GetRegionCache()
accessRegionsCounter := 0
pid := e.tableID.GetStatisticsID()
startKey, endKey := tablecodec.GetTableHandleKeyRange(pid)
targetKey := startKey
for {
// Search for the region which contains the targetKey.
loc, err := e.cache.LocateKey(bo, targetKey)
if err != nil {
return err
}
if bytes.Compare(endKey, loc.StartKey) < 0 {
break
}
accessRegionsCounter++
// Set the next search key.
targetKey = loc.EndKey
		// If the KV pairs in the region all belong to the table, add the region to the sample tasks.
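		// (Per this reading of the code: regions fully contained in the table's key
		// range become sample tasks and are later read with SampleStep iterators in
		// handleSampTasks, while boundary regions become scan tasks and are fully
		// scanned with reservoir sampling in handleScanIter.)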
if bytes.Compare(startKey, loc.StartKey) <= 0 && len(loc.EndKey) != 0 && bytes.Compare(loc.EndKey, endKey) <= 0 {
e.sampTasks = append(e.sampTasks, loc)
continue
}
e.scanTasks = append(e.scanTasks, loc)
if bytes.Compare(loc.StartKey, startKey) < 0 {
loc.StartKey = startKey
}
if bytes.Compare(endKey, loc.EndKey) < 0 || len(loc.EndKey) == 0 {
loc.EndKey = endKey
break
}
}
fastAnalyzeHistogramAccessRegions.Observe(float64(accessRegionsCounter))
return nil
}
func (e *AnalyzeFastExec) decodeValues(handle kv.Handle, sValue []byte, wantCols map[int64]*types.FieldType) (values map[int64]types.Datum, err error) {
loc := e.ctx.GetSessionVars().Location()
values, err = tablecodec.DecodeRowToDatumMap(sValue, wantCols, loc)
if err != nil || e.handleCols == nil {
return values, err
}
wantCols = make(map[int64]*types.FieldType, e.handleCols.NumCols())
handleColIDs := make([]int64, e.handleCols.NumCols())
for i := 0; i < e.handleCols.NumCols(); i++ {
c := e.handleCols.GetCol(i)
handleColIDs[i] = c.ID
wantCols[c.ID] = c.RetType
}
return tablecodec.DecodeHandleToDatumMap(handle, handleColIDs, wantCols, loc, values)
}
func (e *AnalyzeFastExec) getValueByInfo(colInfo *model.ColumnInfo, values map[int64]types.Datum) (types.Datum, error) {
val, ok := values[colInfo.ID]
if !ok {
return table.GetColOriginDefaultValue(e.ctx, colInfo)
}
return val, nil
}
func (e *AnalyzeFastExec) updateCollectorSamples(sValue []byte, sKey kv.Key, samplePos int32) (err error) {
var handle kv.Handle
handle, err = tablecodec.DecodeRowKey(sKey)
if err != nil {
return err
}
// Decode cols for analyze table
wantCols := make(map[int64]*types.FieldType, len(e.colsInfo))
for _, col := range e.colsInfo {
wantCols[col.ID] = &col.FieldType
}
	// Pre-build the index->columns relationship and fill wantCols with index columns that are not already present (analyze index).
index2Cols := make([][]*model.ColumnInfo, len(e.idxsInfo))
for i, idxInfo := range e.idxsInfo {
for _, idxCol := range idxInfo.Columns {
colInfo := e.tblInfo.Columns[idxCol.Offset]
index2Cols[i] = append(index2Cols[i], colInfo)
wantCols[colInfo.ID] = &colInfo.FieldType
}
}
	// Decode the column values in order.
var values map[int64]types.Datum
values, err = e.decodeValues(handle, sValue, wantCols)
if err != nil {
return err
}
// Update the primary key collector.
pkColsCount := pkColsCount(e.handleCols)
for i := 0; i < pkColsCount; i++ {
col := e.handleCols.GetCol(i)
v, ok := values[col.ID]
if !ok {
return errors.Trace(errors.Errorf("Primary key column not found"))
}
if e.collectors[i].Samples[samplePos] == nil {
e.collectors[i].Samples[samplePos] = &statistics.SampleItem{}
}
e.collectors[i].Samples[samplePos].Handle = handle
e.collectors[i].Samples[samplePos].Value = v
}
// Update the columns' collectors.
for j, colInfo := range e.colsInfo {
v, err := e.getValueByInfo(colInfo, values)
if err != nil {
return err
}
if e.collectors[pkColsCount+j].Samples[samplePos] == nil {
e.collectors[pkColsCount+j].Samples[samplePos] = &statistics.SampleItem{}
}
e.collectors[pkColsCount+j].Samples[samplePos].Handle = handle
e.collectors[pkColsCount+j].Samples[samplePos].Value = v
}
// Update the indexes' collectors.
for j, idxInfo := range e.idxsInfo {
idxVals := make([]types.Datum, 0, len(idxInfo.Columns))
cols := index2Cols[j]
for _, colInfo := range cols {
v, err := e.getValueByInfo(colInfo, values)
if err != nil {
return err
}
idxVals = append(idxVals, v)
}
var bytes []byte
bytes, err = codec.EncodeKey(e.ctx.GetSessionVars().StmtCtx, bytes, idxVals...)
if err != nil {
return err
}
if e.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos] == nil {
e.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos] = &statistics.SampleItem{}
}
e.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos].Handle = handle
e.collectors[len(e.colsInfo)+pkColsCount+j].Samples[samplePos].Value = types.NewBytesDatum(bytes)
}
return nil
}
func (e *AnalyzeFastExec) handleBatchSeekResponse(kvMap map[string][]byte) (err error) {
length := int32(len(kvMap))
newCursor := atomic.AddInt32(&e.sampCursor, length)
samplePos := newCursor - length
for sKey, sValue := range kvMap {
exceedNeededSampleCounts := uint64(samplePos) >= e.opts[ast.AnalyzeOptNumSamples]
if exceedNeededSampleCounts {
atomic.StoreInt32(&e.sampCursor, int32(e.opts[ast.AnalyzeOptNumSamples]))
break
}
err = e.updateCollectorSamples(sValue, kv.Key(sKey), samplePos)
if err != nil {
return err
}
samplePos++
}
return nil
}
func (e *AnalyzeFastExec) handleScanIter(iter kv.Iterator) (scanKeysSize int, err error) {
rander := rand.New(rand.NewSource(e.randSeed))
sampleSize := int64(e.opts[ast.AnalyzeOptNumSamples])
for ; iter.Valid() && err == nil; err = iter.Next() {
// reservoir sampling
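		// Reading of the scheme (not authoritative): while the reservoir is not full
		// (sampCursor < sampleSize), every scanned key is appended; once it is full,
		// the current key replaces a uniformly random slot with probability roughly
		// sampleSize / (sampCursor + scanKeysSize), the usual reservoir-sampling rate.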
scanKeysSize++
randNum := rander.Int63n(int64(e.sampCursor) + int64(scanKeysSize))
if randNum > sampleSize && e.sampCursor == int32(sampleSize) {
continue
}
p := rander.Int31n(int32(sampleSize))
if e.sampCursor < int32(sampleSize) {
p = e.sampCursor
e.sampCursor++
}
err = e.updateCollectorSamples(iter.Value(), iter.Key(), p)
if err != nil {
return
}
}
return
}
func (e *AnalyzeFastExec) handleScanTasks(bo *tikv.Backoffer) (keysSize int, err error) {
snapshot := e.ctx.GetStore().GetSnapshot(kv.MaxVersion)
if e.ctx.GetSessionVars().GetReplicaRead().IsFollowerRead() {
snapshot.SetOption(tikvstore.ReplicaRead, tikvstore.ReplicaReadFollower)
}
for _, t := range e.scanTasks {
iter, err := snapshot.Iter(kv.Key(t.StartKey), kv.Key(t.EndKey))
if err != nil {
return keysSize, err
}
size, err := e.handleScanIter(iter)
keysSize += size
if err != nil {
return keysSize, err
}
}
return keysSize, nil
}
func (e *AnalyzeFastExec) handleSampTasks(workID int, step uint32, err *error) {
defer e.wg.Done()
snapshot := e.ctx.GetStore().GetSnapshot(kv.MaxVersion)
snapshot.SetOption(tikvstore.NotFillCache, true)
snapshot.SetOption(tikvstore.IsolationLevel, tikvstore.RC)
snapshot.SetOption(tikvstore.Priority, tikvstore.PriorityLow)
if e.ctx.GetSessionVars().GetReplicaRead().IsFollowerRead() {
snapshot.SetOption(tikvstore.ReplicaRead, tikvstore.ReplicaReadFollower)
}
rander := rand.New(rand.NewSource(e.randSeed))
for i := workID; i < len(e.sampTasks); i += e.concurrency {
task := e.sampTasks[i]
// randomize the estimate step in range [step - 2 * sqrt(step), step]
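		// For example (illustrative numbers), with step = 100 the randomized step is
		// drawn uniformly from [80, 100), since 2*sqrt(100) = 20; the step > 4 guard
		// keeps the lower bound positive.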
if step > 4 { // 2*sqrt(x) < x
lower, upper := step-uint32(2*math.Sqrt(float64(step))), step
step = uint32(rander.Intn(int(upper-lower))) + lower
}
snapshot.SetOption(tikvstore.SampleStep, step)
kvMap := make(map[string][]byte)
var iter kv.Iterator
iter, *err = snapshot.Iter(kv.Key(task.StartKey), kv.Key(task.EndKey))
if *err != nil {
return
}
for iter.Valid() {
kvMap[string(iter.Key())] = iter.Value()
*err = iter.Next()
if *err != nil {
return
}
}
fastAnalyzeHistogramSample.Observe(float64(len(kvMap)))
*err = e.handleBatchSeekResponse(kvMap)
if *err != nil {
return
}
}
}
func (e *AnalyzeFastExec) buildColumnStats(ID int64, collector *statistics.SampleCollector, tp *types.FieldType, rowCount int64) (*statistics.Histogram, *statistics.CMSketch, *statistics.TopN, *statistics.FMSketch, error) {
sc := e.ctx.GetSessionVars().StmtCtx
data := make([][]byte, 0, len(collector.Samples))
fmSketch := statistics.NewFMSketch(maxSketchSize)
for i, sample := range collector.Samples {
sample.Ordinal = i
if sample.Value.IsNull() {
collector.NullCount++
continue
}
err := fmSketch.InsertValue(sc, sample.Value)
if err != nil {
return nil, nil, nil, nil, err
}
bytes, err := tablecodec.EncodeValue(sc, nil, sample.Value)
if err != nil {
return nil, nil, nil, nil, err
}
data = append(data, bytes)
}
// Build CMSketch.
cmSketch, topN, ndv, scaleRatio := statistics.NewCMSketchAndTopN(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]), data, uint32(e.opts[ast.AnalyzeOptNumTopN]), uint64(rowCount))
// Build Histogram.
hist, err := statistics.BuildColumnHist(e.ctx, int64(e.opts[ast.AnalyzeOptNumBuckets]), ID, collector, tp, rowCount, int64(ndv), collector.NullCount*int64(scaleRatio))
return hist, cmSketch, topN, fmSketch, err
}
func (e *AnalyzeFastExec) buildIndexStats(idxInfo *model.IndexInfo, collector *statistics.SampleCollector, rowCount int64) (*statistics.Histogram, *statistics.CMSketch, *statistics.TopN, error) {
data := make([][][]byte, len(idxInfo.Columns))
for _, sample := range collector.Samples {
var preLen int
remained := sample.Value.GetBytes()
		// We need to insert each prefix value into the CM Sketch.
for i := 0; i < len(idxInfo.Columns); i++ {
var err error
var value []byte
value, remained, err = codec.CutOne(remained)
if err != nil {
return nil, nil, nil, err
}
preLen += len(value)
data[i] = append(data[i], sample.Value.GetBytes()[:preLen])
}
}
numTop := uint32(e.opts[ast.AnalyzeOptNumTopN])
cmSketch, topN, ndv, scaleRatio := statistics.NewCMSketchAndTopN(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]), data[0], numTop, uint64(rowCount))
// Build CM Sketch for each prefix and merge them into one.
for i := 1; i < len(idxInfo.Columns); i++ {
var curCMSketch *statistics.CMSketch
var curTopN *statistics.TopN
		// `ndv` should be the NDV of the full index, so we simply overwrite it here.
curCMSketch, curTopN, ndv, scaleRatio = statistics.NewCMSketchAndTopN(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]), data[i], numTop, uint64(rowCount))
err := cmSketch.MergeCMSketch(curCMSketch)
if err != nil {
return nil, nil, nil, err
}
statistics.MergeTopNAndUpdateCMSketch(topN, curTopN, cmSketch, numTop)
}
// Build Histogram.
hist, err := statistics.BuildColumnHist(e.ctx, int64(e.opts[ast.AnalyzeOptNumBuckets]), idxInfo.ID, collector, types.NewFieldType(mysql.TypeBlob), rowCount, int64(ndv), collector.NullCount*int64(scaleRatio))
return hist, cmSketch, topN, err
}
func (e *AnalyzeFastExec) runTasks() ([]*statistics.Histogram, []*statistics.CMSketch, []*statistics.TopN, []*statistics.FMSketch, error) {
errs := make([]error, e.concurrency)
pkColCount := pkColsCount(e.handleCols)
	// Collect column samples, primary-key samples and index samples.
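	// Their layout, as consumed by the loop below: indexes [0, pkColCount) hold the
	// handle (primary-key) columns, [pkColCount, pkColCount+len(colsInfo)) hold the
	// analyzed columns, and the tail holds one collector per index.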
length := len(e.colsInfo) + pkColCount + len(e.idxsInfo)
e.collectors = make([]*statistics.SampleCollector, length)
for i := range e.collectors {
e.collectors[i] = &statistics.SampleCollector{
MaxSampleSize: int64(e.opts[ast.AnalyzeOptNumSamples]),
Samples: make([]*statistics.SampleItem, e.opts[ast.AnalyzeOptNumSamples]),
}
}
e.wg.Add(e.concurrency)
bo := tikv.NewBackofferWithVars(context.Background(), 500, nil)
for i := 0; i < e.concurrency; i++ {
go e.handleSampTasks(i, e.estSampStep, &errs[i])
}
e.wg.Wait()
for _, err := range errs {
if err != nil {
return nil, nil, nil, nil, err
}
}
scanKeysSize, err := e.handleScanTasks(bo)
fastAnalyzeHistogramScanKeys.Observe(float64(scanKeysSize))
if err != nil {
return nil, nil, nil, nil, err
}
stats := domain.GetDomain(e.ctx).StatsHandle()
var rowCount int64 = 0
if stats.Lease() > 0 {
if t := stats.GetPartitionStats(e.tblInfo, e.tableID.GetStatisticsID()); !t.Pseudo {
rowCount = t.Count
}
}
hists, cms, topNs, fms := make([]*statistics.Histogram, length), make([]*statistics.CMSketch, length), make([]*statistics.TopN, length), make([]*statistics.FMSketch, length)
for i := 0; i < length; i++ {
// Build collector properties.
collector := e.collectors[i]
collector.Samples = collector.Samples[:e.sampCursor]
sort.Slice(collector.Samples, func(i, j int) bool { return collector.Samples[i].Handle.Compare(collector.Samples[j].Handle) < 0 })
collector.CalcTotalSize()
		// Adjust the row count in case the count from `tblStats` is inaccurate and too small.
rowCount = mathutil.MaxInt64(rowCount, int64(len(collector.Samples)))
// Scale the total column size.
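		// Assuming the samples are representative, this extrapolates the sampled
		// bytes by the ratio rowCount / len(samples) (integer division).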
if len(collector.Samples) > 0 {
collector.TotalSize *= rowCount / int64(len(collector.Samples))
}
if i < pkColCount {
pkCol := e.handleCols.GetCol(i)
hists[i], cms[i], topNs[i], fms[i], err = e.buildColumnStats(pkCol.ID, e.collectors[i], pkCol.RetType, rowCount)
} else if i < pkColCount+len(e.colsInfo) {
hists[i], cms[i], topNs[i], fms[i], err = e.buildColumnStats(e.colsInfo[i-pkColCount].ID, e.collectors[i], &e.colsInfo[i-pkColCount].FieldType, rowCount)
} else {
hists[i], cms[i], topNs[i], err = e.buildIndexStats(e.idxsInfo[i-pkColCount-len(e.colsInfo)], e.collectors[i], rowCount)
}
if err != nil {
return nil, nil, nil, nil, err
}
}
return hists, cms, topNs, fms, nil
}
func (e *AnalyzeFastExec) buildStats() (hists []*statistics.Histogram, cms []*statistics.CMSketch, topNs []*statistics.TopN, fms []*statistics.FMSketch, err error) {
	// Set the random seed; overriding RandSeed is only for unit tests.
	// To ensure that random sequences differ between runs in non-test environments, the seed falls back to time.Now().UnixNano() when RandSeed keeps its default value of 1.
if RandSeed == 1 {
atomic.StoreInt64(&e.randSeed, time.Now().UnixNano())
} else {
atomic.StoreInt64(&e.randSeed, RandSeed)
}
err = e.buildSampTask()
if err != nil {
return nil, nil, nil, nil, err
}
return e.runTasks()
}
// AnalyzeTestFastExec is used for fast sampling in unit tests.
type AnalyzeTestFastExec struct {
AnalyzeFastExec
Ctx sessionctx.Context
TableID core.AnalyzeTableID
HandleCols core.HandleCols
ColsInfo []*model.ColumnInfo
IdxsInfo []*model.IndexInfo
Concurrency int
Collectors []*statistics.SampleCollector
TblInfo *model.TableInfo
Opts map[ast.AnalyzeOptionType]uint64
}
// TestFastSample only tests the fast sampling path; it is used in unit tests.
func (e *AnalyzeTestFastExec) TestFastSample() error {
e.ctx = e.Ctx
e.handleCols = e.HandleCols
e.colsInfo = e.ColsInfo
e.idxsInfo = e.IdxsInfo
e.concurrency = e.Concurrency
e.tableID = e.TableID
e.wg = &sync.WaitGroup{}
e.job = &statistics.AnalyzeJob{}
e.tblInfo = e.TblInfo
e.opts = e.Opts
_, _, _, _, err := e.buildStats()
e.Collectors = e.collectors
return err
}
type analyzeIndexIncrementalExec struct {
AnalyzeIndexExec
oldHist *statistics.Histogram
oldCMS *statistics.CMSketch
oldTopN *statistics.TopN
}
func analyzeIndexIncremental(idxExec *analyzeIndexIncrementalExec) analyzeResult {
var statsVer = statistics.Version1
if idxExec.analyzePB.IdxReq.Version != nil {
statsVer = int(*idxExec.analyzePB.IdxReq.Version)
}
pruneMode := variable.PartitionPruneMode(idxExec.ctx.GetSessionVars().PartitionPruneMode.Load())
if idxExec.tableID.IsPartitionTable() && pruneMode == variable.Dynamic {
err := errors.Errorf("[stats]: global statistics for partitioned tables unavailable in ANALYZE INCREMENTAL")
return analyzeResult{Err: err, job: idxExec.job}
}
startPos := idxExec.oldHist.GetUpper(idxExec.oldHist.Len() - 1)
values, _, err := codec.DecodeRange(startPos.GetBytes(), len(idxExec.idxInfo.Columns), nil, nil)
if err != nil {
return analyzeResult{Err: err, job: idxExec.job}
}
ran := ranger.Range{LowVal: values, HighVal: []types.Datum{types.MaxValueDatum()}}
hist, cms, fms, topN, err := idxExec.buildStats([]*ranger.Range{&ran}, false)
if err != nil {
return analyzeResult{Err: err, job: idxExec.job}
}
hist, err = statistics.MergeHistograms(idxExec.ctx.GetSessionVars().StmtCtx, idxExec.oldHist, hist, int(idxExec.opts[ast.AnalyzeOptNumBuckets]), statsVer)
if err != nil {
return analyzeResult{Err: err, job: idxExec.job}
}
if idxExec.oldCMS != nil && cms != nil {
err = cms.MergeCMSketch4IncrementalAnalyze(idxExec.oldCMS, uint32(idxExec.opts[ast.AnalyzeOptNumTopN]))
if err != nil {
return analyzeResult{Err: err, job: idxExec.job}
}
cms.CalcDefaultValForAnalyze(uint64(hist.NDV))
}
if statsVer == statistics.Version2 {
poped := statistics.MergeTopNAndUpdateCMSketch(topN, idxExec.oldTopN, cms, uint32(idxExec.opts[ast.AnalyzeOptNumTopN]))
hist.AddIdxVals(poped)
}
result := analyzeResult{
TableID: idxExec.tableID,
Hist: []*statistics.Histogram{hist},
Cms: []*statistics.CMSketch{cms},
TopNs: []*statistics.TopN{topN},
Fms: []*statistics.FMSketch{fms},
IsIndex: 1,
job: idxExec.job,
StatsVer: statsVer,
}
result.Count = hist.NullCount
if hist.Len() > 0 {
result.Count += hist.Buckets[hist.Len()-1].Count
}
return result
}
type analyzePKIncrementalExec struct {
AnalyzeColumnsExec
oldHist *statistics.Histogram
}
func analyzePKIncremental(colExec *analyzePKIncrementalExec) analyzeResult {
var maxVal types.Datum
pkInfo := colExec.handleCols.GetCol(0)
if mysql.HasUnsignedFlag(pkInfo.RetType.Flag) {
maxVal = types.NewUintDatum(math.MaxUint64)
} else {
maxVal = types.NewIntDatum(math.MaxInt64)
}
startPos := *colExec.oldHist.GetUpper(colExec.oldHist.Len() - 1)
ran := ranger.Range{LowVal: []types.Datum{startPos}, LowExclude: true, HighVal: []types.Datum{maxVal}}
hists, _, _, _, _, err := colExec.buildStats([]*ranger.Range{&ran}, false)
if err != nil {
return analyzeResult{Err: err, job: colExec.job}
}
hist := hists[0]
hist, err = statistics.MergeHistograms(colExec.ctx.GetSessionVars().StmtCtx, colExec.oldHist, hist, int(colExec.opts[ast.AnalyzeOptNumBuckets]), statistics.Version1)
if err != nil {
return analyzeResult{Err: err, job: colExec.job}
}
result := analyzeResult{
TableID: colExec.tableID,
Hist: []*statistics.Histogram{hist},
Cms: []*statistics.CMSketch{nil},
TopNs: []*statistics.TopN{nil},
Fms: []*statistics.FMSketch{nil},
job: colExec.job,
StatsVer: statistics.Version1,
}
if hist.Len() > 0 {
result.Count += hist.Buckets[hist.Len()-1].Count
}
return result
}
// analyzeResult is used to represent the analyze result.
type analyzeResult struct {
TableID core.AnalyzeTableID
Hist []*statistics.Histogram
Cms []*statistics.CMSketch
TopNs []*statistics.TopN
Fms []*statistics.FMSketch
ExtStats *statistics.ExtendedStatsColl
Count int64
IsIndex int
Err error
job *statistics.AnalyzeJob
StatsVer int
}
| executor/analyze.go | 1 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.998306155204773,
0.044590968638658524,
0.0001588067680131644,
0.000174322776729241,
0.19842365384101868
] |
{
"id": 5,
"code_window": [
"\t\t}\n",
"\t\terr := fmSketch.InsertValue(sc, sample.Value)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, nil, nil, nil, err\n",
"\t\t}\n",
"\t\tbytes, err := tablecodec.EncodeValue(sc, nil, sample.Value)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, nil, nil, nil, err\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tvalBytes, err := tablecodec.EncodeValue(sc, nil, sample.Value)\n"
],
"file_path": "executor/analyze.go",
"type": "replace",
"edit_start_line_idx": 1238
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import "testing"
func BenchmarkRound(b *testing.B) {
b.StopTimer()
var roundTo MyDecimal
tests := []struct {
input string
scale int
inputDec MyDecimal
}{
{input: "123456789.987654321", scale: 1},
{input: "15.1", scale: 0},
{input: "15.5", scale: 0},
{input: "15.9", scale: 0},
{input: "-15.1", scale: 0},
{input: "-15.5", scale: 0},
{input: "-15.9", scale: 0},
{input: "15.1", scale: 1},
{input: "-15.1", scale: 1},
{input: "15.17", scale: 1},
{input: "15.4", scale: -1},
{input: "-15.4", scale: -1},
{input: "5.4", scale: -1},
{input: ".999", scale: 0},
{input: "999999999", scale: -9},
}
for i := 0; i < len(tests); i++ {
err := tests[i].inputDec.FromString([]byte(tests[i].input))
if err != nil {
b.Fatal(err)
}
}
b.StartTimer()
for n := 0; n < b.N; n++ {
for i := 0; i < len(tests); i++ {
err := tests[i].inputDec.Round(&roundTo, tests[i].scale, ModeHalfEven)
if err != nil {
b.Fatal(err)
}
}
for i := 0; i < len(tests); i++ {
err := tests[i].inputDec.Round(&roundTo, tests[i].scale, ModeTruncate)
if err != nil {
b.Fatal(err)
}
}
for i := 0; i < len(tests); i++ {
err := tests[i].inputDec.Round(&roundTo, tests[i].scale, modeCeiling)
if err != nil {
b.Fatal(err)
}
}
}
}
| types/mydecimal_benchmark_test.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.00018081192683894187,
0.00017499836394563317,
0.00016567330749239773,
0.0001761029998306185,
0.000004781804818776436
] |
{
"id": 5,
"code_window": [
"\t\t}\n",
"\t\terr := fmSketch.InsertValue(sc, sample.Value)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, nil, nil, nil, err\n",
"\t\t}\n",
"\t\tbytes, err := tablecodec.EncodeValue(sc, nil, sample.Value)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, nil, nil, nil, err\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tvalBytes, err := tablecodec.EncodeValue(sc, nil, sample.Value)\n"
],
"file_path": "executor/analyze.go",
"type": "replace",
"edit_start_line_idx": 1238
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright 2019-present PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"container/list"
"sync"
"time"
"github.com/pingcap/log"
"go.uber.org/zap"
)
// Detector detects deadlock.
type Detector struct {
waitForMap map[uint64]*txnList
lock sync.Mutex
entryTTL time.Duration
totalSize uint64
lastActiveExpire time.Time
urgentSize uint64
expireInterval time.Duration
}
type txnList struct {
//txns []txnKeyHashPair
txns *list.List
}
type txnKeyHashPair struct {
txn uint64
keyHash uint64
registerTime time.Time
}
func (p *txnKeyHashPair) isExpired(ttl time.Duration, nowTime time.Time) bool {
if p.registerTime.Add(ttl).Before(nowTime) {
return true
}
return false
}
// NewDetector creates a new Detector.
func NewDetector(ttl time.Duration, urgentSize uint64, expireInterval time.Duration) *Detector {
return &Detector{
waitForMap: map[uint64]*txnList{},
entryTTL: ttl,
lastActiveExpire: time.Now(),
urgentSize: urgentSize,
expireInterval: expireInterval,
}
}
// Detect detects deadlock for the sourceTxn on a locked key.
func (d *Detector) Detect(sourceTxn, waitForTxn, keyHash uint64) *ErrDeadlock {
d.lock.Lock()
nowTime := time.Now()
d.activeExpire(nowTime)
err := d.doDetect(nowTime, sourceTxn, waitForTxn)
if err == nil {
d.register(sourceTxn, waitForTxn, keyHash)
}
d.lock.Unlock()
return err
}
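// A minimal usage sketch (illustrative values, based on the signatures in this
// file, not an authoritative recipe):
//
//	d := NewDetector(time.Second, 1024, time.Minute)
//	if dl := d.Detect(txnA, txnB, keyHash); dl != nil {
//		// Adding the edge txnA -> txnB would close a cycle in the wait-for graph.
//	}
//	d.CleanUpWaitFor(txnA, txnB, keyHash)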
func (d *Detector) doDetect(nowTime time.Time, sourceTxn, waitForTxn uint64) *ErrDeadlock {
val := d.waitForMap[waitForTxn]
if val == nil {
return nil
}
var nextVal *list.Element
for cur := val.txns.Front(); cur != nil; cur = nextVal {
nextVal = cur.Next()
keyHashPair := cur.Value.(*txnKeyHashPair)
// check if this edge is expired
if keyHashPair.isExpired(d.entryTTL, nowTime) {
val.txns.Remove(cur)
d.totalSize--
continue
}
if keyHashPair.txn == sourceTxn {
return &ErrDeadlock{DeadlockKeyHash: keyHashPair.keyHash}
}
if err := d.doDetect(nowTime, sourceTxn, keyHashPair.txn); err != nil {
return err
}
}
if val.txns.Len() == 0 {
delete(d.waitForMap, waitForTxn)
}
return nil
}
func (d *Detector) register(sourceTxn, waitForTxn, keyHash uint64) {
val := d.waitForMap[sourceTxn]
pair := txnKeyHashPair{txn: waitForTxn, keyHash: keyHash, registerTime: time.Now()}
if val == nil {
newList := &txnList{txns: list.New()}
newList.txns.PushBack(&pair)
d.waitForMap[sourceTxn] = newList
d.totalSize++
return
}
for cur := val.txns.Front(); cur != nil; cur = cur.Next() {
valuePair := cur.Value.(*txnKeyHashPair)
if valuePair.txn == waitForTxn && valuePair.keyHash == keyHash {
return
}
}
val.txns.PushBack(&pair)
d.totalSize++
}
// CleanUp removes the wait for entry for the transaction.
func (d *Detector) CleanUp(txn uint64) {
d.lock.Lock()
if l, ok := d.waitForMap[txn]; ok {
d.totalSize -= uint64(l.txns.Len())
}
delete(d.waitForMap, txn)
d.lock.Unlock()
}
// CleanUpWaitFor removes a key in the wait for entry for the transaction.
func (d *Detector) CleanUpWaitFor(txn, waitForTxn, keyHash uint64) {
d.lock.Lock()
l := d.waitForMap[txn]
if l != nil {
var nextVal *list.Element
for cur := l.txns.Front(); cur != nil; cur = nextVal {
nextVal = cur.Next()
valuePair := cur.Value.(*txnKeyHashPair)
if valuePair.txn == waitForTxn && valuePair.keyHash == keyHash {
l.txns.Remove(cur)
d.totalSize--
break
}
}
if l.txns.Len() == 0 {
delete(d.waitForMap, txn)
}
}
d.lock.Unlock()
}
// activeExpire removes expired entries, should be called under d.lock protection
func (d *Detector) activeExpire(nowTime time.Time) {
if nowTime.Sub(d.lastActiveExpire) > d.expireInterval &&
d.totalSize >= d.urgentSize {
log.Info("detector will do activeExpire", zap.Uint64("size", d.totalSize))
for txn, l := range d.waitForMap {
var nextVal *list.Element
for cur := l.txns.Front(); cur != nil; cur = nextVal {
nextVal = cur.Next()
valuePair := cur.Value.(*txnKeyHashPair)
if valuePair.isExpired(d.entryTTL, nowTime) {
l.txns.Remove(cur)
d.totalSize--
}
}
if l.txns.Len() == 0 {
delete(d.waitForMap, txn)
}
}
d.lastActiveExpire = nowTime
log.Info("detector activeExpire finished", zap.Uint64("size", d.totalSize))
}
}
| store/mockstore/unistore/tikv/detector.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.00017987066530622542,
0.00017235756968148053,
0.00016439819592051208,
0.0001719131541904062,
0.000004246609933034051
] |
{
"id": 5,
"code_window": [
"\t\t}\n",
"\t\terr := fmSketch.InsertValue(sc, sample.Value)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, nil, nil, nil, err\n",
"\t\t}\n",
"\t\tbytes, err := tablecodec.EncodeValue(sc, nil, sample.Value)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, nil, nil, nil, err\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tvalBytes, err := tablecodec.EncodeValue(sc, nil, sample.Value)\n"
],
"file_path": "executor/analyze.go",
"type": "replace",
"edit_start_line_idx": 1238
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ranger_test
import (
"math"
. "github.com/pingcap/check"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/ranger"
)
var _ = Suite(&testRangeSuite{})
type testRangeSuite struct {
}
func (s *testRangeSuite) TestRange(c *C) {
simpleTests := []struct {
ran ranger.Range
str string
}{
{
ran: ranger.Range{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(1)},
},
str: "[1,1]",
},
{
ran: ranger.Range{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(1)},
HighExclude: true,
},
str: "[1,1)",
},
{
ran: ranger.Range{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(2)},
LowExclude: true,
HighExclude: true,
},
str: "(1,2)",
},
{
ran: ranger.Range{
LowVal: []types.Datum{types.NewFloat64Datum(1.1)},
HighVal: []types.Datum{types.NewFloat64Datum(1.9)},
HighExclude: true,
},
str: "[1.1,1.9)",
},
{
ran: ranger.Range{
LowVal: []types.Datum{types.MinNotNullDatum()},
HighVal: []types.Datum{types.NewIntDatum(1)},
HighExclude: true,
},
str: "[-inf,1)",
},
}
for _, t := range simpleTests {
c.Assert(t.ran.String(), Equals, t.str)
}
isPointTests := []struct {
ran ranger.Range
isPoint bool
}{
{
ran: ranger.Range{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(1)},
},
isPoint: true,
},
{
ran: ranger.Range{
LowVal: []types.Datum{types.NewStringDatum("abc")},
HighVal: []types.Datum{types.NewStringDatum("abc")},
},
isPoint: true,
},
{
ran: ranger.Range{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(1), types.NewIntDatum(1)},
},
isPoint: false,
},
{
ran: ranger.Range{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(1)},
LowExclude: true,
},
isPoint: false,
},
{
ran: ranger.Range{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(1)},
HighExclude: true,
},
isPoint: false,
},
{
ran: ranger.Range{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(2)},
},
isPoint: false,
},
}
sc := new(stmtctx.StatementContext)
for _, t := range isPointTests {
c.Assert(t.ran.IsPoint(sc), Equals, t.isPoint)
}
}
func (s *testRangeSuite) TestIsFullRange(c *C) {
nullDatum := types.MinNotNullDatum()
nullDatum.SetNull()
isFullRangeTests := []struct {
ran ranger.Range
isFullRange bool
}{
{
ran: ranger.Range{
LowVal: []types.Datum{types.NewIntDatum(math.MinInt64)},
HighVal: []types.Datum{types.NewIntDatum(math.MaxInt64)},
},
isFullRange: true,
},
{
ran: ranger.Range{
LowVal: []types.Datum{types.NewIntDatum(math.MaxInt64)},
HighVal: []types.Datum{types.NewIntDatum(math.MinInt64)},
},
isFullRange: false,
},
{
ran: ranger.Range{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewUintDatum(math.MaxUint64)},
},
isFullRange: false,
},
{
ran: ranger.Range{
LowVal: []types.Datum{*nullDatum.Clone()},
HighVal: []types.Datum{types.NewUintDatum(math.MaxUint64)},
},
isFullRange: true,
},
{
ran: ranger.Range{
LowVal: []types.Datum{*nullDatum.Clone()},
HighVal: []types.Datum{*nullDatum.Clone()},
},
isFullRange: false,
},
{
ran: ranger.Range{
LowVal: []types.Datum{types.MinNotNullDatum()},
HighVal: []types.Datum{types.MaxValueDatum()},
},
isFullRange: true,
},
}
for _, t := range isFullRangeTests {
c.Assert(t.ran.IsFullRange(), Equals, t.isFullRange)
}
}
| util/ranger/types_test.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.0015320852398872375,
0.0002463719865772873,
0.00016561202937737107,
0.00017612268857192248,
0.00030306781991384923
] |
{
"id": 6,
"code_window": [
"\t\tif err != nil {\n",
"\t\t\treturn nil, nil, nil, nil, err\n",
"\t\t}\n",
"\t\tdata = append(data, bytes)\n",
"\t}\n",
"\t// Build CMSketch.\n",
"\tcmSketch, topN, ndv, scaleRatio := statistics.NewCMSketchAndTopN(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]), data, uint32(e.opts[ast.AnalyzeOptNumTopN]), uint64(rowCount))\n",
"\t// Build Histogram.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tdata = append(data, valBytes)\n"
],
"file_path": "executor/analyze.go",
"type": "replace",
"edit_start_line_idx": 1242
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package distsql
import (
"fmt"
"math"
"sort"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/ddl/placement"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
tikvstore "github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tipb/go-tipb"
)
// RequestBuilder is used to build a "kv.Request".
// It is called before we issue a kv request by "Select".
type RequestBuilder struct {
kv.Request
// txnScope indicates the value of txn_scope
txnScope string
is infoschema.InfoSchema
err error
}
// Build builds a "kv.Request".
func (builder *RequestBuilder) Build() (*kv.Request, error) {
err := builder.verifyTxnScope()
if err != nil {
builder.err = err
}
return &builder.Request, builder.err
}
// SetMemTracker sets a memTracker for this request.
func (builder *RequestBuilder) SetMemTracker(tracker *memory.Tracker) *RequestBuilder {
builder.Request.MemTracker = tracker
return builder
}
// SetTableRanges sets "KeyRanges" for "kv.Request" by converting "tableRanges"
// to "KeyRanges" firstly.
// Note this function should be deleted or at least not exported, but currently
// br refers to it, so we have to keep it.
func (builder *RequestBuilder) SetTableRanges(tid int64, tableRanges []*ranger.Range, fb *statistics.QueryFeedback) *RequestBuilder {
if builder.err == nil {
builder.Request.KeyRanges = TableRangesToKVRanges(tid, tableRanges, fb)
}
return builder
}
// SetIndexRanges sets "KeyRanges" for "kv.Request" by converting index range
// "ranges" to "KeyRanges" firstly.
func (builder *RequestBuilder) SetIndexRanges(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range) *RequestBuilder {
if builder.err == nil {
builder.Request.KeyRanges, builder.err = IndexRangesToKVRanges(sc, tid, idxID, ranges, nil)
}
return builder
}
// SetIndexRangesForTables sets "KeyRanges" for "kv.Request" by converting multiple indexes range
// "ranges" to "KeyRanges" firstly.
func (builder *RequestBuilder) SetIndexRangesForTables(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range) *RequestBuilder {
if builder.err == nil {
builder.Request.KeyRanges, builder.err = IndexRangesToKVRangesForTables(sc, tids, idxID, ranges, nil)
}
return builder
}
// SetHandleRanges sets "KeyRanges" for "kv.Request" by converting table handle range
// "ranges" to "KeyRanges" firstly.
func (builder *RequestBuilder) SetHandleRanges(sc *stmtctx.StatementContext, tid int64, isCommonHandle bool, ranges []*ranger.Range, fb *statistics.QueryFeedback) *RequestBuilder {
return builder.SetHandleRangesForTables(sc, []int64{tid}, isCommonHandle, ranges, fb)
}
// SetHandleRangesForTables sets "KeyRanges" for "kv.Request" by converting table handle range
// "ranges" to "KeyRanges" firstly for multiple tables.
func (builder *RequestBuilder) SetHandleRangesForTables(sc *stmtctx.StatementContext, tid []int64, isCommonHandle bool, ranges []*ranger.Range, fb *statistics.QueryFeedback) *RequestBuilder {
if builder.err == nil {
builder.Request.KeyRanges, builder.err = TableHandleRangesToKVRanges(sc, tid, isCommonHandle, ranges, fb)
}
return builder
}
// SetTableHandles sets "KeyRanges" for "kv.Request" by converting table handles
// "handles" to "KeyRanges" firstly.
func (builder *RequestBuilder) SetTableHandles(tid int64, handles []kv.Handle) *RequestBuilder {
builder.Request.KeyRanges = TableHandlesToKVRanges(tid, handles)
return builder
}
// SetPartitionsAndHandles sets "KeyRanges" for "kv.Request" by converting PartitionHandles to KeyRanges.
// Handles in the slice must be kv.PartitionHandle.
func (builder *RequestBuilder) SetPartitionsAndHandles(handles []kv.Handle) *RequestBuilder {
builder.Request.KeyRanges = PartitionHandlesToKVRanges(handles)
return builder
}
const estimatedRegionRowCount = 100000
// SetDAGRequest sets the request type to "ReqTypeDAG" and construct request data.
func (builder *RequestBuilder) SetDAGRequest(dag *tipb.DAGRequest) *RequestBuilder {
if builder.err == nil {
builder.Request.Tp = kv.ReqTypeDAG
builder.Request.Cacheable = true
builder.Request.Data, builder.err = dag.Marshal()
}
	// When the DAG is just a simple scan with a small limit, a concurrency of 1 is sufficient.
if len(dag.Executors) == 2 && dag.Executors[1].GetLimit() != nil {
limit := dag.Executors[1].GetLimit()
if limit != nil && limit.Limit < estimatedRegionRowCount {
builder.Request.Concurrency = 1
}
}
return builder
}
// SetAnalyzeRequest sets the request type to "ReqTypeAnalyze" and construct request data.
func (builder *RequestBuilder) SetAnalyzeRequest(ana *tipb.AnalyzeReq) *RequestBuilder {
if builder.err == nil {
builder.Request.Tp = kv.ReqTypeAnalyze
builder.Request.Data, builder.err = ana.Marshal()
builder.Request.NotFillCache = true
builder.Request.IsolationLevel = tikvstore.RC
builder.Request.Priority = tikvstore.PriorityLow
}
return builder
}
// SetChecksumRequest sets the request type to "ReqTypeChecksum" and construct request data.
func (builder *RequestBuilder) SetChecksumRequest(checksum *tipb.ChecksumRequest) *RequestBuilder {
if builder.err == nil {
builder.Request.Tp = kv.ReqTypeChecksum
builder.Request.Data, builder.err = checksum.Marshal()
builder.Request.NotFillCache = true
}
return builder
}
// SetKeyRanges sets "KeyRanges" for "kv.Request".
func (builder *RequestBuilder) SetKeyRanges(keyRanges []kv.KeyRange) *RequestBuilder {
builder.Request.KeyRanges = keyRanges
return builder
}
// SetStartTS sets "StartTS" for "kv.Request".
func (builder *RequestBuilder) SetStartTS(startTS uint64) *RequestBuilder {
builder.Request.StartTs = startTS
return builder
}
// SetDesc sets "Desc" for "kv.Request".
func (builder *RequestBuilder) SetDesc(desc bool) *RequestBuilder {
builder.Request.Desc = desc
return builder
}
// SetKeepOrder sets "KeepOrder" for "kv.Request".
func (builder *RequestBuilder) SetKeepOrder(order bool) *RequestBuilder {
builder.Request.KeepOrder = order
return builder
}
// SetStoreType sets "StoreType" for "kv.Request".
func (builder *RequestBuilder) SetStoreType(storeType kv.StoreType) *RequestBuilder {
builder.Request.StoreType = storeType
return builder
}
// SetAllowBatchCop sets `BatchCop` property.
func (builder *RequestBuilder) SetAllowBatchCop(batchCop bool) *RequestBuilder {
builder.Request.BatchCop = batchCop
return builder
}
func (builder *RequestBuilder) getIsolationLevel() tikvstore.IsoLevel {
switch builder.Tp {
case kv.ReqTypeAnalyze:
return tikvstore.RC
}
return tikvstore.SI
}
func (builder *RequestBuilder) getKVPriority(sv *variable.SessionVars) int {
switch sv.StmtCtx.Priority {
case mysql.NoPriority, mysql.DelayedPriority:
return tikvstore.PriorityNormal
case mysql.LowPriority:
return tikvstore.PriorityLow
case mysql.HighPriority:
return tikvstore.PriorityHigh
}
return tikvstore.PriorityNormal
}
// SetFromSessionVars sets the following fields for "kv.Request" from session variables:
// "Concurrency", "IsolationLevel", "NotFillCache", "ReplicaRead", "SchemaVar".
func (builder *RequestBuilder) SetFromSessionVars(sv *variable.SessionVars) *RequestBuilder {
if builder.Request.Concurrency == 0 {
// Concurrency may be set to 1 by SetDAGRequest
builder.Request.Concurrency = sv.DistSQLScanConcurrency()
}
builder.Request.IsolationLevel = builder.getIsolationLevel()
builder.Request.NotFillCache = sv.StmtCtx.NotFillCache
builder.Request.TaskID = sv.StmtCtx.TaskID
builder.Request.Priority = builder.getKVPriority(sv)
builder.Request.ReplicaRead = sv.GetReplicaRead()
if sv.SnapshotInfoschema != nil {
builder.Request.SchemaVar = infoschema.GetInfoSchemaBySessionVars(sv).SchemaMetaVersion()
} else {
builder.Request.SchemaVar = sv.TxnCtx.SchemaVersion
}
builder.txnScope = sv.TxnCtx.TxnScope
builder.IsStaleness = sv.TxnCtx.IsStaleness
if builder.IsStaleness && builder.txnScope != oracle.GlobalTxnScope {
builder.MatchStoreLabels = []*metapb.StoreLabel{
{
Key: placement.DCLabelKey,
Value: builder.txnScope,
},
}
}
return builder
}
// SetStreaming sets "Streaming" flag for "kv.Request".
func (builder *RequestBuilder) SetStreaming(streaming bool) *RequestBuilder {
builder.Request.Streaming = streaming
return builder
}
// SetConcurrency sets "Concurrency" for "kv.Request".
func (builder *RequestBuilder) SetConcurrency(concurrency int) *RequestBuilder {
builder.Request.Concurrency = concurrency
return builder
}
// SetTiDBServerID sets "TiDBServerID" for "kv.Request"
// ServerID is a unique id of TiDB instance among the cluster.
// See https://github.com/pingcap/tidb/blob/master/docs/design/2020-06-01-global-kill.md
func (builder *RequestBuilder) SetTiDBServerID(serverID uint64) *RequestBuilder {
builder.Request.TiDBServerID = serverID
return builder
}
// SetFromInfoSchema sets the following fields from infoSchema:
// "bundles"
func (builder *RequestBuilder) SetFromInfoSchema(is infoschema.InfoSchema) *RequestBuilder {
if is == nil {
return builder
}
builder.is = is
return builder
}
func (builder *RequestBuilder) verifyTxnScope() error {
if builder.txnScope == "" {
builder.txnScope = oracle.GlobalTxnScope
}
if builder.txnScope == oracle.GlobalTxnScope || builder.is == nil {
return nil
}
visitPhysicalTableID := make(map[int64]struct{})
for _, keyRange := range builder.Request.KeyRanges {
tableID := tablecodec.DecodeTableID(keyRange.StartKey)
if tableID > 0 {
visitPhysicalTableID[tableID] = struct{}{}
} else {
return errors.New("requestBuilder can't decode tableID from keyRange")
}
}
for phyTableID := range visitPhysicalTableID {
valid := VerifyTxnScope(builder.txnScope, phyTableID, builder.is)
if !valid {
var tblName string
var partName string
tblInfo, _, partInfo := builder.is.FindTableByPartitionID(phyTableID)
if tblInfo != nil && partInfo != nil {
tblName = tblInfo.Meta().Name.String()
partName = partInfo.Name.String()
} else {
tblInfo, _ = builder.is.TableByID(phyTableID)
tblName = tblInfo.Meta().Name.String()
}
err := fmt.Errorf("table %v can not be read by %v txn_scope", tblName, builder.txnScope)
if len(partName) > 0 {
err = fmt.Errorf("table %v's partition %v can not be read by %v txn_scope",
tblName, partName, builder.txnScope)
}
return err
}
}
return nil
}
// TableHandleRangesToKVRanges convert table handle ranges to "KeyRanges" for multiple tables.
func TableHandleRangesToKVRanges(sc *stmtctx.StatementContext, tid []int64, isCommonHandle bool, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) {
if !isCommonHandle {
return tablesRangesToKVRanges(tid, ranges, fb), nil
}
return CommonHandleRangesToKVRanges(sc, tid, ranges)
}
// TableRangesToKVRanges converts table ranges to "KeyRange".
// Note this function should not be exported, but currently
// br refers to it, so we have to keep it.
func TableRangesToKVRanges(tid int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) []kv.KeyRange {
return tablesRangesToKVRanges([]int64{tid}, ranges, fb)
}
// tablesRangesToKVRanges converts table ranges to "KeyRange".
func tablesRangesToKVRanges(tids []int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) []kv.KeyRange {
if fb == nil || fb.Hist == nil {
return tableRangesToKVRangesWithoutSplit(tids, ranges)
}
krs := make([]kv.KeyRange, 0, len(ranges))
feedbackRanges := make([]*ranger.Range, 0, len(ranges))
for _, ran := range ranges {
low := codec.EncodeInt(nil, ran.LowVal[0].GetInt64())
high := codec.EncodeInt(nil, ran.HighVal[0].GetInt64())
if ran.LowExclude {
low = kv.Key(low).PrefixNext()
}
		// If this range is split by the histogram, the high value will equal one bucket's upper bound.
		// Since we need to guarantee that each range falls inside exactly one bucket, and `PrefixNext` would make the
		// high value greater than the upper bound, we store the range here first.
r := &ranger.Range{LowVal: []types.Datum{types.NewBytesDatum(low)},
HighVal: []types.Datum{types.NewBytesDatum(high)}}
feedbackRanges = append(feedbackRanges, r)
if !ran.HighExclude {
high = kv.Key(high).PrefixNext()
}
for _, tid := range tids {
startKey := tablecodec.EncodeRowKey(tid, low)
endKey := tablecodec.EncodeRowKey(tid, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
}
}
fb.StoreRanges(feedbackRanges)
return krs
}
func tableRangesToKVRangesWithoutSplit(tids []int64, ranges []*ranger.Range) []kv.KeyRange {
krs := make([]kv.KeyRange, 0, len(ranges)*len(tids))
for _, ran := range ranges {
low, high := encodeHandleKey(ran)
for _, tid := range tids {
startKey := tablecodec.EncodeRowKey(tid, low)
endKey := tablecodec.EncodeRowKey(tid, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
}
}
return krs
}
func encodeHandleKey(ran *ranger.Range) ([]byte, []byte) {
low := codec.EncodeInt(nil, ran.LowVal[0].GetInt64())
high := codec.EncodeInt(nil, ran.HighVal[0].GetInt64())
if ran.LowExclude {
low = kv.Key(low).PrefixNext()
}
if !ran.HighExclude {
high = kv.Key(high).PrefixNext()
}
return low, high
}
// SplitRangesBySign splits the ranges into two parts:
// 1. signedRanges contains ranges less than or equal to MaxInt64
// 2. unsignedRanges contains ranges greater than MaxInt64
// We do that because the TiKV key encoding treats every key as a signed int. As a result MaxUint64 is
// actually encoded as a value smaller than zero. So we must
// 1. pick the range that straddles MaxInt64
// 2. split that range into two parts: smaller than MaxInt64 and greater than it.
// 3. if ascending order is required, return the signed ranges first, and vice versa.
// 4. if no order is required, it is better to return the unsigned ranges first, because that is the normal
// order of a TiKV scan.
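// For example (illustrative): a single unsigned-handle range [1, MaxUint64]
// straddles MaxInt64 and is split into the signed part [1, MaxInt64] and the
// unsigned part [MaxInt64+1, MaxUint64].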
func SplitRangesBySign(ranges []*ranger.Range, keepOrder bool, desc bool, isCommonHandle bool) ([]*ranger.Range, []*ranger.Range) {
if isCommonHandle || len(ranges) == 0 || ranges[0].LowVal[0].Kind() == types.KindInt64 {
return ranges, nil
}
idx := sort.Search(len(ranges), func(i int) bool { return ranges[i].HighVal[0].GetUint64() > math.MaxInt64 })
if idx == len(ranges) {
return ranges, nil
}
if ranges[idx].LowVal[0].GetUint64() > math.MaxInt64 {
signedRanges := ranges[0:idx]
unsignedRanges := ranges[idx:]
if !keepOrder {
return append(unsignedRanges, signedRanges...), nil
}
if desc {
return unsignedRanges, signedRanges
}
return signedRanges, unsignedRanges
}
signedRanges := make([]*ranger.Range, 0, idx+1)
unsignedRanges := make([]*ranger.Range, 0, len(ranges)-idx)
signedRanges = append(signedRanges, ranges[0:idx]...)
if !(ranges[idx].LowVal[0].GetUint64() == math.MaxInt64 && ranges[idx].LowExclude) {
signedRanges = append(signedRanges, &ranger.Range{
LowVal: ranges[idx].LowVal,
LowExclude: ranges[idx].LowExclude,
HighVal: []types.Datum{types.NewUintDatum(math.MaxInt64)},
})
}
if !(ranges[idx].HighVal[0].GetUint64() == math.MaxInt64+1 && ranges[idx].HighExclude) {
unsignedRanges = append(unsignedRanges, &ranger.Range{
LowVal: []types.Datum{types.NewUintDatum(math.MaxInt64 + 1)},
HighVal: ranges[idx].HighVal,
HighExclude: ranges[idx].HighExclude,
})
}
if idx < len(ranges) {
unsignedRanges = append(unsignedRanges, ranges[idx+1:]...)
}
if !keepOrder {
return append(unsignedRanges, signedRanges...), nil
}
if desc {
return unsignedRanges, signedRanges
}
return signedRanges, unsignedRanges
}
// TableHandlesToKVRanges converts sorted handles to kv ranges.
// Continuous handles are merged into a single key range.
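// For example (illustrative), sorted int handles [1, 2, 3, 7] produce two key
// ranges: [EncodeRowKey(tid, 1), EncodeRowKey(tid, 4)) and
// [EncodeRowKey(tid, 7), EncodeRowKey(tid, 8)).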
func TableHandlesToKVRanges(tid int64, handles []kv.Handle) []kv.KeyRange {
krs := make([]kv.KeyRange, 0, len(handles))
i := 0
for i < len(handles) {
if commonHandle, ok := handles[i].(*kv.CommonHandle); ok {
ran := kv.KeyRange{
StartKey: tablecodec.EncodeRowKey(tid, commonHandle.Encoded()),
EndKey: tablecodec.EncodeRowKey(tid, kv.Key(commonHandle.Encoded()).Next()),
}
krs = append(krs, ran)
i++
continue
}
j := i + 1
for ; j < len(handles) && handles[j-1].IntValue() != math.MaxInt64; j++ {
if handles[j].IntValue() != handles[j-1].IntValue()+1 {
break
}
}
low := codec.EncodeInt(nil, handles[i].IntValue())
high := codec.EncodeInt(nil, handles[j-1].IntValue())
high = kv.Key(high).PrefixNext()
startKey := tablecodec.EncodeRowKey(tid, low)
endKey := tablecodec.EncodeRowKey(tid, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
i = j
}
return krs
}
// PartitionHandlesToKVRanges converts PartitionHandles to kv ranges.
// Handles in the slice must be kv.PartitionHandle.
func PartitionHandlesToKVRanges(handles []kv.Handle) []kv.KeyRange {
krs := make([]kv.KeyRange, 0, len(handles))
i := 0
for i < len(handles) {
ph := handles[i].(kv.PartitionHandle)
h := ph.Handle
pid := ph.PartitionID
if commonHandle, ok := h.(*kv.CommonHandle); ok {
ran := kv.KeyRange{
StartKey: tablecodec.EncodeRowKey(pid, commonHandle.Encoded()),
EndKey: tablecodec.EncodeRowKey(pid, append(commonHandle.Encoded(), 0)),
}
krs = append(krs, ran)
i++
continue
}
j := i + 1
for ; j < len(handles) && handles[j-1].IntValue() != math.MaxInt64; j++ {
if handles[j].IntValue() != handles[j-1].IntValue()+1 {
break
}
if handles[j].(kv.PartitionHandle).PartitionID != pid {
break
}
}
low := codec.EncodeInt(nil, handles[i].IntValue())
high := codec.EncodeInt(nil, handles[j-1].IntValue())
high = kv.Key(high).PrefixNext()
startKey := tablecodec.EncodeRowKey(pid, low)
endKey := tablecodec.EncodeRowKey(pid, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
i = j
}
return krs
}
// IndexRangesToKVRanges converts index ranges to "KeyRange".
func IndexRangesToKVRanges(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) {
return IndexRangesToKVRangesForTables(sc, []int64{tid}, idxID, ranges, fb)
}
// IndexRangesToKVRangesForTables converts indexes ranges to "KeyRange".
func IndexRangesToKVRangesForTables(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) {
if fb == nil || fb.Hist == nil {
return indexRangesToKVWithoutSplit(sc, tids, idxID, ranges)
}
feedbackRanges := make([]*ranger.Range, 0, len(ranges))
for _, ran := range ranges {
low, high, err := encodeIndexKey(sc, ran)
if err != nil {
return nil, err
}
feedbackRanges = append(feedbackRanges, &ranger.Range{LowVal: []types.Datum{types.NewBytesDatum(low)},
HighVal: []types.Datum{types.NewBytesDatum(high)}, LowExclude: false, HighExclude: true})
}
feedbackRanges, ok := fb.Hist.SplitRange(sc, feedbackRanges, true)
if !ok {
fb.Invalidate()
}
krs := make([]kv.KeyRange, 0, len(feedbackRanges))
for _, ran := range feedbackRanges {
low, high := ran.LowVal[0].GetBytes(), ran.HighVal[0].GetBytes()
if ran.LowExclude {
low = kv.Key(low).PrefixNext()
}
ran.LowVal[0].SetBytes(low)
		// If this range is split by the histogram, the high value will equal one bucket's upper bound.
		// Since we need to guarantee that each range falls inside exactly one bucket, and `PrefixNext` would make the
		// high value greater than the upper bound, we store the high value here first.
ran.HighVal[0].SetBytes(high)
if !ran.HighExclude {
high = kv.Key(high).PrefixNext()
}
for _, tid := range tids {
startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low)
endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
}
}
fb.StoreRanges(feedbackRanges)
return krs, nil
}
// CommonHandleRangesToKVRanges converts common handle ranges to "KeyRange".
func CommonHandleRangesToKVRanges(sc *stmtctx.StatementContext, tids []int64, ranges []*ranger.Range) ([]kv.KeyRange, error) {
rans := make([]*ranger.Range, 0, len(ranges))
for _, ran := range ranges {
low, high, err := encodeIndexKey(sc, ran)
if err != nil {
return nil, err
}
rans = append(rans, &ranger.Range{LowVal: []types.Datum{types.NewBytesDatum(low)},
HighVal: []types.Datum{types.NewBytesDatum(high)}, LowExclude: false, HighExclude: true})
}
krs := make([]kv.KeyRange, 0, len(rans))
for _, ran := range rans {
low, high := ran.LowVal[0].GetBytes(), ran.HighVal[0].GetBytes()
if ran.LowExclude {
low = kv.Key(low).PrefixNext()
}
ran.LowVal[0].SetBytes(low)
for _, tid := range tids {
startKey := tablecodec.EncodeRowKey(tid, low)
endKey := tablecodec.EncodeRowKey(tid, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
}
}
return krs, nil
}
// VerifyTxnScope verifies whether the txnScope and the visited physical table break the leader rule's dcLocation.
func VerifyTxnScope(txnScope string, physicalTableID int64, is infoschema.InfoSchema) bool {
if txnScope == "" || txnScope == oracle.GlobalTxnScope {
return true
}
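	// Without a placement bundle or a leader DC constraint for this table, any txnScope is acceptable.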
bundle, ok := is.BundleByName(placement.GroupID(physicalTableID))
if !ok {
return true
}
leaderDC, ok := placement.GetLeaderDCByBundle(bundle, placement.DCLabelKey)
if !ok {
return true
}
if leaderDC != txnScope {
return false
}
return true
}
func indexRangesToKVWithoutSplit(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range) ([]kv.KeyRange, error) {
krs := make([]kv.KeyRange, 0, len(ranges))
for _, ran := range ranges {
low, high, err := encodeIndexKey(sc, ran)
if err != nil {
return nil, err
}
for _, tid := range tids {
startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low)
endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
}
}
return krs, nil
}
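// encodeIndexKey encodes a ranger.Range into a half-open [low, high) byte interval,
// adjusting the bounds for LowExclude/HighExclude and for NULL-only high values.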
func encodeIndexKey(sc *stmtctx.StatementContext, ran *ranger.Range) ([]byte, []byte, error) {
low, err := codec.EncodeKey(sc, nil, ran.LowVal...)
if err != nil {
return nil, nil, err
}
if ran.LowExclude {
low = kv.Key(low).PrefixNext()
}
high, err := codec.EncodeKey(sc, nil, ran.HighVal...)
if err != nil {
return nil, nil, err
}
if !ran.HighExclude {
high = kv.Key(high).PrefixNext()
}
var hasNull bool
for _, highVal := range ran.HighVal {
if highVal.IsNull() {
hasNull = true
break
}
}
if hasNull {
		// Append 0 so that the unique-key range [null, null] becomes a scan rather than a point-get.
high = kv.Key(high).Next()
}
return low, high, nil
}
| distsql/request_builder.go | 1 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.0006216502515599132,
0.00019356745178811252,
0.00016255929949693382,
0.00017174912500195205,
0.0000718102091923356
] |
{
"id": 6,
"code_window": [
"\t\tif err != nil {\n",
"\t\t\treturn nil, nil, nil, nil, err\n",
"\t\t}\n",
"\t\tdata = append(data, bytes)\n",
"\t}\n",
"\t// Build CMSketch.\n",
"\tcmSketch, topN, ndv, scaleRatio := statistics.NewCMSketchAndTopN(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]), data, uint32(e.opts[ast.AnalyzeOptNumTopN]), uint64(rowCount))\n",
"\t// Build Histogram.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tdata = append(data, valBytes)\n"
],
"file_path": "executor/analyze.go",
"type": "replace",
"edit_start_line_idx": 1242
} | [
{
"name": "TestEagerAggregation",
"cases": [
"select sum(t.a), sum(t.a+1), sum(t.a), count(t.a), sum(t.a) + count(t.a) from t",
"select sum(t.a + t.b), sum(t.a + t.c), sum(t.a + t.b), count(t.a) from t having sum(t.a + t.b) > 0 order by sum(t.a + t.c)",
"select sum(a.a) from t a, t b where a.c = b.c",
"select sum(b.a) from t a, t b where a.c = b.c",
"select sum(b.a), a.a from t a, t b where a.c = b.c",
"select sum(a.a), b.a from t a, t b where a.c = b.c",
"select sum(a.a), sum(b.a) from t a, t b where a.c = b.c",
"select sum(a.a), max(b.a) from t a, t b where a.c = b.c",
"select max(a.a), sum(b.a) from t a, t b where a.c = b.c",
"select sum(a.a) from t a, t b, t c where a.c = b.c and b.c = c.c",
"select sum(b.a) from t a left join t b on a.c = b.c",
"select sum(a.a) from t a left join t b on a.c = b.c",
"select sum(a.a) from t a right join t b on a.c = b.c",
"select sum(a) from (select * from t) x",
"select sum(c1) from (select c c1, d c2 from t a union all select a c1, b c2 from t b union all select b c1, e c2 from t c) x group by c2",
"select max(a.b), max(b.b) from t a join t b on a.c = b.c group by a.a",
"select max(a.b), max(b.b) from t a join t b on a.a = b.a group by a.c",
"select max(c.b) from (select * from t a union all select * from t b) c group by c.a",
"select max(a.c) from t a join t b on a.a=b.a and a.b=b.b group by a.b",
"select t1.a, count(t2.b) from t t1, t t2 where t1.a = t2.a group by t1.a",
"select avg(a) from (select * from t t1 union all select * from t t2) t",
"select count(distinct a) from (select * from t t1 union all select * from t t2) t",
"select count(distinct b) from (select * from t t1 union all select * from t t2) t",
"select approx_count_distinct(a) from (select * from t t1 union all select * from t t2) t",
"select approx_count_distinct(b) from (select * from t t1 union all select * from t t2) t"
]
},
{
"name": "TestPlanBuilder",
"cases": [
"select * from t for update",
"update t set t.a = t.a * 1.5 where t.a >= 1000 order by t.a desc limit 10",
"delete from t where t.a >= 1000 order by t.a desc limit 10",
"explain format = 'brief' select * from t union all select * from t limit 1, 1",
      // The correctness of the explain result is checked in integration tests. This is to improve coverage.
"explain format = 'brief' select /*+ TIDB_INLJ(t1, t2) */ * from t t1 left join t t2 on t1.a=t2.a where t1.b=1 and t2.b=1 and (t1.c=1 or t2.c=1)",
"explain format = 'brief' select /*+ TIDB_HJ(t1, t2) */ * from t t1 left join t t2 on t1.a=t2.a where t1.b=1 and t2.b=1 and (t1.c=1 or t2.c=1)",
"explain format = 'brief' select /*+ TIDB_SMJ(t1, t2) */ * from t t1 right join t t2 on t1.a=t2.a where t1.b=1 and t2.b=1 and (t1.c=1 or t2.c=1)",
"explain format=\"dot\" select /*+ TIDB_SMJ(t1, t2) */ * from t t1, t t2 where t1.a=t2.a",
"explain format = 'brief' select * from t order by b",
"explain format = 'brief' select * from t order by b limit 1",
"explain format=\"dot\" select * from t order by a",
"insert into t select * from t",
"show columns from t where `Key` = 'pri' like 't*'",
"do sleep(5)",
"select substr(\"abc\", 1)",
"select * from t t1, t t2 where 1 = 0",
"select * from t t1 join t t2 using(a)",
"select * from t t1 natural join t t2",
// Note the Projection before Delete: the final schema should be the schema of
// table t rather than Join.
// If this schema is not set correctly, table.RemoveRecord would fail when adding
// binlog columns, because the schema and data are not consistent.
"delete from t where a in (select b from t where c = 666) or b in (select a from t where c = 42)",
"update t set a = 2 where b in (select c from t)"
]
},
{
"name": "TestPredicatePushDown",
"cases": [
"select count(*) from t a, t b where a.a = b.a",
"select a from (select a from t where d = 0) k where k.a = 5",
"select a from (select a+1 as a from t) k where k.a = 5",
"select a from (select 1+2 as a from t where d = 0) k where k.a = 5",
"select a from (select d as a from t where d = 0) k where k.a = 5",
"select * from t ta, t tb where (ta.d, ta.a) = (tb.b, tb.c)",
"select * from t t1, t t2 where t1.a = t2.b and t2.b > 0 and t1.a = t1.c and t1.d like 'abc' and t2.d = t1.d",
"select * from t ta join t tb on ta.d = tb.d and ta.d > 1 where tb.a = 0",
"select * from t ta join t tb on ta.d = tb.d where ta.d > 1 and tb.a = 0",
"select * from t ta left outer join t tb on ta.d = tb.d and ta.d > 1 where tb.a = 0",
"select * from t ta right outer join t tb on ta.d = tb.d and ta.a > 1 where tb.a = 0",
"select * from t ta left outer join t tb on ta.d = tb.d and ta.a > 1 where ta.d = 0",
"select * from t ta left outer join t tb on ta.d = tb.d and ta.a > 1 where tb.d = 0",
"select * from t ta left outer join t tb on ta.d = tb.d and ta.a > 1 where tb.c is not null and tb.c = 0 and ifnull(tb.d, 1)",
"select * from t ta left outer join t tb on ta.a = tb.a left outer join t tc on tb.b = tc.b where tc.c > 0",
"select * from t ta left outer join t tb on ta.a = tb.a left outer join t tc on tc.b = ta.b where tb.c > 0",
"select * from t as ta left outer join (t as tb left join t as tc on tc.b = tb.b) on tb.a = ta.a where tc.c > 0",
"select * from ( t as ta left outer join t as tb on ta.a = tb.a) join ( t as tc left join t as td on tc.b = td.b) on ta.c = td.c where tb.c = 2 and td.a = 1",
"select * from t ta left outer join (t tb left outer join t tc on tc.b = tb.b) on tb.a = ta.a and tc.c = ta.c where tc.d > 0 or ta.d > 0",
"select * from t ta left outer join t tb on ta.d = tb.d and ta.a > 1 where ifnull(tb.d, 1) or tb.d is null",
"select a, d from (select * from t union all select * from t union all select * from t) z where a < 10",
"select (select count(*) from t where t.a = k.a) from t k",
"select a from t where exists(select 1 from t as x where x.a < t.a)",
"select a from t where exists(select 1 from t as x where x.a = t.a and t.a < 1 and x.a < 1)",
"select a from t where exists(select 1 from t as x where x.a = t.a and x.a < 1) and a < 1",
"select a from t where exists(select 1 from t as x where x.a = t.a) and exists(select 1 from t as x where x.a = t.a)",
"select * from (select a, b, sum(c) as s from t group by a, b) k where k.a > k.b * 2 + 1",
"select * from (select a, b, sum(c) as s from t group by a, b) k where k.a > 1 and k.b > 2",
"select * from (select k.a, sum(k.s) as ss from (select a, sum(b) as s from t group by a) k group by k.a) l where l.a > 2",
"select * from (select a, sum(b) as s from t group by a) k where a > s",
"select * from (select a, sum(b) as s from t group by a + 1) k where a > 1",
"select * from (select a, sum(b) as s from t group by a having 1 = 0) k where a > 1",
"select a, count(a) cnt from t group by a having cnt < 1",
// issue #3873
"select t1.a, t2.a from t as t1 left join t as t2 on t1.a = t2.a where t1.a < 1.0",
// issue #7728
"select * from t t1 join t t2 on t1.a = t2.a where t2.a = null",
"select a, b from (select a, b, min(a) over(partition by b) as min_a from t)as tt where a < 10 and b > 10 and b = min_a",
"select a, b from (select a, b, c, d, sum(a) over(partition by b, c) as sum_a from t)as tt where b + c > 10 and b in (1, 2) and sum_a > b"
]
},
{
"name": "TestSubquery",
"cases": [
// This will be resolved as in sub query.
"select * from t where 10 in (select b from t s where s.a = t.a)",
"select count(c) ,(select b from t s where s.a = t.a) from t",
"select count(c) ,(select count(s.b) from t s where s.a = t.a) from t",
      // Semi-join with agg cannot be decorrelated.
"select t.c in (select count(s.b) from t s where s.a = t.a) from t",
"select (select count(s.b) k from t s where s.a = t.a having k != 0) from t",
"select (select count(s.b) k from t s where s.a = t1.a) from t t1, t t2",
"select (select count(1) k from t s where s.a = t.a having k != 0) from t",
"select a from t where a in (select a from t s group by t.b)",
// This will be resolved as in sub query.
"select * from t where 10 in (((select b from t s where s.a = t.a)))",
// This will be resolved as in function.
"select * from t where 10 in (((select b from t s where s.a = t.a)), 10)",
"select * from t where exists (select s.a from t s having sum(s.a) = t.a )",
// Test MaxOneRow for limit.
"select (select * from (select b from t limit 1) x where x.b = t1.b) from t t1",
// Test Nested sub query.
"select * from t where exists (select s.a from t s where s.c in (select c from t as k where k.d = s.d) having sum(s.a) = t.a )",
"select t1.b from t t1 where t1.b = (select max(t2.a) from t t2 where t1.b=t2.b)",
"select t1.b from t t1 where t1.b = (select avg(t2.a) from t t2 where t1.g=t2.g and (t1.b = 4 or t2.b = 2))",
"select t1.b from t t1 where t1.b = (select max(t2.a) from t t2 where t1.b=t2.b order by t1.a)",
"select t1.b from t t1 where t1.b in (select t2.b from t t2 where t2.a = t1.a order by t2.a)",
"select t1.b from t t1 where exists(select t2.b from t t2 where t2.a = t1.a order by t2.a)",
// `Sort` will not be eliminated, if it is not the top level operator.
"select t1.b from t t1 where t1.b = (select t2.b from t t2 where t2.a = t1.a order by t2.a limit 1)"
]
},
{
"name": "TestTopNPushDown",
"cases": [
// Test TopN + Selection.
"select * from t where a < 1 order by b limit 5",
// Test Limit + Selection.
"select * from t where a < 1 limit 5",
// Test Limit + Agg + Proj .
"select a, count(b) from t group by b limit 5",
// Test TopN + Agg + Proj .
"select a, count(b) from t group by b order by c limit 5",
// Test TopN + Join + Proj.
"select * from t, t s order by t.a limit 5",
// Test Limit + Join + Proj.
"select * from t, t s limit 5",
// Test Limit + Proj
"select a, b from (select @i as a, @i := @i+1 as b from t) t order by a desc limit 1",
// Test TopN + Left Join + Proj.
"select * from t left outer join t s on t.a = s.a order by t.a limit 5",
// Test TopN + Left Join + Proj.
"select * from t left outer join t s on t.a = s.a order by t.a limit 5, 5",
// Test Limit + Left Join + Proj.
"select * from t left outer join t s on t.a = s.a limit 5",
// Test Limit + Left Join Apply + Proj.
"select (select s.a from t s where t.a = s.a) from t limit 5",
// Test TopN + Left Join Apply + Proj.
"select (select s.a from t s where t.a = s.a) from t order by t.a limit 5",
// Test TopN + Left Semi Join Apply + Proj.
"select exists (select s.a from t s where t.a = s.a) from t order by t.a limit 5",
// Test TopN + Left Semi Outer Join + Proj.
"select (exists (select s.a from t s where t.a = s.a)) as x from t order by x limit 5",
// Test TopN + Semi Join Apply + Proj.
"select * from t where exists (select s.a from t s where t.a = s.a) order by t.a limit 5",
// Test TopN + Right Join + Proj.
"select * from t right outer join t s on t.a = s.a order by s.a limit 5",
// Test Limit + Right Join + Proj.
"select * from t right outer join t s on t.a = s.a order by s.a,t.b limit 5",
// Test TopN + UA + Proj.
"select * from t union all (select * from t s) order by a,b limit 5",
// Test TopN + UA + Proj.
"select * from t union all (select * from t s) order by a,b limit 5, 5",
// Test Limit + UA + Proj + Sort.
"select * from t union all (select * from t s order by a) limit 5",
// Test `ByItem` containing column from both sides.
"select ifnull(t1.b, t2.a) from t t1 left join t t2 on t1.e=t2.e order by ifnull(t1.b, t2.a) limit 5",
// Test ifnull cannot be eliminated
"select ifnull(t1.h, t2.b) from t t1 left join t t2 on t1.e=t2.e order by ifnull(t1.h, t2.b) limit 5"
]
},
{
"name": "TestUnion",
"cases": [
"select a from t union select a from t",
"select a from t union all select a from t",
"select a from t union select a from t union all select a from t",
"select a from t union select a from t union all select a from t union select a from t union select a from t",
"select a from t union select a, b from t",
"select * from (select 1 as a union select 1 union all select 2) t order by a",
"select * from (select 1 as a union select 1 union all select 2) t order by (select a)"
]
},
{
"name": "TestWindowFunction",
"cases": [
"select a, avg(a) over(partition by a) from t",
"select a, avg(a) over(partition by b) from t",
"select a, avg(a+1) over(partition by (a+1)) from t",
"select a, avg(a) over(order by a asc, b desc) from t order by a asc, b desc",
"select a, b as a, avg(a) over(partition by a) from t",
"select a, b as z, sum(z) over() from t",
"select a, b as z from t order by (sum(z) over())",
"select sum(avg(a)) over() from t",
"select b from t order by(sum(a) over())",
"select b from t order by(sum(a) over(partition by a))",
"select b from t order by(sum(avg(a)) over())",
"select a from t having (select sum(a) over() as w from t tt where a > t.a)",
"select avg(a) over() as w from t having w > 1",
"select sum(a) over() as sum_a from t group by sum_a",
"select sum(a) over() from t window w1 as (w2)",
"select sum(a) over(w) from t",
"select sum(a) over() from t window w1 as (w2), w2 as (w1)",
"select sum(a) over(w partition by a) from t window w as ()",
"SELECT FIRST_VALUE(a) RESPECT NULLS OVER (w1 PARTITION BY b ORDER BY b ASC, a DESC ROWS 2 PRECEDING) AS 'first_value', a, b FROM ( SELECT a, b FROM `t` ) as t WINDOW w1 AS (PARTITION BY b ORDER BY b ASC, a ASC );",
"select sum(a) over(w) from t window w as (rows between 1 preceding AND 1 following)",
"select sum(a) over w from t window w as (rows between 1 preceding AND 1 following)",
"select sum(a) over(w order by b) from t window w as (order by a)",
"select sum(a) over() from t window w1 as (), w1 as ()",
"select avg(a) over(w2) from t window w1 as (partition by a), w2 as (w1)",
"select a from t window w1 as (partition by a) order by (sum(a) over(w1))",
"select sum(a) over(groups 1 preceding) from t",
"select sum(a) over(rows between unbounded following and 1 preceding) from t",
"select sum(a) over(rows between current row and unbounded preceding) from t",
"select sum(a) over(rows interval 1 MINUTE_SECOND preceding) from t",
"select sum(a) over(rows between 1.0 preceding and 1 following) from t",
"select sum(a) over(range between 1 preceding and 1 following) from t",
"select sum(a) over(order by c_str range between 1 preceding and 1 following) from t",
"select sum(a) over(order by a range interval 1 MINUTE_SECOND preceding) from t",
"select sum(a) over(order by i_date range interval a MINUTE_SECOND preceding) from t",
"select sum(a) over(order by i_date range interval -1 MINUTE_SECOND preceding) from t",
"select sum(a) over(order by i_date range 1 preceding) from t",
"select sum(a) over(order by a range between 1.0 preceding and 1 following) from t",
"select row_number() over(rows between 1 preceding and 1 following) from t",
"select avg(b), max(avg(b)) over(rows between 1 preceding and 1 following) max from t group by c",
"select nth_value(a, 1.0) over() from t",
"SELECT NTH_VALUE(a, 1.0) OVER() FROM t",
"select nth_value(a, 0) over() from t",
"select ntile(0) over() from t",
"select ntile(null) over() from t",
"select avg(a) over w from t window w as(partition by b)",
"select nth_value(i_date, 1) over() from t",
"select sum(b) over w, sum(c) over w from t window w as (order by a)",
"delete from t order by (sum(a) over())",
"delete from t order by (SUM(a) over())",
"SELECT * from t having ROW_NUMBER() over()",
// The best execution order should be (a,c), (a, b, c), (a, b), (), it requires only 2 sort operations.
"select sum(a) over (partition by a order by b), sum(b) over (order by a, b, c), sum(c) over(partition by a order by c), sum(d) over() from t",
// Test issue 11010.
"select dense_rank() over w1, a, b from t window w1 as (partition by t.b order by t.a desc, t.b desc range between current row and 1 following)",
"select dense_rank() over w1, a, b from t window w1 as (partition by t.b order by t.a desc, t.b desc range between current row and unbounded following)",
"select dense_rank() over w1, a, b from t window w1 as (partition by t.b order by t.a desc, t.b desc range between 1 preceding and 1 following)",
// Test issue 11001.
"SELECT PERCENT_RANK() OVER w1 AS 'percent_rank', fieldA, fieldB FROM ( SELECT a AS fieldA, b AS fieldB FROM t ) t1 WINDOW w1 AS ( ROWS BETWEEN 0 FOLLOWING AND UNBOUNDED PRECEDING)",
// Test issue 11002.
"SELECT PERCENT_RANK() OVER w1 AS 'percent_rank', fieldA, fieldB FROM ( SELECT a AS fieldA, b AS fieldB FROM t ) as t1 WINDOW w1 AS ( ROWS BETWEEN UNBOUNDED FOLLOWING AND UNBOUNDED FOLLOWING)",
// Test issue 11011.
"select dense_rank() over w1, a, b from t window w1 as (partition by t.b order by t.a asc range between 1250951168 following AND 1250951168 preceding)",
// Test issue 10556.
"SELECT FIRST_VALUE(a) IGNORE NULLS OVER () FROM t",
"SELECT SUM(DISTINCT a) OVER () FROM t",
"SELECT NTH_VALUE(a, 1) FROM LAST over (partition by b order by b), a FROM t",
"SELECT NTH_VALUE(a, 1) FROM LAST IGNORE NULLS over (partition by b order by b), a FROM t",
"SELECT NTH_VALUE(fieldA, ATAN(-1)) OVER (w1) AS 'ntile', fieldA, fieldB FROM ( SELECT a AS fieldA, b AS fieldB FROM t ) as te WINDOW w1 AS ( ORDER BY fieldB ASC, fieldA DESC )",
"SELECT NTH_VALUE(fieldA, -1) OVER (w1 PARTITION BY fieldB ORDER BY fieldB , fieldA ) AS 'ntile', fieldA, fieldB FROM ( SELECT a AS fieldA, b AS fieldB FROM t ) as temp WINDOW w1 AS ( ORDER BY fieldB ASC, fieldA DESC )",
"SELECT SUM(a) OVER w AS 'sum' FROM t WINDOW w AS (ROWS BETWEEN 1 FOLLOWING AND CURRENT ROW )",
"SELECT SUM(a) OVER w AS 'sum' FROM t WINDOW w AS (ROWS BETWEEN CURRENT ROW AND 1 PRECEDING )",
"SELECT SUM(a) OVER w AS 'sum' FROM t WINDOW w AS (ROWS BETWEEN 1 FOLLOWING AND 1 PRECEDING )",
// Test issue 11943
"SELECT ROW_NUMBER() OVER (partition by b) + a FROM t",
// Test issue 10996
"SELECT GROUP_CONCAT(a) OVER () FROM t"
]
},
{
"name": "TestWindowParallelFunction",
"cases": [
"select a, avg(a) over(partition by a) from t",
"select a, avg(a) over(partition by b) from t",
"select a, avg(a+1) over(partition by (a+1)) from t",
"select a, avg(a) over(order by a asc, b desc) from t order by a asc, b desc",
"select a, b as a, avg(a) over(partition by a) from t",
"select a, b as z, sum(z) over() from t",
"select a, b as z from t order by (sum(z) over())",
"select sum(avg(a)) over() from t",
"select b from t order by(sum(a) over())",
"select b from t order by(sum(a) over(partition by a))",
"select b from t order by(sum(avg(a)) over())",
"select a from t having (select sum(a) over() as w from t tt where a > t.a)",
"select avg(a) over() as w from t having w > 1",
"select sum(a) over() as sum_a from t group by sum_a",
"select sum(a) over() from t window w1 as (w2)",
"select sum(a) over(w) from t",
"select sum(a) over() from t window w1 as (w2), w2 as (w1)",
"select sum(a) over(w partition by a) from t window w as ()",
"SELECT FIRST_VALUE(a) RESPECT NULLS OVER (w1 PARTITION BY b ORDER BY b ASC, a DESC ROWS 2 PRECEDING) AS 'first_value', a, b FROM ( SELECT a, b FROM `t` ) as t WINDOW w1 AS (PARTITION BY b ORDER BY b ASC, a ASC );",
"select sum(a) over(w) from t window w as (rows between 1 preceding AND 1 following)",
"select sum(a) over w from t window w as (rows between 1 preceding AND 1 following)",
"select sum(a) over(w order by b) from t window w as (order by a)",
"select sum(a) over() from t window w1 as (), w1 as ()",
"select avg(a) over(w2) from t window w1 as (partition by a), w2 as (w1)",
"select a from t window w1 as (partition by a) order by (sum(a) over(w1))",
"select sum(a) over(groups 1 preceding) from t",
"select sum(a) over(rows between unbounded following and 1 preceding) from t",
"select sum(a) over(rows between current row and unbounded preceding) from t",
"select sum(a) over(rows interval 1 MINUTE_SECOND preceding) from t",
"select sum(a) over(rows between 1.0 preceding and 1 following) from t",
"select sum(a) over(range between 1 preceding and 1 following) from t",
"select sum(a) over(order by c_str range between 1 preceding and 1 following) from t",
"select sum(a) over(order by a range interval 1 MINUTE_SECOND preceding) from t",
"select sum(a) over(order by i_date range interval a MINUTE_SECOND preceding) from t",
"select sum(a) over(order by i_date range interval -1 MINUTE_SECOND preceding) from t",
"select sum(a) over(order by i_date range 1 preceding) from t",
"select sum(a) over(order by a range between 1.0 preceding and 1 following) from t",
"select row_number() over(rows between 1 preceding and 1 following) from t",
"select avg(b), max(avg(b)) over(rows between 1 preceding and 1 following) max from t group by c",
"select nth_value(a, 1.0) over() from t",
"SELECT NTH_VALUE(a, 1.0) OVER() FROM t",
"select nth_value(a, 0) over() from t",
"select ntile(0) over() from t",
"select ntile(null) over() from t",
"select avg(a) over w from t window w as(partition by b)",
"select nth_value(i_date, 1) over() from t",
"select sum(b) over w, sum(c) over w from t window w as (order by a)",
"delete from t order by (sum(a) over())",
"delete from t order by (SUM(a) over())",
"SELECT * from t having ROW_NUMBER() over()",
// The best execution order should be (a,c), (a, b, c), (a, b), (), it requires only 2 sort operations.
"select sum(a) over (partition by a order by b), sum(b) over (order by a, b, c), sum(c) over(partition by a order by c), sum(d) over() from t",
// Test issue 11010.
"select dense_rank() over w1, a, b from t window w1 as (partition by t.b order by t.a desc, t.b desc range between current row and 1 following)",
"select dense_rank() over w1, a, b from t window w1 as (partition by t.b order by t.a desc, t.b desc range between current row and unbounded following)",
"select dense_rank() over w1, a, b from t window w1 as (partition by t.b order by t.a desc, t.b desc range between 1 preceding and 1 following)",
// Test issue 11001.
"SELECT PERCENT_RANK() OVER w1 AS 'percent_rank', fieldA, fieldB FROM ( SELECT a AS fieldA, b AS fieldB FROM t ) t1 WINDOW w1 AS ( ROWS BETWEEN 0 FOLLOWING AND UNBOUNDED PRECEDING)",
// Test issue 11002.
"SELECT PERCENT_RANK() OVER w1 AS 'percent_rank', fieldA, fieldB FROM ( SELECT a AS fieldA, b AS fieldB FROM t ) as t1 WINDOW w1 AS ( ROWS BETWEEN UNBOUNDED FOLLOWING AND UNBOUNDED FOLLOWING)",
// Test issue 11011.
"select dense_rank() over w1, a, b from t window w1 as (partition by t.b order by t.a asc range between 1250951168 following AND 1250951168 preceding)",
// Test issue 10556.
"SELECT FIRST_VALUE(a) IGNORE NULLS OVER () FROM t",
"SELECT SUM(DISTINCT a) OVER () FROM t",
"SELECT NTH_VALUE(a, 1) FROM LAST over (partition by b order by b), a FROM t",
"SELECT NTH_VALUE(a, 1) FROM LAST IGNORE NULLS over (partition by b order by b), a FROM t",
"SELECT NTH_VALUE(fieldA, ATAN(-1)) OVER (w1) AS 'ntile', fieldA, fieldB FROM ( SELECT a AS fieldA, b AS fieldB FROM t ) as te WINDOW w1 AS ( ORDER BY fieldB ASC, fieldA DESC )",
"SELECT NTH_VALUE(fieldA, -1) OVER (w1 PARTITION BY fieldB ORDER BY fieldB , fieldA ) AS 'ntile', fieldA, fieldB FROM ( SELECT a AS fieldA, b AS fieldB FROM t ) as temp WINDOW w1 AS ( ORDER BY fieldB ASC, fieldA DESC )",
"SELECT SUM(a) OVER w AS 'sum' FROM t WINDOW w AS (ROWS BETWEEN 1 FOLLOWING AND CURRENT ROW )",
"SELECT SUM(a) OVER w AS 'sum' FROM t WINDOW w AS (ROWS BETWEEN CURRENT ROW AND 1 PRECEDING )",
"SELECT SUM(a) OVER w AS 'sum' FROM t WINDOW w AS (ROWS BETWEEN 1 FOLLOWING AND 1 PRECEDING )",
// Test issue 11943
"SELECT ROW_NUMBER() OVER (partition by b) + a FROM t"
]
},
{
"name": "TestUniqueKeyInfo",
"cases": [
"select a, sum(e) from t group by b",
"select a, b, sum(f) from t group by b",
"select c, d, e, sum(a) from t group by c, d, e",
"select f, g, sum(a) from t",
"select * from t t1 join t t2 on t1.a = t2.e",
"select f from t having sum(a) > 0",
"select * from t t1 left join t t2 on t1.a = t2.a",
"select a from t where b > 0",
"select a from t where b > 0 limit 5"
]
},
{
"name": "TestAggPrune",
"cases": [
"select a, count(b) from t group by a",
"select sum(b) from t group by c, d, e",
"select tt.a, sum(tt.b) from (select a, b from t) tt group by tt.a",
"select count(1) from (select count(1), a as b from t group by a) tt group by b",
"select a, count(b) from t group by a",
"select a, count(distinct a, b) from t group by a",
"select a, approx_count_distinct(a, b) from t group by a",
// Test issue23436
"select count(distinct a) from t",
"select count(distinct a), sum(distinct a) from t",
"select count(distinct a), sum(distinct f) from t",
"select count(distinct e) from t",
"select count(distinct f), avg(b) from t"
]
},
{
"name": "TestColumnPruning",
"cases": [
"select count(*) from t group by a",
"select count(*) from t",
"select count(*) from t a join t b where a.a < 1",
"select count(*) from t a join t b on a.a = b.d",
"select count(*) from t a join t b on a.a = b.d order by sum(a.d)",
"select count(b.a) from t a join t b on a.a = b.d group by b.b order by sum(a.d)",
"select * from (select count(b.a) from t a join t b on a.a = b.d group by b.b having sum(a.d) < 0) tt",
"select (select count(a) from t where b = k.a) from t k",
"select exists (select count(*) from t where b = k.a) from t k",
"select b = (select count(*) from t where b = k.a) from t k",
"select exists (select count(a) from t where b = k.a group by b) from t k",
"select a as c1, b as c2 from t order by 1, c1 + c2 + c",
"select a from t where b < any (select c from t)",
"select a from t where (b,a) != all (select c,d from t)",
"select a from t where (b,a) in (select c,d from t)",
"select a from t where a in (select a from t s group by t.b)",
"select t01.a from (select a from t t21 union all select a from t t22) t2 join t t01 on 1 left outer join t t3 on 1 join t t4 on 1",
"select 1 from (select count(b) as cnt from t) t1",
"select count(1) from (select count(b) as cnt from t) t1",
"select count(1) from (select count(b) as cnt from t group by c) t1",
"select b from t where a > 0 limit 5, 10"
]
},
{
"name": "TestSortByItemsPruning",
"cases": [
"select * from t where a > 1 order by a asc, a asc limit 10",
"select * from t where a > 1 order by a asc, b asc, a asc, c asc limit 10",
"select * from t where a > 1 order by pow(a, 2) asc, b asc, pow(a, 2) asc, c asc limit 10"
]
},
{
"name": "TestDeriveNotNullConds",
"cases": [
"select * from t t1 inner join t t2 on t1.e = t2.e",
"select * from t t1 inner join t t2 on t1.e > t2.e",
"select * from t t1 inner join t t2 on t1.e = t2.e and t1.e is not null",
"select * from t t1 left join t t2 on t1.e = t2.e",
"select * from t t1 left join t t2 on t1.e > t2.e",
"select * from t t1 left join t t2 on t1.e = t2.e and t2.e is not null",
"select * from t t1 right join t t2 on t1.e = t2.e and t1.e is not null",
"select * from t t1 inner join t t2 on t1.e <=> t2.e",
"select * from t t1 left join t t2 on t1.e <=> t2.e",
// Not deriving if column has NotNull flag already.
"select * from t t1 inner join t t2 on t1.b = t2.b",
"select * from t t1 left join t t2 on t1.b = t2.b",
"select * from t t1 left join t t2 on t1.b > t2.b",
"select * from t t1 where not exists (select * from t t2 where t2.e = t1.e)"
]
},
{
"name": "TestTablePartition",
"cases": [
{
"SQL": "select * from t",
"IsIdx": 0
},
{
"SQL": "select * from t where t.ptn < 31",
"IsIdx": 0
},
{
"SQL": "select * from t where t.ptn < 61",
"IsIdx": 0
},
{
"SQL": "select * from t where t.ptn > 17 and t.ptn < 61",
"IsIdx": 0
},
{
"SQL": "select * from t where t.ptn > 17 and t.ptn < 61 union all select * from t where t.ptn > 17 and t.ptn < 61 ",
"IsIdx": 0
},
{
"SQL": "select ptn from t where t.ptn > 17 and t.ptn < 61 union all select ptn from t where t.ptn > 17 and t.ptn < 61 ",
"IsIdx": 0
},
{
"SQL": "select * from t where t.ptn < 8",
"IsIdx": 0
},
{
"SQL": "select * from t where t.ptn > 128",
"IsIdx": 0
},
{
"SQL": "select * from t where t.ptn > 128",
"IsIdx": 1
},
{
// NULL will be located in the first partition.
"SQL": "select * from t where t.ptn is null",
"IsIdx": 0
},
{
"SQL": "select * from t where t.ptn is null or t.ptn > 70",
"IsIdx": 1
}
]
},
{
"name": "TestJoinPredicatePushDown",
"cases": [
// issue #7628, inner join
"select * from t as t1 join t as t2 on t1.b = t2.b where t1.a > t2.a",
"select * from t as t1 join t as t2 on t1.b = t2.b where t1.a=1 or t2.a=1",
"select * from t as t1 join t as t2 on t1.b = t2.b where (t1.a=1 and t2.a=1) or (t1.a=2 and t2.a=2)",
"select * from t as t1 join t as t2 on t1.b = t2.b where (t1.c=1 and (t1.a=3 or t2.a=3)) or (t1.a=2 and t2.a=2)",
"select * from t as t1 join t as t2 on t1.b = t2.b where (t1.c=1 and ((t1.a=3 and t2.a=3) or (t1.a=4 and t2.a=4)))",
"select * from t as t1 join t as t2 on t1.b = t2.b where (t1.a>1 and t1.a < 3 and t2.a=1) or (t1.a=2 and t2.a=2)",
"select * from t as t1 join t as t2 on t1.b = t2.b and ((t1.a=1 and t2.a=1) or (t1.a=2 and t2.a=2))",
// issue #7628, left join
"select * from t as t1 left join t as t2 on t1.b = t2.b and ((t1.a=1 and t2.a=1) or (t1.a=2 and t2.a=2))",
"select * from t as t1 left join t as t2 on t1.b = t2.b and t1.a > t2.a",
"select * from t as t1 left join t as t2 on t1.b = t2.b and (t1.a=1 or t2.a=1)",
"select * from t as t1 left join t as t2 on t1.b = t2.b and ((t1.c=1 and (t1.a=3 or t2.a=3)) or (t1.a=2 and t2.a=2))",
"select * from t as t1 left join t as t2 on t1.b = t2.b and ((t2.c=1 and (t1.a=3 or t2.a=3)) or (t1.a=2 and t2.a=2))",
"select * from t as t1 left join t as t2 on t1.b = t2.b and ((t1.c=1 and ((t1.a=3 and t2.a=3) or (t1.a=4 and t2.a=4))) or (t1.a=2 and t2.a=2))",
// Duplicate condition would be removed.
"select * from t t1 join t t2 on t1.a > 1 and t1.a > 1"
]
},
{
"name": "TestJoinReOrder",
"cases": [
"select * from t t1, t t2, t t3, t t4, t t5, t t6 where t1.a = t2.b and t2.a = t3.b and t3.c = t4.a and t4.d = t2.c and t5.d = t6.d",
"select * from t t1, t t2, t t3, t t4, t t5, t t6, t t7, t t8 where t1.a = t8.a",
"select * from t t1, t t2, t t3, t t4, t t5 where t1.a = t5.a and t5.a = t4.a and t4.a = t3.a and t3.a = t2.a and t2.a = t1.a and t1.a = t3.a and t2.a = t4.a and t5.b < 8",
"select * from t t1, t t2, t t3, t t4, t t5 where t1.a = t5.a and t5.a = t4.a and t4.a = t3.a and t3.a = t2.a and t2.a = t1.a and t1.a = t3.a and t2.a = t4.a and t3.b = 1 and t4.a = 1",
"select * from t o where o.b in (select t3.c from t t1, t t2, t t3 where t1.a = t3.a and t2.a = t3.a and t2.a = o.a)",
"select * from t o where o.b in (select t3.c from t t1, t t2, t t3 where t1.a = t3.a and t2.a = t3.a and t2.a = o.a and t1.a = 1)"
]
},
{
"name": "TestOuterJoinEliminator",
"cases": [
// Test left outer join + distinct
"select distinct t1.a, t1.b from t t1 left outer join t t2 on t1.b = t2.b",
// Test right outer join + distinct
"select distinct t2.a, t2.b from t t1 right outer join t t2 on t1.b = t2.b",
// Test duplicate agnostic agg functions on join
"select max(t1.a), min(test.t1.b) from t t1 left join t t2 on t1.b = t2.b",
"select sum(distinct t1.a) from t t1 left join t t2 on t1.a = t2.a and t1.b = t2.b",
"select count(distinct t1.a, t1.b) from t t1 left join t t2 on t1.b = t2.b",
"select approx_count_distinct(t1.a, t1.b) from t t1 left join t t2 on t1.b = t2.b",
// Test left outer join
"select t1.b from t t1 left outer join t t2 on t1.a = t2.a",
// Test right outer join
"select t2.b from t t1 right outer join t t2 on t1.a = t2.a",
// For complex join query
"select max(t3.b) from (t t1 left join t t2 on t1.a = t2.a) right join t t3 on t1.b = t3.b",
"select t1.a ta, t1.b tb from t t1 left join t t2 on t1.a = t2.a",
// Because the `order by` uses t2.a, the `join` can't be eliminated.
"select t1.a, t1.b from t t1 left join t t2 on t1.a = t2.a order by t2.a",
// For issue 11167
"select a.a from t a natural left join t b natural left join t c"
]
},
{
"name": "TestSimplifyOuterJoin",
"cases": [
"select * from t t1 left join t t2 on t1.b = t2.b where t1.c > 1 or t2.c > 1;",
"select * from t t1 left join t t2 on t1.b = t2.b where t1.c > 1 and t2.c > 1;",
"select * from t t1 left join t t2 on t1.b = t2.b where not (t1.c > 1 or t2.c > 1);",
"select * from t t1 left join t t2 on t1.b = t2.b where not (t1.c > 1 and t2.c > 1);",
"select * from t t1 left join t t2 on t1.b > 1 where t1.c = t2.c;",
"select * from t t1 left join t t2 on true where t1.b <=> t2.b;"
]
},
{
"name": "TestOuterWherePredicatePushDown",
"cases": [
// issue #7628, left join with where condition
"select * from t as t1 left join t as t2 on t1.b = t2.b where (t1.a=1 and t2.a is null) or (t1.a=2 and t2.a=2)",
"select * from t as t1 left join t as t2 on t1.b = t2.b where (t1.c=1 and (t1.a=3 or t2.a=3)) or (t1.a=2 and t2.a=2)",
"select * from t as t1 left join t as t2 on t1.b = t2.b where (t1.c=1 and ((t1.a=3 and t2.a=3) or (t1.a=4 and t2.a=4))) or (t1.a=2 and t2.a is null)"
]
}
]
| planner/core/testdata/plan_suite_unexported_in.json | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.00017791453865356743,
0.00017413136083632708,
0.00016556450282223523,
0.00017419227515347302,
0.0000024111425318551483
] |
{
"id": 6,
"code_window": [
"\t\tif err != nil {\n",
"\t\t\treturn nil, nil, nil, nil, err\n",
"\t\t}\n",
"\t\tdata = append(data, bytes)\n",
"\t}\n",
"\t// Build CMSketch.\n",
"\tcmSketch, topN, ndv, scaleRatio := statistics.NewCMSketchAndTopN(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]), data, uint32(e.opts[ast.AnalyzeOptNumTopN]), uint64(rowCount))\n",
"\t// Build Histogram.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tdata = append(data, valBytes)\n"
],
"file_path": "executor/analyze.go",
"type": "replace",
"edit_start_line_idx": 1242
} | [
{
"name": "TestExpBackoffEstimation",
"cases": [
"explain select * from exp_backoff where a = 1",
"explain select * from exp_backoff where b = 1",
"explain select * from exp_backoff where c = 1",
"explain select * from exp_backoff where d >= 3 and d <= 5",
"explain select * from exp_backoff where a = 1 and b = 1 and c = 1 and d >= 3 and d<= 5",
"explain select * from exp_backoff where a = 1 and b = 1 and c = 1 and d >= 3 and d<= 5"
]
}
]
| statistics/testdata/integration_suite_in.json | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.00017860051593743265,
0.00017642085731495172,
0.0001742411986924708,
0.00017642085731495172,
0.000002179658622480929
] |
{
"id": 6,
"code_window": [
"\t\tif err != nil {\n",
"\t\t\treturn nil, nil, nil, nil, err\n",
"\t\t}\n",
"\t\tdata = append(data, bytes)\n",
"\t}\n",
"\t// Build CMSketch.\n",
"\tcmSketch, topN, ndv, scaleRatio := statistics.NewCMSketchAndTopN(int32(e.opts[ast.AnalyzeOptCMSketchDepth]), int32(e.opts[ast.AnalyzeOptCMSketchWidth]), data, uint32(e.opts[ast.AnalyzeOptNumTopN]), uint64(rowCount))\n",
"\t// Build Histogram.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tdata = append(data, valBytes)\n"
],
"file_path": "executor/analyze.go",
"type": "replace",
"edit_start_line_idx": 1242
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package distsql
import (
"bytes"
"context"
"fmt"
"sort"
"strconv"
"sync/atomic"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/store/copr"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/telemetry"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/dbterror"
"github.com/pingcap/tidb/util/execdetails"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tipb/go-tipb"
"go.uber.org/zap"
)
var (
errQueryInterrupted = dbterror.ClassExecutor.NewStd(errno.ErrQueryInterrupted)
)
var (
coprCacheHistogramHit = metrics.DistSQLCoprCacheHistogram.WithLabelValues("hit")
coprCacheHistogramMiss = metrics.DistSQLCoprCacheHistogram.WithLabelValues("miss")
)
var (
_ SelectResult = (*selectResult)(nil)
_ SelectResult = (*streamResult)(nil)
)
// SelectResult is an iterator of coprocessor partial results.
type SelectResult interface {
// NextRaw gets the next raw result.
NextRaw(context.Context) ([]byte, error)
// Next reads the data into chunk.
Next(context.Context, *chunk.Chunk) error
// Close closes the iterator.
Close() error
}
type selectResult struct {
label string
resp kv.Response
rowLen int
fieldTypes []*types.FieldType
ctx sessionctx.Context
selectResp *tipb.SelectResponse
selectRespSize int64 // record the selectResp.Size() when it is initialized.
respChkIdx int
respChunkDecoder *chunk.Decoder
feedback *statistics.QueryFeedback
partialCount int64 // number of partial results.
sqlType string
encodeType tipb.EncodeType
// copPlanIDs contains all copTasks' planIDs,
// which help to collect copTasks' runtime stats.
copPlanIDs []int
rootPlanID int
storeType kv.StoreType
fetchDuration time.Duration
durationReported bool
memTracker *memory.Tracker
stats *selectResultRuntimeStats
}
func (r *selectResult) fetchResp(ctx context.Context) error {
defer func() {
if r.stats != nil {
coprCacheHistogramHit.Observe(float64(r.stats.CoprCacheHitNum))
coprCacheHistogramMiss.Observe(float64(len(r.stats.copRespTime) - int(r.stats.CoprCacheHitNum)))
// Ignore internal sql.
if !r.ctx.GetSessionVars().InRestrictedSQL && len(r.stats.copRespTime) > 0 {
ratio := float64(r.stats.CoprCacheHitNum) / float64(len(r.stats.copRespTime))
if ratio >= 1 {
telemetry.CurrentCoprCacheHitRatioGTE100Count.Inc()
}
if ratio >= 0.8 {
telemetry.CurrentCoprCacheHitRatioGTE80Count.Inc()
}
if ratio >= 0.4 {
telemetry.CurrentCoprCacheHitRatioGTE40Count.Inc()
}
if ratio >= 0.2 {
telemetry.CurrentCoprCacheHitRatioGTE20Count.Inc()
}
if ratio >= 0.1 {
telemetry.CurrentCoprCacheHitRatioGTE10Count.Inc()
}
if ratio >= 0.01 {
telemetry.CurrentCoprCacheHitRatioGTE1Count.Inc()
}
if ratio >= 0 {
telemetry.CurrentCoprCacheHitRatioGTE0Count.Inc()
}
}
}
}()
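	// Keep fetching partial results until one carries data chunks, or the response stream is exhausted.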
for {
r.respChkIdx = 0
startTime := time.Now()
resultSubset, err := r.resp.Next(ctx)
duration := time.Since(startTime)
r.fetchDuration += duration
if err != nil {
return errors.Trace(err)
}
if r.selectResp != nil {
r.memConsume(-atomic.LoadInt64(&r.selectRespSize))
}
if resultSubset == nil {
r.selectResp = nil
atomic.StoreInt64(&r.selectRespSize, 0)
if !r.durationReported {
// final round of fetch
// TODO: Add a label to distinguish between success or failure.
// https://github.com/pingcap/tidb/issues/11397
metrics.DistSQLQueryHistogram.WithLabelValues(r.label, r.sqlType).Observe(r.fetchDuration.Seconds())
r.durationReported = true
}
return nil
}
r.selectResp = new(tipb.SelectResponse)
err = r.selectResp.Unmarshal(resultSubset.GetData())
if err != nil {
return errors.Trace(err)
}
respSize := int64(r.selectResp.Size())
atomic.StoreInt64(&r.selectRespSize, respSize)
r.memConsume(respSize)
if err := r.selectResp.Error; err != nil {
return dbterror.ClassTiKV.Synthesize(terror.ErrCode(err.Code), err.Msg)
}
sessVars := r.ctx.GetSessionVars()
if atomic.LoadUint32(&sessVars.Killed) == 1 {
return errors.Trace(errQueryInterrupted)
}
sc := sessVars.StmtCtx
for _, warning := range r.selectResp.Warnings {
sc.AppendWarning(dbterror.ClassTiKV.Synthesize(terror.ErrCode(warning.Code), warning.Msg))
}
if r.feedback != nil {
r.feedback.Update(resultSubset.GetStartKey(), r.selectResp.OutputCounts, r.selectResp.Ndvs)
}
r.partialCount++
hasStats, ok := resultSubset.(CopRuntimeStats)
if ok {
copStats := hasStats.GetCopRuntimeStats()
if copStats != nil {
r.updateCopRuntimeStats(ctx, copStats, resultSubset.RespTime())
copStats.CopTime = duration
sc.MergeExecDetails(&copStats.ExecDetails, nil)
}
}
if len(r.selectResp.Chunks) != 0 {
break
}
}
return nil
}
func (r *selectResult) Next(ctx context.Context, chk *chunk.Chunk) error {
chk.Reset()
if r.selectResp == nil || r.respChkIdx == len(r.selectResp.Chunks) {
err := r.fetchResp(ctx)
if err != nil {
return err
}
if r.selectResp == nil {
return nil
}
}
// TODO(Shenghui Wu): add metrics
switch r.selectResp.GetEncodeType() {
case tipb.EncodeType_TypeDefault:
return r.readFromDefault(ctx, chk)
case tipb.EncodeType_TypeChunk:
return r.readFromChunk(ctx, chk)
}
return errors.Errorf("unsupported encode type:%v", r.encodeType)
}
// NextRaw returns the next raw partial result.
func (r *selectResult) NextRaw(ctx context.Context) (data []byte, err error) {
resultSubset, err := r.resp.Next(ctx)
r.partialCount++
r.feedback.Invalidate()
if resultSubset != nil && err == nil {
data = resultSubset.GetData()
}
return data, err
}
func (r *selectResult) readFromDefault(ctx context.Context, chk *chunk.Chunk) error {
for !chk.IsFull() {
if r.respChkIdx == len(r.selectResp.Chunks) {
err := r.fetchResp(ctx)
if err != nil || r.selectResp == nil {
return err
}
}
err := r.readRowsData(chk)
if err != nil {
return err
}
if len(r.selectResp.Chunks[r.respChkIdx].RowsData) == 0 {
r.respChkIdx++
}
}
return nil
}
func (r *selectResult) readFromChunk(ctx context.Context, chk *chunk.Chunk) error {
if r.respChunkDecoder == nil {
r.respChunkDecoder = chunk.NewDecoder(
chunk.NewChunkWithCapacity(r.fieldTypes, 0),
r.fieldTypes,
)
}
for !chk.IsFull() {
if r.respChkIdx == len(r.selectResp.Chunks) {
err := r.fetchResp(ctx)
if err != nil || r.selectResp == nil {
return err
}
}
if r.respChunkDecoder.IsFinished() {
r.respChunkDecoder.Reset(r.selectResp.Chunks[r.respChkIdx].RowsData)
}
		// If the next chunk size is greater than required rows * 0.8, reuse the memory of the next chunk and return
		// immediately. Otherwise, splice the data into one chunk and wait for the next chunk.
if r.respChunkDecoder.RemainedRows() > int(float64(chk.RequiredRows())*0.8) {
if chk.NumRows() > 0 {
return nil
}
r.respChunkDecoder.ReuseIntermChk(chk)
r.respChkIdx++
return nil
}
r.respChunkDecoder.Decode(chk)
if r.respChunkDecoder.IsFinished() {
r.respChkIdx++
}
}
return nil
}
func (r *selectResult) updateCopRuntimeStats(ctx context.Context, copStats *copr.CopRuntimeStats, respTime time.Duration) {
callee := copStats.CalleeAddress
if r.rootPlanID <= 0 || r.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl == nil || callee == "" {
return
}
if len(r.selectResp.GetExecutionSummaries()) != len(r.copPlanIDs) {
logutil.Logger(ctx).Error("invalid cop task execution summaries length",
zap.Int("expected", len(r.copPlanIDs)),
zap.Int("received", len(r.selectResp.GetExecutionSummaries())))
return
}
if r.stats == nil {
id := r.rootPlanID
r.stats = &selectResultRuntimeStats{
backoffSleep: make(map[string]time.Duration),
rpcStat: tikv.NewRegionRequestRuntimeStats(),
}
r.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(id, r.stats)
}
r.stats.mergeCopRuntimeStats(copStats, respTime)
if copStats.ScanDetail != nil && len(r.copPlanIDs) > 0 {
r.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RecordScanDetail(r.copPlanIDs[len(r.copPlanIDs)-1], r.storeType.Name(), copStats.ScanDetail)
}
for i, detail := range r.selectResp.GetExecutionSummaries() {
if detail != nil && detail.TimeProcessedNs != nil &&
detail.NumProducedRows != nil && detail.NumIterations != nil {
planID := r.copPlanIDs[i]
r.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.
RecordOneCopTask(planID, r.storeType.Name(), callee, detail)
}
}
}
func (r *selectResult) readRowsData(chk *chunk.Chunk) (err error) {
rowsData := r.selectResp.Chunks[r.respChkIdx].RowsData
decoder := codec.NewDecoder(chk, r.ctx.GetSessionVars().Location())
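	// Decode rows in the default (row) encoding column by column until the chunk is full or the raw data is exhausted.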
for !chk.IsFull() && len(rowsData) > 0 {
for i := 0; i < r.rowLen; i++ {
rowsData, err = decoder.DecodeOne(rowsData, i, r.fieldTypes[i])
if err != nil {
return err
}
}
}
r.selectResp.Chunks[r.respChkIdx].RowsData = rowsData
return nil
}
func (r *selectResult) memConsume(bytes int64) {
if r.memTracker != nil {
r.memTracker.Consume(bytes)
}
}
// Close closes selectResult.
func (r *selectResult) Close() error {
if r.feedback.Actual() >= 0 {
metrics.DistSQLScanKeysHistogram.Observe(float64(r.feedback.Actual()))
}
metrics.DistSQLPartialCountHistogram.Observe(float64(r.partialCount))
respSize := atomic.SwapInt64(&r.selectRespSize, 0)
if respSize > 0 {
r.memConsume(-respSize)
}
return r.resp.Close()
}
// CopRuntimeStats is an interface used to check whether the result has cop runtime stats.
type CopRuntimeStats interface {
// GetCopRuntimeStats gets the cop runtime stats information.
GetCopRuntimeStats() *copr.CopRuntimeStats
}
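// selectResultRuntimeStats collects runtime statistics for a selectResult: cop task response times,
// processed keys, backoff sleeps, RPC stats, and coprocessor cache hits.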
type selectResultRuntimeStats struct {
copRespTime []time.Duration
procKeys []int64
backoffSleep map[string]time.Duration
totalProcessTime time.Duration
totalWaitTime time.Duration
rpcStat tikv.RegionRequestRuntimeStats
CoprCacheHitNum int64
}
func (s *selectResultRuntimeStats) mergeCopRuntimeStats(copStats *copr.CopRuntimeStats, respTime time.Duration) {
s.copRespTime = append(s.copRespTime, respTime)
if copStats.ScanDetail != nil {
s.procKeys = append(s.procKeys, copStats.ScanDetail.ProcessedKeys)
} else {
s.procKeys = append(s.procKeys, 0)
}
for k, v := range copStats.BackoffSleep {
s.backoffSleep[k] += v
}
s.totalProcessTime += copStats.TimeDetail.ProcessTime
s.totalWaitTime += copStats.TimeDetail.WaitTime
s.rpcStat.Merge(copStats.RegionRequestRuntimeStats)
if copStats.CoprCacheHit {
s.CoprCacheHitNum++
}
}
func (s *selectResultRuntimeStats) Clone() execdetails.RuntimeStats {
newRs := selectResultRuntimeStats{
copRespTime: make([]time.Duration, 0, len(s.copRespTime)),
procKeys: make([]int64, 0, len(s.procKeys)),
backoffSleep: make(map[string]time.Duration, len(s.backoffSleep)),
rpcStat: tikv.NewRegionRequestRuntimeStats(),
}
newRs.copRespTime = append(newRs.copRespTime, s.copRespTime...)
newRs.procKeys = append(newRs.procKeys, s.procKeys...)
for k, v := range s.backoffSleep {
newRs.backoffSleep[k] += v
}
newRs.totalProcessTime += s.totalProcessTime
newRs.totalWaitTime += s.totalWaitTime
for k, v := range s.rpcStat.Stats {
newRs.rpcStat.Stats[k] = v
}
return &newRs
}
func (s *selectResultRuntimeStats) Merge(rs execdetails.RuntimeStats) {
other, ok := rs.(*selectResultRuntimeStats)
if !ok {
return
}
s.copRespTime = append(s.copRespTime, other.copRespTime...)
s.procKeys = append(s.procKeys, other.procKeys...)
for k, v := range other.backoffSleep {
s.backoffSleep[k] += v
}
s.totalProcessTime += other.totalProcessTime
s.totalWaitTime += other.totalWaitTime
s.rpcStat.Merge(other.rpcStat)
s.CoprCacheHitNum += other.CoprCacheHitNum
}
func (s *selectResultRuntimeStats) String() string {
buf := bytes.NewBuffer(nil)
rpcStat := s.rpcStat
if len(s.copRespTime) > 0 {
size := len(s.copRespTime)
if size == 1 {
buf.WriteString(fmt.Sprintf("cop_task: {num: 1, max: %v, proc_keys: %v", execdetails.FormatDuration(s.copRespTime[0]), s.procKeys[0]))
} else {
sort.Slice(s.copRespTime, func(i, j int) bool {
return s.copRespTime[i] < s.copRespTime[j]
})
vMax, vMin := s.copRespTime[size-1], s.copRespTime[0]
vP95 := s.copRespTime[size*19/20]
sum := 0.0
for _, t := range s.copRespTime {
sum += float64(t)
}
vAvg := time.Duration(sum / float64(size))
sort.Slice(s.procKeys, func(i, j int) bool {
return s.procKeys[i] < s.procKeys[j]
})
keyMax := s.procKeys[size-1]
keyP95 := s.procKeys[size*19/20]
buf.WriteString(fmt.Sprintf("cop_task: {num: %v, max: %v, min: %v, avg: %v, p95: %v", size,
execdetails.FormatDuration(vMax), execdetails.FormatDuration(vMin),
execdetails.FormatDuration(vAvg), execdetails.FormatDuration(vP95)))
if keyMax > 0 {
buf.WriteString(", max_proc_keys: ")
buf.WriteString(strconv.FormatInt(keyMax, 10))
buf.WriteString(", p95_proc_keys: ")
buf.WriteString(strconv.FormatInt(keyP95, 10))
}
}
if s.totalProcessTime > 0 {
buf.WriteString(", tot_proc: ")
buf.WriteString(execdetails.FormatDuration(s.totalProcessTime))
if s.totalWaitTime > 0 {
buf.WriteString(", tot_wait: ")
buf.WriteString(execdetails.FormatDuration(s.totalWaitTime))
}
}
copRPC := rpcStat.Stats[tikvrpc.CmdCop]
if copRPC != nil && copRPC.Count > 0 {
rpcStat = rpcStat.Clone()
delete(rpcStat.Stats, tikvrpc.CmdCop)
buf.WriteString(", rpc_num: ")
buf.WriteString(strconv.FormatInt(copRPC.Count, 10))
buf.WriteString(", rpc_time: ")
buf.WriteString(execdetails.FormatDuration(time.Duration(copRPC.Consume)))
}
if config.GetGlobalConfig().TiKVClient.CoprCache.CapacityMB > 0 {
buf.WriteString(fmt.Sprintf(", copr_cache_hit_ratio: %v",
strconv.FormatFloat(float64(s.CoprCacheHitNum)/float64(len(s.copRespTime)), 'f', 2, 64)))
} else {
buf.WriteString(", copr_cache: disabled")
}
buf.WriteString("}")
}
rpcStatsStr := rpcStat.String()
if len(rpcStatsStr) > 0 {
buf.WriteString(", ")
buf.WriteString(rpcStatsStr)
}
if len(s.backoffSleep) > 0 {
buf.WriteString(", backoff{")
idx := 0
for k, d := range s.backoffSleep {
if idx > 0 {
buf.WriteString(", ")
}
idx++
buf.WriteString(fmt.Sprintf("%s: %s", k, execdetails.FormatDuration(d)))
}
buf.WriteString("}")
}
return buf.String()
}
// Tp implements the RuntimeStats interface.
func (s *selectResultRuntimeStats) Tp() int {
return execdetails.TpSelectResultRuntimeStats
}
| distsql/select_result.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.0016735203098505735,
0.0002047367743216455,
0.00016175603377632797,
0.00017235497944056988,
0.00020762930216733366
] |
{
"id": 7,
"code_window": [
"\t\te.ranges, ok = e.feedback.Hist.SplitRange(nil, e.ranges, false)\n",
"\t\tif !ok {\n",
"\t\t\te.feedback.Invalidate()\n",
"\t\t}\n",
"\t}\n",
"\tfirstPartRanges, secondPartRanges := distsql.SplitRangesBySign(e.ranges, e.keepOrder, e.desc, e.table.Meta() != nil && e.table.Meta().IsCommonHandle)\n",
"\tfirstResult, err := e.buildResp(ctx, firstPartRanges)\n",
"\tif err != nil {\n",
"\t\te.feedback.Invalidate()\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfirstPartRanges, secondPartRanges := distsql.SplitRangesAcrossInt64Boundary(e.ranges, e.keepOrder, e.desc, e.table.Meta() != nil && e.table.Meta().IsCommonHandle)\n"
],
"file_path": "executor/table_reader.go",
"type": "replace",
"edit_start_line_idx": 155
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"github.com/pingcap/errors"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/distsql"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/expression/aggregation"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tipb/go-tipb"
)
// ToPB implements PhysicalPlan ToPB interface.
func (p *basePhysicalPlan) ToPB(_ sessionctx.Context, _ kv.StoreType) (*tipb.Executor, error) {
return nil, errors.Errorf("plan %s fails converts to PB", p.basePlan.ExplainID())
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalHashAgg) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
sc := ctx.GetSessionVars().StmtCtx
client := ctx.GetClient()
groupByExprs, err := expression.ExpressionsToPBList(sc, p.GroupByItems, client)
if err != nil {
return nil, err
}
aggExec := &tipb.Aggregation{
GroupBy: groupByExprs,
}
for _, aggFunc := range p.AggFuncs {
aggExec.AggFunc = append(aggExec.AggFunc, aggregation.AggFuncToPBExpr(sc, client, aggFunc))
}
executorID := ""
if storeType == kv.TiFlash {
var err error
aggExec.Child, err = p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
executorID = p.ExplainID().String()
}
return &tipb.Executor{Tp: tipb.ExecType_TypeAggregation, Aggregation: aggExec, ExecutorId: &executorID}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalStreamAgg) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
sc := ctx.GetSessionVars().StmtCtx
client := ctx.GetClient()
groupByExprs, err := expression.ExpressionsToPBList(sc, p.GroupByItems, client)
if err != nil {
return nil, err
}
aggExec := &tipb.Aggregation{
GroupBy: groupByExprs,
}
for _, aggFunc := range p.AggFuncs {
aggExec.AggFunc = append(aggExec.AggFunc, aggregation.AggFuncToPBExpr(sc, client, aggFunc))
}
executorID := ""
if storeType == kv.TiFlash {
var err error
aggExec.Child, err = p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
executorID = p.ExplainID().String()
}
return &tipb.Executor{Tp: tipb.ExecType_TypeStreamAgg, Aggregation: aggExec, ExecutorId: &executorID}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalSelection) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
sc := ctx.GetSessionVars().StmtCtx
client := ctx.GetClient()
conditions, err := expression.ExpressionsToPBList(sc, p.Conditions, client)
if err != nil {
return nil, err
}
selExec := &tipb.Selection{
Conditions: conditions,
}
executorID := ""
if storeType == kv.TiFlash {
var err error
selExec.Child, err = p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
executorID = p.ExplainID().String()
}
return &tipb.Executor{Tp: tipb.ExecType_TypeSelection, Selection: selExec, ExecutorId: &executorID}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalProjection) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
sc := ctx.GetSessionVars().StmtCtx
client := ctx.GetClient()
exprs, err := expression.ExpressionsToPBList(sc, p.Exprs, client)
if err != nil {
return nil, err
}
projExec := &tipb.Projection{
Exprs: exprs,
}
executorID := ""
if storeType == kv.TiFlash {
var err error
projExec.Child, err = p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
executorID = p.ExplainID().String()
} else {
return nil, errors.Errorf("The projection can only be pushed down to TiFlash now, not %s.", storeType.Name())
}
return &tipb.Executor{Tp: tipb.ExecType_TypeProjection, Projection: projExec, ExecutorId: &executorID}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalTopN) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
sc := ctx.GetSessionVars().StmtCtx
client := ctx.GetClient()
topNExec := &tipb.TopN{
Limit: p.Count,
}
for _, item := range p.ByItems {
topNExec.OrderBy = append(topNExec.OrderBy, expression.SortByItemToPB(sc, client, item.Expr, item.Desc))
}
executorID := ""
if storeType == kv.TiFlash {
var err error
topNExec.Child, err = p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
executorID = p.ExplainID().String()
}
return &tipb.Executor{Tp: tipb.ExecType_TypeTopN, TopN: topNExec, ExecutorId: &executorID}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalLimit) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
limitExec := &tipb.Limit{
Limit: p.Count,
}
executorID := ""
if storeType == kv.TiFlash {
var err error
limitExec.Child, err = p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
executorID = p.ExplainID().String()
}
return &tipb.Executor{Tp: tipb.ExecType_TypeLimit, Limit: limitExec, ExecutorId: &executorID}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalTableScan) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
tsExec := tables.BuildTableScanFromInfos(p.Table, p.Columns)
tsExec.Desc = p.Desc
if p.isPartition {
tsExec.TableId = p.physicalTableID
}
executorID := ""
if storeType == kv.TiFlash && p.IsGlobalRead {
tsExec.NextReadEngine = tipb.EngineType_TiFlash
splitedRanges, _ := distsql.SplitRangesBySign(p.Ranges, false, false, p.Table.IsCommonHandle)
ranges, err := distsql.TableHandleRangesToKVRanges(ctx.GetSessionVars().StmtCtx, []int64{tsExec.TableId}, p.Table.IsCommonHandle, splitedRanges, nil)
if err != nil {
return nil, err
}
for _, keyRange := range ranges {
tsExec.Ranges = append(tsExec.Ranges, tipb.KeyRange{Low: keyRange.StartKey, High: keyRange.EndKey})
}
}
if storeType == kv.TiFlash {
executorID = p.ExplainID().String()
}
err := SetPBColumnsDefaultValue(ctx, tsExec.Columns, p.Columns)
return &tipb.Executor{Tp: tipb.ExecType_TypeTableScan, TblScan: tsExec, ExecutorId: &executorID}, err
}
// checkCoverIndex checks whether we can pass unique info to TiKV. We should push it down if and only if
// the length of every range equals the number of index columns.
func checkCoverIndex(idx *model.IndexInfo, ranges []*ranger.Range) bool {
// If the index is (c1, c2) but the query range only contains c1, it is not a unique get.
if !idx.Unique {
return false
}
for _, rg := range ranges {
if len(rg.LowVal) != len(idx.Columns) {
return false
}
}
return true
}
func findColumnInfoByID(infos []*model.ColumnInfo, id int64) *model.ColumnInfo {
for _, info := range infos {
if info.ID == id {
return info
}
}
return nil
}
// ToPB generates the pb structure.
func (e *PhysicalExchangeSender) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
child, err := e.Children()[0].ToPB(ctx, kv.TiFlash)
if err != nil {
return nil, errors.Trace(err)
}
encodedTask := make([][]byte, 0, len(e.TargetTasks))
for _, task := range e.TargetTasks {
encodedStr, err := task.ToPB().Marshal()
if err != nil {
return nil, errors.Trace(err)
}
encodedTask = append(encodedTask, encodedStr)
}
hashCols := make([]expression.Expression, 0, len(e.HashCols))
for _, col := range e.HashCols {
hashCols = append(hashCols, col)
}
hashColPb, err := expression.ExpressionsToPBList(ctx.GetSessionVars().StmtCtx, hashCols, ctx.GetClient())
if err != nil {
return nil, errors.Trace(err)
}
ecExec := &tipb.ExchangeSender{
Tp: e.ExchangeType,
EncodedTaskMeta: encodedTask,
PartitionKeys: hashColPb,
Child: child,
}
executorID := e.ExplainID().String()
return &tipb.Executor{
Tp: tipb.ExecType_TypeExchangeSender,
ExchangeSender: ecExec,
ExecutorId: &executorID,
}, nil
}
// ToPB generates the pb structure.
func (e *PhysicalExchangeReceiver) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
encodedTask := make([][]byte, 0, len(e.Tasks))
for _, task := range e.Tasks {
encodedStr, err := task.ToPB().Marshal()
if err != nil {
return nil, errors.Trace(err)
}
encodedTask = append(encodedTask, encodedStr)
}
fieldTypes := make([]*tipb.FieldType, 0, len(e.Schema().Columns))
for _, column := range e.Schema().Columns {
pbType := expression.ToPBFieldType(column.RetType)
if column.RetType.Tp == mysql.TypeEnum {
pbType.Elems = append(pbType.Elems, column.RetType.Elems...)
}
fieldTypes = append(fieldTypes, pbType)
}
ecExec := &tipb.ExchangeReceiver{
EncodedTaskMeta: encodedTask,
FieldTypes: fieldTypes,
}
executorID := e.ExplainID().String()
return &tipb.Executor{
Tp: tipb.ExecType_TypeExchangeReceiver,
ExchangeReceiver: ecExec,
ExecutorId: &executorID,
}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalIndexScan) ToPB(ctx sessionctx.Context, _ kv.StoreType) (*tipb.Executor, error) {
columns := make([]*model.ColumnInfo, 0, p.schema.Len())
tableColumns := p.Table.Cols()
for _, col := range p.schema.Columns {
if col.ID == model.ExtraHandleID {
columns = append(columns, model.NewExtraHandleColInfo())
} else if col.ID == model.ExtraPidColID {
columns = append(columns, model.NewExtraPartitionIDColInfo())
} else {
columns = append(columns, findColumnInfoByID(tableColumns, col.ID))
}
}
var pkColIds []int64
if p.NeedCommonHandle {
pkColIds = tables.TryGetCommonPkColumnIds(p.Table)
}
idxExec := &tipb.IndexScan{
TableId: p.Table.ID,
IndexId: p.Index.ID,
Columns: util.ColumnsToProto(columns, p.Table.PKIsHandle),
Desc: p.Desc,
PrimaryColumnIds: pkColIds,
}
if p.isPartition {
idxExec.TableId = p.physicalTableID
}
unique := checkCoverIndex(p.Index, p.Ranges)
idxExec.Unique = &unique
return &tipb.Executor{Tp: tipb.ExecType_TypeIndexScan, IdxScan: idxExec}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalHashJoin) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
sc := ctx.GetSessionVars().StmtCtx
client := ctx.GetClient()
leftJoinKeys := make([]expression.Expression, 0, len(p.LeftJoinKeys))
rightJoinKeys := make([]expression.Expression, 0, len(p.RightJoinKeys))
for _, leftKey := range p.LeftJoinKeys {
leftJoinKeys = append(leftJoinKeys, leftKey)
}
for _, rightKey := range p.RightJoinKeys {
rightJoinKeys = append(rightJoinKeys, rightKey)
}
lChildren, err := p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
rChildren, err := p.children[1].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
left, err := expression.ExpressionsToPBList(sc, leftJoinKeys, client)
if err != nil {
return nil, err
}
right, err := expression.ExpressionsToPBList(sc, rightJoinKeys, client)
if err != nil {
return nil, err
}
leftConditions, err := expression.ExpressionsToPBList(sc, p.LeftConditions, client)
if err != nil {
return nil, err
}
rightConditions, err := expression.ExpressionsToPBList(sc, p.RightConditions, client)
if err != nil {
return nil, err
}
otherConditions, err := expression.ExpressionsToPBList(sc, p.OtherConditions, client)
if err != nil {
return nil, err
}
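	// Map the planner's join type onto the tipb enum; any join type not listed below falls back to inner join.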
pbJoinType := tipb.JoinType_TypeInnerJoin
switch p.JoinType {
case LeftOuterJoin:
pbJoinType = tipb.JoinType_TypeLeftOuterJoin
case RightOuterJoin:
pbJoinType = tipb.JoinType_TypeRightOuterJoin
case SemiJoin:
pbJoinType = tipb.JoinType_TypeSemiJoin
case AntiSemiJoin:
pbJoinType = tipb.JoinType_TypeAntiSemiJoin
case LeftOuterSemiJoin:
pbJoinType = tipb.JoinType_TypeLeftOuterSemiJoin
case AntiLeftOuterSemiJoin:
pbJoinType = tipb.JoinType_TypeAntiLeftOuterSemiJoin
}
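	// Derive the key field types from the equality conditions; the probe side and the build side share the same type once charset and collation are filled in.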
probeFiledTypes := make([]*tipb.FieldType, 0, len(p.EqualConditions))
buildFiledTypes := make([]*tipb.FieldType, 0, len(p.EqualConditions))
for _, equalCondition := range p.EqualConditions {
retType := equalCondition.RetType.Clone()
chs, coll := equalCondition.CharsetAndCollation(ctx)
retType.Charset = chs
retType.Collate = coll
probeFiledTypes = append(probeFiledTypes, expression.ToPBFieldType(retType))
buildFiledTypes = append(buildFiledTypes, expression.ToPBFieldType(retType))
}
join := &tipb.Join{
JoinType: pbJoinType,
JoinExecType: tipb.JoinExecType_TypeHashJoin,
InnerIdx: int64(p.InnerChildIdx),
LeftJoinKeys: left,
RightJoinKeys: right,
ProbeTypes: probeFiledTypes,
BuildTypes: buildFiledTypes,
LeftConditions: leftConditions,
RightConditions: rightConditions,
OtherConditions: otherConditions,
Children: []*tipb.Executor{lChildren, rChildren},
}
executorID := p.ExplainID().String()
return &tipb.Executor{Tp: tipb.ExecType_TypeJoin, Join: join, ExecutorId: &executorID}, nil
}
// SetPBColumnsDefaultValue sets the default values of tipb.ColumnInfos.
func SetPBColumnsDefaultValue(ctx sessionctx.Context, pbColumns []*tipb.ColumnInfo, columns []*model.ColumnInfo) error {
for i, c := range columns {
		// For virtual columns, we set their default values to NULL so that TiKV will return NULL properly.
		// Their real values will be computed later.
if c.IsGenerated() && !c.GeneratedStored {
pbColumns[i].DefaultVal = []byte{codec.NilFlag}
}
if c.GetOriginDefaultValue() == nil {
continue
}
sessVars := ctx.GetSessionVars()
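		// Evaluate the column's origin default value with strict SQL mode temporarily disabled, then restore the session's original setting.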
originStrict := sessVars.StrictSQLMode
sessVars.StrictSQLMode = false
d, err := table.GetColOriginDefaultValue(ctx, c)
sessVars.StrictSQLMode = originStrict
if err != nil {
return err
}
pbColumns[i].DefaultVal, err = tablecodec.EncodeValue(sessVars.StmtCtx, nil, d)
if err != nil {
return err
}
}
return nil
}
// SupportStreaming returns true if a pushed-down operation supports using the coprocessor streaming API.
// Note that this function handles pushed-down physical plans only! It's called in constructDAGReq.
// Some plans are difficult (if not impossible) to implement with streaming, and some are pointless to do so.
// TODO: Support more kinds of physical plan.
func SupportStreaming(p PhysicalPlan) bool {
switch p.(type) {
case *PhysicalIndexScan, *PhysicalSelection, *PhysicalTableScan:
return true
}
return false
}
| planner/core/plan_to_pb.go | 1 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.002308099064975977,
0.00023212601081468165,
0.00015847731265239418,
0.00016979724750854075,
0.00032186959288083017
] |
{
"id": 7,
"code_window": [
"\t\te.ranges, ok = e.feedback.Hist.SplitRange(nil, e.ranges, false)\n",
"\t\tif !ok {\n",
"\t\t\te.feedback.Invalidate()\n",
"\t\t}\n",
"\t}\n",
"\tfirstPartRanges, secondPartRanges := distsql.SplitRangesBySign(e.ranges, e.keepOrder, e.desc, e.table.Meta() != nil && e.table.Meta().IsCommonHandle)\n",
"\tfirstResult, err := e.buildResp(ctx, firstPartRanges)\n",
"\tif err != nil {\n",
"\t\te.feedback.Invalidate()\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfirstPartRanges, secondPartRanges := distsql.SplitRangesAcrossInt64Boundary(e.ranges, e.keepOrder, e.desc, e.table.Meta() != nil && e.table.Meta().IsCommonHandle)\n"
],
"file_path": "executor/table_reader.go",
"type": "replace",
"edit_start_line_idx": 155
} | [
{
"name": "TestCBOWithoutAnalyze",
"cases": [
"explain format = 'brief' select * from t1, t2 where t1.a = t2.a",
"explain format = 'hint' select * from t1, t2 where t1.a = t2.a"
]
},
{
"name": "TestTableDual",
"cases": [
"explain format = 'brief' select * from t where 1 = 0",
"explain format = 'brief' select * from t where 1 = 1 limit 0"
]
},
{
"name": "TestEstimation",
"cases": [
"explain format = 'brief' select count(*) from t group by a"
]
},
{
"name": "TestOutdatedAnalyze",
"cases": [
{
"SQL": "explain format = 'brief' select * from t where a <= 5 and b <= 5",
"RatioOfPseudoEstimate": 10.0
},
{
"SQL": "explain format = 'brief' select * from t where a <= 5 and b <= 5",
"RatioOfPseudoEstimate": 0.7
}
]
},
{
"name": "TestInconsistentEstimation",
"cases": [
      // Using the histogram (a, b) to estimate `a = 5` gives 1.22, while using the CM Sketch to estimate
      // `a = 5 and c = 5` gives 10, which is not consistent.
"explain format = 'brief' select * from t use index(ab) where a = 5 and c = 5"
]
},
{
"name": "TestLimitCrossEstimation",
"cases": [
// Pseudo stats.
[
"set session tidb_opt_correlation_exp_factor = 0",
"explain format = 'brief' SELECT * FROM t WHERE b = 2 ORDER BY a limit 1;"
],
// Positive correlation.
[
"insert into t (a, b) values (1, 1),(2, 1),(3, 1),(4, 1),(5, 1),(6, 1),(7, 1),(8, 1),(9, 1),(10, 1),(11, 1),(12, 1),(13, 1),(14, 1),(15, 1),(16, 1),(17, 1),(18, 1),(19, 1),(20, 2),(21, 2),(22, 2),(23, 2),(24, 2),(25, 2)",
"analyze table t",
"explain format = 'brief' SELECT * FROM t WHERE b = 2 ORDER BY a limit 1"
],
// Negative correlation.
[
"truncate table t",
"insert into t (a, b) values (1, 25),(2, 24),(3, 23),(4, 23),(5, 21),(6, 20),(7, 19),(8, 18),(9, 17),(10, 16),(11, 15),(12, 14),(13, 13),(14, 12),(15, 11),(16, 10),(17, 9),(18, 8),(19, 7),(20, 6),(21, 5),(22, 4),(23, 3),(24, 2),(25, 1)",
"analyze table t",
"explain format = 'brief' SELECT * FROM t WHERE b <= 6 ORDER BY a limit 1"
],
// Outer plan of index join (to test that correct column ID is used).
[
"explain format = 'brief' SELECT *, t1.a IN (SELECT t2.b FROM t t2) FROM t t1 WHERE t1.b <= 6 ORDER BY t1.a limit 1"
],
// Desc TableScan.
[
"truncate table t",
"insert into t (a, b) values (1, 1),(2, 1),(3, 1),(4, 1),(5, 1),(6, 1),(7, 2),(8, 2),(9, 2),(10, 2),(11, 2),(12, 2),(13, 2),(14, 2),(15, 2),(16, 2),(17, 2),(18, 2),(19, 2),(20, 2),(21, 2),(22, 2),(23, 2),(24, 2),(25, 2)",
"analyze table t",
"explain format = 'brief' SELECT * FROM t WHERE b = 1 ORDER BY a desc limit 1"
],
// Correlation threshold not met.
[
"truncate table t",
"insert into t (a, b) values (1, 1),(2, 1),(3, 1),(4, 1),(5, 1),(6, 1),(7, 1),(8, 1),(9, 2),(10, 1),(11, 1),(12, 1),(13, 1),(14, 2),(15, 2),(16, 1),(17, 2),(18, 1),(19, 2),(20, 1),(21, 2),(22, 1),(23, 1),(24, 1),(25, 1)",
"analyze table t",
"explain format = 'brief' SELECT * FROM t WHERE b = 2 ORDER BY a limit 1"
],
[
"set session tidb_opt_correlation_exp_factor = 1",
"explain format = 'brief' SELECT * FROM t WHERE b = 2 ORDER BY a limit 1"
],
// TableScan has access conditions, but correlation is 1.
[
"set session tidb_opt_correlation_exp_factor = 0",
"truncate table t",
"insert into t (a, b) values (1, 1),(2, 1),(3, 1),(4, 1),(5, 1),(6, 1),(7, 1),(8, 1),(9, 1),(10, 1),(11, 1),(12, 1),(13, 1),(14, 1),(15, 1),(16, 1),(17, 1),(18, 1),(19, 1),(20, 2),(21, 2),(22, 2),(23, 2),(24, 2),(25, 2)",
"analyze table t",
"explain format = 'brief' SELECT * FROM t WHERE b = 2 and a > 0 ORDER BY a limit 1"
],
// Multi-column filter.
[
"drop table t",
"create table t(a int primary key, b int, c int, d bigint default 2147483648, e bigint default 2147483648, f bigint default 2147483648, index idx(b,d,a,c))",
"insert into t(a, b, c) values (1, 1, 1),(2, 1, 2),(3, 1, 1),(4, 1, 2),(5, 1, 1),(6, 1, 2),(7, 1, 1),(8, 1, 2),(9, 1, 1),(10, 1, 2),(11, 1, 1),(12, 1, 2),(13, 1, 1),(14, 1, 2),(15, 1, 1),(16, 1, 2),(17, 1, 1),(18, 1, 2),(19, 1, 1),(20, 2, 2),(21, 2, 1),(22, 2, 2),(23, 2, 1),(24, 2, 2),(25, 2, 1)",
"analyze table t",
"explain format = 'brief' SELECT a FROM t WHERE b = 2 and c > 0 ORDER BY a limit 1"
]
]
},
{
"name": "TestIssue9562",
"cases": [
[
"create table t1(a bigint, b bigint, c bigint)",
"create table t2(a bigint, b bigint, c bigint, index idx(a, b, c))",
"explain format = 'brief' select /*+ TIDB_INLJ(t2) */ * from t1 join t2 on t2.a=t1.a and t2.b>t1.b-1 and t2.b<t1.b+1 and t2.c=t1.c"
],
[
"create table t(a int, b int, index idx_ab(a, b))",
"explain format = 'brief' select * from t t1 join t t2 where t1.b = t2.b and t2.b is null"
]
]
},
{
"name": "TestTiFlashCostModel",
"cases": [
[
"desc select * from t"
],
[
"desc select /*+ read_from_storage(tikv[t]) */ * from t"
],
[
"desc select * from t where t.a = 1 or t.a = 2"
],
[
"set @@session.tidb_isolation_read_engines='tiflash'",
"desc select * from t where t.a = 1 or t.a = 2"
]
]
},
{
"name": "TestStraightJoin",
"cases": [
"explain format = 'brief' select straight_join * from t1, t2, t3, t4",
"explain format = 'brief' select * from t1 straight_join t2 straight_join t3 straight_join t4",
"explain format = 'brief' select straight_join * from t1, t2, t3, t4 where t1.a=t4.a"
]
},
{
"name": "TestNullCount",
"cases": [
"explain format = 'brief' select * from t where a is null",
"explain format = 'brief' select * from t use index(idx) where a is null",
"explain format = 'brief' select * from t where b = 1",
"explain format = 'brief' select * from t where b < 1"
]
},
{
"name": "TestCorrelatedEstimation",
"cases": [
"explain format = 'brief' select t.c in (select count(*) from t s , t t1 where s.a = t.a and s.a = t1.a) from t",
"explain format = 'brief' select (select concat(t1.a, \",\", t1.b) from t t1 where t1.a=t.a and t1.c=t.c) from t"
]
},
{
"name": "TestLowSelIndexGreedySearch",
"cases": [
"explain format = 'brief' select max(e) from t where a='T3382' and b='ECO' and c='TOPIC' and d='23660fa1ace9455cb7f3ee831e14a342'"
]
},
{
"name": "TestEmptyTable",
"cases": [
"select * from t where t.c1 <= 50",
"select * from t where c1 in (select c1 from t1)",
"select * from t, t1 where t.c1 = t1.c1",
"select * from t limit 0"
]
},
{
"name": "TestIndexRead",
"cases": [
"select count(*) from t group by e",
"select count(*) from t where e <= 10 group by e",
"select count(*) from t where e <= 50",
"select count(*) from t where c > '1' group by b",
"select count(*) from t where e = 1 group by b",
"select count(*) from t where e > 1 group by b",
"select count(e) from t where t.b <= 20",
"select count(e) from t where t.b <= 30",
"select count(e) from t where t.b <= 40",
"select count(e) from t where t.b <= 50",
"select count(e) from t where t.b <= 100000000000",
"select * from t where t.b <= 40",
"select * from t where t.b <= 50",
"select * from t where t.b <= 10000000000",
// test panic
"select * from t where 1 and t.b <= 50",
"select * from t where t.b <= 100 order by t.a limit 1",
"select * from t where t.b <= 1 order by t.a limit 10",
"select * from t use index(b) where b = 1 order by a",
// test datetime
"select * from t where d < cast('1991-09-05' as datetime)",
// test timestamp
"select * from t where ts < '1991-09-05'",
"select sum(a) from t1 use index(idx) where a = 3 and b = 100000 group by a limit 1"
]
},
{
"name": "TestAnalyze",
"cases": [
"analyze table t3",
// Test analyze full table.
"select * from t where t.a <= 2",
"select b from t where t.b < 2",
"select * from t where t.a = 1 and t.b <= 2",
// Test not analyzed table.
"select * from t1 where t1.a <= 2",
"select * from t1 where t1.a = 1 and t1.b <= 2",
// Test analyze single index.
"select * from t2 where t2.a <= 2",
// Test analyze all index.
"analyze table t2 index",
// Test partitioned table.
"select * from t4 where t4.a <= 2",
"select b from t4 where t4.b < 2",
"select * from t4 where t4.a = 1 and t4.b <= 2"
// TODO: Refine these tests in the future.
// "select * from t2 where t2.a = 1 and t2.b <= 2",
]
},
{
"name": "TestIndexEqualUnknown",
"cases": [
      // 7639902 is out of range for the analyzed histogram, and we know from the data writer that there are
      // 6 rows on average for each value of `a` in the table. Before this PR, the row count estimate was 150k,
      // which is far from the real value.
"explain format = 'brief' select * from t where a = 7639902",
// Should choose `primary` index instead of index `b`.
"explain format = 'brief' select c, b from t where a = 7639902 order by b asc limit 6"
]
},
{
"name": "TestLimitIndexEstimation",
"cases": [
// Should choose idx_a instead of idx_b, because idx_b would scan 990001 rows.
"explain format = 'brief' select * from t where a <= 10000 order by b limit 1",
// Should choose idx_b instead of idx_a, because idx_b would scan only 1 row.
"explain format = 'brief' select * from t where a >= 999900 order by b limit 1"
]
}
]
| planner/core/testdata/analyze_suite_in.json | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.0001757972058840096,
0.00017186299373861402,
0.00016494744340889156,
0.00017200643196702003,
0.000002444196979922708
] |
{
"id": 7,
"code_window": [
"\t\te.ranges, ok = e.feedback.Hist.SplitRange(nil, e.ranges, false)\n",
"\t\tif !ok {\n",
"\t\t\te.feedback.Invalidate()\n",
"\t\t}\n",
"\t}\n",
"\tfirstPartRanges, secondPartRanges := distsql.SplitRangesBySign(e.ranges, e.keepOrder, e.desc, e.table.Meta() != nil && e.table.Meta().IsCommonHandle)\n",
"\tfirstResult, err := e.buildResp(ctx, firstPartRanges)\n",
"\tif err != nil {\n",
"\t\te.feedback.Invalidate()\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfirstPartRanges, secondPartRanges := distsql.SplitRangesAcrossInt64Boundary(e.ranges, e.keepOrder, e.desc, e.table.Meta() != nil && e.table.Meta().IsCommonHandle)\n"
],
"file_path": "executor/table_reader.go",
"type": "replace",
"edit_start_line_idx": 155
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !race
package israce
// RaceEnabled checks if race is enabled.
const RaceEnabled = false
| util/israce/norace.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.00017797612235881388,
0.00017626363842282444,
0.000174551154486835,
0.00017626363842282444,
0.0000017124839359894395
] |
{
"id": 7,
"code_window": [
"\t\te.ranges, ok = e.feedback.Hist.SplitRange(nil, e.ranges, false)\n",
"\t\tif !ok {\n",
"\t\t\te.feedback.Invalidate()\n",
"\t\t}\n",
"\t}\n",
"\tfirstPartRanges, secondPartRanges := distsql.SplitRangesBySign(e.ranges, e.keepOrder, e.desc, e.table.Meta() != nil && e.table.Meta().IsCommonHandle)\n",
"\tfirstResult, err := e.buildResp(ctx, firstPartRanges)\n",
"\tif err != nil {\n",
"\t\te.feedback.Invalidate()\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfirstPartRanges, secondPartRanges := distsql.SplitRangesAcrossInt64Boundary(e.ranges, e.keepOrder, e.desc, e.table.Meta() != nil && e.table.Meta().IsCommonHandle)\n"
],
"file_path": "executor/table_reader.go",
"type": "replace",
"edit_start_line_idx": 155
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"database/sql"
"fmt"
"time"
"github.com/pingcap/log"
"go.uber.org/zap"
)
func addJobs(jobCount int, jobChan chan struct{}) {
for i := 0; i < jobCount; i++ {
jobChan <- struct{}{}
}
close(jobChan)
}
func doInsert(table *table, db *sql.DB, count int) {
sqls, err := genRowDatas(table, count)
if err != nil {
log.Fatal("generate data failed", zap.Error(err))
}
txn, err := db.Begin()
if err != nil {
log.Fatal("begin failed", zap.Error(err))
}
for _, sql := range sqls {
_, err = txn.Exec(sql)
if err != nil {
log.Fatal("exec failed", zap.Error(err))
}
}
err = txn.Commit()
if err != nil {
log.Fatal("commit failed", zap.Error(err))
}
}
func doJob(table *table, db *sql.DB, batch int, jobChan chan struct{}, doneChan chan struct{}) {
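	// Accumulate incoming jobs and flush a full batch of `batch` rows at a time; any remainder is flushed before signalling completion on doneChan.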
count := 0
for range jobChan {
count++
if count == batch {
doInsert(table, db, count)
count = 0
}
}
if count > 0 {
doInsert(table, db, count)
}
doneChan <- struct{}{}
}
func doWait(doneChan chan struct{}, start time.Time, jobCount int, workerCount int) {
for i := 0; i < workerCount; i++ {
<-doneChan
}
close(doneChan)
now := time.Now()
seconds := now.Unix() - start.Unix()
tps := int64(-1)
if seconds > 0 {
tps = int64(jobCount) / seconds
}
fmt.Printf("[importer]total %d cases, cost %d seconds, tps %d, start %s, now %s\n", jobCount, seconds, tps, start, now)
}
func doProcess(table *table, dbs []*sql.DB, jobCount int, workerCount int, batch int) {
jobChan := make(chan struct{}, 16*workerCount)
doneChan := make(chan struct{}, workerCount)
start := time.Now()
go addJobs(jobCount, jobChan)
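	// If any column is incremental, fall back to a single worker, presumably so the generated values stay sequential.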
for _, col := range table.columns {
if col.incremental {
workerCount = 1
break
}
}
for i := 0; i < workerCount; i++ {
go doJob(table, dbs[i], batch, jobChan, doneChan)
}
doWait(doneChan, start, jobCount, workerCount)
}
| cmd/importer/job.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.00023471830354537815,
0.00017540079716127366,
0.0001650235935812816,
0.00016905704978853464,
0.000018251867004437372
] |
{
"id": 8,
"code_window": [
"\t\t\t}\n",
"\t\t\tbreak\n",
"\t\t}\n",
"\t}\n",
"\n",
"\tsplitedRanges, _ := distsql.SplitRangesBySign(ts.Ranges, false, false, ts.Table.IsCommonHandle)\n",
"\tif ts.Table.GetPartitionInfo() != nil {\n",
"\t\ttmp, _ := e.is.TableByID(ts.Table.ID)\n",
"\t\ttbl := tmp.(table.PartitionedTable)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tsplitedRanges, _ := distsql.SplitRangesAcrossInt64Boundary(ts.Ranges, false, false, ts.Table.IsCommonHandle)\n"
],
"file_path": "planner/core/fragment.go",
"type": "replace",
"edit_start_line_idx": 200
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"github.com/pingcap/errors"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/distsql"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/expression/aggregation"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tipb/go-tipb"
)
// ToPB implements PhysicalPlan ToPB interface.
func (p *basePhysicalPlan) ToPB(_ sessionctx.Context, _ kv.StoreType) (*tipb.Executor, error) {
return nil, errors.Errorf("plan %s fails converts to PB", p.basePlan.ExplainID())
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalHashAgg) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
sc := ctx.GetSessionVars().StmtCtx
client := ctx.GetClient()
groupByExprs, err := expression.ExpressionsToPBList(sc, p.GroupByItems, client)
if err != nil {
return nil, err
}
aggExec := &tipb.Aggregation{
GroupBy: groupByExprs,
}
for _, aggFunc := range p.AggFuncs {
aggExec.AggFunc = append(aggExec.AggFunc, aggregation.AggFuncToPBExpr(sc, client, aggFunc))
}
executorID := ""
if storeType == kv.TiFlash {
var err error
aggExec.Child, err = p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
executorID = p.ExplainID().String()
}
return &tipb.Executor{Tp: tipb.ExecType_TypeAggregation, Aggregation: aggExec, ExecutorId: &executorID}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalStreamAgg) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
sc := ctx.GetSessionVars().StmtCtx
client := ctx.GetClient()
groupByExprs, err := expression.ExpressionsToPBList(sc, p.GroupByItems, client)
if err != nil {
return nil, err
}
aggExec := &tipb.Aggregation{
GroupBy: groupByExprs,
}
for _, aggFunc := range p.AggFuncs {
aggExec.AggFunc = append(aggExec.AggFunc, aggregation.AggFuncToPBExpr(sc, client, aggFunc))
}
executorID := ""
if storeType == kv.TiFlash {
var err error
aggExec.Child, err = p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
executorID = p.ExplainID().String()
}
return &tipb.Executor{Tp: tipb.ExecType_TypeStreamAgg, Aggregation: aggExec, ExecutorId: &executorID}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalSelection) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
sc := ctx.GetSessionVars().StmtCtx
client := ctx.GetClient()
conditions, err := expression.ExpressionsToPBList(sc, p.Conditions, client)
if err != nil {
return nil, err
}
selExec := &tipb.Selection{
Conditions: conditions,
}
executorID := ""
if storeType == kv.TiFlash {
var err error
selExec.Child, err = p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
executorID = p.ExplainID().String()
}
return &tipb.Executor{Tp: tipb.ExecType_TypeSelection, Selection: selExec, ExecutorId: &executorID}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalProjection) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
sc := ctx.GetSessionVars().StmtCtx
client := ctx.GetClient()
exprs, err := expression.ExpressionsToPBList(sc, p.Exprs, client)
if err != nil {
return nil, err
}
projExec := &tipb.Projection{
Exprs: exprs,
}
executorID := ""
if storeType == kv.TiFlash {
var err error
projExec.Child, err = p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
executorID = p.ExplainID().String()
} else {
return nil, errors.Errorf("The projection can only be pushed down to TiFlash now, not %s.", storeType.Name())
}
return &tipb.Executor{Tp: tipb.ExecType_TypeProjection, Projection: projExec, ExecutorId: &executorID}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalTopN) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
sc := ctx.GetSessionVars().StmtCtx
client := ctx.GetClient()
topNExec := &tipb.TopN{
Limit: p.Count,
}
for _, item := range p.ByItems {
topNExec.OrderBy = append(topNExec.OrderBy, expression.SortByItemToPB(sc, client, item.Expr, item.Desc))
}
executorID := ""
if storeType == kv.TiFlash {
var err error
topNExec.Child, err = p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
executorID = p.ExplainID().String()
}
return &tipb.Executor{Tp: tipb.ExecType_TypeTopN, TopN: topNExec, ExecutorId: &executorID}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalLimit) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
limitExec := &tipb.Limit{
Limit: p.Count,
}
executorID := ""
if storeType == kv.TiFlash {
var err error
limitExec.Child, err = p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
executorID = p.ExplainID().String()
}
return &tipb.Executor{Tp: tipb.ExecType_TypeLimit, Limit: limitExec, ExecutorId: &executorID}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalTableScan) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
tsExec := tables.BuildTableScanFromInfos(p.Table, p.Columns)
tsExec.Desc = p.Desc
if p.isPartition {
tsExec.TableId = p.physicalTableID
}
executorID := ""
if storeType == kv.TiFlash && p.IsGlobalRead {
tsExec.NextReadEngine = tipb.EngineType_TiFlash
splitedRanges, _ := distsql.SplitRangesBySign(p.Ranges, false, false, p.Table.IsCommonHandle)
ranges, err := distsql.TableHandleRangesToKVRanges(ctx.GetSessionVars().StmtCtx, []int64{tsExec.TableId}, p.Table.IsCommonHandle, splitedRanges, nil)
if err != nil {
return nil, err
}
for _, keyRange := range ranges {
tsExec.Ranges = append(tsExec.Ranges, tipb.KeyRange{Low: keyRange.StartKey, High: keyRange.EndKey})
}
}
if storeType == kv.TiFlash {
executorID = p.ExplainID().String()
}
err := SetPBColumnsDefaultValue(ctx, tsExec.Columns, p.Columns)
return &tipb.Executor{Tp: tipb.ExecType_TypeTableScan, TblScan: tsExec, ExecutorId: &executorID}, err
}
// checkCoverIndex checks whether we can pass unique info to TiKV. We should push it down if and only if
// the length of every range equals the number of index columns.
func checkCoverIndex(idx *model.IndexInfo, ranges []*ranger.Range) bool {
// If the index is (c1, c2) but the query range only contains c1, it is not a unique get.
if !idx.Unique {
return false
}
for _, rg := range ranges {
if len(rg.LowVal) != len(idx.Columns) {
return false
}
}
return true
}
func findColumnInfoByID(infos []*model.ColumnInfo, id int64) *model.ColumnInfo {
for _, info := range infos {
if info.ID == id {
return info
}
}
return nil
}
// ToPB generates the pb structure.
func (e *PhysicalExchangeSender) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
child, err := e.Children()[0].ToPB(ctx, kv.TiFlash)
if err != nil {
return nil, errors.Trace(err)
}
encodedTask := make([][]byte, 0, len(e.TargetTasks))
for _, task := range e.TargetTasks {
encodedStr, err := task.ToPB().Marshal()
if err != nil {
return nil, errors.Trace(err)
}
encodedTask = append(encodedTask, encodedStr)
}
hashCols := make([]expression.Expression, 0, len(e.HashCols))
for _, col := range e.HashCols {
hashCols = append(hashCols, col)
}
hashColPb, err := expression.ExpressionsToPBList(ctx.GetSessionVars().StmtCtx, hashCols, ctx.GetClient())
if err != nil {
return nil, errors.Trace(err)
}
ecExec := &tipb.ExchangeSender{
Tp: e.ExchangeType,
EncodedTaskMeta: encodedTask,
PartitionKeys: hashColPb,
Child: child,
}
executorID := e.ExplainID().String()
return &tipb.Executor{
Tp: tipb.ExecType_TypeExchangeSender,
ExchangeSender: ecExec,
ExecutorId: &executorID,
}, nil
}
// ToPB generates the pb structure.
func (e *PhysicalExchangeReceiver) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
encodedTask := make([][]byte, 0, len(e.Tasks))
for _, task := range e.Tasks {
encodedStr, err := task.ToPB().Marshal()
if err != nil {
return nil, errors.Trace(err)
}
encodedTask = append(encodedTask, encodedStr)
}
fieldTypes := make([]*tipb.FieldType, 0, len(e.Schema().Columns))
for _, column := range e.Schema().Columns {
pbType := expression.ToPBFieldType(column.RetType)
if column.RetType.Tp == mysql.TypeEnum {
pbType.Elems = append(pbType.Elems, column.RetType.Elems...)
}
fieldTypes = append(fieldTypes, pbType)
}
ecExec := &tipb.ExchangeReceiver{
EncodedTaskMeta: encodedTask,
FieldTypes: fieldTypes,
}
executorID := e.ExplainID().String()
return &tipb.Executor{
Tp: tipb.ExecType_TypeExchangeReceiver,
ExchangeReceiver: ecExec,
ExecutorId: &executorID,
}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalIndexScan) ToPB(ctx sessionctx.Context, _ kv.StoreType) (*tipb.Executor, error) {
columns := make([]*model.ColumnInfo, 0, p.schema.Len())
tableColumns := p.Table.Cols()
for _, col := range p.schema.Columns {
if col.ID == model.ExtraHandleID {
columns = append(columns, model.NewExtraHandleColInfo())
} else if col.ID == model.ExtraPidColID {
columns = append(columns, model.NewExtraPartitionIDColInfo())
} else {
columns = append(columns, findColumnInfoByID(tableColumns, col.ID))
}
}
var pkColIds []int64
if p.NeedCommonHandle {
pkColIds = tables.TryGetCommonPkColumnIds(p.Table)
}
idxExec := &tipb.IndexScan{
TableId: p.Table.ID,
IndexId: p.Index.ID,
Columns: util.ColumnsToProto(columns, p.Table.PKIsHandle),
Desc: p.Desc,
PrimaryColumnIds: pkColIds,
}
if p.isPartition {
idxExec.TableId = p.physicalTableID
}
unique := checkCoverIndex(p.Index, p.Ranges)
idxExec.Unique = &unique
return &tipb.Executor{Tp: tipb.ExecType_TypeIndexScan, IdxScan: idxExec}, nil
}
// ToPB implements PhysicalPlan ToPB interface.
func (p *PhysicalHashJoin) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*tipb.Executor, error) {
sc := ctx.GetSessionVars().StmtCtx
client := ctx.GetClient()
leftJoinKeys := make([]expression.Expression, 0, len(p.LeftJoinKeys))
rightJoinKeys := make([]expression.Expression, 0, len(p.RightJoinKeys))
for _, leftKey := range p.LeftJoinKeys {
leftJoinKeys = append(leftJoinKeys, leftKey)
}
for _, rightKey := range p.RightJoinKeys {
rightJoinKeys = append(rightJoinKeys, rightKey)
}
lChildren, err := p.children[0].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
rChildren, err := p.children[1].ToPB(ctx, storeType)
if err != nil {
return nil, errors.Trace(err)
}
left, err := expression.ExpressionsToPBList(sc, leftJoinKeys, client)
if err != nil {
return nil, err
}
right, err := expression.ExpressionsToPBList(sc, rightJoinKeys, client)
if err != nil {
return nil, err
}
leftConditions, err := expression.ExpressionsToPBList(sc, p.LeftConditions, client)
if err != nil {
return nil, err
}
rightConditions, err := expression.ExpressionsToPBList(sc, p.RightConditions, client)
if err != nil {
return nil, err
}
otherConditions, err := expression.ExpressionsToPBList(sc, p.OtherConditions, client)
if err != nil {
return nil, err
}
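	// Map the planner's join type onto the tipb enum; any join type not listed below falls back to inner join.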
pbJoinType := tipb.JoinType_TypeInnerJoin
switch p.JoinType {
case LeftOuterJoin:
pbJoinType = tipb.JoinType_TypeLeftOuterJoin
case RightOuterJoin:
pbJoinType = tipb.JoinType_TypeRightOuterJoin
case SemiJoin:
pbJoinType = tipb.JoinType_TypeSemiJoin
case AntiSemiJoin:
pbJoinType = tipb.JoinType_TypeAntiSemiJoin
case LeftOuterSemiJoin:
pbJoinType = tipb.JoinType_TypeLeftOuterSemiJoin
case AntiLeftOuterSemiJoin:
pbJoinType = tipb.JoinType_TypeAntiLeftOuterSemiJoin
}
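	// Derive the key field types from the equality conditions; the probe side and the build side share the same type once charset and collation are filled in.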
probeFiledTypes := make([]*tipb.FieldType, 0, len(p.EqualConditions))
buildFiledTypes := make([]*tipb.FieldType, 0, len(p.EqualConditions))
for _, equalCondition := range p.EqualConditions {
retType := equalCondition.RetType.Clone()
chs, coll := equalCondition.CharsetAndCollation(ctx)
retType.Charset = chs
retType.Collate = coll
probeFiledTypes = append(probeFiledTypes, expression.ToPBFieldType(retType))
buildFiledTypes = append(buildFiledTypes, expression.ToPBFieldType(retType))
}
join := &tipb.Join{
JoinType: pbJoinType,
JoinExecType: tipb.JoinExecType_TypeHashJoin,
InnerIdx: int64(p.InnerChildIdx),
LeftJoinKeys: left,
RightJoinKeys: right,
ProbeTypes: probeFiledTypes,
BuildTypes: buildFiledTypes,
LeftConditions: leftConditions,
RightConditions: rightConditions,
OtherConditions: otherConditions,
Children: []*tipb.Executor{lChildren, rChildren},
}
executorID := p.ExplainID().String()
return &tipb.Executor{Tp: tipb.ExecType_TypeJoin, Join: join, ExecutorId: &executorID}, nil
}
// SetPBColumnsDefaultValue sets the default values of tipb.ColumnInfos.
func SetPBColumnsDefaultValue(ctx sessionctx.Context, pbColumns []*tipb.ColumnInfo, columns []*model.ColumnInfo) error {
for i, c := range columns {
		// For virtual columns, we set their default values to NULL so that TiKV will return NULL properly.
		// Their real values will be computed later.
if c.IsGenerated() && !c.GeneratedStored {
pbColumns[i].DefaultVal = []byte{codec.NilFlag}
}
if c.GetOriginDefaultValue() == nil {
continue
}
sessVars := ctx.GetSessionVars()
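		// Evaluate the column's origin default value with strict SQL mode temporarily disabled, then restore the session's original setting.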
originStrict := sessVars.StrictSQLMode
sessVars.StrictSQLMode = false
d, err := table.GetColOriginDefaultValue(ctx, c)
sessVars.StrictSQLMode = originStrict
if err != nil {
return err
}
pbColumns[i].DefaultVal, err = tablecodec.EncodeValue(sessVars.StmtCtx, nil, d)
if err != nil {
return err
}
}
return nil
}
// SupportStreaming returns true if a pushed-down operation supports using the coprocessor streaming API.
// Note that this function handles pushed-down physical plans only! It's called in constructDAGReq.
// Some plans are difficult (if not impossible) to implement with streaming, and some are pointless to do so.
// TODO: Support more kinds of physical plan.
func SupportStreaming(p PhysicalPlan) bool {
switch p.(type) {
case *PhysicalIndexScan, *PhysicalSelection, *PhysicalTableScan:
return true
}
return false
}
| planner/core/plan_to_pb.go | 1 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.9079640507698059,
0.020487753674387932,
0.00015938871365506202,
0.0001738570281304419,
0.13230572640895844
] |
{
"id": 8,
"code_window": [
"\t\t\t}\n",
"\t\t\tbreak\n",
"\t\t}\n",
"\t}\n",
"\n",
"\tsplitedRanges, _ := distsql.SplitRangesBySign(ts.Ranges, false, false, ts.Table.IsCommonHandle)\n",
"\tif ts.Table.GetPartitionInfo() != nil {\n",
"\t\ttmp, _ := e.is.TableByID(ts.Table.ID)\n",
"\t\ttbl := tmp.(table.PartitionedTable)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tsplitedRanges, _ := distsql.SplitRangesAcrossInt64Boundary(ts.Ranges, false, false, ts.Table.IsCommonHandle)\n"
],
"file_path": "planner/core/fragment.go",
"type": "replace",
"edit_start_line_idx": 200
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package implementation
import (
"math"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/kv"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/planner/memo"
"github.com/pingcap/tidb/statistics"
)
// TableDualImpl implementation of PhysicalTableDual.
type TableDualImpl struct {
baseImpl
}
// NewTableDualImpl creates a new table dual Implementation.
func NewTableDualImpl(dual *plannercore.PhysicalTableDual) *TableDualImpl {
return &TableDualImpl{baseImpl{plan: dual}}
}
// CalcCost calculates the cost of the table dual Implementation.
func (impl *TableDualImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 {
return 0
}
// MemTableScanImpl implementation of PhysicalMemTable.
type MemTableScanImpl struct {
baseImpl
}
// NewMemTableScanImpl creates a new mem table scan Implementation.
func NewMemTableScanImpl(dual *plannercore.PhysicalMemTable) *MemTableScanImpl {
return &MemTableScanImpl{baseImpl{plan: dual}}
}
// CalcCost calculates the cost of the mem table scan Implementation.
func (impl *MemTableScanImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 {
return 0
}
// TableReaderImpl implementation of PhysicalTableReader.
type TableReaderImpl struct {
baseImpl
tblColHists *statistics.HistColl
}
// NewTableReaderImpl creates a new table reader Implementation.
func NewTableReaderImpl(reader *plannercore.PhysicalTableReader, hists *statistics.HistColl) *TableReaderImpl {
base := baseImpl{plan: reader}
impl := &TableReaderImpl{
baseImpl: base,
tblColHists: hists,
}
return impl
}
// CalcCost calculates the cost of the table reader Implementation.
func (impl *TableReaderImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 {
reader := impl.plan.(*plannercore.PhysicalTableReader)
width := impl.tblColHists.GetAvgRowSize(impl.plan.SCtx(), reader.Schema().Columns, false, false)
sessVars := reader.SCtx().GetSessionVars()
networkCost := outCount * sessVars.NetworkFactor * width
// copTasks are run in parallel, to make the estimated cost closer to execution time, we amortize
// the cost to cop iterator workers. According to `CopClient::Send`, the concurrency
// is Min(DistSQLScanConcurrency, numRegionsInvolvedInScan), since we cannot infer
// the number of regions involved, we simply use DistSQLScanConcurrency.
copIterWorkers := float64(sessVars.DistSQLScanConcurrency())
impl.cost = (networkCost + children[0].GetCost()) / copIterWorkers
return impl.cost
}
// GetCostLimit implements Implementation interface.
func (impl *TableReaderImpl) GetCostLimit(costLimit float64, children ...memo.Implementation) float64 {
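	// Scale the limit back up by the cop concurrency, mirroring the division done in CalcCost, and guard against float64 overflow.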
reader := impl.plan.(*plannercore.PhysicalTableReader)
sessVars := reader.SCtx().GetSessionVars()
copIterWorkers := float64(sessVars.DistSQLScanConcurrency())
if math.MaxFloat64/copIterWorkers < costLimit {
return math.MaxFloat64
}
return costLimit * copIterWorkers
}
// TableScanImpl implementation of PhysicalTableScan.
type TableScanImpl struct {
baseImpl
tblColHists *statistics.HistColl
tblCols []*expression.Column
}
// NewTableScanImpl creates a new table scan Implementation.
func NewTableScanImpl(ts *plannercore.PhysicalTableScan, cols []*expression.Column, hists *statistics.HistColl) *TableScanImpl {
base := baseImpl{plan: ts}
impl := &TableScanImpl{
baseImpl: base,
tblColHists: hists,
tblCols: cols,
}
return impl
}
// CalcCost calculates the cost of the table scan Implementation.
func (impl *TableScanImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 {
ts := impl.plan.(*plannercore.PhysicalTableScan)
width := impl.tblColHists.GetTableAvgRowSize(impl.plan.SCtx(), impl.tblCols, kv.TiKV, true)
sessVars := ts.SCtx().GetSessionVars()
impl.cost = outCount * sessVars.ScanFactor * width
if ts.Desc {
impl.cost = outCount * sessVars.DescScanFactor * width
}
return impl.cost
}
// IndexReaderImpl is the implementation of PhysicalIndexReader.
type IndexReaderImpl struct {
baseImpl
tblColHists *statistics.HistColl
}
// GetCostLimit implements Implementation interface.
func (impl *IndexReaderImpl) GetCostLimit(costLimit float64, children ...memo.Implementation) float64 {
reader := impl.plan.(*plannercore.PhysicalIndexReader)
sessVars := reader.SCtx().GetSessionVars()
copIterWorkers := float64(sessVars.DistSQLScanConcurrency())
if math.MaxFloat64/copIterWorkers < costLimit {
return math.MaxFloat64
}
return costLimit * copIterWorkers
}
// CalcCost implements Implementation interface.
func (impl *IndexReaderImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 {
reader := impl.plan.(*plannercore.PhysicalIndexReader)
sessVars := reader.SCtx().GetSessionVars()
networkCost := outCount * sessVars.NetworkFactor * impl.tblColHists.GetAvgRowSize(reader.SCtx(), children[0].GetPlan().Schema().Columns, true, false)
copIterWorkers := float64(sessVars.DistSQLScanConcurrency())
impl.cost = (networkCost + children[0].GetCost()) / copIterWorkers
return impl.cost
}
// NewIndexReaderImpl creates a new IndexReader Implementation.
func NewIndexReaderImpl(reader *plannercore.PhysicalIndexReader, tblColHists *statistics.HistColl) *IndexReaderImpl {
return &IndexReaderImpl{
baseImpl: baseImpl{plan: reader},
tblColHists: tblColHists,
}
}
// IndexScanImpl is the Implementation of PhysicalIndexScan.
type IndexScanImpl struct {
baseImpl
tblColHists *statistics.HistColl
}
// CalcCost implements Implementation interface.
func (impl *IndexScanImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 {
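	// Cost model: row count * average index row size * scan factor (or desc-scan factor), plus one seek cost per key range.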
is := impl.plan.(*plannercore.PhysicalIndexScan)
sessVars := is.SCtx().GetSessionVars()
rowSize := impl.tblColHists.GetIndexAvgRowSize(is.SCtx(), is.Schema().Columns, is.Index.Unique)
cost := outCount * rowSize * sessVars.ScanFactor
if is.Desc {
cost = outCount * rowSize * sessVars.DescScanFactor
}
cost += float64(len(is.Ranges)) * sessVars.SeekFactor
impl.cost = cost
return impl.cost
}
// NewIndexScanImpl creates a new IndexScan Implementation.
func NewIndexScanImpl(scan *plannercore.PhysicalIndexScan, tblColHists *statistics.HistColl) *IndexScanImpl {
return &IndexScanImpl{
baseImpl: baseImpl{plan: scan},
tblColHists: tblColHists,
}
}
| planner/implementation/datasource.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.0026293371338397264,
0.0003198733029421419,
0.00016357790445908904,
0.00017239722365047783,
0.0005465418216772377
] |
{
"id": 8,
"code_window": [
"\t\t\t}\n",
"\t\t\tbreak\n",
"\t\t}\n",
"\t}\n",
"\n",
"\tsplitedRanges, _ := distsql.SplitRangesBySign(ts.Ranges, false, false, ts.Table.IsCommonHandle)\n",
"\tif ts.Table.GetPartitionInfo() != nil {\n",
"\t\ttmp, _ := e.is.TableByID(ts.Table.ID)\n",
"\t\ttbl := tmp.(table.PartitionedTable)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tsplitedRanges, _ := distsql.SplitRangesAcrossInt64Boundary(ts.Ranges, false, false, ts.Table.IsCommonHandle)\n"
],
"file_path": "planner/core/fragment.go",
"type": "replace",
"edit_start_line_idx": 200
} | // Copyright 2019-present PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package lockstore
import (
"math"
"time"
"github.com/pingcap/log"
)
type arenaAddr uint64
const (
	alignMask       = 1<<32 - 8 // 29 one bits followed by 3 zero bits.
nullBlockOffset = math.MaxUint32
nullArenaAddr arenaAddr = 0
	// The time we wait before reusing an empty block.
	// Data corruption could happen under this sequence of events:
	// 1. a reader reads a node.
	// 2. a writer deletes the node, then frees the block and puts it into the writable queue,
	// and this block becomes the first writable block.
	// 3. The writer inserts another node, overwriting the block we just freed.
	// 4. The reader reads the key/value of that deleted node.
	// Because the time between 1 and 4 is very short, this is very unlikely to happen, but it can happen.
	// So we wait for a while so the reader can finish reading before we overwrite the empty block.
reuseSafeDuration = time.Millisecond * 100
)
func (addr arenaAddr) blockIdx() int {
return int(addr>>32 - 1)
}
func (addr arenaAddr) blockOffset() uint32 {
return uint32(addr)
}
func newArenaAddr(blockIdx int, blockOffset uint32) arenaAddr {
return arenaAddr(uint64(blockIdx+1)<<32 | uint64(blockOffset))
}
type arena struct {
blockSize int
blocks []*arenaBlock
writableQueue []int
pendingBlocks []pendingBlock
}
type pendingBlock struct {
blockIdx int
reusableTime time.Time
}
func newArenaLocator(blockSize int) *arena {
return &arena{
blockSize: blockSize,
blocks: []*arenaBlock{newArenaBlock(blockSize)},
writableQueue: []int{0},
}
}
func (a *arena) get(addr arenaAddr, size int) []byte {
if addr.blockIdx() >= len(a.blocks) {
log.S().Fatalf("arena.get out of range. len(blocks)=%v, addr.blockIdx()=%v, addr.blockOffset()=%v, size=%v", len(a.blocks), addr.blockIdx(), addr.blockOffset(), size)
}
return a.blocks[addr.blockIdx()].get(addr.blockOffset(), size)
}
func (a *arena) alloc(size int) arenaAddr {
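	// Allocate from the most recent writable block; when none is writable, promote the oldest pending block if its reuse-safe delay has expired, otherwise return nullArenaAddr.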
for {
if len(a.writableQueue) == 0 {
if len(a.pendingBlocks) > 0 {
pending := a.pendingBlocks[0]
if time.Now().After(pending.reusableTime) {
a.writableQueue = append(a.writableQueue, pending.blockIdx)
a.pendingBlocks = a.pendingBlocks[1:]
continue
}
}
return nullArenaAddr
}
availIdx := a.writableQueue[len(a.writableQueue)-1]
block := a.blocks[availIdx]
blockOffset := block.alloc(size)
if blockOffset != nullBlockOffset {
return newArenaAddr(availIdx, blockOffset)
}
a.writableQueue = a.writableQueue[:len(a.writableQueue)-1]
}
}
// free decreases the arena block's reference count and makes the block reusable.
// We don't know whether a concurrent reader may still reference the deleted entry,
// so we must make sure the old data is not referenced for long, and we only overwrite
// it after a safe amount of time.
func (a *arena) free(addr arenaAddr) {
arena := a.blocks[addr.blockIdx()]
arena.ref--
// No reference, the arenaBlock can be reused.
if arena.ref == 0 && arena.length > len(arena.buf) {
a.pendingBlocks = append(a.pendingBlocks, pendingBlock{
blockIdx: addr.blockIdx(),
reusableTime: time.Now().Add(reuseSafeDuration),
})
arena.length = 0
}
}
func (a *arena) grow() *arena {
newLoc := new(arena)
newLoc.blockSize = a.blockSize
newLoc.blocks = make([]*arenaBlock, 0, len(a.blocks)+1)
newLoc.blocks = append(newLoc.blocks, a.blocks...)
availIdx := len(newLoc.blocks)
newLoc.blocks = append(newLoc.blocks, newArenaBlock(a.blockSize))
newLoc.writableQueue = append(newLoc.writableQueue, availIdx)
newLoc.pendingBlocks = a.pendingBlocks
return newLoc
}
type arenaBlock struct {
buf []byte
ref uint64
length int
}
func newArenaBlock(blockSize int) *arenaBlock {
return &arenaBlock{
buf: make([]byte, blockSize),
}
}
func (a *arenaBlock) get(offset uint32, size int) []byte {
return a.buf[offset : offset+uint32(size)]
}
func (a *arenaBlock) alloc(size int) uint32 {
// The returned addr should be aligned in 8 bytes.
offset := (a.length + 7) & alignMask
a.length = offset + size
if a.length > len(a.buf) {
return nullBlockOffset
}
a.ref++
return uint32(offset)
}
| store/mockstore/unistore/lockstore/arena.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.00017715479771140963,
0.00017099609249271452,
0.00016682098794262856,
0.00017068194574676454,
0.00000324320831168734
] |
{
"id": 8,
"code_window": [
"\t\t\t}\n",
"\t\t\tbreak\n",
"\t\t}\n",
"\t}\n",
"\n",
"\tsplitedRanges, _ := distsql.SplitRangesBySign(ts.Ranges, false, false, ts.Table.IsCommonHandle)\n",
"\tif ts.Table.GetPartitionInfo() != nil {\n",
"\t\ttmp, _ := e.is.TableByID(ts.Table.ID)\n",
"\t\ttbl := tmp.(table.PartitionedTable)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tsplitedRanges, _ := distsql.SplitRangesAcrossInt64Boundary(ts.Ranges, false, false, ts.Table.IsCommonHandle)\n"
],
"file_path": "planner/core/fragment.go",
"type": "replace",
"edit_start_line_idx": 200
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package helper
// TypeContext is the template context for each "github.com/pingcap/tidb/types".EvalType .
type TypeContext struct {
// Describe the name of "github.com/pingcap/tidb/types".ET{{ .ETName }} .
ETName string
// Describe the name of "github.com/pingcap/tidb/expression".VecExpr.VecEval{{ .TypeName }} .
TypeName string
// Describe the name of "github.com/pingcap/tidb/util/chunk".*Column.Append{{ .TypeNameInColumn }},
// Resize{{ .TypeNameInColumn }}, Reserve{{ .TypeNameInColumn }}, Get{{ .TypeNameInColumn }} and
// {{ .TypeNameInColumn }}s.
// If undefined, it's same as TypeName.
TypeNameInColumn string
// Describe the type name in golang.
TypeNameGo string
// Same as "github.com/pingcap/tidb/util/chunk".getFixedLen() .
Fixed bool
}
var (
// TypeInt represents the template context of types.ETInt .
TypeInt = TypeContext{ETName: "Int", TypeName: "Int", TypeNameInColumn: "Int64", TypeNameGo: "int64", Fixed: true}
// TypeReal represents the template context of types.ETReal .
TypeReal = TypeContext{ETName: "Real", TypeName: "Real", TypeNameInColumn: "Float64", TypeNameGo: "float64", Fixed: true}
// TypeDecimal represents the template context of types.ETDecimal .
TypeDecimal = TypeContext{ETName: "Decimal", TypeName: "Decimal", TypeNameInColumn: "Decimal", TypeNameGo: "types.MyDecimal", Fixed: true}
// TypeString represents the template context of types.ETString .
TypeString = TypeContext{ETName: "String", TypeName: "String", TypeNameInColumn: "String", TypeNameGo: "string", Fixed: false}
// TypeDatetime represents the template context of types.ETDatetime .
TypeDatetime = TypeContext{ETName: "Datetime", TypeName: "Time", TypeNameInColumn: "Time", TypeNameGo: "types.Time", Fixed: true}
// TypeDuration represents the template context of types.ETDuration .
TypeDuration = TypeContext{ETName: "Duration", TypeName: "Duration", TypeNameInColumn: "GoDuration", TypeNameGo: "time.Duration", Fixed: true}
// TypeJSON represents the template context of types.ETJson .
TypeJSON = TypeContext{ETName: "Json", TypeName: "JSON", TypeNameInColumn: "JSON", TypeNameGo: "json.BinaryJSON", Fixed: false}
)
| expression/generator/helper/helper.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.00017745251534506679,
0.00017301150364801288,
0.00017036421922966838,
0.0001732546224957332,
0.0000025354015633638483
] |
{
"id": 9,
"code_window": [
"\texecutorID := \"\"\n",
"\tif storeType == kv.TiFlash && p.IsGlobalRead {\n",
"\t\ttsExec.NextReadEngine = tipb.EngineType_TiFlash\n",
"\t\tsplitedRanges, _ := distsql.SplitRangesBySign(p.Ranges, false, false, p.Table.IsCommonHandle)\n",
"\t\tranges, err := distsql.TableHandleRangesToKVRanges(ctx.GetSessionVars().StmtCtx, []int64{tsExec.TableId}, p.Table.IsCommonHandle, splitedRanges, nil)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, err\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tsplitedRanges, _ := distsql.SplitRangesAcrossInt64Boundary(p.Ranges, false, false, p.Table.IsCommonHandle)\n"
],
"file_path": "planner/core/plan_to_pb.go",
"type": "replace",
"edit_start_line_idx": 187
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package distsql
import (
"fmt"
"math"
"sort"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/ddl/placement"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
tikvstore "github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tipb/go-tipb"
)
// RequestBuilder is used to build a "kv.Request".
// It is called before we issue a kv request by "Select".
type RequestBuilder struct {
kv.Request
// txnScope indicates the value of txn_scope
txnScope string
is infoschema.InfoSchema
err error
}
// Build builds a "kv.Request".
func (builder *RequestBuilder) Build() (*kv.Request, error) {
err := builder.verifyTxnScope()
if err != nil {
builder.err = err
}
return &builder.Request, builder.err
}
// SetMemTracker sets a memTracker for this request.
func (builder *RequestBuilder) SetMemTracker(tracker *memory.Tracker) *RequestBuilder {
builder.Request.MemTracker = tracker
return builder
}
// SetTableRanges sets "KeyRanges" for "kv.Request" by converting "tableRanges"
// to "KeyRanges" firstly.
// Note this function should be deleted or at least not exported, but currently
// br refers to it, so we have to keep it.
func (builder *RequestBuilder) SetTableRanges(tid int64, tableRanges []*ranger.Range, fb *statistics.QueryFeedback) *RequestBuilder {
if builder.err == nil {
builder.Request.KeyRanges = TableRangesToKVRanges(tid, tableRanges, fb)
}
return builder
}
// SetIndexRanges sets "KeyRanges" for "kv.Request" by converting index range
// "ranges" to "KeyRanges" firstly.
func (builder *RequestBuilder) SetIndexRanges(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range) *RequestBuilder {
if builder.err == nil {
builder.Request.KeyRanges, builder.err = IndexRangesToKVRanges(sc, tid, idxID, ranges, nil)
}
return builder
}
// SetIndexRangesForTables sets "KeyRanges" for "kv.Request" by converting multiple indexes range
// "ranges" to "KeyRanges" firstly.
func (builder *RequestBuilder) SetIndexRangesForTables(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range) *RequestBuilder {
if builder.err == nil {
builder.Request.KeyRanges, builder.err = IndexRangesToKVRangesForTables(sc, tids, idxID, ranges, nil)
}
return builder
}
// SetHandleRanges sets "KeyRanges" for "kv.Request" by converting table handle range
// "ranges" to "KeyRanges" firstly.
func (builder *RequestBuilder) SetHandleRanges(sc *stmtctx.StatementContext, tid int64, isCommonHandle bool, ranges []*ranger.Range, fb *statistics.QueryFeedback) *RequestBuilder {
return builder.SetHandleRangesForTables(sc, []int64{tid}, isCommonHandle, ranges, fb)
}
// SetHandleRangesForTables sets "KeyRanges" for "kv.Request" by converting table handle range
// "ranges" to "KeyRanges" firstly for multiple tables.
func (builder *RequestBuilder) SetHandleRangesForTables(sc *stmtctx.StatementContext, tid []int64, isCommonHandle bool, ranges []*ranger.Range, fb *statistics.QueryFeedback) *RequestBuilder {
if builder.err == nil {
builder.Request.KeyRanges, builder.err = TableHandleRangesToKVRanges(sc, tid, isCommonHandle, ranges, fb)
}
return builder
}
// SetTableHandles sets "KeyRanges" for "kv.Request" by converting table handles
// "handles" to "KeyRanges" firstly.
func (builder *RequestBuilder) SetTableHandles(tid int64, handles []kv.Handle) *RequestBuilder {
builder.Request.KeyRanges = TableHandlesToKVRanges(tid, handles)
return builder
}
// SetPartitionsAndHandles sets "KeyRanges" for "kv.Request" by converting PartitionHandles to KeyRanges.
// Every handle in the slice must be a kv.PartitionHandle.
func (builder *RequestBuilder) SetPartitionsAndHandles(handles []kv.Handle) *RequestBuilder {
builder.Request.KeyRanges = PartitionHandlesToKVRanges(handles)
return builder
}
const estimatedRegionRowCount = 100000
// SetDAGRequest sets the request type to "ReqTypeDAG" and constructs the request data.
func (builder *RequestBuilder) SetDAGRequest(dag *tipb.DAGRequest) *RequestBuilder {
if builder.err == nil {
builder.Request.Tp = kv.ReqTypeDAG
builder.Request.Cacheable = true
builder.Request.Data, builder.err = dag.Marshal()
}
// When the DAG is just a simple scan with a small limit, setting the concurrency to 1 is sufficient.
if len(dag.Executors) == 2 && dag.Executors[1].GetLimit() != nil {
limit := dag.Executors[1].GetLimit()
if limit != nil && limit.Limit < estimatedRegionRowCount {
builder.Request.Concurrency = 1
}
}
return builder
}
// SetAnalyzeRequest sets the request type to "ReqTypeAnalyze" and constructs the request data.
func (builder *RequestBuilder) SetAnalyzeRequest(ana *tipb.AnalyzeReq) *RequestBuilder {
if builder.err == nil {
builder.Request.Tp = kv.ReqTypeAnalyze
builder.Request.Data, builder.err = ana.Marshal()
builder.Request.NotFillCache = true
builder.Request.IsolationLevel = tikvstore.RC
builder.Request.Priority = tikvstore.PriorityLow
}
return builder
}
// SetChecksumRequest sets the request type to "ReqTypeChecksum" and constructs the request data.
func (builder *RequestBuilder) SetChecksumRequest(checksum *tipb.ChecksumRequest) *RequestBuilder {
if builder.err == nil {
builder.Request.Tp = kv.ReqTypeChecksum
builder.Request.Data, builder.err = checksum.Marshal()
builder.Request.NotFillCache = true
}
return builder
}
// SetKeyRanges sets "KeyRanges" for "kv.Request".
func (builder *RequestBuilder) SetKeyRanges(keyRanges []kv.KeyRange) *RequestBuilder {
builder.Request.KeyRanges = keyRanges
return builder
}
// SetStartTS sets "StartTS" for "kv.Request".
func (builder *RequestBuilder) SetStartTS(startTS uint64) *RequestBuilder {
builder.Request.StartTs = startTS
return builder
}
// SetDesc sets "Desc" for "kv.Request".
func (builder *RequestBuilder) SetDesc(desc bool) *RequestBuilder {
builder.Request.Desc = desc
return builder
}
// SetKeepOrder sets "KeepOrder" for "kv.Request".
func (builder *RequestBuilder) SetKeepOrder(order bool) *RequestBuilder {
builder.Request.KeepOrder = order
return builder
}
// SetStoreType sets "StoreType" for "kv.Request".
func (builder *RequestBuilder) SetStoreType(storeType kv.StoreType) *RequestBuilder {
builder.Request.StoreType = storeType
return builder
}
// SetAllowBatchCop sets `BatchCop` property.
func (builder *RequestBuilder) SetAllowBatchCop(batchCop bool) *RequestBuilder {
builder.Request.BatchCop = batchCop
return builder
}
func (builder *RequestBuilder) getIsolationLevel() tikvstore.IsoLevel {
switch builder.Tp {
case kv.ReqTypeAnalyze:
return tikvstore.RC
}
return tikvstore.SI
}
func (builder *RequestBuilder) getKVPriority(sv *variable.SessionVars) int {
switch sv.StmtCtx.Priority {
case mysql.NoPriority, mysql.DelayedPriority:
return tikvstore.PriorityNormal
case mysql.LowPriority:
return tikvstore.PriorityLow
case mysql.HighPriority:
return tikvstore.PriorityHigh
}
return tikvstore.PriorityNormal
}
// SetFromSessionVars sets the following fields for "kv.Request" from session variables:
// "Concurrency", "IsolationLevel", "NotFillCache", "ReplicaRead", "SchemaVar".
func (builder *RequestBuilder) SetFromSessionVars(sv *variable.SessionVars) *RequestBuilder {
if builder.Request.Concurrency == 0 {
// Concurrency may be set to 1 by SetDAGRequest
builder.Request.Concurrency = sv.DistSQLScanConcurrency()
}
builder.Request.IsolationLevel = builder.getIsolationLevel()
builder.Request.NotFillCache = sv.StmtCtx.NotFillCache
builder.Request.TaskID = sv.StmtCtx.TaskID
builder.Request.Priority = builder.getKVPriority(sv)
builder.Request.ReplicaRead = sv.GetReplicaRead()
if sv.SnapshotInfoschema != nil {
builder.Request.SchemaVar = infoschema.GetInfoSchemaBySessionVars(sv).SchemaMetaVersion()
} else {
builder.Request.SchemaVar = sv.TxnCtx.SchemaVersion
}
builder.txnScope = sv.TxnCtx.TxnScope
builder.IsStaleness = sv.TxnCtx.IsStaleness
if builder.IsStaleness && builder.txnScope != oracle.GlobalTxnScope {
builder.MatchStoreLabels = []*metapb.StoreLabel{
{
Key: placement.DCLabelKey,
Value: builder.txnScope,
},
}
}
return builder
}
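// exampleBuildTableScanRequest is an illustrative sketch and not part of the original file: it chains
// several of the RequestBuilder setters defined in this file to build a kv.Request for a hypothetical
// table id 42; any error recorded by a setter surfaces from Build.
func exampleBuildTableScanRequest(sv *variable.SessionVars, ranges []*ranger.Range) (*kv.Request, error) {
	var builder RequestBuilder
	return builder.
		SetTableRanges(42, ranges, nil).
		SetDesc(false).
		SetKeepOrder(true).
		SetFromSessionVars(sv).
		Build()
}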
// SetStreaming sets "Streaming" flag for "kv.Request".
func (builder *RequestBuilder) SetStreaming(streaming bool) *RequestBuilder {
builder.Request.Streaming = streaming
return builder
}
// SetConcurrency sets "Concurrency" for "kv.Request".
func (builder *RequestBuilder) SetConcurrency(concurrency int) *RequestBuilder {
builder.Request.Concurrency = concurrency
return builder
}
// SetTiDBServerID sets "TiDBServerID" for "kv.Request"
// ServerID is a unique id of TiDB instance among the cluster.
// See https://github.com/pingcap/tidb/blob/master/docs/design/2020-06-01-global-kill.md
func (builder *RequestBuilder) SetTiDBServerID(serverID uint64) *RequestBuilder {
builder.Request.TiDBServerID = serverID
return builder
}
// SetFromInfoSchema sets the following fields from infoSchema:
// "bundles"
func (builder *RequestBuilder) SetFromInfoSchema(is infoschema.InfoSchema) *RequestBuilder {
if is == nil {
return builder
}
builder.is = is
return builder
}
func (builder *RequestBuilder) verifyTxnScope() error {
if builder.txnScope == "" {
builder.txnScope = oracle.GlobalTxnScope
}
if builder.txnScope == oracle.GlobalTxnScope || builder.is == nil {
return nil
}
visitPhysicalTableID := make(map[int64]struct{})
for _, keyRange := range builder.Request.KeyRanges {
tableID := tablecodec.DecodeTableID(keyRange.StartKey)
if tableID > 0 {
visitPhysicalTableID[tableID] = struct{}{}
} else {
return errors.New("requestBuilder can't decode tableID from keyRange")
}
}
for phyTableID := range visitPhysicalTableID {
valid := VerifyTxnScope(builder.txnScope, phyTableID, builder.is)
if !valid {
var tblName string
var partName string
tblInfo, _, partInfo := builder.is.FindTableByPartitionID(phyTableID)
if tblInfo != nil && partInfo != nil {
tblName = tblInfo.Meta().Name.String()
partName = partInfo.Name.String()
} else {
tblInfo, _ = builder.is.TableByID(phyTableID)
tblName = tblInfo.Meta().Name.String()
}
err := fmt.Errorf("table %v can not be read by %v txn_scope", tblName, builder.txnScope)
if len(partName) > 0 {
err = fmt.Errorf("table %v's partition %v can not be read by %v txn_scope",
tblName, partName, builder.txnScope)
}
return err
}
}
return nil
}
// TableHandleRangesToKVRanges converts table handle ranges to "KeyRanges" for multiple tables.
func TableHandleRangesToKVRanges(sc *stmtctx.StatementContext, tid []int64, isCommonHandle bool, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) {
if !isCommonHandle {
return tablesRangesToKVRanges(tid, ranges, fb), nil
}
return CommonHandleRangesToKVRanges(sc, tid, ranges)
}
// TableRangesToKVRanges converts table ranges to "KeyRange".
// Note this function should not be exported, but currently
// br refers to it, so we have to keep it.
func TableRangesToKVRanges(tid int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) []kv.KeyRange {
return tablesRangesToKVRanges([]int64{tid}, ranges, fb)
}
// tablesRangesToKVRanges converts table ranges to "KeyRange".
func tablesRangesToKVRanges(tids []int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) []kv.KeyRange {
if fb == nil || fb.Hist == nil {
return tableRangesToKVRangesWithoutSplit(tids, ranges)
}
krs := make([]kv.KeyRange, 0, len(ranges))
feedbackRanges := make([]*ranger.Range, 0, len(ranges))
for _, ran := range ranges {
low := codec.EncodeInt(nil, ran.LowVal[0].GetInt64())
high := codec.EncodeInt(nil, ran.HighVal[0].GetInt64())
if ran.LowExclude {
low = kv.Key(low).PrefixNext()
}
// If this range is split by the histogram, the high value will equal one bucket's upper bound.
// Since we need to guarantee that each range falls inside exactly one bucket, and `PrefixNext` would make
// the high value greater than that upper bound, we store the range here before adjusting it.
r := &ranger.Range{LowVal: []types.Datum{types.NewBytesDatum(low)},
HighVal: []types.Datum{types.NewBytesDatum(high)}}
feedbackRanges = append(feedbackRanges, r)
if !ran.HighExclude {
high = kv.Key(high).PrefixNext()
}
for _, tid := range tids {
startKey := tablecodec.EncodeRowKey(tid, low)
endKey := tablecodec.EncodeRowKey(tid, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
}
}
fb.StoreRanges(feedbackRanges)
return krs
}
func tableRangesToKVRangesWithoutSplit(tids []int64, ranges []*ranger.Range) []kv.KeyRange {
krs := make([]kv.KeyRange, 0, len(ranges)*len(tids))
for _, ran := range ranges {
low, high := encodeHandleKey(ran)
for _, tid := range tids {
startKey := tablecodec.EncodeRowKey(tid, low)
endKey := tablecodec.EncodeRowKey(tid, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
}
}
return krs
}
func encodeHandleKey(ran *ranger.Range) ([]byte, []byte) {
low := codec.EncodeInt(nil, ran.LowVal[0].GetInt64())
high := codec.EncodeInt(nil, ran.HighVal[0].GetInt64())
if ran.LowExclude {
low = kv.Key(low).PrefixNext()
}
if !ran.HighExclude {
high = kv.Key(high).PrefixNext()
}
return low, high
}
// SplitRangesBySign splits the ranges into two parts:
//  1. signedRanges: the ranges whose values are less than or equal to MaxInt64.
//  2. unsignedRanges: the ranges whose values are greater than MaxInt64.
// We do that because the TiKV key encoding treats every key as a signed int, so MaxUint64 is actually
// encoded as a value smaller than zero. Therefore we must
//  1. pick the range that straddles MaxInt64,
//  2. split that range into two parts: the part smaller than or equal to MaxInt64 and the part greater than it,
//  3. if ascending order is required, return the signed ranges first, and vice versa,
//  4. if no order is required, return the unsigned ranges first, because that is the normal order of a
//     TiKV scan.
func SplitRangesBySign(ranges []*ranger.Range, keepOrder bool, desc bool, isCommonHandle bool) ([]*ranger.Range, []*ranger.Range) {
if isCommonHandle || len(ranges) == 0 || ranges[0].LowVal[0].Kind() == types.KindInt64 {
return ranges, nil
}
idx := sort.Search(len(ranges), func(i int) bool { return ranges[i].HighVal[0].GetUint64() > math.MaxInt64 })
if idx == len(ranges) {
return ranges, nil
}
if ranges[idx].LowVal[0].GetUint64() > math.MaxInt64 {
signedRanges := ranges[0:idx]
unsignedRanges := ranges[idx:]
if !keepOrder {
return append(unsignedRanges, signedRanges...), nil
}
if desc {
return unsignedRanges, signedRanges
}
return signedRanges, unsignedRanges
}
signedRanges := make([]*ranger.Range, 0, idx+1)
unsignedRanges := make([]*ranger.Range, 0, len(ranges)-idx)
signedRanges = append(signedRanges, ranges[0:idx]...)
if !(ranges[idx].LowVal[0].GetUint64() == math.MaxInt64 && ranges[idx].LowExclude) {
signedRanges = append(signedRanges, &ranger.Range{
LowVal: ranges[idx].LowVal,
LowExclude: ranges[idx].LowExclude,
HighVal: []types.Datum{types.NewUintDatum(math.MaxInt64)},
})
}
if !(ranges[idx].HighVal[0].GetUint64() == math.MaxInt64+1 && ranges[idx].HighExclude) {
unsignedRanges = append(unsignedRanges, &ranger.Range{
LowVal: []types.Datum{types.NewUintDatum(math.MaxInt64 + 1)},
HighVal: ranges[idx].HighVal,
HighExclude: ranges[idx].HighExclude,
})
}
if idx < len(ranges) {
unsignedRanges = append(unsignedRanges, ranges[idx+1:]...)
}
if !keepOrder {
return append(unsignedRanges, signedRanges...), nil
}
if desc {
return unsignedRanges, signedRanges
}
return signedRanges, unsignedRanges
}
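// exampleSplitRangesBySign is an illustrative sketch and not part of the original file: it shows how a
// single unsigned range that straddles MaxInt64 is split into a signed part [MaxInt64-1, MaxInt64] and an
// unsigned part [MaxInt64+1, MaxInt64+1]. With keepOrder=false both parts come back in the first return
// value, unsigned part first; the concrete bounds are hypothetical.
func exampleSplitRangesBySign() ([]*ranger.Range, []*ranger.Range) {
	straddling := &ranger.Range{
		LowVal:  []types.Datum{types.NewUintDatum(math.MaxInt64 - 1)},
		HighVal: []types.Datum{types.NewUintDatum(math.MaxInt64 + 1)},
	}
	return SplitRangesBySign([]*ranger.Range{straddling}, false, false, false)
}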
// TableHandlesToKVRanges converts sorted handles to kv ranges.
// Continuous handles are merged into a single key range.
func TableHandlesToKVRanges(tid int64, handles []kv.Handle) []kv.KeyRange {
krs := make([]kv.KeyRange, 0, len(handles))
i := 0
for i < len(handles) {
if commonHandle, ok := handles[i].(*kv.CommonHandle); ok {
ran := kv.KeyRange{
StartKey: tablecodec.EncodeRowKey(tid, commonHandle.Encoded()),
EndKey: tablecodec.EncodeRowKey(tid, kv.Key(commonHandle.Encoded()).Next()),
}
krs = append(krs, ran)
i++
continue
}
j := i + 1
for ; j < len(handles) && handles[j-1].IntValue() != math.MaxInt64; j++ {
if handles[j].IntValue() != handles[j-1].IntValue()+1 {
break
}
}
low := codec.EncodeInt(nil, handles[i].IntValue())
high := codec.EncodeInt(nil, handles[j-1].IntValue())
high = kv.Key(high).PrefixNext()
startKey := tablecodec.EncodeRowKey(tid, low)
endKey := tablecodec.EncodeRowKey(tid, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
i = j
}
return krs
}
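// exampleMergeContinuousHandles is an illustrative sketch and not part of the original file: it shows that
// continuous int handles collapse into one key range while a gap starts a new one. The table id 1 and the
// handle values are hypothetical.
func exampleMergeContinuousHandles() []kv.KeyRange {
	handles := []kv.Handle{
		kv.IntHandle(1), kv.IntHandle(2), kv.IntHandle(3), // merged into a single range covering handles 1 through 3
		kv.IntHandle(10), // the gap after 3 starts a second range
	}
	return TableHandlesToKVRanges(1, handles)
}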
// PartitionHandlesToKVRanges converts PartitionHandles to kv ranges.
// Every handle in the slice must be a kv.PartitionHandle.
func PartitionHandlesToKVRanges(handles []kv.Handle) []kv.KeyRange {
krs := make([]kv.KeyRange, 0, len(handles))
i := 0
for i < len(handles) {
ph := handles[i].(kv.PartitionHandle)
h := ph.Handle
pid := ph.PartitionID
if commonHandle, ok := h.(*kv.CommonHandle); ok {
ran := kv.KeyRange{
StartKey: tablecodec.EncodeRowKey(pid, commonHandle.Encoded()),
EndKey: tablecodec.EncodeRowKey(pid, append(commonHandle.Encoded(), 0)),
}
krs = append(krs, ran)
i++
continue
}
j := i + 1
for ; j < len(handles) && handles[j-1].IntValue() != math.MaxInt64; j++ {
if handles[j].IntValue() != handles[j-1].IntValue()+1 {
break
}
if handles[j].(kv.PartitionHandle).PartitionID != pid {
break
}
}
low := codec.EncodeInt(nil, handles[i].IntValue())
high := codec.EncodeInt(nil, handles[j-1].IntValue())
high = kv.Key(high).PrefixNext()
startKey := tablecodec.EncodeRowKey(pid, low)
endKey := tablecodec.EncodeRowKey(pid, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
i = j
}
return krs
}
// IndexRangesToKVRanges converts index ranges to "KeyRange".
func IndexRangesToKVRanges(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) {
return IndexRangesToKVRangesForTables(sc, []int64{tid}, idxID, ranges, fb)
}
// IndexRangesToKVRangesForTables converts indexes ranges to "KeyRange".
func IndexRangesToKVRangesForTables(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) {
if fb == nil || fb.Hist == nil {
return indexRangesToKVWithoutSplit(sc, tids, idxID, ranges)
}
feedbackRanges := make([]*ranger.Range, 0, len(ranges))
for _, ran := range ranges {
low, high, err := encodeIndexKey(sc, ran)
if err != nil {
return nil, err
}
feedbackRanges = append(feedbackRanges, &ranger.Range{LowVal: []types.Datum{types.NewBytesDatum(low)},
HighVal: []types.Datum{types.NewBytesDatum(high)}, LowExclude: false, HighExclude: true})
}
feedbackRanges, ok := fb.Hist.SplitRange(sc, feedbackRanges, true)
if !ok {
fb.Invalidate()
}
krs := make([]kv.KeyRange, 0, len(feedbackRanges))
for _, ran := range feedbackRanges {
low, high := ran.LowVal[0].GetBytes(), ran.HighVal[0].GetBytes()
if ran.LowExclude {
low = kv.Key(low).PrefixNext()
}
ran.LowVal[0].SetBytes(low)
// If this range is split by the histogram, the high value will equal one bucket's upper bound.
// Since we need to guarantee that each range falls inside exactly one bucket, and `PrefixNext` would make
// the high value greater than that upper bound, we store the high value here before adjusting it.
ran.HighVal[0].SetBytes(high)
if !ran.HighExclude {
high = kv.Key(high).PrefixNext()
}
for _, tid := range tids {
startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low)
endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
}
}
fb.StoreRanges(feedbackRanges)
return krs, nil
}
// CommonHandleRangesToKVRanges converts common handle ranges to "KeyRange".
func CommonHandleRangesToKVRanges(sc *stmtctx.StatementContext, tids []int64, ranges []*ranger.Range) ([]kv.KeyRange, error) {
rans := make([]*ranger.Range, 0, len(ranges))
for _, ran := range ranges {
low, high, err := encodeIndexKey(sc, ran)
if err != nil {
return nil, err
}
rans = append(rans, &ranger.Range{LowVal: []types.Datum{types.NewBytesDatum(low)},
HighVal: []types.Datum{types.NewBytesDatum(high)}, LowExclude: false, HighExclude: true})
}
krs := make([]kv.KeyRange, 0, len(rans))
for _, ran := range rans {
low, high := ran.LowVal[0].GetBytes(), ran.HighVal[0].GetBytes()
if ran.LowExclude {
low = kv.Key(low).PrefixNext()
}
ran.LowVal[0].SetBytes(low)
for _, tid := range tids {
startKey := tablecodec.EncodeRowKey(tid, low)
endKey := tablecodec.EncodeRowKey(tid, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
}
}
return krs, nil
}
// VerifyTxnScope verifies whether the txnScope and the visited physical table break the leader placement rule's dcLocation.
func VerifyTxnScope(txnScope string, physicalTableID int64, is infoschema.InfoSchema) bool {
if txnScope == "" || txnScope == oracle.GlobalTxnScope {
return true
}
bundle, ok := is.BundleByName(placement.GroupID(physicalTableID))
if !ok {
return true
}
leaderDC, ok := placement.GetLeaderDCByBundle(bundle, placement.DCLabelKey)
if !ok {
return true
}
if leaderDC != txnScope {
return false
}
return true
}
func indexRangesToKVWithoutSplit(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range) ([]kv.KeyRange, error) {
krs := make([]kv.KeyRange, 0, len(ranges))
for _, ran := range ranges {
low, high, err := encodeIndexKey(sc, ran)
if err != nil {
return nil, err
}
for _, tid := range tids {
startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low)
endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
}
}
return krs, nil
}
func encodeIndexKey(sc *stmtctx.StatementContext, ran *ranger.Range) ([]byte, []byte, error) {
low, err := codec.EncodeKey(sc, nil, ran.LowVal...)
if err != nil {
return nil, nil, err
}
if ran.LowExclude {
low = kv.Key(low).PrefixNext()
}
high, err := codec.EncodeKey(sc, nil, ran.HighVal...)
if err != nil {
return nil, nil, err
}
if !ran.HighExclude {
high = kv.Key(high).PrefixNext()
}
var hasNull bool
for _, highVal := range ran.HighVal {
if highVal.IsNull() {
hasNull = true
break
}
}
if hasNull {
// Append 0 to make the unique-key range [null, null] a scan rather than a point-get.
high = kv.Key(high).Next()
}
return low, high, nil
}
| distsql/request_builder.go | 1 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.9920428395271301,
0.10298201441764832,
0.00016221401165239513,
0.00030405796132981777,
0.29208025336265564
] |
{
"id": 9,
"code_window": [
"\texecutorID := \"\"\n",
"\tif storeType == kv.TiFlash && p.IsGlobalRead {\n",
"\t\ttsExec.NextReadEngine = tipb.EngineType_TiFlash\n",
"\t\tsplitedRanges, _ := distsql.SplitRangesBySign(p.Ranges, false, false, p.Table.IsCommonHandle)\n",
"\t\tranges, err := distsql.TableHandleRangesToKVRanges(ctx.GetSessionVars().StmtCtx, []int64{tsExec.TableId}, p.Table.IsCommonHandle, splitedRanges, nil)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, err\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tsplitedRanges, _ := distsql.SplitRangesAcrossInt64Boundary(p.Ranges, false, false, p.Table.IsCommonHandle)\n"
],
"file_path": "planner/core/plan_to_pb.go",
"type": "replace",
"edit_start_line_idx": 187
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
. "github.com/pingcap/check"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
)
func (ts *ConnTestSuite) TestParseExecArgs(c *C) {
type args struct {
args []types.Datum
boundParams [][]byte
nullBitmap []byte
paramTypes []byte
paramValues []byte
}
tests := []struct {
args args
err error
expect interface{}
}{
// Tests for int overflow
{
args{
make([]types.Datum, 1),
[][]byte{nil},
[]byte{0x0},
[]byte{1, 0},
[]byte{0xff},
},
nil,
int64(-1),
},
{
args{
make([]types.Datum, 1),
[][]byte{nil},
[]byte{0x0},
[]byte{2, 0},
[]byte{0xff, 0xff},
},
nil,
int64(-1),
},
{
args{
make([]types.Datum, 1),
[][]byte{nil},
[]byte{0x0},
[]byte{3, 0},
[]byte{0xff, 0xff, 0xff, 0xff},
},
nil,
int64(-1),
},
// Tests for date/datetime/timestamp
{
args{
make([]types.Datum, 1),
[][]byte{nil},
[]byte{0x0},
[]byte{12, 0},
[]byte{0x0b, 0xda, 0x07, 0x0a, 0x11, 0x13, 0x1b, 0x1e, 0x01, 0x00, 0x00, 0x00},
},
nil,
"2010-10-17 19:27:30.000001",
},
{
args{
make([]types.Datum, 1),
[][]byte{nil},
[]byte{0x0},
[]byte{10, 0},
[]byte{0x04, 0xda, 0x07, 0x0a, 0x11},
},
nil,
"2010-10-17",
},
{
args{
make([]types.Datum, 1),
[][]byte{nil},
[]byte{0x0},
[]byte{7, 0},
[]byte{0x0b, 0xda, 0x07, 0x0a, 0x11, 0x13, 0x1b, 0x1e, 0x01, 0x00, 0x00, 0x00},
},
nil,
"2010-10-17 19:27:30.000001",
},
{
args{
make([]types.Datum, 1),
[][]byte{nil},
[]byte{0x0},
[]byte{7, 0},
[]byte{0x07, 0xda, 0x07, 0x0a, 0x11, 0x13, 0x1b, 0x1e},
},
nil,
"2010-10-17 19:27:30",
},
{
args{
make([]types.Datum, 1),
[][]byte{nil},
[]byte{0x0},
[]byte{7, 0},
[]byte{0x00},
},
nil,
types.ZeroDatetimeStr,
},
// Tests for time
{
args{
make([]types.Datum, 1),
[][]byte{nil},
[]byte{0x0},
[]byte{11, 0},
[]byte{0x0c, 0x01, 0x78, 0x00, 0x00, 0x00, 0x13, 0x1b, 0x1e, 0x01, 0x00, 0x00, 0x00},
},
nil,
"-120 19:27:30.000001",
},
{
args{
make([]types.Datum, 1),
[][]byte{nil},
[]byte{0x0},
[]byte{11, 0},
[]byte{0x08, 0x01, 0x78, 0x00, 0x00, 0x00, 0x13, 0x1b, 0x1e},
},
nil,
"-120 19:27:30",
},
{
args{
make([]types.Datum, 1),
[][]byte{nil},
[]byte{0x0},
[]byte{11, 0},
[]byte{0x00},
},
nil,
"0",
},
// For error test
{
args{
make([]types.Datum, 1),
[][]byte{nil},
[]byte{0x0},
[]byte{7, 0},
[]byte{10},
},
mysql.ErrMalformPacket,
nil,
},
{
args{
make([]types.Datum, 1),
[][]byte{nil},
[]byte{0x0},
[]byte{11, 0},
[]byte{10},
},
mysql.ErrMalformPacket,
nil,
},
{
args{
make([]types.Datum, 1),
[][]byte{nil},
[]byte{0x0},
[]byte{11, 0},
[]byte{8, 2},
},
mysql.ErrMalformPacket,
nil,
},
}
for _, tt := range tests {
err := parseExecArgs(&stmtctx.StatementContext{}, tt.args.args, tt.args.boundParams, tt.args.nullBitmap, tt.args.paramTypes, tt.args.paramValues)
c.Assert(terror.ErrorEqual(err, tt.err), IsTrue, Commentf("err %v", err))
c.Assert(tt.args.args[0].GetValue(), Equals, tt.expect)
}
}
func (ts *ConnTestSuite) TestParseStmtFetchCmd(c *C) {
tests := []struct {
arg []byte
stmtID uint32
fetchSize uint32
err error
}{
{[]byte{3, 0, 0, 0, 50, 0, 0, 0}, 3, 50, nil},
{[]byte{5, 0, 0, 0, 232, 3, 0, 0}, 5, 1000, nil},
{[]byte{5, 0, 0, 0, 0, 8, 0, 0}, 5, maxFetchSize, nil},
{[]byte{5, 0, 0}, 0, 0, mysql.ErrMalformPacket},
{[]byte{1, 0, 0, 0, 3, 2, 0, 0, 3, 5, 6}, 0, 0, mysql.ErrMalformPacket},
{[]byte{}, 0, 0, mysql.ErrMalformPacket},
}
for _, t := range tests {
stmtID, fetchSize, err := parseStmtFetchCmd(t.arg)
c.Assert(stmtID, Equals, t.stmtID)
c.Assert(fetchSize, Equals, t.fetchSize)
c.Assert(err, Equals, t.err)
}
}
| server/conn_stmt_test.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.0022074885200709105,
0.0003167389950249344,
0.00016304284508805722,
0.00017273800040129572,
0.0004769304068759084
] |
{
"id": 9,
"code_window": [
"\texecutorID := \"\"\n",
"\tif storeType == kv.TiFlash && p.IsGlobalRead {\n",
"\t\ttsExec.NextReadEngine = tipb.EngineType_TiFlash\n",
"\t\tsplitedRanges, _ := distsql.SplitRangesBySign(p.Ranges, false, false, p.Table.IsCommonHandle)\n",
"\t\tranges, err := distsql.TableHandleRangesToKVRanges(ctx.GetSessionVars().StmtCtx, []int64{tsExec.TableId}, p.Table.IsCommonHandle, splitedRanges, nil)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, err\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tsplitedRanges, _ := distsql.SplitRangesAcrossInt64Boundary(p.Ranges, false, false, p.Table.IsCommonHandle)\n"
],
"file_path": "planner/core/plan_to_pb.go",
"type": "replace",
"edit_start_line_idx": 187
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import "testing"
func TestIsQuery(t *testing.T) {
tbl := []struct {
sql string
ok bool
}{
{"/*comment*/ select 1;", true},
{"/*comment*/ /*comment*/ select 1;", true},
{"select /*comment*/ 1 /*comment*/;", true},
{"(select /*comment*/ 1 /*comment*/);", true},
}
for _, tb := range tbl {
if isQuery(tb.sql) != tb.ok {
t.Fatalf("%s", tb.sql)
}
}
}
func TestTrimSQL(t *testing.T) {
tbl := []struct {
sql string
target string
}{
{"/*comment*/ select 1; ", "select 1;"},
{"/*comment*/ /*comment*/ select 1;", "select 1;"},
{"select /*comment*/ 1 /*comment*/;", "select /*comment*/ 1 /*comment*/;"},
{"/*comment select 1; ", "/*comment select 1;"},
}
for _, tb := range tbl {
if trimSQL(tb.sql) != tb.target {
t.Fatalf("%s", tb.sql)
}
}
}
| cmd/explaintest/main_test.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.00033291830914095044,
0.00022146658739075065,
0.0001667836040724069,
0.00017632526578381658,
0.00006918192229932174
] |
{
"id": 9,
"code_window": [
"\texecutorID := \"\"\n",
"\tif storeType == kv.TiFlash && p.IsGlobalRead {\n",
"\t\ttsExec.NextReadEngine = tipb.EngineType_TiFlash\n",
"\t\tsplitedRanges, _ := distsql.SplitRangesBySign(p.Ranges, false, false, p.Table.IsCommonHandle)\n",
"\t\tranges, err := distsql.TableHandleRangesToKVRanges(ctx.GetSessionVars().StmtCtx, []int64{tsExec.TableId}, p.Table.IsCommonHandle, splitedRanges, nil)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, err\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tsplitedRanges, _ := distsql.SplitRangesAcrossInt64Boundary(p.Ranges, false, false, p.Table.IsCommonHandle)\n"
],
"file_path": "planner/core/plan_to_pb.go",
"type": "replace",
"edit_start_line_idx": 187
} | // Copyright 2019-present PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"context"
"io"
"sync/atomic"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/coprocessor"
deadlockPb "github.com/pingcap/kvproto/pkg/deadlock"
"github.com/pingcap/kvproto/pkg/errorpb"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/kvproto/pkg/mpp"
"github.com/pingcap/kvproto/pkg/tikvpb"
"github.com/pingcap/log"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/mockstore/unistore/client"
"github.com/pingcap/tidb/store/mockstore/unistore/cophandler"
"github.com/pingcap/tidb/store/mockstore/unistore/tikv/dbreader"
"github.com/pingcap/tidb/store/mockstore/unistore/tikv/pberror"
"github.com/pingcap/tidb/store/mockstore/unistore/util/lockwaiter"
"github.com/pingcap/tipb/go-tipb"
"go.uber.org/zap"
)
var _ tikvpb.TikvServer = new(Server)
// Server implements the tikvpb.TikvServer interface.
type Server struct {
mvccStore *MVCCStore
regionManager RegionManager
innerServer InnerServer
RPCClient client.Client
refCount int32
stopped int32
}
// NewServer returns a new server.
func NewServer(rm RegionManager, store *MVCCStore, innerServer InnerServer) *Server {
return &Server{
mvccStore: store,
regionManager: rm,
innerServer: innerServer,
}
}
const requestMaxSize = 6 * 1024 * 1024
func (svr *Server) checkRequestSize(size int) *errorpb.Error {
// TiKV has a limitation on raft log size.
// mocktikv has no raft inside, so we check the request's size instead.
if size >= requestMaxSize {
return &errorpb.Error{
RaftEntryTooLarge: &errorpb.RaftEntryTooLarge{},
}
}
return nil
}
// Stop stops the server.
func (svr *Server) Stop() {
atomic.StoreInt32(&svr.stopped, 1)
for {
if atomic.LoadInt32(&svr.refCount) == 0 {
break
}
time.Sleep(time.Millisecond * 10)
}
if err := svr.mvccStore.Close(); err != nil {
log.Error("close mvcc store failed", zap.Error(err))
}
if err := svr.regionManager.Close(); err != nil {
log.Error("close region manager failed", zap.Error(err))
}
if err := svr.innerServer.Stop(); err != nil {
log.Error("close inner server failed", zap.Error(err))
}
}
// GetStoreIDByAddr gets a store id by the store address.
func (svr *Server) GetStoreIDByAddr(addr string) (uint64, error) {
return svr.regionManager.GetStoreIDByAddr(addr)
}
// GetStoreAddrByStoreID gets a store address by the store id.
func (svr *Server) GetStoreAddrByStoreID(storeID uint64) (string, error) {
return svr.regionManager.GetStoreAddrByStoreID(storeID)
}
type requestCtx struct {
svr *Server
regCtx RegionCtx
regErr *errorpb.Error
buf []byte
reader *dbreader.DBReader
method string
startTime time.Time
rpcCtx *kvrpcpb.Context
storeAddr string
storeID uint64
asyncMinCommitTS uint64
onePCCommitTS uint64
}
func newRequestCtx(svr *Server, ctx *kvrpcpb.Context, method string) (*requestCtx, error) {
atomic.AddInt32(&svr.refCount, 1)
if atomic.LoadInt32(&svr.stopped) > 0 {
atomic.AddInt32(&svr.refCount, -1)
return nil, ErrRetryable("server is closed")
}
req := &requestCtx{
svr: svr,
method: method,
startTime: time.Now(),
rpcCtx: ctx,
}
req.regCtx, req.regErr = svr.regionManager.GetRegionFromCtx(ctx)
storeAddr, storeID, regErr := svr.regionManager.GetStoreInfoFromCtx(ctx)
req.storeAddr = storeAddr
req.storeID = storeID
if regErr != nil {
req.regErr = regErr
}
return req, nil
}
// For read-only requests that don't acquire latches, this function must be called after all locks have been checked.
func (req *requestCtx) getDBReader() *dbreader.DBReader {
if req.reader == nil {
mvccStore := req.svr.mvccStore
txn := mvccStore.db.NewTransaction(false)
req.reader = dbreader.NewDBReader(req.regCtx.RawStart(), req.regCtx.RawEnd(), txn)
}
return req.reader
}
func (req *requestCtx) finish() {
atomic.AddInt32(&req.svr.refCount, -1)
if req.reader != nil {
req.reader.Close()
}
}
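// exampleHandlerSkeleton is an illustrative sketch and not part of the original file: it shows the request
// lifecycle that every RPC handler below follows: build a requestCtx, always release it via finish, and
// short-circuit on a region error. The method label "Example" is hypothetical.
func exampleHandlerSkeleton(svr *Server, ctx *kvrpcpb.Context) (*errorpb.Error, error) {
	reqCtx, err := newRequestCtx(svr, ctx, "Example")
	if err != nil {
		return nil, err
	}
	defer reqCtx.finish()
	if reqCtx.regErr != nil {
		return reqCtx.regErr, nil
	}
	_ = reqCtx.getDBReader() // reads must go through the region-scoped reader
	return nil, nil
}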
// KvGet implements the tikvpb.TikvServer interface.
func (svr *Server) KvGet(ctx context.Context, req *kvrpcpb.GetRequest) (*kvrpcpb.GetResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvGet")
if err != nil {
return &kvrpcpb.GetResponse{Error: convertToKeyError(err)}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.GetResponse{RegionError: reqCtx.regErr}, nil
}
err = svr.mvccStore.CheckKeysLock(req.GetVersion(), req.Context.ResolvedLocks, req.Key)
if err != nil {
return &kvrpcpb.GetResponse{Error: convertToKeyError(err)}, nil
}
reader := reqCtx.getDBReader()
val, err := reader.Get(req.Key, req.GetVersion())
if err != nil {
return &kvrpcpb.GetResponse{
Error: convertToKeyError(err),
}, nil
}
val = safeCopy(val)
return &kvrpcpb.GetResponse{
Value: val,
}, nil
}
// KvScan implements the tikvpb.TikvServer interface.
func (svr *Server) KvScan(ctx context.Context, req *kvrpcpb.ScanRequest) (*kvrpcpb.ScanResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvScan")
if err != nil {
return &kvrpcpb.ScanResponse{Pairs: []*kvrpcpb.KvPair{{Error: convertToKeyError(err)}}}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.ScanResponse{RegionError: reqCtx.regErr}, nil
}
pairs := svr.mvccStore.Scan(reqCtx, req)
return &kvrpcpb.ScanResponse{
Pairs: pairs,
}, nil
}
// KvPessimisticLock implements the tikvpb.TikvServer interface.
func (svr *Server) KvPessimisticLock(ctx context.Context, req *kvrpcpb.PessimisticLockRequest) (*kvrpcpb.PessimisticLockResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "PessimisticLock")
if err != nil {
return &kvrpcpb.PessimisticLockResponse{Errors: []*kvrpcpb.KeyError{convertToKeyError(err)}}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.PessimisticLockResponse{RegionError: reqCtx.regErr}, nil
}
resp := &kvrpcpb.PessimisticLockResponse{}
waiter, err := svr.mvccStore.PessimisticLock(reqCtx, req, resp)
resp.Errors, resp.RegionError = convertToPBErrors(err)
if waiter == nil {
return resp, nil
}
result := waiter.Wait()
svr.mvccStore.DeadlockDetectCli.CleanUpWaitFor(req.StartVersion, waiter.LockTS, waiter.KeyHash)
svr.mvccStore.lockWaiterManager.CleanUp(waiter)
if result.WakeupSleepTime == lockwaiter.WaitTimeout {
return resp, nil
}
if result.DeadlockResp != nil {
log.Error("deadlock found", zap.Stringer("entry", &result.DeadlockResp.Entry))
errLocked := err.(*ErrLocked)
deadlockErr := &ErrDeadlock{
LockKey: errLocked.Key,
LockTS: errLocked.Lock.StartTS,
DeadlockKeyHash: result.DeadlockResp.DeadlockKeyHash,
}
resp.Errors, resp.RegionError = convertToPBErrors(deadlockErr)
return resp, nil
}
if result.WakeupSleepTime == lockwaiter.WakeUpThisWaiter {
if req.Force {
req.WaitTimeout = lockwaiter.LockNoWait
_, err := svr.mvccStore.PessimisticLock(reqCtx, req, resp)
resp.Errors, resp.RegionError = convertToPBErrors(err)
if err == nil {
return resp, nil
}
if _, ok := err.(*ErrLocked); !ok {
resp.Errors, resp.RegionError = convertToPBErrors(err)
return resp, nil
}
log.Warn("wakeup force lock request, try lock still failed", zap.Error(err))
}
}
// The key has been rolled back, so we don't have the exact commitTS, but we can use the server's latest.
// Always use the store's latest ts, since the waiter result commitTs may not be the real conflict ts.
conflictCommitTS := svr.mvccStore.getLatestTS()
err = &ErrConflict{
StartTS: req.GetForUpdateTs(),
ConflictTS: waiter.LockTS,
ConflictCommitTS: conflictCommitTS,
}
resp.Errors, _ = convertToPBErrors(err)
return resp, nil
}
// KVPessimisticRollback implements the tikvpb.TikvServer interface.
func (svr *Server) KVPessimisticRollback(ctx context.Context, req *kvrpcpb.PessimisticRollbackRequest) (*kvrpcpb.PessimisticRollbackResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "PessimisticRollback")
if err != nil {
return &kvrpcpb.PessimisticRollbackResponse{Errors: []*kvrpcpb.KeyError{convertToKeyError(err)}}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.PessimisticRollbackResponse{RegionError: reqCtx.regErr}, nil
}
err = svr.mvccStore.PessimisticRollback(reqCtx, req)
resp := &kvrpcpb.PessimisticRollbackResponse{}
resp.Errors, resp.RegionError = convertToPBErrors(err)
return resp, nil
}
// KvTxnHeartBeat implements the tikvpb.TikvServer interface.
func (svr *Server) KvTxnHeartBeat(ctx context.Context, req *kvrpcpb.TxnHeartBeatRequest) (*kvrpcpb.TxnHeartBeatResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "TxnHeartBeat")
if err != nil {
return &kvrpcpb.TxnHeartBeatResponse{Error: convertToKeyError(err)}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.TxnHeartBeatResponse{RegionError: reqCtx.regErr}, nil
}
lockTTL, err := svr.mvccStore.TxnHeartBeat(reqCtx, req)
resp := &kvrpcpb.TxnHeartBeatResponse{LockTtl: lockTTL}
resp.Error, resp.RegionError = convertToPBError(err)
return resp, nil
}
// KvCheckTxnStatus implements the tikvpb.TikvServer interface.
func (svr *Server) KvCheckTxnStatus(ctx context.Context, req *kvrpcpb.CheckTxnStatusRequest) (*kvrpcpb.CheckTxnStatusResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvCheckTxnStatus")
if err != nil {
return &kvrpcpb.CheckTxnStatusResponse{Error: convertToKeyError(err)}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.CheckTxnStatusResponse{RegionError: reqCtx.regErr}, nil
}
txnStatus, err := svr.mvccStore.CheckTxnStatus(reqCtx, req)
ttl := uint64(0)
if txnStatus.lockInfo != nil {
ttl = txnStatus.lockInfo.LockTtl
}
resp := &kvrpcpb.CheckTxnStatusResponse{
LockTtl: ttl,
CommitVersion: txnStatus.commitTS,
Action: txnStatus.action,
LockInfo: txnStatus.lockInfo,
}
resp.Error, resp.RegionError = convertToPBError(err)
return resp, nil
}
// KvCheckSecondaryLocks implements the tikvpb.TikvServer interface.
func (svr *Server) KvCheckSecondaryLocks(ctx context.Context, req *kvrpcpb.CheckSecondaryLocksRequest) (*kvrpcpb.CheckSecondaryLocksResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvCheckSecondaryLocks")
if err != nil {
return &kvrpcpb.CheckSecondaryLocksResponse{Error: convertToKeyError(err)}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.CheckSecondaryLocksResponse{RegionError: reqCtx.regErr}, nil
}
locksStatus, err := svr.mvccStore.CheckSecondaryLocks(reqCtx, req.Keys, req.StartVersion)
resp := &kvrpcpb.CheckSecondaryLocksResponse{}
if err == nil {
resp.Locks = locksStatus.locks
resp.CommitTs = locksStatus.commitTS
} else {
resp.Error, resp.RegionError = convertToPBError(err)
}
return resp, nil
}
// KvPrewrite implements the tikvpb.TikvServer interface.
func (svr *Server) KvPrewrite(ctx context.Context, req *kvrpcpb.PrewriteRequest) (*kvrpcpb.PrewriteResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvPrewrite")
if err != nil {
return &kvrpcpb.PrewriteResponse{Errors: []*kvrpcpb.KeyError{convertToKeyError(err)}}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.PrewriteResponse{RegionError: reqCtx.regErr}, nil
}
err = svr.mvccStore.Prewrite(reqCtx, req)
resp := &kvrpcpb.PrewriteResponse{}
if reqCtx.asyncMinCommitTS > 0 {
resp.MinCommitTs = reqCtx.asyncMinCommitTS
}
if reqCtx.onePCCommitTS > 0 {
resp.OnePcCommitTs = reqCtx.onePCCommitTS
}
resp.Errors, resp.RegionError = convertToPBErrors(err)
return resp, nil
}
// KvCommit implements the tikvpb.TikvServer interface.
func (svr *Server) KvCommit(ctx context.Context, req *kvrpcpb.CommitRequest) (*kvrpcpb.CommitResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvCommit")
if err != nil {
return &kvrpcpb.CommitResponse{Error: convertToKeyError(err)}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.CommitResponse{RegionError: reqCtx.regErr}, nil
}
resp := new(kvrpcpb.CommitResponse)
err = svr.mvccStore.Commit(reqCtx, req.Keys, req.GetStartVersion(), req.GetCommitVersion())
if err != nil {
resp.Error, resp.RegionError = convertToPBError(err)
}
return resp, nil
}
// RawGetKeyTTL implements the tikvpb.TikvServer interface.
func (svr *Server) RawGetKeyTTL(ctx context.Context, req *kvrpcpb.RawGetKeyTTLRequest) (*kvrpcpb.RawGetKeyTTLResponse, error) {
// TODO
return &kvrpcpb.RawGetKeyTTLResponse{}, nil
}
// KvImport implements the tikvpb.TikvServer interface.
func (svr *Server) KvImport(context.Context, *kvrpcpb.ImportRequest) (*kvrpcpb.ImportResponse, error) {
// TODO
return &kvrpcpb.ImportResponse{}, nil
}
// KvCleanup implements the tikvpb.TikvServer interface.
func (svr *Server) KvCleanup(ctx context.Context, req *kvrpcpb.CleanupRequest) (*kvrpcpb.CleanupResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvCleanup")
if err != nil {
return &kvrpcpb.CleanupResponse{Error: convertToKeyError(err)}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.CleanupResponse{RegionError: reqCtx.regErr}, nil
}
err = svr.mvccStore.Cleanup(reqCtx, req.Key, req.StartVersion, req.CurrentTs)
resp := new(kvrpcpb.CleanupResponse)
if committed, ok := err.(ErrAlreadyCommitted); ok {
resp.CommitVersion = uint64(committed)
} else if err != nil {
log.Error("cleanup failed", zap.Error(err))
resp.Error, resp.RegionError = convertToPBError(err)
}
return resp, nil
}
// KvBatchGet implements the tikvpb.TikvServer interface.
func (svr *Server) KvBatchGet(ctx context.Context, req *kvrpcpb.BatchGetRequest) (*kvrpcpb.BatchGetResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvBatchGet")
if err != nil {
return &kvrpcpb.BatchGetResponse{Pairs: []*kvrpcpb.KvPair{{Error: convertToKeyError(err)}}}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.BatchGetResponse{RegionError: reqCtx.regErr}, nil
}
pairs := svr.mvccStore.BatchGet(reqCtx, req.Keys, req.GetVersion())
return &kvrpcpb.BatchGetResponse{
Pairs: pairs,
}, nil
}
// KvBatchRollback implements the tikvpb.TikvServer interface.
func (svr *Server) KvBatchRollback(ctx context.Context, req *kvrpcpb.BatchRollbackRequest) (*kvrpcpb.BatchRollbackResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvBatchRollback")
if err != nil {
return &kvrpcpb.BatchRollbackResponse{Error: convertToKeyError(err)}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.BatchRollbackResponse{RegionError: reqCtx.regErr}, nil
}
resp := new(kvrpcpb.BatchRollbackResponse)
err = svr.mvccStore.Rollback(reqCtx, req.Keys, req.StartVersion)
resp.Error, resp.RegionError = convertToPBError(err)
return resp, nil
}
// KvScanLock implements the tikvpb.TikvServer interface.
func (svr *Server) KvScanLock(ctx context.Context, req *kvrpcpb.ScanLockRequest) (*kvrpcpb.ScanLockResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvScanLock")
if err != nil {
return &kvrpcpb.ScanLockResponse{Error: convertToKeyError(err)}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.ScanLockResponse{RegionError: reqCtx.regErr}, nil
}
log.Debug("kv scan lock")
locks, err := svr.mvccStore.ScanLock(reqCtx, req.MaxVersion, int(req.Limit))
return &kvrpcpb.ScanLockResponse{Error: convertToKeyError(err), Locks: locks}, nil
}
// KvResolveLock implements the tikvpb.TikvServer interface.
func (svr *Server) KvResolveLock(ctx context.Context, req *kvrpcpb.ResolveLockRequest) (*kvrpcpb.ResolveLockResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvResolveLock")
if err != nil {
return &kvrpcpb.ResolveLockResponse{Error: convertToKeyError(err)}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.ResolveLockResponse{RegionError: reqCtx.regErr}, nil
}
resp := &kvrpcpb.ResolveLockResponse{}
if len(req.TxnInfos) > 0 {
for _, txnInfo := range req.TxnInfos {
log.S().Debugf("kv resolve lock region:%d txn:%v", reqCtx.regCtx.Meta().Id, txnInfo.Txn)
err := svr.mvccStore.ResolveLock(reqCtx, nil, txnInfo.Txn, txnInfo.Status)
if err != nil {
resp.Error, resp.RegionError = convertToPBError(err)
break
}
}
} else {
log.S().Debugf("kv resolve lock region:%d txn:%v", reqCtx.regCtx.Meta().Id, req.StartVersion)
err := svr.mvccStore.ResolveLock(reqCtx, req.Keys, req.StartVersion, req.CommitVersion)
resp.Error, resp.RegionError = convertToPBError(err)
}
return resp, nil
}
// KvGC implements the tikvpb.TikvServer interface.
func (svr *Server) KvGC(ctx context.Context, req *kvrpcpb.GCRequest) (*kvrpcpb.GCResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvGC")
if err != nil {
return &kvrpcpb.GCResponse{Error: convertToKeyError(err)}, nil
}
defer reqCtx.finish()
svr.mvccStore.UpdateSafePoint(req.SafePoint)
return &kvrpcpb.GCResponse{}, nil
}
// KvDeleteRange implements the tikvpb.TikvServer interface.
func (svr *Server) KvDeleteRange(ctx context.Context, req *kvrpcpb.DeleteRangeRequest) (*kvrpcpb.DeleteRangeResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "KvDeleteRange")
if err != nil {
return &kvrpcpb.DeleteRangeResponse{Error: convertToKeyError(err).String()}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.DeleteRangeResponse{RegionError: reqCtx.regErr}, nil
}
err = svr.mvccStore.dbWriter.DeleteRange(req.StartKey, req.EndKey, reqCtx.regCtx)
if err != nil {
log.Error("delete range failed", zap.Error(err))
}
return &kvrpcpb.DeleteRangeResponse{}, nil
}
// RawKV commands.
// RawGet implements the tikvpb.TikvServer interface.
func (svr *Server) RawGet(context.Context, *kvrpcpb.RawGetRequest) (*kvrpcpb.RawGetResponse, error) {
return &kvrpcpb.RawGetResponse{}, nil
}
// RawPut implements the tikvpb.TikvServer interface.
func (svr *Server) RawPut(context.Context, *kvrpcpb.RawPutRequest) (*kvrpcpb.RawPutResponse, error) {
return &kvrpcpb.RawPutResponse{}, nil
}
// RawDelete implements the tikvpb.TikvServer interface.
func (svr *Server) RawDelete(context.Context, *kvrpcpb.RawDeleteRequest) (*kvrpcpb.RawDeleteResponse, error) {
return &kvrpcpb.RawDeleteResponse{}, nil
}
// RawScan implements the tikvpb.TikvServer interface.
func (svr *Server) RawScan(context.Context, *kvrpcpb.RawScanRequest) (*kvrpcpb.RawScanResponse, error) {
return &kvrpcpb.RawScanResponse{}, nil
}
// RawBatchDelete implements the tikvpb.TikvServer interface.
func (svr *Server) RawBatchDelete(context.Context, *kvrpcpb.RawBatchDeleteRequest) (*kvrpcpb.RawBatchDeleteResponse, error) {
return &kvrpcpb.RawBatchDeleteResponse{}, nil
}
// RawBatchGet implements the tikvpb.TikvServer interface.
func (svr *Server) RawBatchGet(context.Context, *kvrpcpb.RawBatchGetRequest) (*kvrpcpb.RawBatchGetResponse, error) {
return &kvrpcpb.RawBatchGetResponse{}, nil
}
// RawBatchPut implements the tikvpb.TikvServer interface.
func (svr *Server) RawBatchPut(context.Context, *kvrpcpb.RawBatchPutRequest) (*kvrpcpb.RawBatchPutResponse, error) {
return &kvrpcpb.RawBatchPutResponse{}, nil
}
// RawBatchScan implements the tikvpb.TikvServer interface.
func (svr *Server) RawBatchScan(context.Context, *kvrpcpb.RawBatchScanRequest) (*kvrpcpb.RawBatchScanResponse, error) {
return &kvrpcpb.RawBatchScanResponse{}, nil
}
// RawDeleteRange implements the tikvpb.TikvServer interface.
func (svr *Server) RawDeleteRange(context.Context, *kvrpcpb.RawDeleteRangeRequest) (*kvrpcpb.RawDeleteRangeResponse, error) {
return &kvrpcpb.RawDeleteRangeResponse{}, nil
}
// SQL push down commands.
// Coprocessor implements the tikvpb.TikvServer interface.
func (svr *Server) Coprocessor(_ context.Context, req *coprocessor.Request) (*coprocessor.Response, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "Coprocessor")
if err != nil {
return &coprocessor.Response{OtherError: convertToKeyError(err).String()}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &coprocessor.Response{RegionError: reqCtx.regErr}, nil
}
return cophandler.HandleCopRequest(reqCtx.getDBReader(), svr.mvccStore.lockStore, req), nil
}
// CoprocessorStream implements the tikvpb.TikvServer interface.
func (svr *Server) CoprocessorStream(*coprocessor.Request, tikvpb.Tikv_CoprocessorStreamServer) error {
// TODO
return nil
}
// RegionError represents a region error
type RegionError struct {
err *errorpb.Error
}
// Error implements the error interface.
func (regionError *RegionError) Error() string {
return regionError.err.Message
}
// BatchCoprocessor implements the tikvpb.TikvServer interface.
func (svr *Server) BatchCoprocessor(req *coprocessor.BatchRequest, batchCopServer tikvpb.Tikv_BatchCoprocessorServer) error {
reqCtxs := make([]*requestCtx, 0, len(req.Regions))
defer func() {
for _, ctx := range reqCtxs {
ctx.finish()
}
}()
for _, ri := range req.Regions {
cop := coprocessor.Request{
Tp: kv.ReqTypeDAG,
Data: req.Data,
StartTs: req.StartTs,
Ranges: ri.Ranges,
}
regionCtx := *req.Context
regionCtx.RegionEpoch = ri.RegionEpoch
regionCtx.RegionId = ri.RegionId
cop.Context = ®ionCtx
reqCtx, err := newRequestCtx(svr, ®ionCtx, "Coprocessor")
if err != nil {
return err
}
reqCtxs = append(reqCtxs, reqCtx)
if reqCtx.regErr != nil {
return &RegionError{err: reqCtx.regErr}
}
copResponse := cophandler.HandleCopRequestWithMPPCtx(reqCtx.getDBReader(), svr.mvccStore.lockStore, &cop, nil)
err = batchCopServer.Send(&coprocessor.BatchResponse{Data: copResponse.Data})
if err != nil {
return err
}
}
return nil
}
func (mrm *MockRegionManager) getMPPTaskHandler(rpcClient client.Client, meta *mpp.TaskMeta, createdIfNotExist bool, storeID uint64) (*cophandler.MPPTaskHandler, bool, error) {
set := mrm.getMPPTaskSet(storeID)
if set == nil {
return nil, false, errors.New("cannot find mpp task set for store")
}
set.mu.Lock()
defer set.mu.Unlock()
if handler, ok := set.taskHandlers[meta.TaskId]; ok {
return handler, false, nil
}
if createdIfNotExist {
handler := &cophandler.MPPTaskHandler{
TunnelSet: make(map[int64]*cophandler.ExchangerTunnel),
Meta: meta,
RPCClient: rpcClient,
}
set.taskHandlers[meta.TaskId] = handler
return handler, true, nil
}
return nil, false, nil
}
func (mrm *MockRegionManager) removeMPPTaskHandler(taskID int64, storeID uint64) error {
set := mrm.getMPPTaskSet(storeID)
if set == nil {
return errors.New("cannot find mpp task set for store")
}
set.mu.Lock()
defer set.mu.Unlock()
if _, ok := set.taskHandlers[taskID]; ok {
delete(set.taskHandlers, taskID)
return nil
}
return errors.New("cannot find mpp task")
}
// DispatchMPPTask implements the tikvpb.TikvServer interface.
func (svr *Server) DispatchMPPTask(_ context.Context, _ *mpp.DispatchTaskRequest) (*mpp.DispatchTaskResponse, error) {
panic("todo")
}
func (svr *Server) executeMPPDispatch(ctx context.Context, req *mpp.DispatchTaskRequest, storeAddr string, storeID uint64, handler *cophandler.MPPTaskHandler) error {
var reqCtx *requestCtx
if len(req.Regions) > 0 {
kvContext := &kvrpcpb.Context{
RegionId: req.Regions[0].RegionId,
RegionEpoch: req.Regions[0].RegionEpoch,
// This is a hack: reuse the task id in kvContext to pass the mpp task id.
TaskId: uint64(handler.Meta.TaskId),
Peer: &metapb.Peer{StoreId: storeID},
}
var err error
reqCtx, err = newRequestCtx(svr, kvContext, "Mpp")
if err != nil {
return errors.Trace(err)
}
}
copReq := &coprocessor.Request{
Tp: kv.ReqTypeDAG,
Data: req.EncodedPlan,
StartTs: req.Meta.StartTs,
}
for _, regionMeta := range req.Regions {
copReq.Ranges = append(copReq.Ranges, regionMeta.Ranges...)
}
var dbreader *dbreader.DBReader
if reqCtx != nil {
dbreader = reqCtx.getDBReader()
}
go func() {
resp := cophandler.HandleCopRequestWithMPPCtx(dbreader, svr.mvccStore.lockStore, copReq, &cophandler.MPPCtx{
RPCClient: svr.RPCClient,
StoreAddr: storeAddr,
TaskHandler: handler,
Ctx: ctx,
})
handler.Err = svr.RemoveMPPTaskHandler(req.Meta.TaskId, storeID)
if len(resp.OtherError) > 0 {
handler.Err = errors.New(resp.OtherError)
}
if reqCtx != nil {
reqCtx.finish()
}
}()
return nil
}
// DispatchMPPTaskWithStoreID implements the tikvpb.TikvServer interface.
func (svr *Server) DispatchMPPTaskWithStoreID(ctx context.Context, req *mpp.DispatchTaskRequest, storeID uint64) (*mpp.DispatchTaskResponse, error) {
mppHandler, err := svr.CreateMPPTaskHandler(req.Meta, storeID)
if err != nil {
return nil, errors.Trace(err)
}
storeAddr, err := svr.GetStoreAddrByStoreID(storeID)
if err != nil {
return nil, err
}
err = svr.executeMPPDispatch(ctx, req, storeAddr, storeID, mppHandler)
resp := &mpp.DispatchTaskResponse{}
if err != nil {
resp.Error = &mpp.Error{Msg: err.Error()}
}
return resp, nil
}
// CancelMPPTask implements the tikvpb.TikvServer interface.
func (svr *Server) CancelMPPTask(_ context.Context, _ *mpp.CancelTaskRequest) (*mpp.CancelTaskResponse, error) {
panic("todo")
}
// GetMPPTaskHandler implements the tikvpb.TikvServer interface.
func (svr *Server) GetMPPTaskHandler(taskID int64, storeID uint64) (*cophandler.MPPTaskHandler, error) {
if mrm, ok := svr.regionManager.(*MockRegionManager); ok {
set := mrm.getMPPTaskSet(storeID)
if set == nil {
return nil, errors.New("cannot find mpp task set for store")
}
set.mu.Lock()
defer set.mu.Unlock()
if handler, ok := set.taskHandlers[taskID]; ok {
return handler, nil
}
return nil, nil
}
return nil, errors.New("Only mock region mgr supports get mpp task")
}
// RemoveMPPTaskHandler implements the tikvpb.TikvServer interface.
func (svr *Server) RemoveMPPTaskHandler(taskID int64, storeID uint64) error {
if mrm, ok := svr.regionManager.(*MockRegionManager); ok {
err := mrm.removeMPPTaskHandler(taskID, storeID)
return errors.Trace(err)
}
return errors.New("Only mock region mgr supports remove mpp task")
}
// CreateMPPTaskHandler implements the tikvpb.TikvServer interface.
func (svr *Server) CreateMPPTaskHandler(meta *mpp.TaskMeta, storeID uint64) (*cophandler.MPPTaskHandler, error) {
if mrm, ok := svr.regionManager.(*MockRegionManager); ok {
set := mrm.getMPPTaskSet(storeID)
if set == nil {
return nil, errors.New("cannot find mpp task set for store")
}
set.mu.Lock()
defer set.mu.Unlock()
if handler, ok := set.taskHandlers[meta.TaskId]; ok {
return handler, errors.Errorf("Task %d has been created", meta.TaskId)
}
handler := &cophandler.MPPTaskHandler{
TunnelSet: make(map[int64]*cophandler.ExchangerTunnel),
Meta: meta,
RPCClient: svr.RPCClient,
}
set.taskHandlers[meta.TaskId] = handler
return handler, nil
}
return nil, errors.New("Only mock region mgr supports get mpp task")
}
// EstablishMPPConnection implements the tikvpb.TikvServer interface.
func (svr *Server) EstablishMPPConnection(*mpp.EstablishMPPConnectionRequest, tikvpb.Tikv_EstablishMPPConnectionServer) error {
panic("todo")
}
// EstablishMPPConnectionWithStoreID implements the tikvpb.TikvServer interface.
func (svr *Server) EstablishMPPConnectionWithStoreID(req *mpp.EstablishMPPConnectionRequest, server tikvpb.Tikv_EstablishMPPConnectionServer, storeID uint64) error {
var (
mppHandler *cophandler.MPPTaskHandler
err error
)
maxRetryTime := 5
for i := 0; i < maxRetryTime; i++ {
mppHandler, err = svr.GetMPPTaskHandler(req.SenderMeta.TaskId, storeID)
if err != nil {
return errors.Trace(err)
}
if mppHandler == nil {
time.Sleep(time.Second)
} else {
break
}
}
if mppHandler == nil {
		return errors.New("task not found")
}
ctx1, cancel := context.WithCancel(context.Background())
defer cancel()
tunnel, err := mppHandler.HandleEstablishConn(ctx1, req)
if err != nil {
return errors.Trace(err)
}
var sendError error
for sendError == nil {
chunk, err := tunnel.RecvChunk()
if err != nil {
sendError = server.Send(&mpp.MPPDataPacket{Error: &mpp.Error{Msg: err.Error()}})
break
}
if chunk == nil {
// todo return io.EOF error?
break
}
res := tipb.SelectResponse{
Chunks: []tipb.Chunk{*chunk},
}
raw, err := res.Marshal()
if err != nil {
sendError = server.Send(&mpp.MPPDataPacket{Error: &mpp.Error{Msg: err.Error()}})
break
}
sendError = server.Send(&mpp.MPPDataPacket{Data: raw})
}
return sendError
}
// Raft commands (tikv <-> tikv).
// Raft implements the tikvpb.TikvServer interface.
func (svr *Server) Raft(stream tikvpb.Tikv_RaftServer) error {
return svr.innerServer.Raft(stream)
}
// Snapshot implements the tikvpb.TikvServer interface.
func (svr *Server) Snapshot(stream tikvpb.Tikv_SnapshotServer) error {
return svr.innerServer.Snapshot(stream)
}
// BatchRaft implements the tikvpb.TikvServer interface.
func (svr *Server) BatchRaft(stream tikvpb.Tikv_BatchRaftServer) error {
return svr.innerServer.BatchRaft(stream)
}
// Region commands.
// SplitRegion implements the tikvpb.TikvServer interface.
func (svr *Server) SplitRegion(ctx context.Context, req *kvrpcpb.SplitRegionRequest) (*kvrpcpb.SplitRegionResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "SplitRegion")
if err != nil {
return &kvrpcpb.SplitRegionResponse{RegionError: &errorpb.Error{Message: err.Error()}}, nil
}
defer reqCtx.finish()
return svr.regionManager.SplitRegion(req), nil
}
// ReadIndex implements the tikvpb.TikvServer interface.
func (svr *Server) ReadIndex(context.Context, *kvrpcpb.ReadIndexRequest) (*kvrpcpb.ReadIndexResponse, error) {
// TODO:
return &kvrpcpb.ReadIndexResponse{}, nil
}
// transaction debugger commands.
// MvccGetByKey implements the tikvpb.TikvServer interface.
func (svr *Server) MvccGetByKey(ctx context.Context, req *kvrpcpb.MvccGetByKeyRequest) (*kvrpcpb.MvccGetByKeyResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "MvccGetByKey")
if err != nil {
return &kvrpcpb.MvccGetByKeyResponse{Error: err.Error()}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.MvccGetByKeyResponse{RegionError: reqCtx.regErr}, nil
}
resp := new(kvrpcpb.MvccGetByKeyResponse)
mvccInfo, err := svr.mvccStore.MvccGetByKey(reqCtx, req.GetKey())
if err != nil {
resp.Error = err.Error()
}
resp.Info = mvccInfo
return resp, nil
}
// MvccGetByStartTs implements the tikvpb.TikvServer interface.
func (svr *Server) MvccGetByStartTs(ctx context.Context, req *kvrpcpb.MvccGetByStartTsRequest) (*kvrpcpb.MvccGetByStartTsResponse, error) {
reqCtx, err := newRequestCtx(svr, req.Context, "MvccGetByStartTs")
if err != nil {
return &kvrpcpb.MvccGetByStartTsResponse{Error: err.Error()}, nil
}
defer reqCtx.finish()
if reqCtx.regErr != nil {
return &kvrpcpb.MvccGetByStartTsResponse{RegionError: reqCtx.regErr}, nil
}
resp := new(kvrpcpb.MvccGetByStartTsResponse)
mvccInfo, key, err := svr.mvccStore.MvccGetByStartTs(reqCtx, req.StartTs)
if err != nil {
resp.Error = err.Error()
}
resp.Info = mvccInfo
resp.Key = key
return resp, nil
}
// UnsafeDestroyRange implements the tikvpb.TikvServer interface.
func (svr *Server) UnsafeDestroyRange(ctx context.Context, req *kvrpcpb.UnsafeDestroyRangeRequest) (*kvrpcpb.UnsafeDestroyRangeResponse, error) {
start, end := req.GetStartKey(), req.GetEndKey()
svr.mvccStore.DeleteFileInRange(start, end)
return &kvrpcpb.UnsafeDestroyRangeResponse{}, nil
}
// deadlock detection related services
// GetWaitForEntries tries to get the waitFor entries.
func (svr *Server) GetWaitForEntries(ctx context.Context,
req *deadlockPb.WaitForEntriesRequest) (*deadlockPb.WaitForEntriesResponse, error) {
// TODO
return &deadlockPb.WaitForEntriesResponse{}, nil
}
// Detect will handle detection rpc from other nodes
func (svr *Server) Detect(stream deadlockPb.Deadlock_DetectServer) error {
for {
req, err := stream.Recv()
if err != nil {
if err == io.EOF {
break
}
return err
}
if !svr.mvccStore.DeadlockDetectSvr.IsLeader() {
log.Warn("detection requests received on non leader node")
break
}
resp := svr.mvccStore.DeadlockDetectSvr.Detect(req)
if resp != nil {
if sendErr := stream.Send(resp); sendErr != nil {
log.Error("send deadlock response failed", zap.Error(sendErr))
break
}
}
}
return nil
}
// CheckLockObserver implements the tikvpb.TikvServer interface.
func (svr *Server) CheckLockObserver(context.Context, *kvrpcpb.CheckLockObserverRequest) (*kvrpcpb.CheckLockObserverResponse, error) {
// TODO: implement Observer
return &kvrpcpb.CheckLockObserverResponse{IsClean: true}, nil
}
// PhysicalScanLock implements the tikvpb.TikvServer interface.
func (svr *Server) PhysicalScanLock(ctx context.Context, req *kvrpcpb.PhysicalScanLockRequest) (*kvrpcpb.PhysicalScanLockResponse, error) {
resp := &kvrpcpb.PhysicalScanLockResponse{}
resp.Locks = svr.mvccStore.PhysicalScanLock(req.StartKey, req.MaxTs, int(req.Limit))
return resp, nil
}
// RegisterLockObserver implements the tikvpb.TikvServer interface.
func (svr *Server) RegisterLockObserver(context.Context, *kvrpcpb.RegisterLockObserverRequest) (*kvrpcpb.RegisterLockObserverResponse, error) {
// TODO: implement Observer
return &kvrpcpb.RegisterLockObserverResponse{}, nil
}
// RemoveLockObserver implements the tikvpb.TikvServer interface.
func (svr *Server) RemoveLockObserver(context.Context, *kvrpcpb.RemoveLockObserverRequest) (*kvrpcpb.RemoveLockObserverResponse, error) {
// TODO: implement Observer
return &kvrpcpb.RemoveLockObserverResponse{}, nil
}
// VerGet implements the tikvpb.TikvServer interface.
func (svr *Server) VerGet(context.Context, *kvrpcpb.VerGetRequest) (*kvrpcpb.VerGetResponse, error) {
panic("unimplemented")
}
// VerBatchGet implements the tikvpb.TikvServer interface.
func (svr *Server) VerBatchGet(context.Context, *kvrpcpb.VerBatchGetRequest) (*kvrpcpb.VerBatchGetResponse, error) {
panic("unimplemented")
}
// VerMut implements the tikvpb.TikvServer interface.
func (svr *Server) VerMut(context.Context, *kvrpcpb.VerMutRequest) (*kvrpcpb.VerMutResponse, error) {
panic("unimplemented")
}
// VerBatchMut implements the tikvpb.TikvServer interface.
func (svr *Server) VerBatchMut(context.Context, *kvrpcpb.VerBatchMutRequest) (*kvrpcpb.VerBatchMutResponse, error) {
panic("unimplemented")
}
// VerScan implements the tikvpb.TikvServer interface.
func (svr *Server) VerScan(context.Context, *kvrpcpb.VerScanRequest) (*kvrpcpb.VerScanResponse, error) {
panic("unimplemented")
}
// VerDeleteRange implements the tikvpb.TikvServer interface.
func (svr *Server) VerDeleteRange(context.Context, *kvrpcpb.VerDeleteRangeRequest) (*kvrpcpb.VerDeleteRangeResponse, error) {
panic("unimplemented")
}
// CheckLeader implements the tikvpb.TikvServer interface.
func (svr *Server) CheckLeader(context.Context, *kvrpcpb.CheckLeaderRequest) (*kvrpcpb.CheckLeaderResponse, error) {
panic("unimplemented")
}
func convertToKeyError(err error) *kvrpcpb.KeyError {
if err == nil {
return nil
}
causeErr := errors.Cause(err)
switch x := causeErr.(type) {
case *ErrLocked:
return &kvrpcpb.KeyError{
Locked: x.Lock.ToLockInfo(x.Key),
}
case ErrRetryable:
return &kvrpcpb.KeyError{
Retryable: x.Error(),
}
case *ErrKeyAlreadyExists:
return &kvrpcpb.KeyError{
AlreadyExist: &kvrpcpb.AlreadyExist{
Key: x.Key,
},
}
case *ErrConflict:
return &kvrpcpb.KeyError{
Conflict: &kvrpcpb.WriteConflict{
StartTs: x.StartTS,
ConflictTs: x.ConflictTS,
ConflictCommitTs: x.ConflictCommitTS,
Key: x.Key,
},
}
case *ErrDeadlock:
return &kvrpcpb.KeyError{
Deadlock: &kvrpcpb.Deadlock{
LockKey: x.LockKey,
LockTs: x.LockTS,
DeadlockKeyHash: x.DeadlockKeyHash,
},
}
case *ErrCommitExpire:
return &kvrpcpb.KeyError{
CommitTsExpired: &kvrpcpb.CommitTsExpired{
StartTs: x.StartTs,
AttemptedCommitTs: x.CommitTs,
Key: x.Key,
MinCommitTs: x.MinCommitTs,
},
}
case *ErrTxnNotFound:
return &kvrpcpb.KeyError{
TxnNotFound: &kvrpcpb.TxnNotFound{
StartTs: x.StartTS,
PrimaryKey: x.PrimaryKey,
},
}
default:
return &kvrpcpb.KeyError{
Abort: err.Error(),
}
}
}
func convertToPBError(err error) (*kvrpcpb.KeyError, *errorpb.Error) {
if regErr := extractRegionError(err); regErr != nil {
return nil, regErr
}
return convertToKeyError(err), nil
}
func convertToPBErrors(err error) ([]*kvrpcpb.KeyError, *errorpb.Error) {
if err != nil {
if regErr := extractRegionError(err); regErr != nil {
return nil, regErr
}
return []*kvrpcpb.KeyError{convertToKeyError(err)}, nil
}
return nil, nil
}
func extractRegionError(err error) *errorpb.Error {
if pbError, ok := err.(*pberror.PBError); ok {
return pbError.RequestErr
}
return nil
}
| store/mockstore/unistore/tikv/server.go | 0 | https://github.com/pingcap/tidb/commit/3f6dfe91daab7acb178ad24941dfca1745993afd | [
0.0034079640172421932,
0.00026183563750237226,
0.00015885751054156572,
0.0001701981236692518,
0.00035310463863424957
] |
{
"id": 0,
"code_window": [
"\tdefer cancel()\n",
"\n",
"\tsettings := cluster.MakeTestingClusterSettings()\n",
"\tsql.FeatureTLSAutoJoinEnabled.Override(ctx, &settings.SV, true)\n",
"\ts, sqldb, _ := serverutils.StartServer(t, base.TestServerArgs{\n",
"\t\tSettings: settings,\n",
"\t})\n",
"\tdefer s.Stopper().Stop(ctx)\n",
"\n",
"\trows, err := sqldb.Query(\"SELECT crdb_internal.create_join_token();\")\n",
"\trequire.NoError(t, err)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t// This logic is for node-node connections.\n",
"\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n",
"\t\tSettings: settings,\n"
],
"file_path": "pkg/cli/connect_join_test.go",
"type": "replace",
"edit_start_line_idx": 42
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package cli
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery/loqrecoverypb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/server/serverpb"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/listenerutil"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
)
// TestCollectInfoFromMultipleStores performs basic sanity checks on replica info collection.
// This is done by running a three-node cluster with disk-backed storage,
// stopping it, and verifying the content of the collected replica info file.
// This check verifies that:
//
// we successfully iterate requested stores,
// data is written in expected location,
// data contains info only about stores requested.
func TestCollectInfoFromMultipleStores(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}},
1: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-2"}}},
2: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-3"}}},
},
})
tc.Start(t)
defer tc.Stopper().Stop(ctx)
	// Wait for up-replication.
require.NoError(t, tc.WaitForFullReplication())
// Shutdown.
tc.Stopper().Stop(ctx)
replicaInfoFileName := dir + "/node-1.json"
c.RunWithArgs([]string{"debug", "recover", "collect-info", "--store=" + dir + "/store-1",
"--store=" + dir + "/store-2", replicaInfoFileName})
replicas, err := readReplicaInfoData([]string{replicaInfoFileName})
require.NoError(t, err, "failed to read generated replica info")
stores := map[roachpb.StoreID]interface{}{}
for _, r := range replicas.LocalInfo[0].Replicas {
stores[r.StoreID] = struct{}{}
}
require.Equal(t, 2, len(stores), "collected replicas from stores")
require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), replicas.Version,
"collected version info from stores")
}
// TestCollectInfoFromOnlineCluster verifies that given a test cluster with
// one stopped node, we can collect replica info and metadata from remaining
// nodes using an admin recovery call.
func TestCollectInfoFromOnlineCluster(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
StoreSpecs: []base.StoreSpec{{InMemory: true}},
Insecure: true,
},
})
tc.Start(t)
defer tc.Stopper().Stop(ctx)
require.NoError(t, tc.WaitForFullReplication())
tc.ToggleReplicateQueues(false)
r := tc.ServerConn(0).QueryRow("select count(*) from crdb_internal.ranges_no_leases")
var totalRanges int
require.NoError(t, r.Scan(&totalRanges), "failed to query range count")
tc.StopServer(0)
replicaInfoFileName := dir + "/all-nodes.json"
c.RunWithArgs([]string{
"debug",
"recover",
"collect-info",
"--insecure",
"--host",
tc.Server(2).AdvRPCAddr(),
replicaInfoFileName,
})
replicas, err := readReplicaInfoData([]string{replicaInfoFileName})
require.NoError(t, err, "failed to read generated replica info")
stores := map[roachpb.StoreID]interface{}{}
totalReplicas := 0
for _, li := range replicas.LocalInfo {
for _, r := range li.Replicas {
stores[r.StoreID] = struct{}{}
}
totalReplicas += len(li.Replicas)
}
require.Equal(t, 2, len(stores), "collected replicas from stores")
require.Equal(t, 2, len(replicas.LocalInfo), "collected info is not split by node")
require.Equal(t, totalRanges*2, totalReplicas, "number of collected replicas")
require.Equal(t, totalRanges, len(replicas.Descriptors),
"number of collected descriptors from metadata")
require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), replicas.Version,
"collected version info from stores")
}
// TestLossOfQuorumRecovery performs a sanity check on the end-to-end recovery workflow.
// This test doesn't try to validate all possible test cases, but instead checks that
// artifacts are correctly produced and overall cluster recovery could be performed
// where it would be completely broken otherwise.
func TestLossOfQuorumRecovery(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderDeadlock(t, "slow under deadlock")
ctx := context.Background()
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
	// The test cluster contains 3 nodes that we would turn into a single-node
	// cluster using loss of quorum recovery. After it is stopped, the single node
	// would not be able to make progress, but we will apply the recovery procedure and
	// mark the replicas on node 1 as designated survivors. After that, starting
	// the single node should succeed.
tcBefore := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}},
},
})
tcBefore.Start(t)
s := sqlutils.MakeSQLRunner(tcBefore.Conns[0])
s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'")
defer tcBefore.Stopper().Stop(ctx)
// We use scratch range to test special case for pending update on the
// descriptor which has to be cleaned up before recovery could proceed.
// For that we'll ensure it is not empty and then put an intent. After
// recovery, we'll check that the range is still accessible for writes as
// normal.
sk := tcBefore.ScratchRange(t)
require.NoError(t,
tcBefore.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value"),
"failed to write value to scratch range")
createIntentOnRangeDescriptor(ctx, t, tcBefore, sk)
node1ID := tcBefore.Servers[0].NodeID()
// Now that stores are prepared and replicated we can shut down cluster
// and perform store manipulations.
tcBefore.Stopper().Stop(ctx)
server1StoreDir := dir + "/store-1"
replicaInfoFileName := dir + "/node-1.json"
c.RunWithArgs(
[]string{"debug", "recover", "collect-info", "--store=" + server1StoreDir,
replicaInfoFileName})
// Generate recovery plan and try to verify that plan file was generated and contains
// meaningful data. This is not strictly necessary for verifying end-to-end flow, but
	// having assertions on the generated data helps to identify which stage of the
	// pipeline broke if the test fails.
planFile := dir + "/recovery-plan.json"
out, err := c.RunWithCaptureArgs(
[]string{"debug", "recover", "make-plan", "--confirm=y", "--plan=" + planFile,
replicaInfoFileName})
require.NoError(t, err, "failed to run make-plan")
require.Contains(t, out, fmt.Sprintf("- node n%d", node1ID),
"planner didn't provide correct apply instructions")
require.FileExists(t, planFile, "generated plan file")
planFileContent, err := os.ReadFile(planFile)
require.NoError(t, err, "test infra failed, can't open created plan file")
plan := loqrecoverypb.ReplicaUpdatePlan{}
jsonpb := protoutil.JSONPb{}
require.NoError(t, jsonpb.Unmarshal(planFileContent, &plan),
"failed to deserialize replica recovery plan")
require.NotEmpty(t, plan.Updates, "resulting plan contains no updates")
out, err = c.RunWithCaptureArgs(
[]string{"debug", "recover", "apply-plan", "--confirm=y", "--store=" + server1StoreDir,
planFile})
require.NoError(t, err, "failed to run apply plan")
	// Check that there was at least one mention of a replica being promoted.
require.Contains(t, out, "will be updated", "no replica updates were recorded")
require.Contains(t, out, fmt.Sprintf("Updated store(s): s%d", node1ID),
"apply plan was not executed on requested node")
tcAfter := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ReplicationMode: base.ReplicationManual,
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}},
},
})
// NB: If recovery is not performed, new cluster will just hang on startup.
// This is caused by liveness range becoming unavailable and preventing any
// progress. So it is likely that test will timeout if basic workflow fails.
tcAfter.Start(t)
defer tcAfter.Stopper().Stop(ctx)
// In the new cluster, we will still have nodes 2 and 3 remaining from the first
// attempt. That would increase number of replicas on system ranges to 5 and we
// would not be able to upreplicate properly. So we need to decommission old nodes
// first before proceeding.
adminClient := tcAfter.Server(0).GetAdminClient(t)
require.NoError(t, runDecommissionNodeImpl(
ctx, adminClient, nodeDecommissionWaitNone, nodeDecommissionChecksSkip, false,
[]roachpb.NodeID{roachpb.NodeID(2), roachpb.NodeID(3)}, tcAfter.Server(0).NodeID()),
"Failed to decommission removed nodes")
for i := 0; i < len(tcAfter.Servers); i++ {
require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error {
store.SetReplicateQueueActive(true)
return nil
}), "Failed to activate replication queue")
}
require.NoError(t, tcAfter.WaitForZoneConfigPropagation(),
"Failed to ensure zone configs are propagated")
require.NoError(t, tcAfter.WaitForFullReplication(), "Failed to perform full replication")
for i := 0; i < len(tcAfter.Servers); i++ {
require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error {
return store.ForceConsistencyQueueProcess()
}), "Failed to force replicas to consistency queue")
}
// As a validation step we will just pick one range and get its replicas to see
// if they were up-replicated to the new nodes.
s = sqlutils.MakeSQLRunner(tcAfter.Conns[0])
r := s.QueryRow(t, "select replicas from crdb_internal.ranges limit 1")
var replicas string
r.Scan(&replicas)
require.Equal(t, "{1,4,5}", replicas, "Replicas after loss of quorum recovery")
// Validate that rangelog is updated by recovery records after cluster restarts.
testutils.SucceedsSoon(t, func() error {
r := s.QueryRow(t,
`select count(*) from system.rangelog where "eventType" = 'unsafe_quorum_recovery'`)
var recoveries int
r.Scan(&recoveries)
if recoveries != len(plan.Updates) {
return errors.Errorf("found %d recovery events while expecting %d", recoveries,
len(plan.Updates))
}
return nil
})
	// We were using the scratch range to test cleanup of a pending transaction on
	// the range descriptor key. We want to verify that after recovery the range is
	// still writable, i.e. recovery succeeded.
require.NoError(t,
tcAfter.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value2"),
"failed to write value to scratch range after recovery")
}
// TestStageVersionCheck verifies that we can force a plan with a different internal
// version onto the cluster. To do this, we create a plan with an internal version
// above the current one but matching major and minor. Then we check that staging
// fails and that the force flag updates the plan version to match the local node.
func TestStageVersionCheck(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderDeadlock(t, "slow under deadlock")
ctx := context.Background()
_, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
listenerReg := listenerutil.NewListenerRegistry()
defer listenerReg.Close()
storeReg := server.NewStickyVFSRegistry()
tc := testcluster.NewTestCluster(t, 4, base.TestClusterArgs{
ReplicationMode: base.ReplicationManual,
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
StickyVFSRegistry: storeReg,
},
},
StoreSpecs: []base.StoreSpec{
{InMemory: true, StickyVFSID: "1"},
},
},
},
ReusableListenerReg: listenerReg,
})
tc.Start(t)
defer tc.Stopper().Stop(ctx)
tc.StopServer(3)
adminClient := tc.Server(0).GetAdminClient(t)
v := clusterversion.ByKey(clusterversion.BinaryVersionKey)
v.Internal++
// To avoid crafting real replicas we use StaleLeaseholderNodeIDs to force
// node to stage plan for verification.
p := loqrecoverypb.ReplicaUpdatePlan{
PlanID: uuid.FastMakeV4(),
Version: v,
ClusterID: tc.Server(0).StorageClusterID().String(),
DecommissionedNodeIDs: []roachpb.NodeID{4},
StaleLeaseholderNodeIDs: []roachpb.NodeID{1},
}
// Attempts to stage plan with different internal version must fail.
_, err := adminClient.RecoveryStagePlan(ctx, &serverpb.RecoveryStagePlanRequest{
Plan: &p,
AllNodes: true,
ForcePlan: false,
ForceLocalInternalVersion: false,
})
require.ErrorContains(t, err, "doesn't match cluster active version")
// Enable "stuck upgrade bypass" to stage plan on the cluster.
_, err = adminClient.RecoveryStagePlan(ctx, &serverpb.RecoveryStagePlanRequest{
Plan: &p,
AllNodes: true,
ForcePlan: false,
ForceLocalInternalVersion: true,
})
require.NoError(t, err, "force local must fix incorrect version")
// Check that stored plan has version matching cluster version.
ps := loqrecovery.NewPlanStore("", storeReg.Get("1"))
p, ok, err := ps.LoadPlan()
require.NoError(t, err, "failed to read node 0 plan")
require.True(t, ok, "plan was not staged")
require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), p.Version,
"plan version was not updated")
}
func createIntentOnRangeDescriptor(
ctx context.Context, t *testing.T, tcBefore *testcluster.TestCluster, sk roachpb.Key,
) {
txn := kv.NewTxn(ctx, tcBefore.Servers[0].DB(), 1)
var desc roachpb.RangeDescriptor
// Pick one of the predefined split points.
rdKey := keys.RangeDescriptorKey(roachpb.RKey(sk))
if err := txn.GetProto(ctx, rdKey, &desc); err != nil {
t.Fatal(err)
}
desc.NextReplicaID++
if err := txn.Put(ctx, rdKey, &desc); err != nil {
t.Fatal(err)
}
// At this point the intent has been written to Pebble but this
// write was not synced (only the raft log append was synced). We
// need to force another sync, but we're far from the storage
// layer here so the easiest thing to do is simply perform a
// second write. This will force the first write to be persisted
// to disk (the second write may or may not make it to disk due to
// timing).
desc.NextReplicaID++
if err := txn.Put(ctx, rdKey, &desc); err != nil {
t.Fatal(err)
}
}
func TestHalfOnlineLossOfQuorumRecovery(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderDeadlock(t, "slow under deadlock")
ctx := context.Background()
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
listenerReg := listenerutil.NewListenerRegistry()
defer listenerReg.Close()
	// The test cluster contains 3 nodes that we would turn into a single-node
	// cluster using loss of quorum recovery. To do that, we will terminate
	// two nodes and run recovery on the remaining one. Restarting the node should
	// bring it back to a healthy (but underreplicated) state.
// Note that we inject reusable listeners into all nodes to prevent tests
// running in parallel from taking over ports of stopped nodes and responding
// to gateway node with errors.
// TODO(oleg): Make test run with 7 nodes to exercise cases where multiple
// replicas survive. Current startup and allocator behaviour would make
// this test flaky.
sa := make(map[int]base.TestServerArgs)
for i := 0; i < 3; i++ {
sa[i] = base.TestServerArgs{
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
StickyVFSRegistry: server.NewStickyVFSRegistry(),
},
},
StoreSpecs: []base.StoreSpec{
{
InMemory: true,
},
},
}
}
tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,
},
ReusableListenerReg: listenerReg,
ServerArgsPerNode: sa,
})
tc.Start(t)
s := sqlutils.MakeSQLRunner(tc.Conns[0])
s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'")
defer tc.Stopper().Stop(ctx)
// We use scratch range to test special case for pending update on the
// descriptor which has to be cleaned up before recovery could proceed.
// For that we'll ensure it is not empty and then put an intent. After
// recovery, we'll check that the range is still accessible for writes as
// normal.
sk := tc.ScratchRange(t)
require.NoError(t,
tc.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value"),
"failed to write value to scratch range")
createIntentOnRangeDescriptor(ctx, t, tc, sk)
node1ID := tc.Servers[0].NodeID()
// Now that stores are prepared and replicated we can shut down cluster
// and perform store manipulations.
tc.StopServer(1)
tc.StopServer(2)
// Generate recovery plan and try to verify that plan file was generated and contains
// meaningful data. This is not strictly necessary for verifying end-to-end flow, but
	// having assertions on the generated data helps to identify which stage of the
	// pipeline broke if the test fails.
planFile := dir + "/recovery-plan.json"
out, err := c.RunWithCaptureArgs(
[]string{
"debug",
"recover",
"make-plan",
"--confirm=y",
"--certs-dir=test_certs",
"--host=" + tc.Server(0).AdvRPCAddr(),
"--plan=" + planFile,
})
require.NoError(t, err, "failed to run make-plan")
require.Contains(t, out, fmt.Sprintf("- node n%d", node1ID),
"planner didn't provide correct apply instructions")
require.FileExists(t, planFile, "generated plan file")
planFileContent, err := os.ReadFile(planFile)
require.NoError(t, err, "test infra failed, can't open created plan file")
plan := loqrecoverypb.ReplicaUpdatePlan{}
jsonpb := protoutil.JSONPb{}
require.NoError(t, jsonpb.Unmarshal(planFileContent, &plan),
"failed to deserialize replica recovery plan")
require.NotEmpty(t, plan.Updates, "resulting plan contains no updates")
out, err = c.RunWithCaptureArgs(
[]string{
"debug", "recover", "apply-plan",
"--certs-dir=test_certs",
"--host=" + tc.Server(0).AdvRPCAddr(),
"--confirm=y", planFile,
})
require.NoError(t, err, "failed to run apply plan")
	// Check that there was at least one mention of a replica being promoted.
require.Contains(t, out, "updating replica", "no replica updates were recorded")
require.Contains(t, out,
fmt.Sprintf("Plan staged. To complete recovery restart nodes n%d.", node1ID),
"apply plan failed to stage on expected nodes")
// Verify plan is staged on nodes
out, err = c.RunWithCaptureArgs(
[]string{
"debug", "recover", "verify",
"--certs-dir=test_certs",
"--host=" + tc.Server(0).AdvRPCAddr(),
planFile,
})
require.NoError(t, err, "failed to run verify plan")
require.Contains(t, out, "ERROR: loss of quorum recovery is not finished yet")
tc.StopServer(0)
// NB: If recovery is not performed, server will just hang on startup.
// This is caused by liveness range becoming unavailable and preventing any
// progress. So it is likely that test will timeout if basic workflow fails.
require.NoError(t, tc.RestartServer(0), "restart failed")
s = sqlutils.MakeSQLRunner(tc.Conns[0])
// Verifying that post start cleanup performed node decommissioning that
// prevents old nodes from rejoining.
ac := tc.GetAdminClient(t, 0)
testutils.SucceedsSoon(t, func() error {
dr, err := ac.DecommissionStatus(ctx,
&serverpb.DecommissionStatusRequest{NodeIDs: []roachpb.NodeID{2, 3}})
if err != nil {
return err
}
for _, s := range dr.Status {
if s.Membership != livenesspb.MembershipStatus_DECOMMISSIONED {
return errors.Newf("expecting n%d to be decommissioned", s.NodeID)
}
}
return nil
})
// Validate that rangelog is updated by recovery records after cluster restarts.
testutils.SucceedsSoon(t, func() error {
r := s.QueryRow(t,
`select count(*) from system.rangelog where "eventType" = 'unsafe_quorum_recovery'`)
var recoveries int
r.Scan(&recoveries)
if recoveries != len(plan.Updates) {
return errors.Errorf("found %d recovery events while expecting %d", recoveries,
len(plan.Updates))
}
return nil
})
// Verify recovery complete.
out, err = c.RunWithCaptureArgs(
[]string{
"debug", "recover", "verify",
"--certs-dir=test_certs",
"--host=" + tc.Server(0).AdvRPCAddr(),
planFile,
})
require.NoError(t, err, "failed to run verify plan")
require.Contains(t, out, "Loss of quorum recovery is complete.")
	// We were using the scratch range to test cleanup of a pending transaction on
	// the range descriptor key. We want to verify that after recovery the range is
	// still writable, i.e. recovery succeeded.
require.NoError(t,
tc.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value2"),
"failed to write value to scratch range after recovery")
// Finally split scratch range to ensure metadata ranges are recovered.
_, _, err = tc.Server(0).SplitRange(testutils.MakeKey(sk, []byte{42}))
require.NoError(t, err, "failed to split range after recovery")
}
func TestUpdatePlanVsClusterDiff(t *testing.T) {
defer leaktest.AfterTest(t)()
var empty uuid.UUID
planID, _ := uuid.FromString("123e4567-e89b-12d3-a456-426614174000")
otherPlanID, _ := uuid.FromString("123e4567-e89b-12d3-a456-426614174001")
applyTime, _ := time.Parse(time.RFC3339, "2023-01-24T10:30:00Z")
status := func(id roachpb.NodeID, pending, applied uuid.UUID, err string) loqrecoverypb.NodeRecoveryStatus {
s := loqrecoverypb.NodeRecoveryStatus{
NodeID: id,
}
if !pending.Equal(empty) {
s.PendingPlanID = &pending
}
if !applied.Equal(empty) {
s.AppliedPlanID = &applied
s.ApplyTimestamp = &applyTime
}
s.Error = err
return s
}
for _, d := range []struct {
name string
updatedNodes []int
staleLeases []int
status []loqrecoverypb.NodeRecoveryStatus
pending int
errors int
report []string
}{
{
name: "after staging",
updatedNodes: []int{1, 2},
staleLeases: []int{3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, planID, empty, ""),
status(3, planID, empty, ""),
},
pending: 3,
report: []string{
" plan application pending on node n1",
" plan application pending on node n2",
" plan application pending on node n3",
},
},
{
name: "partially applied",
updatedNodes: []int{1, 2, 3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, empty, planID, ""),
status(3, planID, empty, ""),
},
pending: 2,
report: []string{
" plan application pending on node n1",
" plan applied successfully on node n2",
" plan application pending on node n3",
},
},
{
name: "fully applied",
updatedNodes: []int{1, 2, 3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, empty, planID, ""),
status(2, empty, planID, ""),
status(3, empty, planID, ""),
},
report: []string{
" plan applied successfully on node n1",
" plan applied successfully on node n2",
" plan applied successfully on node n3",
},
},
{
name: "staging lost no node",
updatedNodes: []int{1, 2, 3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(3, planID, empty, ""),
},
pending: 2,
errors: 1,
report: []string{
" plan application pending on node n1",
" plan application pending on node n3",
" failed to find node n2 where plan must be staged",
},
},
{
name: "staging lost no plan",
updatedNodes: []int{1, 2},
staleLeases: []int{3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, planID, empty, ""),
status(3, empty, empty, ""),
},
pending: 2,
errors: 1,
report: []string{
" plan application pending on node n1",
" plan application pending on node n2",
" failed to find staged plan on node n3",
},
},
{
name: "partial failure",
updatedNodes: []int{1, 2, 3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, empty, planID, "found stale replica"),
status(3, planID, empty, ""),
},
pending: 2,
errors: 1,
report: []string{
" plan application pending on node n1",
" plan application failed on node n2: found stale replica",
" plan application pending on node n3",
},
},
{
name: "no plan",
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, empty, planID, "found stale replica"),
status(3, empty, otherPlanID, ""),
},
report: []string{
" node n1 staged plan: 123e4567-e89b-12d3-a456-426614174000",
" node n2 failed to apply plan 123e4567-e89b-12d3-a456-426614174000: found stale replica",
" node n3 applied plan: 123e4567-e89b-12d3-a456-426614174001 at 2023-01-24 10:30:00 +0000 UTC",
},
},
{
name: "wrong plan",
updatedNodes: []int{1, 2},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, otherPlanID, empty, ""),
status(3, otherPlanID, empty, ""),
},
pending: 1,
errors: 2,
report: []string{
" plan application pending on node n1",
" unexpected staged plan 123e4567-e89b-12d3-a456-426614174001 on node n2",
" unexpected staged plan 123e4567-e89b-12d3-a456-426614174001 on node n3",
},
},
} {
t.Run(d.name, func(t *testing.T) {
plan := loqrecoverypb.ReplicaUpdatePlan{
PlanID: planID,
}
// Plan will contain single replica update for each requested node.
rangeSeq := 1
for _, id := range d.updatedNodes {
plan.Updates = append(plan.Updates, loqrecoverypb.ReplicaUpdate{
RangeID: roachpb.RangeID(rangeSeq),
StartKey: nil,
OldReplicaID: roachpb.ReplicaID(1),
NewReplica: roachpb.ReplicaDescriptor{
NodeID: roachpb.NodeID(id),
StoreID: roachpb.StoreID(id),
ReplicaID: roachpb.ReplicaID(rangeSeq + 17),
},
NextReplicaID: roachpb.ReplicaID(rangeSeq + 18),
})
}
for _, id := range d.staleLeases {
plan.StaleLeaseholderNodeIDs = append(plan.StaleLeaseholderNodeIDs, roachpb.NodeID(id))
}
diff := diffPlanWithNodeStatus(plan, d.status)
require.Equal(t, d.pending, diff.pending, "number of pending changes")
require.Equal(t, d.errors, diff.errors, "number of node errors")
if d.report != nil {
require.Equal(t, len(d.report), len(diff.report), "number of lines in diff")
for i := range d.report {
require.Equal(t, d.report[i], diff.report[i], "wrong line %d of report", i)
}
}
})
}
}
func TestTruncateKeyOutput(t *testing.T) {
defer leaktest.AfterTest(t)()
for _, d := range []struct {
len uint
result string
}{
{
len: 13,
result: "/System/No...",
},
{
len: 30,
result: "/System/NodeLiveness",
},
{
len: 3,
result: "/Sy",
},
{
len: 4,
result: "/...",
},
} {
t.Run("", func(t *testing.T) {
helper := outputFormatHelper{
maxPrintedKeyLength: d.len,
}
require.Equal(t, d.result, helper.formatKey(keys.NodeLivenessPrefix))
})
}
}
func TestTruncateSpanOutput(t *testing.T) {
defer leaktest.AfterTest(t)()
for _, d := range []struct {
len uint
result string
}{
{
len: 30,
result: "/System/{NodeLiveness-Syste...",
},
{
len: 90,
result: "/System/{NodeLiveness-SystemSpanConfigKeys}",
},
{
len: 3,
result: "/Sy",
},
{
len: 4,
result: "/...",
},
} {
t.Run("", func(t *testing.T) {
helper := outputFormatHelper{
maxPrintedKeyLength: d.len,
}
require.Equal(t, d.result, helper.formatSpan(roachpb.Span{
Key: keys.NodeLivenessPrefix,
EndKey: keys.SystemSpanConfigPrefix,
}))
})
}
}
| pkg/cli/debug_recover_loss_of_quorum_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.8945127129554749,
0.040481697767972946,
0.0001604584394954145,
0.00017749734979588538,
0.17616473138332367
] |
{
"id": 0,
"code_window": [
"\tdefer cancel()\n",
"\n",
"\tsettings := cluster.MakeTestingClusterSettings()\n",
"\tsql.FeatureTLSAutoJoinEnabled.Override(ctx, &settings.SV, true)\n",
"\ts, sqldb, _ := serverutils.StartServer(t, base.TestServerArgs{\n",
"\t\tSettings: settings,\n",
"\t})\n",
"\tdefer s.Stopper().Stop(ctx)\n",
"\n",
"\trows, err := sqldb.Query(\"SELECT crdb_internal.create_join_token();\")\n",
"\trequire.NoError(t, err)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t// This logic is for node-node connections.\n",
"\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n",
"\t\tSettings: settings,\n"
],
"file_path": "pkg/cli/connect_join_test.go",
"type": "replace",
"edit_start_line_idx": 42
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package geomfn
import (
"math"
"github.com/cockroachdb/cockroach/pkg/geo"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/twpayne/go-geom"
)
// Angle calculates the clockwise angle between two vectors given by points
// p1,p2 and p3,p4. If p4 is an empty geometry (of any type, to follow PostGIS
// behavior), it instead calculates the clockwise angle between p2,p1 and p2,p3.
func Angle(g1, g2, g3, g4 geo.Geometry) (*float64, error) {
if g4.Empty() {
g1, g2, g3, g4 = g2, g1, g2, g3
}
if g1.SRID() != g2.SRID() {
return nil, geo.NewMismatchingSRIDsError(g1.SpatialObject(), g2.SpatialObject())
}
if g1.SRID() != g3.SRID() {
return nil, geo.NewMismatchingSRIDsError(g1.SpatialObject(), g3.SpatialObject())
}
if g1.SRID() != g4.SRID() {
return nil, geo.NewMismatchingSRIDsError(g1.SpatialObject(), g4.SpatialObject())
}
t1, err := g1.AsGeomT()
if err != nil {
return nil, err
}
t2, err := g2.AsGeomT()
if err != nil {
return nil, err
}
t3, err := g3.AsGeomT()
if err != nil {
return nil, err
}
t4, err := g4.AsGeomT()
if err != nil {
return nil, err
}
p1, p1ok := t1.(*geom.Point)
p2, p2ok := t2.(*geom.Point)
p3, p3ok := t3.(*geom.Point)
p4, p4ok := t4.(*geom.Point)
if !p1ok || !p2ok || !p3ok || !p4ok {
return nil, pgerror.Newf(pgcode.InvalidParameterValue, "arguments must be POINT geometries")
}
if p1.Empty() || p2.Empty() || p3.Empty() || p4.Empty() {
return nil, pgerror.Newf(pgcode.InvalidParameterValue, "received EMPTY geometry")
}
return angleFromCoords(p1.Coords(), p2.Coords(), p3.Coords(), p4.Coords()), nil
}
// AngleLineString calculates the clockwise angle between two linestrings,
// treating them as vectors between the start- and endpoints. Type conflicts
// or empty geometries return nil (as opposed to Angle which errors), to
// follow PostGIS behavior.
func AngleLineString(g1, g2 geo.Geometry) (*float64, error) {
if g1.SRID() != g2.SRID() {
return nil, geo.NewMismatchingSRIDsError(g1.SpatialObject(), g2.SpatialObject())
}
t1, err := g1.AsGeomT()
if err != nil {
return nil, err
}
t2, err := g2.AsGeomT()
if err != nil {
return nil, err
}
l1, l1ok := t1.(*geom.LineString)
l2, l2ok := t2.(*geom.LineString)
if !l1ok || !l2ok || l1.Empty() || l2.Empty() {
return nil, nil // follow PostGIS behavior
}
return angleFromCoords(
l1.Coord(0), l1.Coord(l1.NumCoords()-1), l2.Coord(0), l2.Coord(l2.NumCoords()-1)), nil
}
// angleFromCoords returns the clockwise angle between the vectors c1,c2 and
// c3,c4. For compatibility with PostGIS, it returns nil if any vectors have
// length 0.
func angleFromCoords(c1, c2, c3, c4 geom.Coord) *float64 {
a := coordSub(c2, c1)
b := coordSub(c4, c3)
if (a.X() == 0 && a.Y() == 0) || (b.X() == 0 && b.Y() == 0) {
return nil
}
// We want the clockwise angle, not the smallest interior angle, so can't use cosine formula.
angle := math.Atan2(-coordDet(a, b), coordDot(a, b))
// We want the angle in the interval [0,2π), while Atan2 returns [-π,π]
//
// NB: In Go, the literal -0.0 does not produce negative
// zero. However, since IEEE 754 requires that -0.0 == 0.0,
// this code is still correct and we use -0.0 here for
// semantic clarity.
//lint:ignore SA4026 -0.0 used here for clarity
if angle == -0.0 {
angle = 0.0
} else if angle < 0 {
angle += 2 * math.Pi
}
return &angle
}
| pkg/geo/geomfn/angle.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.00018077487766277045,
0.00017380149802193046,
0.00016287316975649446,
0.00017582184227649122,
0.000005561571470025228
] |
{
"id": 0,
"code_window": [
"\tdefer cancel()\n",
"\n",
"\tsettings := cluster.MakeTestingClusterSettings()\n",
"\tsql.FeatureTLSAutoJoinEnabled.Override(ctx, &settings.SV, true)\n",
"\ts, sqldb, _ := serverutils.StartServer(t, base.TestServerArgs{\n",
"\t\tSettings: settings,\n",
"\t})\n",
"\tdefer s.Stopper().Stop(ctx)\n",
"\n",
"\trows, err := sqldb.Query(\"SELECT crdb_internal.create_join_token();\")\n",
"\trequire.NoError(t, err)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t// This logic is for node-node connections.\n",
"\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n",
"\t\tSettings: settings,\n"
],
"file_path": "pkg/cli/connect_join_test.go",
"type": "replace",
"edit_start_line_idx": 42
} | urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8 | pkg/util/uuid/testdata/corpus/seed_valid_URNCanonical | 0 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.00016922895156312734,
0.00016922895156312734,
0.00016922895156312734,
0.00016922895156312734,
0
] |
{
"id": 0,
"code_window": [
"\tdefer cancel()\n",
"\n",
"\tsettings := cluster.MakeTestingClusterSettings()\n",
"\tsql.FeatureTLSAutoJoinEnabled.Override(ctx, &settings.SV, true)\n",
"\ts, sqldb, _ := serverutils.StartServer(t, base.TestServerArgs{\n",
"\t\tSettings: settings,\n",
"\t})\n",
"\tdefer s.Stopper().Stop(ctx)\n",
"\n",
"\trows, err := sqldb.Query(\"SELECT crdb_internal.create_join_token();\")\n",
"\trequire.NoError(t, err)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t// This logic is for node-node connections.\n",
"\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n",
"\t\tSettings: settings,\n"
],
"file_path": "pkg/cli/connect_join_test.go",
"type": "replace",
"edit_start_line_idx": 42
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// Package hba implements an hba.conf parser.
package hba
// conf.rl is a ragel v6.10 file containing a parser for pg_hba.conf
// files. "make" should be executed in this directory when conf.rl is
// changed. Since it is changed so rarely it is not hooked up to the top-level
// Makefile since that would require ragel being a dev dependency, which is
// an annoying burden since it's written in C and we can't auto install it
// on all systems.
import (
"fmt"
"net"
"reflect"
"strings"
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/settings/rulebasedscanner"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/errors"
"github.com/olekukonko/tablewriter"
)
// Conf is a parsed configuration.
type Conf struct {
Entries []Entry
}
// Entry is a single line of a configuration.
type Entry struct {
// ConnType is the connection type to match.
ConnType ConnType
// Database is the list of databases to match. An empty list means
// "match any database".
Database []rulebasedscanner.String
// User is the list of users to match. An empty list means "match
// any user".
User []rulebasedscanner.String
// Address is either AnyAddr, *net.IPNet or (unsupported) String for a hostname.
Address interface{}
Method rulebasedscanner.String
// MethodFn is populated during name resolution of Method.
MethodFn interface{}
Options [][2]string
OptionQuotes []bool
// Input is the original configuration line in the HBA configuration string.
// This is used for auditing purposes.
Input string
// Generated is true if the entry was expanded from another. All the
// generated entries share the same value for Input.
Generated bool
}
// ConnType represents the type of connection matched by a rule.
type ConnType int
const (
// ConnLocal matches unix socket connections.
ConnLocal ConnType = 1 << iota
// ConnHostNoSSL matches TCP connections without SSL/TLS.
ConnHostNoSSL
// ConnHostSSL matches TCP connections with SSL/TLS.
ConnHostSSL
// ConnHostAny matches TCP connections with or without SSL/TLS.
ConnHostAny = ConnHostNoSSL | ConnHostSSL
// ConnInternalLoopback matches internal connections running over the loopback
// interface.
ConnInternalLoopback = 8
// ConnAny matches any connection type. Used when registering auth
// methods.
ConnAny = ConnHostAny | ConnLocal | ConnInternalLoopback
)
// String implements the fmt.Stringer interface.
func (t ConnType) String() string {
switch t {
case ConnLocal:
return "local"
case ConnHostNoSSL:
return "hostnossl"
case ConnHostSSL:
return "hostssl"
case ConnHostAny:
return "host"
case ConnInternalLoopback:
return "loopback"
default:
panic(errors.Newf("unimplemented conn type: %v", int(t)))
}
}
// String implements the fmt.Stringer interface.
func (c Conf) String() string {
if len(c.Entries) == 0 {
return "# (empty configuration)\n"
}
var sb strings.Builder
sb.WriteString("# Original configuration:\n")
for _, e := range c.Entries {
if e.Generated {
continue
}
fmt.Fprintf(&sb, "# %s\n", e.Input)
}
sb.WriteString("#\n# Interpreted configuration:\n")
table := tablewriter.NewWriter(&sb)
table.SetAutoWrapText(false)
table.SetReflowDuringAutoWrap(false)
table.SetAlignment(tablewriter.ALIGN_LEFT)
table.SetBorder(false)
table.SetNoWhiteSpace(true)
table.SetTrimWhiteSpaceAtEOL(true)
table.SetTablePadding(" ")
row := []string{"# TYPE", "DATABASE", "USER", "ADDRESS", "METHOD", "OPTIONS"}
table.Append(row)
for _, e := range c.Entries {
row[0] = e.ConnType.String()
row[1] = e.DatabaseString()
row[2] = e.UserString()
row[3] = e.AddressString()
row[4] = e.Method.String()
row[5] = e.OptionsString()
table.Append(row)
}
table.Render()
return sb.String()
}
// AnyAddr represents "any address" and is used when parsing "all" for
// the "Address" field.
type AnyAddr struct{}
// String implements the fmt.Stringer interface.
func (AnyAddr) String() string { return "all" }
// GetOption returns the value of option name if there is exactly one
// occurrence of name in the options list, otherwise the empty string.
func (h Entry) GetOption(name string) string {
var val string
for _, opt := range h.Options {
if opt[0] == name {
// If there is more than one entry, return empty string.
if val != "" {
return ""
}
val = opt[1]
}
}
return val
}
// Equivalent returns true iff the entry is equivalent to another,
// excluding the original syntax.
func (h Entry) Equivalent(other Entry) bool {
h.Input = ""
other.Input = ""
return reflect.DeepEqual(h, other)
}
// GetOptions returns all values of option name.
func (h Entry) GetOptions(name string) []string {
var val []string
for _, opt := range h.Options {
if opt[0] == name {
val = append(val, opt[1])
}
}
return val
}
// ConnTypeMatches returns true iff the provided actual client connection
// type matches the connection type specified in the rule.
func (h Entry) ConnTypeMatches(clientConn ConnType) bool {
switch clientConn {
case ConnLocal:
return h.ConnType == ConnLocal
case ConnHostSSL:
// A SSL connection matches both "hostssl" and "host".
return h.ConnType&ConnHostSSL != 0
case ConnHostNoSSL:
// A non-SSL connection matches both "hostnossl" and "host".
return h.ConnType&ConnHostNoSSL != 0
case ConnInternalLoopback:
return h.ConnType&ConnInternalLoopback != 0
default:
panic("unimplemented")
}
}
// ConnMatches returns true iff the provided client connection
// type and address matches the entry spec.
func (h Entry) ConnMatches(clientConn ConnType, ip net.IP) (bool, error) {
if !h.ConnTypeMatches(clientConn) {
return false, nil
}
if clientConn != ConnLocal {
return h.AddressMatches(ip)
}
return true, nil
}
// UserMatches returns true iff the provided username matches an
// entry in the User list or if the user list is empty (the entry
// matches all).
//
// The provided username must be normalized already.
// The function assumes the entry was normalized to contain only
// one user and its username normalized. See ParseAndNormalize().
func (h Entry) UserMatches(userName username.SQLUsername) bool {
if h.User == nil {
return true
}
for _, u := range h.User {
if u.Value == userName.Normalized() {
return true
}
}
return false
}
// AddressMatches returns true iff the provided address matches the
// entry. The function assumes the entry was normalized already.
// See ParseAndNormalize.
func (h Entry) AddressMatches(addr net.IP) (bool, error) {
switch a := h.Address.(type) {
case AnyAddr:
return true, nil
case *net.IPNet:
return a.Contains(addr), nil
default:
// This is where name-based validation can occur later.
return false, errors.Newf("unknown address type: %T", addr)
}
}
// DatabaseString returns a string that describes the database field.
func (h Entry) DatabaseString() string {
if h.Database == nil {
return "all"
}
var sb strings.Builder
comma := ""
for _, s := range h.Database {
sb.WriteString(comma)
sb.WriteString(s.String())
comma = ","
}
return sb.String()
}
// UserString returns a string that describes the username field.
func (h Entry) UserString() string {
if h.User == nil {
return "all"
}
var sb strings.Builder
comma := ""
for _, s := range h.User {
sb.WriteString(comma)
sb.WriteString(s.String())
comma = ","
}
return sb.String()
}
// AddressString returns a string that describes the address field.
func (h Entry) AddressString() string {
if h.Address == nil {
// This is possible for conn type "local".
return ""
}
return fmt.Sprintf("%s", h.Address)
}
// OptionsString returns a string that describes the option field.
func (h Entry) OptionsString() string {
var sb strings.Builder
sp := ""
for i, opt := range h.Options {
sb.WriteString(sp)
sb.WriteString(rulebasedscanner.String{Value: opt[0] + "=" + opt[1], Quoted: h.OptionQuotes[i]}.String())
sp = " "
}
return sb.String()
}
// String implements the fmt.Stringer interface.
func (h Entry) String() string {
return Conf{Entries: []Entry{h}}.String()
}
// ParseAndNormalize parses the HBA configuration from the provided
// string and performs two tasks:
//
// - it unicode-normalizes the usernames. Since usernames are
// initialized during pgwire session initialization, this
// ensures that string comparisons can be used to match usernames.
//
// - it ensures there is one entry per username. This simplifies
// the code in the authentication logic.
func ParseAndNormalize(val string) (*Conf, error) {
conf, err := Parse(val)
if err != nil {
return nil, err
}
entries := conf.Entries[:0]
entriesCopied := false
outer:
for i := range conf.Entries {
entry := conf.Entries[i]
// The database field is not supported yet in CockroachDB.
entry.Database = nil
// Normalize the 'all' keyword into AnyAddr.
if addr, ok := entry.Address.(rulebasedscanner.String); ok && addr.IsKeyword("all") {
entry.Address = AnyAddr{}
}
// If we're observing an "any" entry, just keep that and move
// along.
for _, iu := range entry.User {
if iu.IsKeyword("all") {
entry.User = nil
entries = append(entries, entry)
continue outer
}
}
// If we're about to change the size of the slice, first copy the
// result entries.
if len(entry.User) != 1 && !entriesCopied {
entries = append([]Entry(nil), conf.Entries[:len(entries)]...)
entriesCopied = true
}
// Expand and normalize the usernames.
allUsers := entry.User
for userIdx, iu := range allUsers {
entry.User = allUsers[userIdx : userIdx+1]
entry.User[0].Value = tree.Name(iu.Value).Normalize()
if userIdx > 0 {
entry.Generated = true
}
entries = append(entries, entry)
}
}
conf.Entries = entries
return conf, nil
}
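// Editorial illustration (not part of the original file): a minimal sketch of
// how ParseAndNormalize, UserMatches, and AddressMatches fit together. The HBA
// line, the IP address, and the username below are assumed example values.
func exampleParseAndNormalizeUsage() error {
	conf, err := ParseAndNormalize(`host all alice,BOB 0.0.0.0/0 cert`)
	if err != nil {
		return err
	}
	u := username.MakeSQLUsernameFromPreNormalizedString("bob")
	for _, e := range conf.Entries {
		// After normalization each entry carries exactly one username, so
		// UserMatches reduces to a single string comparison.
		ok, err := e.AddressMatches(net.ParseIP("10.0.0.1"))
		if err != nil {
			return err
		}
		fmt.Printf("entry %q: user match=%t, addr match=%t\n",
			e.UserString(), e.UserMatches(u), ok)
	}
	return nil
}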
| pkg/sql/pgwire/hba/hba.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.0005525179440155625,
0.00018439805717207491,
0.00016148496069945395,
0.0001711137592792511,
0.00006308862793957815
] |
{
"id": 1,
"code_window": [
"\tdefer cancel()\n",
"\n",
"\tsettings := cluster.MakeTestingClusterSettings()\n",
"\tsql.FeatureTLSAutoJoinEnabled.Override(ctx, &settings.SV, true)\n",
"\ts, sqldb, _ := serverutils.StartServer(t, base.TestServerArgs{\n",
"\t\tSettings: settings,\n",
"\t})\n",
"\tdefer s.Stopper().Stop(ctx)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t// This logic is for node-node connections.\n",
"\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n",
"\t\tSettings: settings,\n"
],
"file_path": "pkg/cli/connect_join_test.go",
"type": "replace",
"edit_start_line_idx": 90
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package cli
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery/loqrecoverypb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/server/serverpb"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/listenerutil"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
)
// TestCollectInfoFromMultipleStores performs basic sanity checks on replica info collection.
// This is done by running a three node cluster with disk backed storage,
// stopping it, and verifying the content of the collected replica info file.
// This check verifies that:
//
// we successfully iterate over the requested stores,
// data is written in the expected location,
// data contains info only about the requested stores.
func TestCollectInfoFromMultipleStores(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}},
1: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-2"}}},
2: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-3"}}},
},
})
tc.Start(t)
defer tc.Stopper().Stop(ctx)
	// Wait for up-replication.
require.NoError(t, tc.WaitForFullReplication())
// Shutdown.
tc.Stopper().Stop(ctx)
replicaInfoFileName := dir + "/node-1.json"
c.RunWithArgs([]string{"debug", "recover", "collect-info", "--store=" + dir + "/store-1",
"--store=" + dir + "/store-2", replicaInfoFileName})
replicas, err := readReplicaInfoData([]string{replicaInfoFileName})
require.NoError(t, err, "failed to read generated replica info")
stores := map[roachpb.StoreID]interface{}{}
for _, r := range replicas.LocalInfo[0].Replicas {
stores[r.StoreID] = struct{}{}
}
require.Equal(t, 2, len(stores), "collected replicas from stores")
require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), replicas.Version,
"collected version info from stores")
}
// TestCollectInfoFromOnlineCluster verifies that given a test cluster with
// one stopped node, we can collect replica info and metadata from the
// remaining nodes using an admin recovery call.
func TestCollectInfoFromOnlineCluster(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
StoreSpecs: []base.StoreSpec{{InMemory: true}},
Insecure: true,
},
})
tc.Start(t)
defer tc.Stopper().Stop(ctx)
require.NoError(t, tc.WaitForFullReplication())
tc.ToggleReplicateQueues(false)
r := tc.ServerConn(0).QueryRow("select count(*) from crdb_internal.ranges_no_leases")
var totalRanges int
require.NoError(t, r.Scan(&totalRanges), "failed to query range count")
tc.StopServer(0)
replicaInfoFileName := dir + "/all-nodes.json"
c.RunWithArgs([]string{
"debug",
"recover",
"collect-info",
"--insecure",
"--host",
tc.Server(2).AdvRPCAddr(),
replicaInfoFileName,
})
replicas, err := readReplicaInfoData([]string{replicaInfoFileName})
require.NoError(t, err, "failed to read generated replica info")
stores := map[roachpb.StoreID]interface{}{}
totalReplicas := 0
for _, li := range replicas.LocalInfo {
for _, r := range li.Replicas {
stores[r.StoreID] = struct{}{}
}
totalReplicas += len(li.Replicas)
}
require.Equal(t, 2, len(stores), "collected replicas from stores")
require.Equal(t, 2, len(replicas.LocalInfo), "collected info is not split by node")
require.Equal(t, totalRanges*2, totalReplicas, "number of collected replicas")
require.Equal(t, totalRanges, len(replicas.Descriptors),
"number of collected descriptors from metadata")
require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), replicas.Version,
"collected version info from stores")
}
// TestLossOfQuorumRecovery performs a sanity check on the end to end recovery workflow.
// This test doesn't try to validate all possible test cases, but instead checks that
// artifacts are correctly produced and overall cluster recovery can be performed
// where it would be completely broken otherwise.
func TestLossOfQuorumRecovery(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderDeadlock(t, "slow under deadlock")
ctx := context.Background()
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
	// The test cluster contains 3 nodes that we turn into a single node
	// cluster using loss of quorum recovery. After it is stopped, the single
	// node would not be able to make progress, but we apply the recovery
	// procedure and mark the replicas on node 1 as designated survivors. After
	// that, starting the single node should succeed.
tcBefore := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}},
},
})
tcBefore.Start(t)
s := sqlutils.MakeSQLRunner(tcBefore.Conns[0])
s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'")
defer tcBefore.Stopper().Stop(ctx)
	// We use a scratch range to test the special case of a pending update on
	// the descriptor which has to be cleaned up before recovery can proceed.
	// For that we'll ensure it is not empty and then put an intent. After
	// recovery, we'll check that the range is still accessible for writes as
	// normal.
sk := tcBefore.ScratchRange(t)
require.NoError(t,
tcBefore.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value"),
"failed to write value to scratch range")
createIntentOnRangeDescriptor(ctx, t, tcBefore, sk)
node1ID := tcBefore.Servers[0].NodeID()
	// Now that stores are prepared and replicated we can shut down the cluster
	// and perform store manipulations.
tcBefore.Stopper().Stop(ctx)
server1StoreDir := dir + "/store-1"
replicaInfoFileName := dir + "/node-1.json"
c.RunWithArgs(
[]string{"debug", "recover", "collect-info", "--store=" + server1StoreDir,
replicaInfoFileName})
	// Generate a recovery plan and verify that the plan file was generated and contains
	// meaningful data. This is not strictly necessary for verifying the end-to-end flow, but
	// having assertions on the generated data helps to identify which stage of the pipeline
	// broke if the test fails.
planFile := dir + "/recovery-plan.json"
out, err := c.RunWithCaptureArgs(
[]string{"debug", "recover", "make-plan", "--confirm=y", "--plan=" + planFile,
replicaInfoFileName})
require.NoError(t, err, "failed to run make-plan")
require.Contains(t, out, fmt.Sprintf("- node n%d", node1ID),
"planner didn't provide correct apply instructions")
require.FileExists(t, planFile, "generated plan file")
planFileContent, err := os.ReadFile(planFile)
require.NoError(t, err, "test infra failed, can't open created plan file")
plan := loqrecoverypb.ReplicaUpdatePlan{}
jsonpb := protoutil.JSONPb{}
require.NoError(t, jsonpb.Unmarshal(planFileContent, &plan),
"failed to deserialize replica recovery plan")
require.NotEmpty(t, plan.Updates, "resulting plan contains no updates")
out, err = c.RunWithCaptureArgs(
[]string{"debug", "recover", "apply-plan", "--confirm=y", "--store=" + server1StoreDir,
planFile})
require.NoError(t, err, "failed to run apply plan")
	// Check that there was at least one mention of a replica being promoted.
require.Contains(t, out, "will be updated", "no replica updates were recorded")
require.Contains(t, out, fmt.Sprintf("Updated store(s): s%d", node1ID),
"apply plan was not executed on requested node")
tcAfter := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ReplicationMode: base.ReplicationManual,
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}},
},
})
	// NB: If recovery is not performed, the new cluster will just hang on startup.
	// This is caused by the liveness range becoming unavailable and preventing any
	// progress. So it is likely that the test will time out if the basic workflow fails.
tcAfter.Start(t)
defer tcAfter.Stopper().Stop(ctx)
	// In the new cluster, we will still have nodes 2 and 3 remaining from the first
	// attempt. That would increase the number of replicas on system ranges to 5 and we
	// would not be able to up-replicate properly. So we need to decommission the old
	// nodes first before proceeding.
adminClient := tcAfter.Server(0).GetAdminClient(t)
require.NoError(t, runDecommissionNodeImpl(
ctx, adminClient, nodeDecommissionWaitNone, nodeDecommissionChecksSkip, false,
[]roachpb.NodeID{roachpb.NodeID(2), roachpb.NodeID(3)}, tcAfter.Server(0).NodeID()),
"Failed to decommission removed nodes")
for i := 0; i < len(tcAfter.Servers); i++ {
require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error {
store.SetReplicateQueueActive(true)
return nil
}), "Failed to activate replication queue")
}
require.NoError(t, tcAfter.WaitForZoneConfigPropagation(),
"Failed to ensure zone configs are propagated")
require.NoError(t, tcAfter.WaitForFullReplication(), "Failed to perform full replication")
for i := 0; i < len(tcAfter.Servers); i++ {
require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error {
return store.ForceConsistencyQueueProcess()
}), "Failed to force replicas to consistency queue")
}
// As a validation step we will just pick one range and get its replicas to see
// if they were up-replicated to the new nodes.
s = sqlutils.MakeSQLRunner(tcAfter.Conns[0])
r := s.QueryRow(t, "select replicas from crdb_internal.ranges limit 1")
var replicas string
r.Scan(&replicas)
require.Equal(t, "{1,4,5}", replicas, "Replicas after loss of quorum recovery")
	// Validate that the rangelog is updated with recovery records after the cluster restarts.
testutils.SucceedsSoon(t, func() error {
r := s.QueryRow(t,
`select count(*) from system.rangelog where "eventType" = 'unsafe_quorum_recovery'`)
var recoveries int
r.Scan(&recoveries)
if recoveries != len(plan.Updates) {
return errors.Errorf("found %d recovery events while expecting %d", recoveries,
len(plan.Updates))
}
return nil
})
	// We were using the scratch range to test cleanup of a pending transaction on
	// the range descriptor key. We want to verify that after recovery the range is
	// still writable, i.e. recovery succeeded.
require.NoError(t,
tcAfter.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value2"),
"failed to write value to scratch range after recovery")
}
// TestStageVersionCheck verifies that we can force a plan with a different internal
// version onto the cluster. To do this, we create a plan with an internal version
// above the current one but matching major and minor. Then we check that staging fails
// and that the force flag will update the plan version to match the local node.
func TestStageVersionCheck(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderDeadlock(t, "slow under deadlock")
ctx := context.Background()
_, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
listenerReg := listenerutil.NewListenerRegistry()
defer listenerReg.Close()
storeReg := server.NewStickyVFSRegistry()
tc := testcluster.NewTestCluster(t, 4, base.TestClusterArgs{
ReplicationMode: base.ReplicationManual,
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
StickyVFSRegistry: storeReg,
},
},
StoreSpecs: []base.StoreSpec{
{InMemory: true, StickyVFSID: "1"},
},
},
},
ReusableListenerReg: listenerReg,
})
tc.Start(t)
defer tc.Stopper().Stop(ctx)
tc.StopServer(3)
adminClient := tc.Server(0).GetAdminClient(t)
v := clusterversion.ByKey(clusterversion.BinaryVersionKey)
v.Internal++
// To avoid crafting real replicas we use StaleLeaseholderNodeIDs to force
	// the node to stage the plan for verification.
p := loqrecoverypb.ReplicaUpdatePlan{
PlanID: uuid.FastMakeV4(),
Version: v,
ClusterID: tc.Server(0).StorageClusterID().String(),
DecommissionedNodeIDs: []roachpb.NodeID{4},
StaleLeaseholderNodeIDs: []roachpb.NodeID{1},
}
// Attempts to stage plan with different internal version must fail.
_, err := adminClient.RecoveryStagePlan(ctx, &serverpb.RecoveryStagePlanRequest{
Plan: &p,
AllNodes: true,
ForcePlan: false,
ForceLocalInternalVersion: false,
})
require.ErrorContains(t, err, "doesn't match cluster active version")
// Enable "stuck upgrade bypass" to stage plan on the cluster.
_, err = adminClient.RecoveryStagePlan(ctx, &serverpb.RecoveryStagePlanRequest{
Plan: &p,
AllNodes: true,
ForcePlan: false,
ForceLocalInternalVersion: true,
})
require.NoError(t, err, "force local must fix incorrect version")
	// Check that the stored plan has a version matching the cluster version.
ps := loqrecovery.NewPlanStore("", storeReg.Get("1"))
p, ok, err := ps.LoadPlan()
require.NoError(t, err, "failed to read node 0 plan")
require.True(t, ok, "plan was not staged")
require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), p.Version,
"plan version was not updated")
}
func createIntentOnRangeDescriptor(
ctx context.Context, t *testing.T, tcBefore *testcluster.TestCluster, sk roachpb.Key,
) {
txn := kv.NewTxn(ctx, tcBefore.Servers[0].DB(), 1)
var desc roachpb.RangeDescriptor
// Pick one of the predefined split points.
rdKey := keys.RangeDescriptorKey(roachpb.RKey(sk))
if err := txn.GetProto(ctx, rdKey, &desc); err != nil {
t.Fatal(err)
}
desc.NextReplicaID++
if err := txn.Put(ctx, rdKey, &desc); err != nil {
t.Fatal(err)
}
// At this point the intent has been written to Pebble but this
// write was not synced (only the raft log append was synced). We
// need to force another sync, but we're far from the storage
// layer here so the easiest thing to do is simply perform a
// second write. This will force the first write to be persisted
// to disk (the second write may or may not make it to disk due to
// timing).
desc.NextReplicaID++
if err := txn.Put(ctx, rdKey, &desc); err != nil {
t.Fatal(err)
}
}
func TestHalfOnlineLossOfQuorumRecovery(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderDeadlock(t, "slow under deadlock")
ctx := context.Background()
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
listenerReg := listenerutil.NewListenerRegistry()
defer listenerReg.Close()
	// The test cluster contains 3 nodes that we turn into a single node
	// cluster using loss of quorum recovery. To do that, we will terminate
	// two nodes and run recovery on the remaining one. Restarting the node
	// should bring it back to a healthy (but underreplicated) state.
	// Note that we inject reusable listeners into all nodes to prevent tests
	// running in parallel from taking over the ports of stopped nodes and
	// responding to the gateway node with errors.
	// TODO(oleg): Make the test run with 7 nodes to exercise cases where
	// multiple replicas survive. Current startup and allocator behaviour would
	// make this test flaky.
sa := make(map[int]base.TestServerArgs)
for i := 0; i < 3; i++ {
sa[i] = base.TestServerArgs{
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
StickyVFSRegistry: server.NewStickyVFSRegistry(),
},
},
StoreSpecs: []base.StoreSpec{
{
InMemory: true,
},
},
}
}
tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,
},
ReusableListenerReg: listenerReg,
ServerArgsPerNode: sa,
})
tc.Start(t)
s := sqlutils.MakeSQLRunner(tc.Conns[0])
s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'")
defer tc.Stopper().Stop(ctx)
	// We use a scratch range to test the special case of a pending update on
	// the descriptor which has to be cleaned up before recovery can proceed.
	// For that we'll ensure it is not empty and then put an intent. After
	// recovery, we'll check that the range is still accessible for writes as
	// normal.
sk := tc.ScratchRange(t)
require.NoError(t,
tc.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value"),
"failed to write value to scratch range")
createIntentOnRangeDescriptor(ctx, t, tc, sk)
node1ID := tc.Servers[0].NodeID()
	// Now that stores are prepared and replicated we can shut down part of the
	// cluster and perform the recovery steps.
tc.StopServer(1)
tc.StopServer(2)
	// Generate a recovery plan and verify that the plan file was generated and contains
	// meaningful data. This is not strictly necessary for verifying the end-to-end flow, but
	// having assertions on the generated data helps to identify which stage of the pipeline
	// broke if the test fails.
planFile := dir + "/recovery-plan.json"
out, err := c.RunWithCaptureArgs(
[]string{
"debug",
"recover",
"make-plan",
"--confirm=y",
"--certs-dir=test_certs",
"--host=" + tc.Server(0).AdvRPCAddr(),
"--plan=" + planFile,
})
require.NoError(t, err, "failed to run make-plan")
require.Contains(t, out, fmt.Sprintf("- node n%d", node1ID),
"planner didn't provide correct apply instructions")
require.FileExists(t, planFile, "generated plan file")
planFileContent, err := os.ReadFile(planFile)
require.NoError(t, err, "test infra failed, can't open created plan file")
plan := loqrecoverypb.ReplicaUpdatePlan{}
jsonpb := protoutil.JSONPb{}
require.NoError(t, jsonpb.Unmarshal(planFileContent, &plan),
"failed to deserialize replica recovery plan")
require.NotEmpty(t, plan.Updates, "resulting plan contains no updates")
out, err = c.RunWithCaptureArgs(
[]string{
"debug", "recover", "apply-plan",
"--certs-dir=test_certs",
"--host=" + tc.Server(0).AdvRPCAddr(),
"--confirm=y", planFile,
})
require.NoError(t, err, "failed to run apply plan")
	// Check that there was at least one mention of a replica being promoted.
require.Contains(t, out, "updating replica", "no replica updates were recorded")
require.Contains(t, out,
fmt.Sprintf("Plan staged. To complete recovery restart nodes n%d.", node1ID),
"apply plan failed to stage on expected nodes")
	// Verify that the plan is staged on the nodes.
out, err = c.RunWithCaptureArgs(
[]string{
"debug", "recover", "verify",
"--certs-dir=test_certs",
"--host=" + tc.Server(0).AdvRPCAddr(),
planFile,
})
require.NoError(t, err, "failed to run verify plan")
require.Contains(t, out, "ERROR: loss of quorum recovery is not finished yet")
tc.StopServer(0)
	// NB: If recovery is not performed, the server will just hang on startup.
	// This is caused by the liveness range becoming unavailable and preventing any
	// progress. So it is likely that the test will time out if the basic workflow fails.
require.NoError(t, tc.RestartServer(0), "restart failed")
s = sqlutils.MakeSQLRunner(tc.Conns[0])
	// Verify that the post-start cleanup performed node decommissioning that
	// prevents the old nodes from rejoining.
ac := tc.GetAdminClient(t, 0)
testutils.SucceedsSoon(t, func() error {
dr, err := ac.DecommissionStatus(ctx,
&serverpb.DecommissionStatusRequest{NodeIDs: []roachpb.NodeID{2, 3}})
if err != nil {
return err
}
for _, s := range dr.Status {
if s.Membership != livenesspb.MembershipStatus_DECOMMISSIONED {
return errors.Newf("expecting n%d to be decommissioned", s.NodeID)
}
}
return nil
})
	// Validate that the rangelog is updated with recovery records after the cluster restarts.
testutils.SucceedsSoon(t, func() error {
r := s.QueryRow(t,
`select count(*) from system.rangelog where "eventType" = 'unsafe_quorum_recovery'`)
var recoveries int
r.Scan(&recoveries)
if recoveries != len(plan.Updates) {
return errors.Errorf("found %d recovery events while expecting %d", recoveries,
len(plan.Updates))
}
return nil
})
	// Verify that recovery is complete.
out, err = c.RunWithCaptureArgs(
[]string{
"debug", "recover", "verify",
"--certs-dir=test_certs",
"--host=" + tc.Server(0).AdvRPCAddr(),
planFile,
})
require.NoError(t, err, "failed to run verify plan")
require.Contains(t, out, "Loss of quorum recovery is complete.")
	// We were using the scratch range to test cleanup of a pending transaction on
	// the range descriptor key. We want to verify that after recovery the range is
	// still writable, i.e. recovery succeeded.
require.NoError(t,
tc.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value2"),
"failed to write value to scratch range after recovery")
	// Finally, split the scratch range to ensure metadata ranges are recovered.
_, _, err = tc.Server(0).SplitRange(testutils.MakeKey(sk, []byte{42}))
require.NoError(t, err, "failed to split range after recovery")
}
func TestUpdatePlanVsClusterDiff(t *testing.T) {
defer leaktest.AfterTest(t)()
var empty uuid.UUID
planID, _ := uuid.FromString("123e4567-e89b-12d3-a456-426614174000")
otherPlanID, _ := uuid.FromString("123e4567-e89b-12d3-a456-426614174001")
applyTime, _ := time.Parse(time.RFC3339, "2023-01-24T10:30:00Z")
status := func(id roachpb.NodeID, pending, applied uuid.UUID, err string) loqrecoverypb.NodeRecoveryStatus {
s := loqrecoverypb.NodeRecoveryStatus{
NodeID: id,
}
if !pending.Equal(empty) {
s.PendingPlanID = &pending
}
if !applied.Equal(empty) {
s.AppliedPlanID = &applied
s.ApplyTimestamp = &applyTime
}
s.Error = err
return s
}
for _, d := range []struct {
name string
updatedNodes []int
staleLeases []int
status []loqrecoverypb.NodeRecoveryStatus
pending int
errors int
report []string
}{
{
name: "after staging",
updatedNodes: []int{1, 2},
staleLeases: []int{3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, planID, empty, ""),
status(3, planID, empty, ""),
},
pending: 3,
report: []string{
" plan application pending on node n1",
" plan application pending on node n2",
" plan application pending on node n3",
},
},
{
name: "partially applied",
updatedNodes: []int{1, 2, 3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, empty, planID, ""),
status(3, planID, empty, ""),
},
pending: 2,
report: []string{
" plan application pending on node n1",
" plan applied successfully on node n2",
" plan application pending on node n3",
},
},
{
name: "fully applied",
updatedNodes: []int{1, 2, 3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, empty, planID, ""),
status(2, empty, planID, ""),
status(3, empty, planID, ""),
},
report: []string{
" plan applied successfully on node n1",
" plan applied successfully on node n2",
" plan applied successfully on node n3",
},
},
{
name: "staging lost no node",
updatedNodes: []int{1, 2, 3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(3, planID, empty, ""),
},
pending: 2,
errors: 1,
report: []string{
" plan application pending on node n1",
" plan application pending on node n3",
" failed to find node n2 where plan must be staged",
},
},
{
name: "staging lost no plan",
updatedNodes: []int{1, 2},
staleLeases: []int{3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, planID, empty, ""),
status(3, empty, empty, ""),
},
pending: 2,
errors: 1,
report: []string{
" plan application pending on node n1",
" plan application pending on node n2",
" failed to find staged plan on node n3",
},
},
{
name: "partial failure",
updatedNodes: []int{1, 2, 3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, empty, planID, "found stale replica"),
status(3, planID, empty, ""),
},
pending: 2,
errors: 1,
report: []string{
" plan application pending on node n1",
" plan application failed on node n2: found stale replica",
" plan application pending on node n3",
},
},
{
name: "no plan",
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, empty, planID, "found stale replica"),
status(3, empty, otherPlanID, ""),
},
report: []string{
" node n1 staged plan: 123e4567-e89b-12d3-a456-426614174000",
" node n2 failed to apply plan 123e4567-e89b-12d3-a456-426614174000: found stale replica",
" node n3 applied plan: 123e4567-e89b-12d3-a456-426614174001 at 2023-01-24 10:30:00 +0000 UTC",
},
},
{
name: "wrong plan",
updatedNodes: []int{1, 2},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, otherPlanID, empty, ""),
status(3, otherPlanID, empty, ""),
},
pending: 1,
errors: 2,
report: []string{
" plan application pending on node n1",
" unexpected staged plan 123e4567-e89b-12d3-a456-426614174001 on node n2",
" unexpected staged plan 123e4567-e89b-12d3-a456-426614174001 on node n3",
},
},
} {
t.Run(d.name, func(t *testing.T) {
plan := loqrecoverypb.ReplicaUpdatePlan{
PlanID: planID,
}
			// The plan will contain a single replica update for each requested node.
rangeSeq := 1
for _, id := range d.updatedNodes {
plan.Updates = append(plan.Updates, loqrecoverypb.ReplicaUpdate{
RangeID: roachpb.RangeID(rangeSeq),
StartKey: nil,
OldReplicaID: roachpb.ReplicaID(1),
NewReplica: roachpb.ReplicaDescriptor{
NodeID: roachpb.NodeID(id),
StoreID: roachpb.StoreID(id),
ReplicaID: roachpb.ReplicaID(rangeSeq + 17),
},
NextReplicaID: roachpb.ReplicaID(rangeSeq + 18),
})
}
for _, id := range d.staleLeases {
plan.StaleLeaseholderNodeIDs = append(plan.StaleLeaseholderNodeIDs, roachpb.NodeID(id))
}
diff := diffPlanWithNodeStatus(plan, d.status)
require.Equal(t, d.pending, diff.pending, "number of pending changes")
require.Equal(t, d.errors, diff.errors, "number of node errors")
if d.report != nil {
require.Equal(t, len(d.report), len(diff.report), "number of lines in diff")
for i := range d.report {
require.Equal(t, d.report[i], diff.report[i], "wrong line %d of report", i)
}
}
})
}
}
func TestTruncateKeyOutput(t *testing.T) {
defer leaktest.AfterTest(t)()
for _, d := range []struct {
len uint
result string
}{
{
len: 13,
result: "/System/No...",
},
{
len: 30,
result: "/System/NodeLiveness",
},
{
len: 3,
result: "/Sy",
},
{
len: 4,
result: "/...",
},
} {
t.Run("", func(t *testing.T) {
helper := outputFormatHelper{
maxPrintedKeyLength: d.len,
}
require.Equal(t, d.result, helper.formatKey(keys.NodeLivenessPrefix))
})
}
}
func TestTruncateSpanOutput(t *testing.T) {
defer leaktest.AfterTest(t)()
for _, d := range []struct {
len uint
result string
}{
{
len: 30,
result: "/System/{NodeLiveness-Syste...",
},
{
len: 90,
result: "/System/{NodeLiveness-SystemSpanConfigKeys}",
},
{
len: 3,
result: "/Sy",
},
{
len: 4,
result: "/...",
},
} {
t.Run("", func(t *testing.T) {
helper := outputFormatHelper{
maxPrintedKeyLength: d.len,
}
require.Equal(t, d.result, helper.formatSpan(roachpb.Span{
Key: keys.NodeLivenessPrefix,
EndKey: keys.SystemSpanConfigPrefix,
}))
})
}
}
| pkg/cli/debug_recover_loss_of_quorum_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.9654173851013184,
0.04893037676811218,
0.00016133660392370075,
0.00017780717462301254,
0.1976744532585144
] |
{
"id": 1,
"code_window": [
"\tdefer cancel()\n",
"\n",
"\tsettings := cluster.MakeTestingClusterSettings()\n",
"\tsql.FeatureTLSAutoJoinEnabled.Override(ctx, &settings.SV, true)\n",
"\ts, sqldb, _ := serverutils.StartServer(t, base.TestServerArgs{\n",
"\t\tSettings: settings,\n",
"\t})\n",
"\tdefer s.Stopper().Stop(ctx)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t// This logic is for node-node connections.\n",
"\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n",
"\t\tSettings: settings,\n"
],
"file_path": "pkg/cli/connect_join_test.go",
"type": "replace",
"edit_start_line_idx": 90
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package geogfn
import (
"math"
"testing"
"github.com/cockroachdb/cockroach/pkg/geo"
"github.com/cockroachdb/cockroach/pkg/geo/geotest"
"github.com/golang/geo/s1"
"github.com/stretchr/testify/require"
"github.com/twpayne/go-geom"
)
type unaryOperatorExpectedResult struct {
expectedArea float64
expectedLength float64
expectedPerimeter float64
}
var unaryOperatorTestCases = []struct {
wkt string
sphere unaryOperatorExpectedResult
spheroid unaryOperatorExpectedResult
}{
{
wkt: "POINT(1.0 1.0)",
},
{
wkt: "LINESTRING(1.0 1.0, 2.0 2.0, 3.0 3.0)",
sphere: unaryOperatorExpectedResult{
expectedLength: 314403.4167139704,
},
spheroid: unaryOperatorExpectedResult{
expectedLength: 313705.47851796006,
},
},
{
wkt: "POLYGON((0.0 0.0, 1.0 0.0, 1.0 1.0, 0.0 0.0))",
sphere: unaryOperatorExpectedResult{
expectedArea: 6182486746.455541,
expectedPerimeter: 379639.75723776827,
},
spheroid: unaryOperatorExpectedResult{
expectedArea: 6154854786.721433,
expectedPerimeter: 378793.4476424126,
},
},
{
wkt: "SRID=4004;LINESTRING(1.0 1.0, 2.0 2.0, 3.0 3.0)",
sphere: unaryOperatorExpectedResult{
expectedLength: 314367.99984330626,
},
spheroid: unaryOperatorExpectedResult{
expectedLength: 313672.2213232639,
},
},
{
wkt: "SRID=4004;POLYGON((0.0 0.0, 1.0 0.0, 1.0 1.0, 0.0 0.0))",
sphere: unaryOperatorExpectedResult{
expectedArea: 6181093937.160788,
expectedPerimeter: 379596.9916332415,
},
spheroid: unaryOperatorExpectedResult{
expectedArea: 6153550906.915973,
expectedPerimeter: 378753.30454341066,
},
},
{
wkt: "POLYGON((0.0 0.0, 1.0 0.0, 1.0 1.0, 0.0 0.0), (0.1 0.1, 0.2 0.1, 0.2 0.2, 0.1 0.1))",
sphere: unaryOperatorExpectedResult{
expectedArea: 6120665080.445181,
expectedPerimeter: 417604.087288779,
},
spheroid: unaryOperatorExpectedResult{
expectedArea: 6093309483.796953,
expectedPerimeter: 416673.1281208417,
},
},
{
wkt: "MULTIPOINT((1.0 1.0), (2.0 2.0))",
},
{
wkt: "MULTILINESTRING((1.0 1.0, 2.0 2.0, 3.0 3.0), (6.0 6.0, 7.0 6.0))",
sphere: unaryOperatorExpectedResult{
expectedLength: 424989.34283080546,
},
spheroid: unaryOperatorExpectedResult{
expectedLength: 424419.1832424484,
},
},
{
wkt: "MULTIPOLYGON(((3.0 3.0, 4.0 3.0, 4.0 4.0, 3.0 3.0)), ((0.0 0.0, 1.0 0.0, 1.0 1.0, 0.0 0.0), (0.1 0.1, 0.2 0.1, 0.2 0.2, 0.1 0.1)))",
sphere: unaryOperatorExpectedResult{
expectedArea: 12294677441.341661,
expectedPerimeter: 796947.8473004946,
},
spheroid: unaryOperatorExpectedResult{
expectedArea: 12240009431.86529,
expectedPerimeter: 795178.6592721482,
},
},
{
wkt: "GEOMETRYCOLLECTION (POINT (40 10),LINESTRING (10 10, 20 20, 10 40),POLYGON ((40 40, 20 45, 45 30, 40 40)))",
sphere: unaryOperatorExpectedResult{
expectedArea: 691570576619.521,
expectedLength: 9637039.459995955,
expectedPerimeter: 9637039.459995955,
},
spheroid: unaryOperatorExpectedResult{
expectedArea: 691638769184.1753,
expectedLength: 9632838.874863794,
expectedPerimeter: 9632838.874863794,
},
},
{
wkt: "GEOMETRYCOLLECTION (MULTIPOINT EMPTY, POINT (40 10),LINESTRING (10 10, 20 20, 10 40),POLYGON ((40 40, 20 45, 45 30, 40 40)))",
sphere: unaryOperatorExpectedResult{
expectedArea: 691570576619.521,
expectedLength: 9637039.459995955,
expectedPerimeter: 9637039.459995955,
},
spheroid: unaryOperatorExpectedResult{
expectedArea: 691638769184.1753,
expectedLength: 9632838.874863794,
expectedPerimeter: 9632838.874863794,
},
},
{
wkt: "GEOMETRYCOLLECTION EMPTY",
sphere: unaryOperatorExpectedResult{
expectedArea: 0,
expectedLength: 0,
expectedPerimeter: 0,
},
spheroid: unaryOperatorExpectedResult{
expectedArea: 0,
expectedLength: 0,
expectedPerimeter: 0,
},
},
}
func TestArea(t *testing.T) {
for _, tc := range unaryOperatorTestCases {
t.Run(tc.wkt, func(t *testing.T) {
g, err := geo.ParseGeography(tc.wkt)
require.NoError(t, err)
for _, subTC := range []struct {
desc string
useSphereOrSpheroid UseSphereOrSpheroid
expected float64
}{
{"sphere", UseSphere, tc.sphere.expectedArea},
{"spheroid", UseSpheroid, tc.spheroid.expectedArea},
} {
t.Run(subTC.desc, func(t *testing.T) {
ret, err := Area(g, subTC.useSphereOrSpheroid)
require.NoError(t, err)
require.LessOrEqualf(
t,
math.Abs(ret-subTC.expected),
0.1, // allow 0.1m^2 difference.
"expected %f, found %f",
subTC.expected,
ret,
)
})
}
})
}
}
func TestPerimeter(t *testing.T) {
for _, tc := range unaryOperatorTestCases {
t.Run(tc.wkt, func(t *testing.T) {
g, err := geo.ParseGeography(tc.wkt)
require.NoError(t, err)
for _, subTC := range []struct {
desc string
useSphereOrSpheroid UseSphereOrSpheroid
expected float64
}{
{"sphere", UseSphere, tc.sphere.expectedPerimeter},
{"spheroid", UseSpheroid, tc.spheroid.expectedPerimeter},
} {
t.Run(subTC.desc, func(t *testing.T) {
ret, err := Perimeter(g, subTC.useSphereOrSpheroid)
require.NoError(t, err)
require.LessOrEqualf(
t,
math.Abs(ret-subTC.expected),
0.01, // allow 0.01m difference.
"expected %f, found %f",
subTC.expected,
ret,
)
})
}
})
}
}
func TestLength(t *testing.T) {
for _, tc := range unaryOperatorTestCases {
t.Run(tc.wkt, func(t *testing.T) {
g, err := geo.ParseGeography(tc.wkt)
require.NoError(t, err)
for _, subTC := range []struct {
desc string
useSphereOrSpheroid UseSphereOrSpheroid
expected float64
}{
{"sphere", UseSphere, tc.sphere.expectedLength},
{"spheroid", UseSpheroid, tc.spheroid.expectedLength},
} {
t.Run(subTC.desc, func(t *testing.T) {
ret, err := Length(g, subTC.useSphereOrSpheroid)
require.NoError(t, err)
require.LessOrEqualf(
t,
math.Abs(ret-subTC.expected),
0.01, // allow 0.01m difference
"expected %f, found %f",
subTC.expected,
ret,
)
})
}
})
}
}
func TestProject(t *testing.T) {
var testCases = []struct {
desc string
point geo.Geography
distance float64
azimuth float64
projected geo.Geography
}{
{
"POINT(0 0), 100000, radians(45)",
geo.MustMakeGeographyFromGeomT(geom.NewPointFlat(geom.XY, []float64{0, 0}).SetSRID(4326)),
100000,
45 * math.Pi / 180.0,
geo.MustMakeGeographyFromGeomT(geom.NewPointFlat(geom.XY, []float64{0.6352310291255374, 0.6394723347291977}).SetSRID(4326)),
},
{
"SRID=4004;POINT(0 0), 100000, radians(45)",
geo.MustMakeGeographyFromGeomT(geom.NewPointFlat(geom.XY, []float64{0, 0}).SetSRID(4004)),
100000,
45 * math.Pi / 180.0,
geo.MustMakeGeographyFromGeomT(geom.NewPointFlat(geom.XY, []float64{0.6353047281438549, 0.6395336363116583}).SetSRID(4004)),
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
projected, err := Project(tc.point, tc.distance, s1.Angle(tc.azimuth))
require.NoError(t, err)
geotest.RequireGeographyInEpsilon(
t,
tc.projected,
projected,
geotest.Epsilon,
)
})
}
errorTestCases := []struct {
p string
d float64
a s1.Angle
expectedErr string
}{
{
"POINT EMPTY",
0,
0,
"cannot project POINT EMPTY",
},
}
for _, tc := range errorTestCases {
t.Run(tc.expectedErr, func(t *testing.T) {
p, err := geo.ParseGeography(tc.p)
require.NoError(t, err)
_, err = Project(p, tc.d, tc.a)
require.Error(t, err)
require.EqualError(t, err, tc.expectedErr)
})
}
}
| pkg/geo/geogfn/unary_operators_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.0005412555183283985,
0.00019995584443677217,
0.0001584081182954833,
0.0001761512248776853,
0.00008344167872564867
] |
{
"id": 1,
"code_window": [
"\tdefer cancel()\n",
"\n",
"\tsettings := cluster.MakeTestingClusterSettings()\n",
"\tsql.FeatureTLSAutoJoinEnabled.Override(ctx, &settings.SV, true)\n",
"\ts, sqldb, _ := serverutils.StartServer(t, base.TestServerArgs{\n",
"\t\tSettings: settings,\n",
"\t})\n",
"\tdefer s.Stopper().Stop(ctx)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t// This logic is for node-node connections.\n",
"\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n",
"\t\tSettings: settings,\n"
],
"file_path": "pkg/cli/connect_join_test.go",
"type": "replace",
"edit_start_line_idx": 90
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tpcc
import (
"github.com/cockroachdb/cockroach/pkg/util/bufalloc"
"github.com/cockroachdb/cockroach/pkg/workload/workloadimpl"
"golang.org/x/exp/rand"
)
var cLastTokens = [...]string{
"BAR", "OUGHT", "ABLE", "PRI", "PRES",
"ESE", "ANTI", "CALLY", "ATION", "EING"}
func (w *tpcc) initNonUniformRandomConstants() {
rng := rand.New(rand.NewSource(RandomSeed.Seed()))
w.cLoad = rng.Intn(256)
w.cItemID = rng.Intn(1024)
w.cCustomerID = rng.Intn(8192)
}
const precomputedLength = 10000
const aCharsAlphabet = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890`
const lettersAlphabet = `ABCDEFGHIJKLMNOPQRSTUVWXYZ`
const numbersAlphabet = `1234567890`
type tpccRand struct {
*rand.Rand
aChars, letters, numbers workloadimpl.PrecomputedRand
}
type aCharsOffset int
type lettersOffset int
type numbersOffset int
func randStringFromAlphabet(
rng *rand.Rand,
a *bufalloc.ByteAllocator,
minLen, maxLen int,
pr workloadimpl.PrecomputedRand,
prOffset *int,
) []byte {
size := maxLen
if maxLen-minLen != 0 {
size = int(randInt(rng, minLen, maxLen))
}
if size == 0 {
return nil
}
var b []byte
*a, b = a.Alloc(size, 0 /* extraCap */)
*prOffset = pr.FillBytes(*prOffset, b)
return b
}
// randAStringInitialDataOnly generates a random alphanumeric string of length
// between min and max inclusive. It uses a set of pregenerated random data,
// which the spec allows only for initial data. See 4.3.2.2.
//
// For speed, this is done using precomputed random data, which is explicitly
// allowed by the spec for initial data only. See 4.3.2.1.
func randAStringInitialDataOnly(
rng *tpccRand, ao *aCharsOffset, a *bufalloc.ByteAllocator, min, max int,
) []byte {
return randStringFromAlphabet(rng.Rand, a, min, max, rng.aChars, (*int)(ao))
}
// randNStringInitialDataOnly generates a random numeric string of length
// between min and max inclusive. See 4.3.2.2.
//
// For speed, this is done using precomputed random data, which is explicitly
// allowed by the spec for initial data only. See 4.3.2.1.
func randNStringInitialDataOnly(
rng *tpccRand, no *numbersOffset, a *bufalloc.ByteAllocator, min, max int,
) []byte {
return randStringFromAlphabet(rng.Rand, a, min, max, rng.numbers, (*int)(no))
}
// randStateInitialDataOnly produces a random US state. (spec just says 2
// letters)
//
// For speed, this is done using precomputed random data, which is explicitly
// allowed by the spec for initial data only. See 4.3.2.1.
func randStateInitialDataOnly(rng *tpccRand, lo *lettersOffset, a *bufalloc.ByteAllocator) []byte {
return randStringFromAlphabet(rng.Rand, a, 2, 2, rng.letters, (*int)(lo))
}
// randOriginalStringInitialDataOnly generates a random a-string[26..50] with
// 10% chance of containing the string "ORIGINAL" somewhere in the middle of the
// string. See 4.3.3.1.
//
// For speed, this is done using precomputed random data, which is explicitly
// allowed by the spec for initial data only. See 4.3.2.1.
func randOriginalStringInitialDataOnly(
rng *tpccRand, ao *aCharsOffset, a *bufalloc.ByteAllocator,
) []byte {
if rng.Rand.Intn(9) == 0 {
l := int(randInt(rng.Rand, 26, 50))
off := int(randInt(rng.Rand, 0, l-8))
var buf []byte
*a, buf = a.Alloc(l, 0 /* extraCap */)
copy(buf[:off], randAStringInitialDataOnly(rng, ao, a, off, off))
copy(buf[off:off+8], originalString)
copy(buf[off+8:], randAStringInitialDataOnly(rng, ao, a, l-off-8, l-off-8))
return buf
}
return randAStringInitialDataOnly(rng, ao, a, 26, 50)
}
// randZipInitialDataOnly produces a random "zip code" - a 4-digit number plus
// the constant "11111". See 4.3.2.7.
//
// For speed, this is done using precomputed random data, which is explicitly
// allowed by the spec for initial data only. See 4.3.2.1.
func randZipInitialDataOnly(rng *tpccRand, no *numbersOffset, a *bufalloc.ByteAllocator) []byte {
var buf []byte
*a, buf = a.Alloc(9, 0 /* extraCap */)
copy(buf[:4], randNStringInitialDataOnly(rng, no, a, 4, 4))
copy(buf[4:], `11111`)
return buf
}
// randTax produces a random tax between [0.0000..0.2000]
// See 2.1.5.
func randTax(rng *rand.Rand) float64 {
return float64(randInt(rng, 0, 2000)) / float64(10000.0)
}
// randInt returns a number within [min, max] inclusive.
// See 2.1.4.
func randInt(rng *rand.Rand, min, max int) int64 {
return int64(rng.Intn(max-min+1) + min)
}
// randCLastSyllables returns a customer last name string generated according to
// the table in 4.3.2.3. Given a number between 0 and 999, each of the three
// syllables is determined by the corresponding digit in the three digit
// representation of the number. For example, the number 371 generates the name
// PRICALLYOUGHT, and the number 40 generates the name BARPRESBAR.
func randCLastSyllables(n int, a *bufalloc.ByteAllocator) []byte {
const scratchLen = 3 * 5 // 3 entries from cLastTokens * max len of an entry
var buf []byte
*a, buf = a.Alloc(scratchLen, 0 /* extraCap */)
buf = buf[:0]
buf = append(buf, cLastTokens[n/100]...)
n = n % 100
buf = append(buf, cLastTokens[n/10]...)
n = n % 10
buf = append(buf, cLastTokens[n]...)
return buf
}
// See 4.3.2.3.
func (w *tpcc) randCLast(rng *rand.Rand, a *bufalloc.ByteAllocator) []byte {
return randCLastSyllables(((rng.Intn(256)|rng.Intn(1000))+w.cLoad)%1000, a)
}
// Return a non-uniform random customer ID. See 2.1.6.
func (w *tpcc) randCustomerID(rng *rand.Rand) int {
return ((rng.Intn(1024) | (rng.Intn(3000) + 1) + w.cCustomerID) % 3000) + 1
}
// Return a non-uniform random item ID. See 2.1.6.
func (w *tpcc) randItemID(rng *rand.Rand) int {
return ((rng.Intn(8190) | (rng.Intn(100000) + 1) + w.cItemID) % 100000) + 1
}
| pkg/workload/tpcc/random.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.00023304919886868447,
0.00017610118084121495,
0.0001664141018409282,
0.00017376909090671688,
0.000014542243661708198
] |
{
"id": 1,
"code_window": [
"\tdefer cancel()\n",
"\n",
"\tsettings := cluster.MakeTestingClusterSettings()\n",
"\tsql.FeatureTLSAutoJoinEnabled.Override(ctx, &settings.SV, true)\n",
"\ts, sqldb, _ := serverutils.StartServer(t, base.TestServerArgs{\n",
"\t\tSettings: settings,\n",
"\t})\n",
"\tdefer s.Stopper().Stop(ctx)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t// This logic is for node-node connections.\n",
"\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n",
"\t\tSettings: settings,\n"
],
"file_path": "pkg/cli/connect_join_test.go",
"type": "replace",
"edit_start_line_idx": 90
} | # =============================================================================
# window.opt contains normalization rules for the Window operator.
# =============================================================================
# TODO(justin): add a rule to best-effort collapse same ordering+partition
# window functions, like in:
# SELECT
# rank() OVER (PARTITION BY i), rank() OVER (PARTITION BY i, 1), rank() OVER (PARTITION BY i, 2)
# FROM
# a
# EliminateWindow removes a Window operator with no window functions (which can
# occur via column pruning).
[EliminateWindow, Normalize]
(Window $input:* [])
=>
$input
# ReduceWindowPartitionCols reduces a set of partition columns to a simpler form
# using FDs. Window partition columns are redundant if they are functionally
# determined by other window partition columns.
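#
# For example (an editorial sketch, not from the original file; a table kv with
# primary key k is assumed), since k functionally determines v:
#
#   rank() OVER (PARTITION BY k, v)
#   =>
#   rank() OVER (PARTITION BY k)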
[ReduceWindowPartitionCols, Normalize]
(Window
$input:*
$fn:*
$private:* &
^(ColsAreEmpty
$redundantCols:(RedundantCols
$input
(WindowPartition $private)
)
)
)
=>
(Window
$input
$fn
(RemoveWindowPartitionCols $private $redundantCols)
)
# SimplifyWindowOrdering reduces an ordering to a simpler form using FDs.
#
# This rule does not match when window functions have a RANGE frame with an
# offset, like max(a) OVER (PARTITION BY a ORDER BY a RANGE 1 PRECEDING). The
# ordering column cannot be pruned because the execution engine requires an
# ordering column in this case, even if the ordering is constant.
[SimplifyWindowOrdering, Normalize]
(Window
$input:*
$fn:*
$private:* &
(CanSimplifyWindowOrdering $input $private) &
^(HasRangeFrameWithOffset $fn)
)
=>
(Window $input $fn (SimplifyWindowOrdering $input $private))
# PushSelectIntoWindow pushes down a Select which can be satisfied by only the
# functional closure of the columns being partitioned over. This is valid
# because it's "all-or-nothing" - we only entirely eliminate a partition or
# don't eliminate it at all.
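#
# For example (an editorial sketch, not from the original file; table abc and
# column b are assumed), a filter on the partition column can be pushed below
# the Window:
#
#   SELECT * FROM (SELECT b, rank() OVER (PARTITION BY b) FROM abc) WHERE b > 10
#   =>
#   SELECT b, rank() OVER (PARTITION BY b)
#   FROM (SELECT b FROM abc WHERE b > 10)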
[PushSelectIntoWindow, Normalize]
(Select
(Window $input:* $fn:* $private:*)
$filters:[
...
$item:* &
(ColsAreDeterminedBy
(OuterCols $item)
$partitionCols:(WindowPartition $private)
$input
)
...
]
)
=>
(Select
(Window
(Select
$input
(ExtractDeterminedConditions
$filters
$partitionCols
$input
)
)
$fn
$private
)
(ExtractUndeterminedConditions
$filters
$partitionCols
$input
)
)
# PushLimitIntoWindow moves a Limit below a Window when able. This is
# all-or-nothing. Even if we could push the limit below *some* of the window
# functions, if there are any we cannot, then we don't. This is because
# computing additional window functions is not that expensive, and the
# expensive part is doing the sorting and partitioning. Once exec supports
# passing orderings through and does not require re-partitioning and re-sorting
# of window functions, pushing past some-but-not-all of the window functions
# might be profitable.
#
# SELECT rank() OVER (ORDER BY c) FROM abc ORDER BY c LIMIT 10
# =>
# SELECT
# rank() OVER (ORDER BY c)
# FROM
# (SELECT c FROM abc ORDER BY c LIMIT 10)
#
# SELECT rank() OVER (PARTITION BY b ORDER BY c) FROM abc LIMIT 10
# =>
# SELECT
# rank() OVER (PARTITION BY b ORDER BY c)
# FROM
# (SELECT b, c FROM abc ORDER BY b, c LIMIT 10)
#
# First, we construct a "segmented ordering" consisting of the Window's
# partition columns followed by its ordering columns (the relative positions of
# the partition columns are arbitrary). This ordering is useful because it
# performs the partitioning and then the ordering within each partition. If
# this ordering does not imply the Limit's ordering, we do not proceed.
#
# Since we now know that the segmented ordering is stronger than the Limit's
# ordering, it's safe to replace the limit's ordering with it.
#
# The Limit having the segmented ordering means that there are three kinds of
# partitions:
# 1. those that are completely contained within the limited set of rows,
# 2. those that are completely excluded from the set of rows, and
# 3. *at most one* partition which is "cut off" partway through.
# Including the window function's ordering in the Limit's ordering does not
# matter for (1)- and (2)-style partitions (since the window function itself
# will re-sort them), but for the (3)-style partition, we need to ensure that
# the limit operator allows through a prefix of it, rather than an arbitrary
# subset.
#
# Finally, we require that every window function+frame pair being computed has
# the "prefix-safe" property. A window function is prefix safe if it can be
# correctly computed over only a prefix of a partition. For example, rank() has
# this property because rows that come later in the ordering don't affect the
# rank of the rows before, but avg()+UNBOUNDED {PRECEDING,FOLLOWING} doesn't,
# because we must see the entire partition to compute the average over it.
#
# TODO(justin): Add a rule that translates a limit with an ordering on rank()
# or dense_rank() into one using the ordering of the window function. This will
# allow us to push down limits in cases like:
#
# SELECT rank() OVER (ORDER BY f) rnk FROM a ORDER BY rnk LIMIT 10
# =>
# SELECT rank() OVER (ORDER BY f) rnk FROM a ORDER BY f LIMIT 10
# =>
# SELECT rank() OVER (ORDER BY f) rnk FROM (SELECT * FROM a ORDER BY f LIMIT 10)
[PushLimitIntoWindow, Normalize]
(Limit
(Window $input:* $fns:* & (AllArePrefixSafe $fns) $private:*)
$limit:*
$ordering:* &
(OrderingCanProjectCols
$ordering
$inputCols:(OutputCols $input)
) &
(Let
($newOrdering $ok):(MakeSegmentedOrdering
$input
(WindowPartition $private)
(WindowOrdering $private)
$ordering
)
$ok
)
)
=>
(Window
(Limit
$input
$limit
(PruneOrdering
(DerefOrderingChoice $newOrdering)
$inputCols
)
)
$fns
$private
)
| pkg/sql/opt/norm/rules/window.opt | 0 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.0003498551086522639,
0.0001838934695115313,
0.0001618615788174793,
0.00017552518693264574,
0.000039320500945905223
] |
{
"id": 2,
"code_window": [
"\t// Number of nodes. Increasing this will make the test flaky as written\n",
"\t// because it relies on finding r1 on n1.\n",
"\tconst n = 3\n",
"\n",
"\tclusterArgs := base.TestClusterArgs{\n",
"\t\tServerArgsPerNode: map[int]base.TestServerArgs{},\n",
"\t}\n",
"\tvar storePaths []string\n",
"\tfor i := 0; i < n; i++ {\n",
"\t\targs := base.TestServerArgs{}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tServerArgs: base.TestServerArgs{\n",
"\t\t\t// This logic is specific to the storage layer.\n",
"\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n",
"\t\t},\n"
],
"file_path": "pkg/cli/debug_check_store_test.go",
"type": "add",
"edit_start_line_idx": 46
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package cli
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery/loqrecoverypb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/server/serverpb"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/listenerutil"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
)
// TestCollectInfoFromMultipleStores performs basic sanity checks on replica info collection.
// This is done by running a three node cluster with disk backed storage,
// stopping it, and verifying the content of the collected replica info file.
// This check verifies that:
//
// we successfully iterate over the requested stores,
// data is written in the expected location,
// data contains info only about the requested stores.
func TestCollectInfoFromMultipleStores(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}},
1: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-2"}}},
2: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-3"}}},
},
})
tc.Start(t)
defer tc.Stopper().Stop(ctx)
	// Wait for up-replication.
require.NoError(t, tc.WaitForFullReplication())
// Shutdown.
tc.Stopper().Stop(ctx)
replicaInfoFileName := dir + "/node-1.json"
c.RunWithArgs([]string{"debug", "recover", "collect-info", "--store=" + dir + "/store-1",
"--store=" + dir + "/store-2", replicaInfoFileName})
replicas, err := readReplicaInfoData([]string{replicaInfoFileName})
require.NoError(t, err, "failed to read generated replica info")
stores := map[roachpb.StoreID]interface{}{}
for _, r := range replicas.LocalInfo[0].Replicas {
stores[r.StoreID] = struct{}{}
}
require.Equal(t, 2, len(stores), "collected replicas from stores")
require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), replicas.Version,
"collected version info from stores")
}
// TestCollectInfoFromOnlineCluster verifies that given a test cluster with
// one stopped node, we can collect replica info and metadata from the
// remaining nodes using an admin recovery call.
func TestCollectInfoFromOnlineCluster(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
StoreSpecs: []base.StoreSpec{{InMemory: true}},
Insecure: true,
},
})
tc.Start(t)
defer tc.Stopper().Stop(ctx)
require.NoError(t, tc.WaitForFullReplication())
tc.ToggleReplicateQueues(false)
r := tc.ServerConn(0).QueryRow("select count(*) from crdb_internal.ranges_no_leases")
var totalRanges int
require.NoError(t, r.Scan(&totalRanges), "failed to query range count")
tc.StopServer(0)
replicaInfoFileName := dir + "/all-nodes.json"
c.RunWithArgs([]string{
"debug",
"recover",
"collect-info",
"--insecure",
"--host",
tc.Server(2).AdvRPCAddr(),
replicaInfoFileName,
})
replicas, err := readReplicaInfoData([]string{replicaInfoFileName})
require.NoError(t, err, "failed to read generated replica info")
stores := map[roachpb.StoreID]interface{}{}
totalReplicas := 0
for _, li := range replicas.LocalInfo {
for _, r := range li.Replicas {
stores[r.StoreID] = struct{}{}
}
totalReplicas += len(li.Replicas)
}
require.Equal(t, 2, len(stores), "collected replicas from stores")
require.Equal(t, 2, len(replicas.LocalInfo), "collected info is not split by node")
require.Equal(t, totalRanges*2, totalReplicas, "number of collected replicas")
require.Equal(t, totalRanges, len(replicas.Descriptors),
"number of collected descriptors from metadata")
require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), replicas.Version,
"collected version info from stores")
}
// TestLossOfQuorumRecovery performs a sanity check on the end to end recovery workflow.
// This test doesn't try to validate all possible test cases, but instead checks that
// artifacts are correctly produced and overall cluster recovery can be performed
// where it would be completely broken otherwise.
func TestLossOfQuorumRecovery(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderDeadlock(t, "slow under deadlock")
ctx := context.Background()
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
	// The test cluster contains 3 nodes that we turn into a single-node
	// cluster using loss of quorum recovery. After the cluster is stopped, a
	// single node would not be able to make progress, but we apply the
	// recovery procedure and mark the replicas on node 1 as designated
	// survivors. After that, starting the single node should succeed.
tcBefore := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}},
},
})
tcBefore.Start(t)
s := sqlutils.MakeSQLRunner(tcBefore.Conns[0])
s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'")
defer tcBefore.Stopper().Stop(ctx)
// We use scratch range to test special case for pending update on the
// descriptor which has to be cleaned up before recovery could proceed.
// For that we'll ensure it is not empty and then put an intent. After
// recovery, we'll check that the range is still accessible for writes as
// normal.
sk := tcBefore.ScratchRange(t)
require.NoError(t,
tcBefore.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value"),
"failed to write value to scratch range")
createIntentOnRangeDescriptor(ctx, t, tcBefore, sk)
node1ID := tcBefore.Servers[0].NodeID()
// Now that stores are prepared and replicated we can shut down cluster
// and perform store manipulations.
tcBefore.Stopper().Stop(ctx)
server1StoreDir := dir + "/store-1"
replicaInfoFileName := dir + "/node-1.json"
c.RunWithArgs(
[]string{"debug", "recover", "collect-info", "--store=" + server1StoreDir,
replicaInfoFileName})
// Generate recovery plan and try to verify that plan file was generated and contains
// meaningful data. This is not strictly necessary for verifying end-to-end flow, but
// having assertions on generated data helps to identify which stage of pipeline broke
// if test fails.
planFile := dir + "/recovery-plan.json"
out, err := c.RunWithCaptureArgs(
[]string{"debug", "recover", "make-plan", "--confirm=y", "--plan=" + planFile,
replicaInfoFileName})
require.NoError(t, err, "failed to run make-plan")
require.Contains(t, out, fmt.Sprintf("- node n%d", node1ID),
"planner didn't provide correct apply instructions")
require.FileExists(t, planFile, "generated plan file")
planFileContent, err := os.ReadFile(planFile)
require.NoError(t, err, "test infra failed, can't open created plan file")
plan := loqrecoverypb.ReplicaUpdatePlan{}
jsonpb := protoutil.JSONPb{}
require.NoError(t, jsonpb.Unmarshal(planFileContent, &plan),
"failed to deserialize replica recovery plan")
require.NotEmpty(t, plan.Updates, "resulting plan contains no updates")
out, err = c.RunWithCaptureArgs(
[]string{"debug", "recover", "apply-plan", "--confirm=y", "--store=" + server1StoreDir,
planFile})
require.NoError(t, err, "failed to run apply plan")
	// Check that there was at least one mention of a replica being promoted.
require.Contains(t, out, "will be updated", "no replica updates were recorded")
require.Contains(t, out, fmt.Sprintf("Updated store(s): s%d", node1ID),
"apply plan was not executed on requested node")
tcAfter := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ReplicationMode: base.ReplicationManual,
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}},
},
})
	// NB: If recovery is not performed, the new cluster will just hang on
	// startup. This is caused by the liveness range becoming unavailable and
	// preventing any progress. So it is likely that the test will time out if
	// the basic workflow fails.
tcAfter.Start(t)
defer tcAfter.Stopper().Stop(ctx)
// In the new cluster, we will still have nodes 2 and 3 remaining from the first
// attempt. That would increase number of replicas on system ranges to 5 and we
// would not be able to upreplicate properly. So we need to decommission old nodes
// first before proceeding.
adminClient := tcAfter.Server(0).GetAdminClient(t)
require.NoError(t, runDecommissionNodeImpl(
ctx, adminClient, nodeDecommissionWaitNone, nodeDecommissionChecksSkip, false,
[]roachpb.NodeID{roachpb.NodeID(2), roachpb.NodeID(3)}, tcAfter.Server(0).NodeID()),
"Failed to decommission removed nodes")
for i := 0; i < len(tcAfter.Servers); i++ {
require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error {
store.SetReplicateQueueActive(true)
return nil
}), "Failed to activate replication queue")
}
require.NoError(t, tcAfter.WaitForZoneConfigPropagation(),
"Failed to ensure zone configs are propagated")
require.NoError(t, tcAfter.WaitForFullReplication(), "Failed to perform full replication")
for i := 0; i < len(tcAfter.Servers); i++ {
require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error {
return store.ForceConsistencyQueueProcess()
}), "Failed to force replicas to consistency queue")
}
// As a validation step we will just pick one range and get its replicas to see
// if they were up-replicated to the new nodes.
s = sqlutils.MakeSQLRunner(tcAfter.Conns[0])
r := s.QueryRow(t, "select replicas from crdb_internal.ranges limit 1")
var replicas string
r.Scan(&replicas)
require.Equal(t, "{1,4,5}", replicas, "Replicas after loss of quorum recovery")
// Validate that rangelog is updated by recovery records after cluster restarts.
testutils.SucceedsSoon(t, func() error {
r := s.QueryRow(t,
`select count(*) from system.rangelog where "eventType" = 'unsafe_quorum_recovery'`)
var recoveries int
r.Scan(&recoveries)
if recoveries != len(plan.Updates) {
return errors.Errorf("found %d recovery events while expecting %d", recoveries,
len(plan.Updates))
}
return nil
})
	// We were using the scratch range to test cleanup of a pending transaction
	// on the range descriptor key. We want to verify that the range is still
	// writable after recovery, i.e. that recovery succeeded.
require.NoError(t,
tcAfter.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value2"),
"failed to write value to scratch range after recovery")
}
// TestStageVersionCheck verifies that we can force a plan with a different
// internal version onto the cluster. To do this, we create a plan with an
// internal version above the current one but matching major and minor. Then we
// check that staging fails and that the force flag updates the plan version to
// match the local node.
func TestStageVersionCheck(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderDeadlock(t, "slow under deadlock")
ctx := context.Background()
_, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
listenerReg := listenerutil.NewListenerRegistry()
defer listenerReg.Close()
storeReg := server.NewStickyVFSRegistry()
tc := testcluster.NewTestCluster(t, 4, base.TestClusterArgs{
ReplicationMode: base.ReplicationManual,
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
StickyVFSRegistry: storeReg,
},
},
StoreSpecs: []base.StoreSpec{
{InMemory: true, StickyVFSID: "1"},
},
},
},
ReusableListenerReg: listenerReg,
})
tc.Start(t)
defer tc.Stopper().Stop(ctx)
tc.StopServer(3)
adminClient := tc.Server(0).GetAdminClient(t)
v := clusterversion.ByKey(clusterversion.BinaryVersionKey)
v.Internal++
// To avoid crafting real replicas we use StaleLeaseholderNodeIDs to force
// node to stage plan for verification.
p := loqrecoverypb.ReplicaUpdatePlan{
PlanID: uuid.FastMakeV4(),
Version: v,
ClusterID: tc.Server(0).StorageClusterID().String(),
DecommissionedNodeIDs: []roachpb.NodeID{4},
StaleLeaseholderNodeIDs: []roachpb.NodeID{1},
}
// Attempts to stage plan with different internal version must fail.
_, err := adminClient.RecoveryStagePlan(ctx, &serverpb.RecoveryStagePlanRequest{
Plan: &p,
AllNodes: true,
ForcePlan: false,
ForceLocalInternalVersion: false,
})
require.ErrorContains(t, err, "doesn't match cluster active version")
// Enable "stuck upgrade bypass" to stage plan on the cluster.
_, err = adminClient.RecoveryStagePlan(ctx, &serverpb.RecoveryStagePlanRequest{
Plan: &p,
AllNodes: true,
ForcePlan: false,
ForceLocalInternalVersion: true,
})
require.NoError(t, err, "force local must fix incorrect version")
// Check that stored plan has version matching cluster version.
ps := loqrecovery.NewPlanStore("", storeReg.Get("1"))
p, ok, err := ps.LoadPlan()
require.NoError(t, err, "failed to read node 0 plan")
require.True(t, ok, "plan was not staged")
require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), p.Version,
"plan version was not updated")
}
func createIntentOnRangeDescriptor(
ctx context.Context, t *testing.T, tcBefore *testcluster.TestCluster, sk roachpb.Key,
) {
txn := kv.NewTxn(ctx, tcBefore.Servers[0].DB(), 1)
var desc roachpb.RangeDescriptor
// Pick one of the predefined split points.
rdKey := keys.RangeDescriptorKey(roachpb.RKey(sk))
if err := txn.GetProto(ctx, rdKey, &desc); err != nil {
t.Fatal(err)
}
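	// Modify the descriptor and write it back without ever committing the
	// transaction, which leaves an intent on the range descriptor key.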
desc.NextReplicaID++
if err := txn.Put(ctx, rdKey, &desc); err != nil {
t.Fatal(err)
}
// At this point the intent has been written to Pebble but this
// write was not synced (only the raft log append was synced). We
// need to force another sync, but we're far from the storage
// layer here so the easiest thing to do is simply perform a
// second write. This will force the first write to be persisted
// to disk (the second write may or may not make it to disk due to
// timing).
desc.NextReplicaID++
if err := txn.Put(ctx, rdKey, &desc); err != nil {
t.Fatal(err)
}
}
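// TestHalfOnlineLossOfQuorumRecovery performs a sanity check of the
// half-online recovery workflow: replica info is collected and the plan is
// staged through a live node, and the staged plan is applied when that node
// is restarted.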
func TestHalfOnlineLossOfQuorumRecovery(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderDeadlock(t, "slow under deadlock")
ctx := context.Background()
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
listenerReg := listenerutil.NewListenerRegistry()
defer listenerReg.Close()
	// The test cluster contains 3 nodes that we turn into a single-node
	// cluster using loss of quorum recovery. To do that, we terminate
	// two nodes and run recovery on the remaining one. Restarting that node
	// should bring it back to a healthy (but under-replicated) state.
	// Note that we inject reusable listeners into all nodes to prevent tests
	// running in parallel from taking over the ports of stopped nodes and
	// responding to the gateway node with errors.
// TODO(oleg): Make test run with 7 nodes to exercise cases where multiple
// replicas survive. Current startup and allocator behaviour would make
// this test flaky.
sa := make(map[int]base.TestServerArgs)
for i := 0; i < 3; i++ {
sa[i] = base.TestServerArgs{
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
StickyVFSRegistry: server.NewStickyVFSRegistry(),
},
},
StoreSpecs: []base.StoreSpec{
{
InMemory: true,
},
},
}
}
tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,
},
ReusableListenerReg: listenerReg,
ServerArgsPerNode: sa,
})
tc.Start(t)
s := sqlutils.MakeSQLRunner(tc.Conns[0])
s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'")
defer tc.Stopper().Stop(ctx)
// We use scratch range to test special case for pending update on the
// descriptor which has to be cleaned up before recovery could proceed.
// For that we'll ensure it is not empty and then put an intent. After
// recovery, we'll check that the range is still accessible for writes as
// normal.
sk := tc.ScratchRange(t)
require.NoError(t,
tc.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value"),
"failed to write value to scratch range")
createIntentOnRangeDescriptor(ctx, t, tc, sk)
node1ID := tc.Servers[0].NodeID()
// Now that stores are prepared and replicated we can shut down cluster
// and perform store manipulations.
tc.StopServer(1)
tc.StopServer(2)
// Generate recovery plan and try to verify that plan file was generated and contains
// meaningful data. This is not strictly necessary for verifying end-to-end flow, but
// having assertions on generated data helps to identify which stage of pipeline broke
// if test fails.
planFile := dir + "/recovery-plan.json"
out, err := c.RunWithCaptureArgs(
[]string{
"debug",
"recover",
"make-plan",
"--confirm=y",
"--certs-dir=test_certs",
"--host=" + tc.Server(0).AdvRPCAddr(),
"--plan=" + planFile,
})
require.NoError(t, err, "failed to run make-plan")
require.Contains(t, out, fmt.Sprintf("- node n%d", node1ID),
"planner didn't provide correct apply instructions")
require.FileExists(t, planFile, "generated plan file")
planFileContent, err := os.ReadFile(planFile)
require.NoError(t, err, "test infra failed, can't open created plan file")
plan := loqrecoverypb.ReplicaUpdatePlan{}
jsonpb := protoutil.JSONPb{}
require.NoError(t, jsonpb.Unmarshal(planFileContent, &plan),
"failed to deserialize replica recovery plan")
require.NotEmpty(t, plan.Updates, "resulting plan contains no updates")
out, err = c.RunWithCaptureArgs(
[]string{
"debug", "recover", "apply-plan",
"--certs-dir=test_certs",
"--host=" + tc.Server(0).AdvRPCAddr(),
"--confirm=y", planFile,
})
require.NoError(t, err, "failed to run apply plan")
	// Check that there was at least one mention of a replica being promoted.
require.Contains(t, out, "updating replica", "no replica updates were recorded")
require.Contains(t, out,
fmt.Sprintf("Plan staged. To complete recovery restart nodes n%d.", node1ID),
"apply plan failed to stage on expected nodes")
// Verify plan is staged on nodes
out, err = c.RunWithCaptureArgs(
[]string{
"debug", "recover", "verify",
"--certs-dir=test_certs",
"--host=" + tc.Server(0).AdvRPCAddr(),
planFile,
})
require.NoError(t, err, "failed to run verify plan")
require.Contains(t, out, "ERROR: loss of quorum recovery is not finished yet")
tc.StopServer(0)
	// NB: If recovery is not performed, the server will just hang on startup.
	// This is caused by the liveness range becoming unavailable and preventing
	// any progress. So it is likely that the test will time out if the basic
	// workflow fails.
require.NoError(t, tc.RestartServer(0), "restart failed")
s = sqlutils.MakeSQLRunner(tc.Conns[0])
	// Verify that post-start cleanup performed node decommissioning, which
	// prevents the old nodes from rejoining.
ac := tc.GetAdminClient(t, 0)
testutils.SucceedsSoon(t, func() error {
dr, err := ac.DecommissionStatus(ctx,
&serverpb.DecommissionStatusRequest{NodeIDs: []roachpb.NodeID{2, 3}})
if err != nil {
return err
}
for _, s := range dr.Status {
if s.Membership != livenesspb.MembershipStatus_DECOMMISSIONED {
return errors.Newf("expecting n%d to be decommissioned", s.NodeID)
}
}
return nil
})
// Validate that rangelog is updated by recovery records after cluster restarts.
testutils.SucceedsSoon(t, func() error {
r := s.QueryRow(t,
`select count(*) from system.rangelog where "eventType" = 'unsafe_quorum_recovery'`)
var recoveries int
r.Scan(&recoveries)
if recoveries != len(plan.Updates) {
return errors.Errorf("found %d recovery events while expecting %d", recoveries,
len(plan.Updates))
}
return nil
})
// Verify recovery complete.
out, err = c.RunWithCaptureArgs(
[]string{
"debug", "recover", "verify",
"--certs-dir=test_certs",
"--host=" + tc.Server(0).AdvRPCAddr(),
planFile,
})
require.NoError(t, err, "failed to run verify plan")
require.Contains(t, out, "Loss of quorum recovery is complete.")
	// We were using the scratch range to test cleanup of a pending transaction
	// on the range descriptor key. We want to verify that the range is still
	// writable after recovery, i.e. that recovery succeeded.
require.NoError(t,
tc.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value2"),
"failed to write value to scratch range after recovery")
// Finally split scratch range to ensure metadata ranges are recovered.
_, _, err = tc.Server(0).SplitRange(testutils.MakeKey(sk, []byte{42}))
require.NoError(t, err, "failed to split range after recovery")
}
func TestUpdatePlanVsClusterDiff(t *testing.T) {
defer leaktest.AfterTest(t)()
var empty uuid.UUID
planID, _ := uuid.FromString("123e4567-e89b-12d3-a456-426614174000")
otherPlanID, _ := uuid.FromString("123e4567-e89b-12d3-a456-426614174001")
applyTime, _ := time.Parse(time.RFC3339, "2023-01-24T10:30:00Z")
status := func(id roachpb.NodeID, pending, applied uuid.UUID, err string) loqrecoverypb.NodeRecoveryStatus {
s := loqrecoverypb.NodeRecoveryStatus{
NodeID: id,
}
if !pending.Equal(empty) {
s.PendingPlanID = &pending
}
if !applied.Equal(empty) {
s.AppliedPlanID = &applied
s.ApplyTimestamp = &applyTime
}
s.Error = err
return s
}
for _, d := range []struct {
name string
updatedNodes []int
staleLeases []int
status []loqrecoverypb.NodeRecoveryStatus
pending int
errors int
report []string
}{
{
name: "after staging",
updatedNodes: []int{1, 2},
staleLeases: []int{3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, planID, empty, ""),
status(3, planID, empty, ""),
},
pending: 3,
report: []string{
" plan application pending on node n1",
" plan application pending on node n2",
" plan application pending on node n3",
},
},
{
name: "partially applied",
updatedNodes: []int{1, 2, 3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, empty, planID, ""),
status(3, planID, empty, ""),
},
pending: 2,
report: []string{
" plan application pending on node n1",
" plan applied successfully on node n2",
" plan application pending on node n3",
},
},
{
name: "fully applied",
updatedNodes: []int{1, 2, 3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, empty, planID, ""),
status(2, empty, planID, ""),
status(3, empty, planID, ""),
},
report: []string{
" plan applied successfully on node n1",
" plan applied successfully on node n2",
" plan applied successfully on node n3",
},
},
{
name: "staging lost no node",
updatedNodes: []int{1, 2, 3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(3, planID, empty, ""),
},
pending: 2,
errors: 1,
report: []string{
" plan application pending on node n1",
" plan application pending on node n3",
" failed to find node n2 where plan must be staged",
},
},
{
name: "staging lost no plan",
updatedNodes: []int{1, 2},
staleLeases: []int{3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, planID, empty, ""),
status(3, empty, empty, ""),
},
pending: 2,
errors: 1,
report: []string{
" plan application pending on node n1",
" plan application pending on node n2",
" failed to find staged plan on node n3",
},
},
{
name: "partial failure",
updatedNodes: []int{1, 2, 3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, empty, planID, "found stale replica"),
status(3, planID, empty, ""),
},
pending: 2,
errors: 1,
report: []string{
" plan application pending on node n1",
" plan application failed on node n2: found stale replica",
" plan application pending on node n3",
},
},
{
name: "no plan",
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, empty, planID, "found stale replica"),
status(3, empty, otherPlanID, ""),
},
report: []string{
" node n1 staged plan: 123e4567-e89b-12d3-a456-426614174000",
" node n2 failed to apply plan 123e4567-e89b-12d3-a456-426614174000: found stale replica",
" node n3 applied plan: 123e4567-e89b-12d3-a456-426614174001 at 2023-01-24 10:30:00 +0000 UTC",
},
},
{
name: "wrong plan",
updatedNodes: []int{1, 2},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, otherPlanID, empty, ""),
status(3, otherPlanID, empty, ""),
},
pending: 1,
errors: 2,
report: []string{
" plan application pending on node n1",
" unexpected staged plan 123e4567-e89b-12d3-a456-426614174001 on node n2",
" unexpected staged plan 123e4567-e89b-12d3-a456-426614174001 on node n3",
},
},
} {
t.Run(d.name, func(t *testing.T) {
plan := loqrecoverypb.ReplicaUpdatePlan{
PlanID: planID,
}
// Plan will contain single replica update for each requested node.
rangeSeq := 1
for _, id := range d.updatedNodes {
plan.Updates = append(plan.Updates, loqrecoverypb.ReplicaUpdate{
RangeID: roachpb.RangeID(rangeSeq),
StartKey: nil,
OldReplicaID: roachpb.ReplicaID(1),
NewReplica: roachpb.ReplicaDescriptor{
NodeID: roachpb.NodeID(id),
StoreID: roachpb.StoreID(id),
ReplicaID: roachpb.ReplicaID(rangeSeq + 17),
},
NextReplicaID: roachpb.ReplicaID(rangeSeq + 18),
})
}
for _, id := range d.staleLeases {
plan.StaleLeaseholderNodeIDs = append(plan.StaleLeaseholderNodeIDs, roachpb.NodeID(id))
}
diff := diffPlanWithNodeStatus(plan, d.status)
require.Equal(t, d.pending, diff.pending, "number of pending changes")
require.Equal(t, d.errors, diff.errors, "number of node errors")
if d.report != nil {
require.Equal(t, len(d.report), len(diff.report), "number of lines in diff")
for i := range d.report {
require.Equal(t, d.report[i], diff.report[i], "wrong line %d of report", i)
}
}
})
}
}
func TestTruncateKeyOutput(t *testing.T) {
defer leaktest.AfterTest(t)()
for _, d := range []struct {
len uint
result string
}{
{
len: 13,
result: "/System/No...",
},
{
len: 30,
result: "/System/NodeLiveness",
},
{
len: 3,
result: "/Sy",
},
{
len: 4,
result: "/...",
},
} {
t.Run("", func(t *testing.T) {
helper := outputFormatHelper{
maxPrintedKeyLength: d.len,
}
require.Equal(t, d.result, helper.formatKey(keys.NodeLivenessPrefix))
})
}
}
func TestTruncateSpanOutput(t *testing.T) {
defer leaktest.AfterTest(t)()
for _, d := range []struct {
len uint
result string
}{
{
len: 30,
result: "/System/{NodeLiveness-Syste...",
},
{
len: 90,
result: "/System/{NodeLiveness-SystemSpanConfigKeys}",
},
{
len: 3,
result: "/Sy",
},
{
len: 4,
result: "/...",
},
} {
t.Run("", func(t *testing.T) {
helper := outputFormatHelper{
maxPrintedKeyLength: d.len,
}
require.Equal(t, d.result, helper.formatSpan(roachpb.Span{
Key: keys.NodeLivenessPrefix,
EndKey: keys.SystemSpanConfigPrefix,
}))
})
}
}
| pkg/cli/debug_recover_loss_of_quorum_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.14328964054584503,
0.002750797662883997,
0.00016270397463813424,
0.00017334203585051,
0.015883438289165497
] |
{
"id": 2,
"code_window": [
"\t// Number of nodes. Increasing this will make the test flaky as written\n",
"\t// because it relies on finding r1 on n1.\n",
"\tconst n = 3\n",
"\n",
"\tclusterArgs := base.TestClusterArgs{\n",
"\t\tServerArgsPerNode: map[int]base.TestServerArgs{},\n",
"\t}\n",
"\tvar storePaths []string\n",
"\tfor i := 0; i < n; i++ {\n",
"\t\targs := base.TestServerArgs{}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tServerArgs: base.TestServerArgs{\n",
"\t\t\t// This logic is specific to the storage layer.\n",
"\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n",
"\t\t},\n"
],
"file_path": "pkg/cli/debug_check_store_test.go",
"type": "add",
"edit_start_line_idx": 46
} | // Copyright 2021 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package backupresolver_test
import (
"os"
"testing"
"github.com/cockroachdb/cockroach/pkg/ccl"
_ "github.com/cockroachdb/cockroach/pkg/ccl/storageccl"
"github.com/cockroachdb/cockroach/pkg/security/securityassets"
"github.com/cockroachdb/cockroach/pkg/security/securitytest"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
)
func TestMain(m *testing.M) {
defer ccl.TestingEnableEnterprise()()
securityassets.SetLoader(securitytest.EmbeddedAssets)
randutil.SeedForTests()
serverutils.InitTestServerFactory(server.TestServerFactory)
serverutils.InitTestClusterFactory(testcluster.TestClusterFactory)
os.Exit(m.Run())
}
//go:generate ../../../util/leaktest/add-leaktest.sh *_test.go
| pkg/ccl/backupccl/backupresolver/main_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.00046761412522755563,
0.0002486467419657856,
0.00017181171278934926,
0.00017758057219907641,
0.00012645209790207446
] |
{
"id": 2,
"code_window": [
"\t// Number of nodes. Increasing this will make the test flaky as written\n",
"\t// because it relies on finding r1 on n1.\n",
"\tconst n = 3\n",
"\n",
"\tclusterArgs := base.TestClusterArgs{\n",
"\t\tServerArgsPerNode: map[int]base.TestServerArgs{},\n",
"\t}\n",
"\tvar storePaths []string\n",
"\tfor i := 0; i < n; i++ {\n",
"\t\targs := base.TestServerArgs{}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tServerArgs: base.TestServerArgs{\n",
"\t\t\t// This logic is specific to the storage layer.\n",
"\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n",
"\t\t},\n"
],
"file_path": "pkg/cli/debug_check_store_test.go",
"type": "add",
"edit_start_line_idx": 46
} | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package storage_test
//go:generate ../util/leaktest/add-leaktest.sh *_test.go
| pkg/storage/main_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.0001812009431887418,
0.00017880677478387952,
0.00017641260637901723,
0.00017880677478387952,
0.0000023941684048622847
] |
{
"id": 2,
"code_window": [
"\t// Number of nodes. Increasing this will make the test flaky as written\n",
"\t// because it relies on finding r1 on n1.\n",
"\tconst n = 3\n",
"\n",
"\tclusterArgs := base.TestClusterArgs{\n",
"\t\tServerArgsPerNode: map[int]base.TestServerArgs{},\n",
"\t}\n",
"\tvar storePaths []string\n",
"\tfor i := 0; i < n; i++ {\n",
"\t\targs := base.TestServerArgs{}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tServerArgs: base.TestServerArgs{\n",
"\t\t\t// This logic is specific to the storage layer.\n",
"\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n",
"\t\t},\n"
],
"file_path": "pkg/cli/debug_check_store_test.go",
"type": "add",
"edit_start_line_idx": 46
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "regionlatency",
srcs = ["region_latencies.go"],
importpath = "github.com/cockroachdb/cockroach/pkg/testutils/serverutils/regionlatency",
visibility = ["//visibility:public"],
deps = [
"//pkg/rpc",
"//pkg/server",
"//pkg/sql",
"//pkg/testutils/serverutils",
"@com_github_cockroachdb_errors//:errors",
],
)
| pkg/testutils/serverutils/regionlatency/BUILD.bazel | 0 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.00017823050438892096,
0.00017655009287409484,
0.0001748696668073535,
0.00017655009287409484,
0.0000016804187907837331
] |
{
"id": 3,
"code_window": [
"\t})\n",
"\tdefer c.Cleanup()\n",
"\n",
"\ttc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{\n",
"\t\tServerArgsPerNode: map[int]base.TestServerArgs{\n",
"\t\t\t0: {StoreSpecs: []base.StoreSpec{{Path: dir + \"/store-1\"}}},\n",
"\t\t\t1: {StoreSpecs: []base.StoreSpec{{Path: dir + \"/store-2\"}}},\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tServerArgs: base.TestServerArgs{\n",
"\t\t\t// This logic is specific to the storage layer.\n",
"\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n",
"\t\t},\n"
],
"file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go",
"type": "add",
"edit_start_line_idx": 65
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package cli
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery/loqrecoverypb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/server/serverpb"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/listenerutil"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
)
// TestCollectInfoFromMultipleStores performs basic sanity checks on replica info collection.
// This is done by running three node cluster with disk backed storage,
// stopping it and verifying content of collected replica info file.
// This check verifies that:
//
// we successfully iterate requested stores,
// data is written in expected location,
// data contains info only about stores requested.
func TestCollectInfoFromMultipleStores(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}},
1: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-2"}}},
2: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-3"}}},
},
})
tc.Start(t)
defer tc.Stopper().Stop(ctx)
	// Wait for up-replication.
require.NoError(t, tc.WaitForFullReplication())
// Shutdown.
tc.Stopper().Stop(ctx)
replicaInfoFileName := dir + "/node-1.json"
c.RunWithArgs([]string{"debug", "recover", "collect-info", "--store=" + dir + "/store-1",
"--store=" + dir + "/store-2", replicaInfoFileName})
replicas, err := readReplicaInfoData([]string{replicaInfoFileName})
require.NoError(t, err, "failed to read generated replica info")
stores := map[roachpb.StoreID]interface{}{}
for _, r := range replicas.LocalInfo[0].Replicas {
stores[r.StoreID] = struct{}{}
}
require.Equal(t, 2, len(stores), "collected replicas from stores")
require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), replicas.Version,
"collected version info from stores")
}
// TestCollectInfoFromOnlineCluster verifies that given a test cluster with
// one stopped node, we can collect replica info and metadata from remaining
// nodes using an admin recovery call.
func TestCollectInfoFromOnlineCluster(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
StoreSpecs: []base.StoreSpec{{InMemory: true}},
Insecure: true,
},
})
tc.Start(t)
defer tc.Stopper().Stop(ctx)
require.NoError(t, tc.WaitForFullReplication())
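	// Keep replica placement stable while we count ranges and collect
	// replica info below.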
tc.ToggleReplicateQueues(false)
r := tc.ServerConn(0).QueryRow("select count(*) from crdb_internal.ranges_no_leases")
var totalRanges int
require.NoError(t, r.Scan(&totalRanges), "failed to query range count")
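	// Stop one node; replica info and metadata are collected from the
	// remaining live nodes through the admin endpoint below.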
tc.StopServer(0)
replicaInfoFileName := dir + "/all-nodes.json"
c.RunWithArgs([]string{
"debug",
"recover",
"collect-info",
"--insecure",
"--host",
tc.Server(2).AdvRPCAddr(),
replicaInfoFileName,
})
replicas, err := readReplicaInfoData([]string{replicaInfoFileName})
require.NoError(t, err, "failed to read generated replica info")
stores := map[roachpb.StoreID]interface{}{}
totalReplicas := 0
for _, li := range replicas.LocalInfo {
for _, r := range li.Replicas {
stores[r.StoreID] = struct{}{}
}
totalReplicas += len(li.Replicas)
}
require.Equal(t, 2, len(stores), "collected replicas from stores")
require.Equal(t, 2, len(replicas.LocalInfo), "collected info is not split by node")
require.Equal(t, totalRanges*2, totalReplicas, "number of collected replicas")
require.Equal(t, totalRanges, len(replicas.Descriptors),
"number of collected descriptors from metadata")
require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), replicas.Version,
"collected version info from stores")
}
// TestLossOfQuorumRecovery performs a sanity check on the end-to-end recovery
// workflow. This test doesn't try to validate all possible test cases, but
// instead checks that artifacts are correctly produced and overall cluster
// recovery can be performed where the cluster would otherwise be completely
// broken.
func TestLossOfQuorumRecovery(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderDeadlock(t, "slow under deadlock")
ctx := context.Background()
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
	// The test cluster contains 3 nodes that we turn into a single-node
	// cluster using loss of quorum recovery. After the cluster is stopped, a
	// single node would not be able to make progress, but we apply the
	// recovery procedure and mark the replicas on node 1 as designated
	// survivors. After that, starting the single node should succeed.
tcBefore := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}},
},
})
tcBefore.Start(t)
s := sqlutils.MakeSQLRunner(tcBefore.Conns[0])
s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'")
defer tcBefore.Stopper().Stop(ctx)
// We use scratch range to test special case for pending update on the
// descriptor which has to be cleaned up before recovery could proceed.
// For that we'll ensure it is not empty and then put an intent. After
// recovery, we'll check that the range is still accessible for writes as
// normal.
sk := tcBefore.ScratchRange(t)
require.NoError(t,
tcBefore.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value"),
"failed to write value to scratch range")
createIntentOnRangeDescriptor(ctx, t, tcBefore, sk)
node1ID := tcBefore.Servers[0].NodeID()
// Now that stores are prepared and replicated we can shut down cluster
// and perform store manipulations.
tcBefore.Stopper().Stop(ctx)
server1StoreDir := dir + "/store-1"
replicaInfoFileName := dir + "/node-1.json"
c.RunWithArgs(
[]string{"debug", "recover", "collect-info", "--store=" + server1StoreDir,
replicaInfoFileName})
// Generate recovery plan and try to verify that plan file was generated and contains
// meaningful data. This is not strictly necessary for verifying end-to-end flow, but
// having assertions on generated data helps to identify which stage of pipeline broke
// if test fails.
planFile := dir + "/recovery-plan.json"
out, err := c.RunWithCaptureArgs(
[]string{"debug", "recover", "make-plan", "--confirm=y", "--plan=" + planFile,
replicaInfoFileName})
require.NoError(t, err, "failed to run make-plan")
require.Contains(t, out, fmt.Sprintf("- node n%d", node1ID),
"planner didn't provide correct apply instructions")
require.FileExists(t, planFile, "generated plan file")
planFileContent, err := os.ReadFile(planFile)
require.NoError(t, err, "test infra failed, can't open created plan file")
plan := loqrecoverypb.ReplicaUpdatePlan{}
jsonpb := protoutil.JSONPb{}
require.NoError(t, jsonpb.Unmarshal(planFileContent, &plan),
"failed to deserialize replica recovery plan")
require.NotEmpty(t, plan.Updates, "resulting plan contains no updates")
out, err = c.RunWithCaptureArgs(
[]string{"debug", "recover", "apply-plan", "--confirm=y", "--store=" + server1StoreDir,
planFile})
require.NoError(t, err, "failed to run apply plan")
	// Check that there was at least one mention of a replica being promoted.
require.Contains(t, out, "will be updated", "no replica updates were recorded")
require.Contains(t, out, fmt.Sprintf("Updated store(s): s%d", node1ID),
"apply plan was not executed on requested node")
tcAfter := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ReplicationMode: base.ReplicationManual,
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}},
},
})
	// NB: If recovery is not performed, the new cluster will just hang on
	// startup. This is caused by the liveness range becoming unavailable and
	// preventing any progress. So it is likely that the test will time out if
	// the basic workflow fails.
tcAfter.Start(t)
defer tcAfter.Stopper().Stop(ctx)
// In the new cluster, we will still have nodes 2 and 3 remaining from the first
// attempt. That would increase number of replicas on system ranges to 5 and we
// would not be able to upreplicate properly. So we need to decommission old nodes
// first before proceeding.
adminClient := tcAfter.Server(0).GetAdminClient(t)
require.NoError(t, runDecommissionNodeImpl(
ctx, adminClient, nodeDecommissionWaitNone, nodeDecommissionChecksSkip, false,
[]roachpb.NodeID{roachpb.NodeID(2), roachpb.NodeID(3)}, tcAfter.Server(0).NodeID()),
"Failed to decommission removed nodes")
for i := 0; i < len(tcAfter.Servers); i++ {
require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error {
store.SetReplicateQueueActive(true)
return nil
}), "Failed to activate replication queue")
}
require.NoError(t, tcAfter.WaitForZoneConfigPropagation(),
"Failed to ensure zone configs are propagated")
require.NoError(t, tcAfter.WaitForFullReplication(), "Failed to perform full replication")
for i := 0; i < len(tcAfter.Servers); i++ {
require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error {
return store.ForceConsistencyQueueProcess()
}), "Failed to force replicas to consistency queue")
}
// As a validation step we will just pick one range and get its replicas to see
// if they were up-replicated to the new nodes.
s = sqlutils.MakeSQLRunner(tcAfter.Conns[0])
r := s.QueryRow(t, "select replicas from crdb_internal.ranges limit 1")
var replicas string
r.Scan(&replicas)
require.Equal(t, "{1,4,5}", replicas, "Replicas after loss of quorum recovery")
// Validate that rangelog is updated by recovery records after cluster restarts.
testutils.SucceedsSoon(t, func() error {
r := s.QueryRow(t,
`select count(*) from system.rangelog where "eventType" = 'unsafe_quorum_recovery'`)
var recoveries int
r.Scan(&recoveries)
if recoveries != len(plan.Updates) {
return errors.Errorf("found %d recovery events while expecting %d", recoveries,
len(plan.Updates))
}
return nil
})
	// We were using the scratch range to test cleanup of a pending transaction
	// on the range descriptor key. We want to verify that the range is still
	// writable after recovery, i.e. that recovery succeeded.
require.NoError(t,
tcAfter.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value2"),
"failed to write value to scratch range after recovery")
}
// TestStageVersionCheck verifies that we can force a plan with a different
// internal version onto the cluster. To do this, we create a plan with an
// internal version above the current one but matching major and minor. Then we
// check that staging fails and that the force flag updates the plan version to
// match the local node.
func TestStageVersionCheck(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderDeadlock(t, "slow under deadlock")
ctx := context.Background()
_, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
listenerReg := listenerutil.NewListenerRegistry()
defer listenerReg.Close()
storeReg := server.NewStickyVFSRegistry()
tc := testcluster.NewTestCluster(t, 4, base.TestClusterArgs{
ReplicationMode: base.ReplicationManual,
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
StickyVFSRegistry: storeReg,
},
},
StoreSpecs: []base.StoreSpec{
{InMemory: true, StickyVFSID: "1"},
},
},
},
ReusableListenerReg: listenerReg,
})
tc.Start(t)
defer tc.Stopper().Stop(ctx)
tc.StopServer(3)
adminClient := tc.Server(0).GetAdminClient(t)
v := clusterversion.ByKey(clusterversion.BinaryVersionKey)
v.Internal++
// To avoid crafting real replicas we use StaleLeaseholderNodeIDs to force
// node to stage plan for verification.
p := loqrecoverypb.ReplicaUpdatePlan{
PlanID: uuid.FastMakeV4(),
Version: v,
ClusterID: tc.Server(0).StorageClusterID().String(),
DecommissionedNodeIDs: []roachpb.NodeID{4},
StaleLeaseholderNodeIDs: []roachpb.NodeID{1},
}
// Attempts to stage plan with different internal version must fail.
_, err := adminClient.RecoveryStagePlan(ctx, &serverpb.RecoveryStagePlanRequest{
Plan: &p,
AllNodes: true,
ForcePlan: false,
ForceLocalInternalVersion: false,
})
require.ErrorContains(t, err, "doesn't match cluster active version")
// Enable "stuck upgrade bypass" to stage plan on the cluster.
_, err = adminClient.RecoveryStagePlan(ctx, &serverpb.RecoveryStagePlanRequest{
Plan: &p,
AllNodes: true,
ForcePlan: false,
ForceLocalInternalVersion: true,
})
require.NoError(t, err, "force local must fix incorrect version")
// Check that stored plan has version matching cluster version.
ps := loqrecovery.NewPlanStore("", storeReg.Get("1"))
p, ok, err := ps.LoadPlan()
require.NoError(t, err, "failed to read node 0 plan")
require.True(t, ok, "plan was not staged")
require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), p.Version,
"plan version was not updated")
}
func createIntentOnRangeDescriptor(
ctx context.Context, t *testing.T, tcBefore *testcluster.TestCluster, sk roachpb.Key,
) {
txn := kv.NewTxn(ctx, tcBefore.Servers[0].DB(), 1)
var desc roachpb.RangeDescriptor
// Pick one of the predefined split points.
rdKey := keys.RangeDescriptorKey(roachpb.RKey(sk))
if err := txn.GetProto(ctx, rdKey, &desc); err != nil {
t.Fatal(err)
}
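	// Modify the descriptor and write it back without ever committing the
	// transaction, which leaves an intent on the range descriptor key.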
desc.NextReplicaID++
if err := txn.Put(ctx, rdKey, &desc); err != nil {
t.Fatal(err)
}
// At this point the intent has been written to Pebble but this
// write was not synced (only the raft log append was synced). We
// need to force another sync, but we're far from the storage
// layer here so the easiest thing to do is simply perform a
// second write. This will force the first write to be persisted
// to disk (the second write may or may not make it to disk due to
// timing).
desc.NextReplicaID++
if err := txn.Put(ctx, rdKey, &desc); err != nil {
t.Fatal(err)
}
}
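// TestHalfOnlineLossOfQuorumRecovery performs a sanity check of the
// half-online recovery workflow: replica info is collected and the plan is
// staged through a live node, and the staged plan is applied when that node
// is restarted.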
func TestHalfOnlineLossOfQuorumRecovery(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderDeadlock(t, "slow under deadlock")
ctx := context.Background()
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
listenerReg := listenerutil.NewListenerRegistry()
defer listenerReg.Close()
	// The test cluster contains 3 nodes that we turn into a single-node
	// cluster using loss of quorum recovery. To do that, we terminate
	// two nodes and run recovery on the remaining one. Restarting that node
	// should bring it back to a healthy (but under-replicated) state.
	// Note that we inject reusable listeners into all nodes to prevent tests
	// running in parallel from taking over the ports of stopped nodes and
	// responding to the gateway node with errors.
// TODO(oleg): Make test run with 7 nodes to exercise cases where multiple
// replicas survive. Current startup and allocator behaviour would make
// this test flaky.
sa := make(map[int]base.TestServerArgs)
for i := 0; i < 3; i++ {
sa[i] = base.TestServerArgs{
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
StickyVFSRegistry: server.NewStickyVFSRegistry(),
},
},
StoreSpecs: []base.StoreSpec{
{
InMemory: true,
},
},
}
}
tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,
},
ReusableListenerReg: listenerReg,
ServerArgsPerNode: sa,
})
tc.Start(t)
s := sqlutils.MakeSQLRunner(tc.Conns[0])
s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'")
defer tc.Stopper().Stop(ctx)
// We use scratch range to test special case for pending update on the
// descriptor which has to be cleaned up before recovery could proceed.
// For that we'll ensure it is not empty and then put an intent. After
// recovery, we'll check that the range is still accessible for writes as
// normal.
sk := tc.ScratchRange(t)
require.NoError(t,
tc.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value"),
"failed to write value to scratch range")
createIntentOnRangeDescriptor(ctx, t, tc, sk)
node1ID := tc.Servers[0].NodeID()
// Now that stores are prepared and replicated we can shut down cluster
// and perform store manipulations.
tc.StopServer(1)
tc.StopServer(2)
// Generate recovery plan and try to verify that plan file was generated and contains
// meaningful data. This is not strictly necessary for verifying end-to-end flow, but
// having assertions on generated data helps to identify which stage of pipeline broke
// if test fails.
planFile := dir + "/recovery-plan.json"
out, err := c.RunWithCaptureArgs(
[]string{
"debug",
"recover",
"make-plan",
"--confirm=y",
"--certs-dir=test_certs",
"--host=" + tc.Server(0).AdvRPCAddr(),
"--plan=" + planFile,
})
require.NoError(t, err, "failed to run make-plan")
require.Contains(t, out, fmt.Sprintf("- node n%d", node1ID),
"planner didn't provide correct apply instructions")
require.FileExists(t, planFile, "generated plan file")
planFileContent, err := os.ReadFile(planFile)
require.NoError(t, err, "test infra failed, can't open created plan file")
plan := loqrecoverypb.ReplicaUpdatePlan{}
jsonpb := protoutil.JSONPb{}
require.NoError(t, jsonpb.Unmarshal(planFileContent, &plan),
"failed to deserialize replica recovery plan")
require.NotEmpty(t, plan.Updates, "resulting plan contains no updates")
out, err = c.RunWithCaptureArgs(
[]string{
"debug", "recover", "apply-plan",
"--certs-dir=test_certs",
"--host=" + tc.Server(0).AdvRPCAddr(),
"--confirm=y", planFile,
})
require.NoError(t, err, "failed to run apply plan")
	// Check that there was at least one mention of a replica being promoted.
require.Contains(t, out, "updating replica", "no replica updates were recorded")
require.Contains(t, out,
fmt.Sprintf("Plan staged. To complete recovery restart nodes n%d.", node1ID),
"apply plan failed to stage on expected nodes")
// Verify plan is staged on nodes
out, err = c.RunWithCaptureArgs(
[]string{
"debug", "recover", "verify",
"--certs-dir=test_certs",
"--host=" + tc.Server(0).AdvRPCAddr(),
planFile,
})
require.NoError(t, err, "failed to run verify plan")
require.Contains(t, out, "ERROR: loss of quorum recovery is not finished yet")
tc.StopServer(0)
	// NB: If recovery is not performed, the server will just hang on startup.
	// This is caused by the liveness range becoming unavailable and preventing
	// any progress. So it is likely that the test will time out if the basic
	// workflow fails.
require.NoError(t, tc.RestartServer(0), "restart failed")
s = sqlutils.MakeSQLRunner(tc.Conns[0])
	// Verify that post-start cleanup performed node decommissioning, which
	// prevents the old nodes from rejoining.
ac := tc.GetAdminClient(t, 0)
testutils.SucceedsSoon(t, func() error {
dr, err := ac.DecommissionStatus(ctx,
&serverpb.DecommissionStatusRequest{NodeIDs: []roachpb.NodeID{2, 3}})
if err != nil {
return err
}
for _, s := range dr.Status {
if s.Membership != livenesspb.MembershipStatus_DECOMMISSIONED {
return errors.Newf("expecting n%d to be decommissioned", s.NodeID)
}
}
return nil
})
// Validate that rangelog is updated by recovery records after cluster restarts.
testutils.SucceedsSoon(t, func() error {
r := s.QueryRow(t,
`select count(*) from system.rangelog where "eventType" = 'unsafe_quorum_recovery'`)
var recoveries int
r.Scan(&recoveries)
if recoveries != len(plan.Updates) {
return errors.Errorf("found %d recovery events while expecting %d", recoveries,
len(plan.Updates))
}
return nil
})
// Verify recovery complete.
out, err = c.RunWithCaptureArgs(
[]string{
"debug", "recover", "verify",
"--certs-dir=test_certs",
"--host=" + tc.Server(0).AdvRPCAddr(),
planFile,
})
require.NoError(t, err, "failed to run verify plan")
require.Contains(t, out, "Loss of quorum recovery is complete.")
	// We were using the scratch range to test cleanup of a pending transaction
	// on the range descriptor key. We want to verify that the range is still
	// writable after recovery, i.e. that recovery succeeded.
require.NoError(t,
tc.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value2"),
"failed to write value to scratch range after recovery")
// Finally split scratch range to ensure metadata ranges are recovered.
_, _, err = tc.Server(0).SplitRange(testutils.MakeKey(sk, []byte{42}))
require.NoError(t, err, "failed to split range after recovery")
}
func TestUpdatePlanVsClusterDiff(t *testing.T) {
defer leaktest.AfterTest(t)()
var empty uuid.UUID
planID, _ := uuid.FromString("123e4567-e89b-12d3-a456-426614174000")
otherPlanID, _ := uuid.FromString("123e4567-e89b-12d3-a456-426614174001")
applyTime, _ := time.Parse(time.RFC3339, "2023-01-24T10:30:00Z")
status := func(id roachpb.NodeID, pending, applied uuid.UUID, err string) loqrecoverypb.NodeRecoveryStatus {
s := loqrecoverypb.NodeRecoveryStatus{
NodeID: id,
}
if !pending.Equal(empty) {
s.PendingPlanID = &pending
}
if !applied.Equal(empty) {
s.AppliedPlanID = &applied
s.ApplyTimestamp = &applyTime
}
s.Error = err
return s
}
for _, d := range []struct {
name string
updatedNodes []int
staleLeases []int
status []loqrecoverypb.NodeRecoveryStatus
pending int
errors int
report []string
}{
{
name: "after staging",
updatedNodes: []int{1, 2},
staleLeases: []int{3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, planID, empty, ""),
status(3, planID, empty, ""),
},
pending: 3,
report: []string{
" plan application pending on node n1",
" plan application pending on node n2",
" plan application pending on node n3",
},
},
{
name: "partially applied",
updatedNodes: []int{1, 2, 3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, empty, planID, ""),
status(3, planID, empty, ""),
},
pending: 2,
report: []string{
" plan application pending on node n1",
" plan applied successfully on node n2",
" plan application pending on node n3",
},
},
{
name: "fully applied",
updatedNodes: []int{1, 2, 3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, empty, planID, ""),
status(2, empty, planID, ""),
status(3, empty, planID, ""),
},
report: []string{
" plan applied successfully on node n1",
" plan applied successfully on node n2",
" plan applied successfully on node n3",
},
},
{
name: "staging lost no node",
updatedNodes: []int{1, 2, 3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(3, planID, empty, ""),
},
pending: 2,
errors: 1,
report: []string{
" plan application pending on node n1",
" plan application pending on node n3",
" failed to find node n2 where plan must be staged",
},
},
{
name: "staging lost no plan",
updatedNodes: []int{1, 2},
staleLeases: []int{3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, planID, empty, ""),
status(3, empty, empty, ""),
},
pending: 2,
errors: 1,
report: []string{
" plan application pending on node n1",
" plan application pending on node n2",
" failed to find staged plan on node n3",
},
},
{
name: "partial failure",
updatedNodes: []int{1, 2, 3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, empty, planID, "found stale replica"),
status(3, planID, empty, ""),
},
pending: 2,
errors: 1,
report: []string{
" plan application pending on node n1",
" plan application failed on node n2: found stale replica",
" plan application pending on node n3",
},
},
{
name: "no plan",
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, empty, planID, "found stale replica"),
status(3, empty, otherPlanID, ""),
},
report: []string{
" node n1 staged plan: 123e4567-e89b-12d3-a456-426614174000",
" node n2 failed to apply plan 123e4567-e89b-12d3-a456-426614174000: found stale replica",
" node n3 applied plan: 123e4567-e89b-12d3-a456-426614174001 at 2023-01-24 10:30:00 +0000 UTC",
},
},
{
name: "wrong plan",
updatedNodes: []int{1, 2},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, otherPlanID, empty, ""),
status(3, otherPlanID, empty, ""),
},
pending: 1,
errors: 2,
report: []string{
" plan application pending on node n1",
" unexpected staged plan 123e4567-e89b-12d3-a456-426614174001 on node n2",
" unexpected staged plan 123e4567-e89b-12d3-a456-426614174001 on node n3",
},
},
} {
t.Run(d.name, func(t *testing.T) {
plan := loqrecoverypb.ReplicaUpdatePlan{
PlanID: planID,
}
// Plan will contain single replica update for each requested node.
rangeSeq := 1
for _, id := range d.updatedNodes {
plan.Updates = append(plan.Updates, loqrecoverypb.ReplicaUpdate{
RangeID: roachpb.RangeID(rangeSeq),
StartKey: nil,
OldReplicaID: roachpb.ReplicaID(1),
NewReplica: roachpb.ReplicaDescriptor{
NodeID: roachpb.NodeID(id),
StoreID: roachpb.StoreID(id),
ReplicaID: roachpb.ReplicaID(rangeSeq + 17),
},
NextReplicaID: roachpb.ReplicaID(rangeSeq + 18),
})
}
for _, id := range d.staleLeases {
plan.StaleLeaseholderNodeIDs = append(plan.StaleLeaseholderNodeIDs, roachpb.NodeID(id))
}
diff := diffPlanWithNodeStatus(plan, d.status)
require.Equal(t, d.pending, diff.pending, "number of pending changes")
require.Equal(t, d.errors, diff.errors, "number of node errors")
if d.report != nil {
require.Equal(t, len(d.report), len(diff.report), "number of lines in diff")
for i := range d.report {
require.Equal(t, d.report[i], diff.report[i], "wrong line %d of report", i)
}
}
})
}
}
func TestTruncateKeyOutput(t *testing.T) {
defer leaktest.AfterTest(t)()
for _, d := range []struct {
len uint
result string
}{
{
len: 13,
result: "/System/No...",
},
{
len: 30,
result: "/System/NodeLiveness",
},
{
len: 3,
result: "/Sy",
},
{
len: 4,
result: "/...",
},
} {
t.Run("", func(t *testing.T) {
helper := outputFormatHelper{
maxPrintedKeyLength: d.len,
}
require.Equal(t, d.result, helper.formatKey(keys.NodeLivenessPrefix))
})
}
}
func TestTruncateSpanOutput(t *testing.T) {
defer leaktest.AfterTest(t)()
for _, d := range []struct {
len uint
result string
}{
{
len: 30,
result: "/System/{NodeLiveness-Syste...",
},
{
len: 90,
result: "/System/{NodeLiveness-SystemSpanConfigKeys}",
},
{
len: 3,
result: "/Sy",
},
{
len: 4,
result: "/...",
},
} {
t.Run("", func(t *testing.T) {
helper := outputFormatHelper{
maxPrintedKeyLength: d.len,
}
require.Equal(t, d.result, helper.formatSpan(roachpb.Span{
Key: keys.NodeLivenessPrefix,
EndKey: keys.SystemSpanConfigPrefix,
}))
})
}
}
| pkg/cli/debug_recover_loss_of_quorum_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.9984055161476135,
0.16172514855861664,
0.00016206110012717545,
0.00017715126159600914,
0.3501083254814148
] |
{
"id": 3,
"code_window": [
"\t})\n",
"\tdefer c.Cleanup()\n",
"\n",
"\ttc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{\n",
"\t\tServerArgsPerNode: map[int]base.TestServerArgs{\n",
"\t\t\t0: {StoreSpecs: []base.StoreSpec{{Path: dir + \"/store-1\"}}},\n",
"\t\t\t1: {StoreSpecs: []base.StoreSpec{{Path: dir + \"/store-2\"}}},\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tServerArgs: base.TestServerArgs{\n",
"\t\t\t// This logic is specific to the storage layer.\n",
"\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n",
"\t\t},\n"
],
"file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go",
"type": "add",
"edit_start_line_idx": 65
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package server
import (
"context"
"io"
"math"
"strings"
"time"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server/serverctl"
"github.com/cockroachdb/cockroach/pkg/server/serverpb"
"github.com/cockroachdb/cockroach/pkg/server/srverrors"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/sql/sqlstats/persistedsqlstats"
"github.com/cockroachdb/cockroach/pkg/util/grpcutil"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/logtags"
"github.com/cockroachdb/redact"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
var (
queryWait = settings.RegisterDurationSetting(
settings.TenantWritable,
"server.shutdown.query_wait",
"the timeout for waiting for active queries to finish during a drain "+
"(note that the --drain-wait parameter for cockroach node drain may need adjustment "+
"after changing this setting)",
10*time.Second,
settings.NonNegativeDurationWithMaximum(10*time.Hour),
settings.WithPublic)
drainWait = settings.RegisterDurationSetting(
settings.TenantWritable,
"server.shutdown.drain_wait",
"the amount of time a server waits in an unready state before proceeding with a drain "+
"(note that the --drain-wait parameter for cockroach node drain may need adjustment "+
"after changing this setting. --drain-wait is to specify the duration of the "+
"whole draining process, while server.shutdown.drain_wait is to set the "+
"wait time for health probes to notice that the node is not ready.)",
0*time.Second,
settings.NonNegativeDurationWithMaximum(10*time.Hour),
settings.WithPublic)
connectionWait = settings.RegisterDurationSetting(
settings.TenantWritable,
"server.shutdown.connection_wait",
"the maximum amount of time a server waits for all SQL connections to "+
"be closed before proceeding with a drain. "+
"(note that the --drain-wait parameter for cockroach node drain may need adjustment "+
"after changing this setting)",
0*time.Second,
settings.NonNegativeDurationWithMaximum(10*time.Hour),
settings.WithPublic)
jobRegistryWait = settings.RegisterDurationSetting(
settings.TenantWritable,
"server.shutdown.jobs_wait",
"the maximum amount of time a server waits for all currently executing jobs "+
"to notice drain request and to perform orderly shutdown",
10*time.Second,
settings.NonNegativeDurationWithMaximum(10*time.Minute),
settings.WithPublic)
)
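// Illustrative note added for clarity; it is not part of the original source.
// Because the thresholds above are ordinary cluster settings, an operator
// could tune them over SQL before initiating `cockroach node drain`, e.g.:
//
//	SET CLUSTER SETTING server.shutdown.drain_wait = '8s';
//	SET CLUSTER SETTING server.shutdown.query_wait = '20s';
//
// The durations shown are arbitrary examples, not recommended values.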
// Drain puts the node into the specified drain mode(s) and optionally
// instructs the process to terminate.
// This method is part of the serverpb.AdminClient interface.
func (s *adminServer) Drain(req *serverpb.DrainRequest, stream serverpb.Admin_DrainServer) error {
ctx := stream.Context()
ctx = s.AnnotateCtx(ctx)
// Which node is this request for?
nodeID, local, err := s.serverIterator.parseServerID(req.NodeId)
if err != nil {
return status.Errorf(codes.InvalidArgument, err.Error())
}
if !local {
// This request is for another node. Forward it.
// In contrast to many RPC calls we implement around
// the server package, the Drain RPC is a *streaming*
// RPC. This means that it may have more than one
// response. We must forward all of them.
// Connect to the target node.
client, err := s.dialNode(ctx, roachpb.NodeID(nodeID))
if err != nil {
return srverrors.ServerError(ctx, err)
}
return delegateDrain(ctx, req, client, stream)
}
return s.drainServer.handleDrain(ctx, req, stream)
}
type drainServer struct {
stopper *stop.Stopper
// stopTrigger is used to request that the server is shut down.
stopTrigger *stopTrigger
grpc *grpcServer
sqlServer *SQLServer
drainSleepFn func(time.Duration)
serverCtl *serverController
kvServer struct {
nodeLiveness *liveness.NodeLiveness
node *Node
}
}
// newDrainServer constructs a drainServer suitable for any kind of server.
func newDrainServer(
cfg BaseConfig,
stopper *stop.Stopper,
stopTrigger *stopTrigger,
grpc *grpcServer,
sqlServer *SQLServer,
) *drainServer {
var drainSleepFn = time.Sleep
if cfg.TestingKnobs.Server != nil {
if cfg.TestingKnobs.Server.(*TestingKnobs).DrainSleepFn != nil {
drainSleepFn = cfg.TestingKnobs.Server.(*TestingKnobs).DrainSleepFn
}
}
return &drainServer{
stopper: stopper,
stopTrigger: stopTrigger,
grpc: grpc,
sqlServer: sqlServer,
drainSleepFn: drainSleepFn,
}
}
// setNode configures the drainServer to also support KV node shutdown.
func (s *drainServer) setNode(node *Node, nodeLiveness *liveness.NodeLiveness) {
s.kvServer.node = node
s.kvServer.nodeLiveness = nodeLiveness
}
func (s *drainServer) handleDrain(
ctx context.Context, req *serverpb.DrainRequest, stream serverpb.Admin_DrainServer,
) error {
log.Ops.Infof(ctx, "drain request received with doDrain = %v, shutdown = %v", req.DoDrain, req.Shutdown)
res := serverpb.DrainResponse{}
if req.DoDrain {
remaining, info, err := s.runDrain(ctx, req.Verbose)
if err != nil {
log.Ops.Errorf(ctx, "drain failed: %v", err)
return err
}
res.DrainRemainingIndicator = remaining
res.DrainRemainingDescription = info.StripMarkers()
}
if s.isDraining() {
res.IsDraining = true
}
if err := stream.Send(&res); err != nil {
return err
}
return s.maybeShutdownAfterDrain(ctx, req)
}
func (s *drainServer) maybeShutdownAfterDrain(
ctx context.Context, req *serverpb.DrainRequest,
) error {
if !req.Shutdown {
if req.DoDrain {
// The condition "if doDrain" is because we don't need an info
// message for just a probe.
log.Ops.Infof(ctx, "drain request completed without server shutdown")
}
return nil
}
go func() {
// TODO(tbg): why don't we stop the stopper first? Stopping the stopper
// first seems more reasonable since grpc.Stop closes the listener right
// away (and who knows whether gRPC-goroutines are tied up in some
// stopper task somewhere).
s.grpc.Stop()
s.stopTrigger.signalStop(ctx, serverctl.MakeShutdownRequest(serverctl.ShutdownReasonDrainRPC, nil /* err */))
}()
select {
case <-s.stopper.IsStopped():
return nil
case <-ctx.Done():
return ctx.Err()
case <-time.After(10 * time.Second):
// This is a hack to work around the problem in
// https://github.com/cockroachdb/cockroach/issues/37425#issuecomment-494336131
//
// There appear to be deadlock scenarios in which we don't manage to
// fully stop the grpc server (which implies closing the listener, i.e.
// seeming dead to the outside world) or don't manage to shut down the
// stopper (the evidence in #37425 is inconclusive which one it is).
//
// Other problems in this area are known, such as
// https://github.com/cockroachdb/cockroach/pull/31692
//
// The signal-based shutdown path uses a similar time-based escape hatch.
// Until we spend (potentially lots of time to) understand and fix this
// issue, this will serve us well.
log.Fatal(ctx, "timeout after drain")
return errors.New("unreachable")
}
}
// delegateDrain forwards a drain request to another node.
// 'client' is where the request should be forwarded to.
// 'stream' is where the request came from, and where the response should go.
func delegateDrain(
ctx context.Context,
req *serverpb.DrainRequest,
client serverpb.AdminClient,
stream serverpb.Admin_DrainServer,
) error {
// Retrieve the stream interface to the target node.
drainClient, err := client.Drain(ctx, req)
if err != nil {
return err
}
// Forward all the responses from the remote server,
// to our client.
for {
// Receive one response message from the target node.
resp, err := drainClient.Recv()
if err != nil {
if err == io.EOF {
break
}
if grpcutil.IsClosedConnection(err) {
// If the drain request contained Shutdown==true, it's
// possible for the RPC connection to the target node to be
// shut down before a DrainResponse and EOF is
// received. This is not truly an error.
break
}
return err
}
// Forward the response from the target node to our remote
// client.
if err := stream.Send(resp); err != nil {
return err
}
}
return nil
}
// runDrain idempotently activates the draining mode.
// Note: this represents ONE round of draining. This code is iterated on
// indefinitely until all range leases have been drained.
// This iteration can be found here: pkg/cli/start.go, pkg/cli/quit.go.
//
// Note: new code should not be taught to use this method
// directly. Use the Drain() RPC instead with a suitably crafted
// DrainRequest.
//
// On failure, the system may be in a partially drained
// state; the client should either continue calling Drain() or shut
// down the server.
//
// The reporter function, if non-nil, is called for each
// packet of load shed away from the server during the drain.
func (s *drainServer) runDrain(
ctx context.Context, verbose bool,
) (remaining uint64, info redact.RedactableString, err error) {
reports := make(map[redact.SafeString]int)
var mu syncutil.Mutex
reporter := func(howMany int, what redact.SafeString) {
if howMany > 0 {
mu.Lock()
reports[what] += howMany
mu.Unlock()
}
}
defer func() {
// Detail the counts based on the collected reports.
var descBuf strings.Builder
comma := redact.SafeString("")
for what, howMany := range reports {
remaining += uint64(howMany)
redact.Fprintf(&descBuf, "%s%s: %d", comma, what, howMany)
comma = ", "
}
info = redact.RedactableString(descBuf.String())
log.Ops.Infof(ctx, "drain remaining: %d", remaining)
if info != "" {
log.Ops.Infof(ctx, "drain details: %s", info)
}
}()
if err = s.drainInner(ctx, reporter, verbose); err != nil {
return 0, "", err
}
return
}
func (s *drainServer) drainInner(
ctx context.Context, reporter func(int, redact.SafeString), verbose bool,
) (err error) {
if s.serverCtl != nil {
// We are on a KV node, with a server controller.
//
// First tell the controller to stop starting new servers.
s.serverCtl.draining.Set(true)
// Then shut down tenant servers orchestrated from
// this node.
stillRunning := s.serverCtl.drain(ctx)
reporter(stillRunning, "tenant servers")
// If we still have tenant servers, we can't make progress on
// draining SQL clients (on the system tenant) and the KV node,
// because that would block the graceful drain of the tenant
// server(s).
if stillRunning > 0 {
return nil
}
log.Infof(ctx, "all tenant servers stopped")
}
// Drain the SQL layer.
// Drains all SQL connections, distributed SQL execution flows, and SQL table leases.
if err = s.drainClients(ctx, reporter); err != nil {
return err
}
log.Infof(ctx, "done draining clients")
// Mark the node as draining in liveness and drain all range leases.
return s.drainNode(ctx, reporter, verbose)
}
// isDraining returns true if either SQL client connections are being drained
// or if one of the stores on the node is not accepting replicas.
func (s *drainServer) isDraining() bool {
return s.sqlServer.pgServer.IsDraining() || (s.kvServer.node != nil && s.kvServer.node.IsDraining())
}
// drainClients starts draining the SQL layer.
func (s *drainServer) drainClients(
ctx context.Context, reporter func(int, redact.SafeString),
) error {
// Setup a cancelable context so that the logOpenConns goroutine exits when
// this function returns.
var cancel context.CancelFunc
ctx, cancel = context.WithCancel(ctx)
defer cancel()
shouldDelayDraining := !s.isDraining()
// Set the gRPC mode of the node to "draining" and mark the node as "not ready".
// Probes to /health?ready=1 will now notice the change in the node's readiness.
s.grpc.setMode(modeDraining)
s.sqlServer.isReady.Set(false)
// Log the number of connections periodically.
if err := s.logOpenConns(ctx); err != nil {
log.Ops.Warningf(ctx, "error showing alive SQL connections: %v", err)
}
// Wait the duration of drainWait.
// This will fail load balancer checks and delay draining so that client
// traffic can move off this node.
	// Note that the delay only happens on the first call to drain.
if shouldDelayDraining {
log.Ops.Info(ctx, "waiting for health probes to notice that the node "+
"is not ready for new sql connections")
s.drainSleepFn(drainWait.Get(&s.sqlServer.execCfg.Settings.SV))
}
// Wait for users to close the existing SQL connections.
// During this phase, the server is rejecting new SQL connections.
// The server exits this phase either once all SQL connections are closed,
// or the connectionMaxWait timeout elapses, whichever happens earlier.
if err := s.sqlServer.pgServer.WaitForSQLConnsToClose(ctx, connectionWait.Get(&s.sqlServer.execCfg.Settings.SV), s.stopper); err != nil {
return err
}
// Inform the job system that the node is draining.
//
// We cannot do this before SQL clients disconnect, because
// otherwise there is a risk that one of the remaining SQL sessions
// issues a BACKUP or some other job-based statement before it
// disconnects, and encounters a job error as a result -- that the
// registry is now unavailable due to the drain.
{
_ = timeutil.RunWithTimeout(ctx, "drain-job-registry",
jobRegistryWait.Get(&s.sqlServer.execCfg.Settings.SV),
func(ctx context.Context) error {
s.sqlServer.jobRegistry.DrainRequested(ctx)
return nil
})
}
// Inform the auto-stats tasks that the node is draining.
s.sqlServer.statsRefresher.SetDraining()
// Drain any remaining SQL connections.
// The queryWait duration is a timeout for waiting for SQL queries to finish.
// If the timeout is reached, any remaining connections
// will be closed.
queryMaxWait := queryWait.Get(&s.sqlServer.execCfg.Settings.SV)
if err := s.sqlServer.pgServer.Drain(ctx, queryMaxWait, reporter, s.stopper); err != nil {
return err
}
// Drain all distributed SQL execution flows.
// The queryWait duration is used to wait on currently running flows to finish.
s.sqlServer.distSQLServer.Drain(ctx, queryMaxWait, reporter)
// Flush in-memory SQL stats into the statement stats system table.
statsProvider := s.sqlServer.pgServer.SQLServer.GetSQLStatsProvider().(*persistedsqlstats.PersistedSQLStats)
statsProvider.Flush(ctx)
statsProvider.Stop(ctx)
// Inform the async tasks for table stats that the node is draining
// and wait for task shutdown.
s.sqlServer.statsRefresher.WaitForAutoStatsShutdown(ctx)
// Inform the job system that the node is draining and wait for task
// shutdown.
s.sqlServer.jobRegistry.WaitForRegistryShutdown(ctx)
// Drain all SQL table leases. This must be done after the pgServer has
// given sessions a chance to finish ongoing work and after the background
// tasks that may issue SQL statements have shut down.
s.sqlServer.leaseMgr.SetDraining(ctx, true /* drain */, reporter)
session, err := s.sqlServer.sqlLivenessProvider.Release(ctx)
if err != nil {
return err
}
instanceID := s.sqlServer.sqlIDContainer.SQLInstanceID()
err = s.sqlServer.sqlInstanceStorage.ReleaseInstance(ctx, session, instanceID)
if err != nil {
return err
}
// Mark the node as fully drained.
s.sqlServer.gracefulDrainComplete.Set(true)
// Mark this phase in the logs to clarify the context of any subsequent
// errors/warnings, if any.
log.Infof(ctx, "SQL server drained successfully; SQL queries cannot execute any more")
return nil
}
// drainNode initiates the draining mode for the node, which
// starts draining range leases.
func (s *drainServer) drainNode(
ctx context.Context, reporter func(int, redact.SafeString), verbose bool,
) (err error) {
if s.kvServer.node == nil {
// No KV subsystem. Nothing to do.
return nil
}
// Set the node's liveness status to "draining".
if err = s.kvServer.nodeLiveness.SetDraining(ctx, true /* drain */, reporter); err != nil {
return err
}
// Mark the stores of the node as "draining" and drain all range leases.
return s.kvServer.node.SetDraining(true /* drain */, reporter, verbose)
}
// logOpenConns logs the number of open SQL connections every 3 seconds.
func (s *drainServer) logOpenConns(ctx context.Context) error {
return s.stopper.RunAsyncTask(ctx, "log-open-conns", func(ctx context.Context) {
ticker := time.NewTicker(3 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
log.Ops.Infof(ctx, "number of open connections: %d\n", s.sqlServer.pgServer.GetConnCancelMapLen())
case <-s.stopper.ShouldQuiesce():
return
case <-ctx.Done():
return
}
}
})
}
// CallDrainServerSide is a reference implementation for a server-side
// function that wishes to shut down a server gracefully via the Drain
// interface. The Drain interface is responsible for notifying clients
// and shutting down systems in a particular order that prevents
// client app disruptions. We generally prefer graceful drains to the
// disorderly shutdown caused by either a process crash or a direct
// call to the stopper's Stop() method.
//
// By default, this code will wait forever for a graceful drain to
// complete. The caller can override this behavior by passing a context
// with a deadline.
//
// For an example client-side implementation (drain client over RPC),
// see the code in pkg/cli/node.go, doDrain().
func CallDrainServerSide(ctx context.Context, drainFn ServerSideDrainFn) {
var (
prevRemaining = uint64(math.MaxUint64)
verbose = false
)
ctx = logtags.AddTag(ctx, "call-graceful-drain", nil)
for {
// Let the caller interrupt the process via context cancellation
// if so desired.
select {
case <-ctx.Done():
log.Ops.Errorf(ctx, "drain interrupted by caller: %v", ctx.Err())
return
default:
}
remaining, _, err := drainFn(ctx, verbose)
if err != nil {
log.Ops.Errorf(ctx, "graceful drain failed: %v", err)
return
}
if remaining == 0 {
// No more work to do.
log.Ops.Infof(ctx, "graceful drain complete")
return
}
// If range lease transfer stalls or the number of
// remaining leases somehow increases, verbosity is set
// to help with troubleshooting.
if remaining >= prevRemaining {
verbose = true
}
// Avoid a busy wait with high CPU usage if the server replies
// with an incomplete drain too quickly.
time.Sleep(200 * time.Millisecond)
// Remember the remaining work to set the verbose flag in the next
// iteration.
prevRemaining = remaining
}
}
// ServerSideDrainFn is the interface of the server-side handler for the Drain logic.
type ServerSideDrainFn func(ctx context.Context, verbose bool) (uint64, redact.RedactableString, error)
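// Illustrative sketch, not part of the original file: a caller holding a
// *drainServer could bound the graceful drain with a context deadline and
// pass runDrain (which matches ServerSideDrainFn) to CallDrainServerSide.
// The helper name and the 15-minute deadline are assumptions for the example.
//
//	func drainWithDeadline(ctx context.Context, s *drainServer) {
//		ctx, cancel := context.WithTimeout(ctx, 15*time.Minute)
//		defer cancel()
//		CallDrainServerSide(ctx, s.runDrain)
//	}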
| pkg/server/drain.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.002255542203783989,
0.00020987470634281635,
0.0001597225054865703,
0.000170224389876239,
0.00027416268130764365
] |
{
"id": 3,
"code_window": [
"\t})\n",
"\tdefer c.Cleanup()\n",
"\n",
"\ttc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{\n",
"\t\tServerArgsPerNode: map[int]base.TestServerArgs{\n",
"\t\t\t0: {StoreSpecs: []base.StoreSpec{{Path: dir + \"/store-1\"}}},\n",
"\t\t\t1: {StoreSpecs: []base.StoreSpec{{Path: dir + \"/store-2\"}}},\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tServerArgs: base.TestServerArgs{\n",
"\t\t\t// This logic is specific to the storage layer.\n",
"\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n",
"\t\t},\n"
],
"file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go",
"type": "add",
"edit_start_line_idx": 65
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package log
import "os"
const (
tracebackNone = iota
tracebackSingle
tracebackAll
)
// Obey the GOTRACEBACK environment variable for determining which stacks to
// output during a log.Fatal.
var traceback = func() int {
switch os.Getenv("GOTRACEBACK") {
case "none":
return tracebackNone
case "single", "":
return tracebackSingle
default: // "all", "system", "crash"
return tracebackAll
}
}()
// DisableTracebacks turns off tracebacks for log.Fatals. Returns a function
// that sets the traceback settings back to where they were.
// Only intended for use by tests.
func DisableTracebacks() func() {
oldVal := traceback
traceback = tracebackNone
return func() { traceback = oldVal }
}
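// Illustrative usage note added for clarity (not in the original file): the
// returned closure restores the previous setting, so tests that deliberately
// exercise log.Fatal paths typically pair the call with defer, e.g.
//
//	defer DisableTracebacks()()
//
// (or log.DisableTracebacks()() when called from outside this package).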
| pkg/util/log/tracebacks.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.00017886450223159045,
0.00017257724539376795,
0.00016365840565413237,
0.00017325810040347278,
0.0000055589230214536656
] |
{
"id": 3,
"code_window": [
"\t})\n",
"\tdefer c.Cleanup()\n",
"\n",
"\ttc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{\n",
"\t\tServerArgsPerNode: map[int]base.TestServerArgs{\n",
"\t\t\t0: {StoreSpecs: []base.StoreSpec{{Path: dir + \"/store-1\"}}},\n",
"\t\t\t1: {StoreSpecs: []base.StoreSpec{{Path: dir + \"/store-2\"}}},\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tServerArgs: base.TestServerArgs{\n",
"\t\t\t// This logic is specific to the storage layer.\n",
"\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n",
"\t\t},\n"
],
"file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go",
"type": "add",
"edit_start_line_idx": 65
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package invertedidx
import (
"context"
"fmt"
"github.com/cockroachdb/cockroach/pkg/geo/geoindex"
"github.com/cockroachdb/cockroach/pkg/sql/inverted"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/cat"
"github.com/cockroachdb/cockroach/pkg/sql/opt/constraint"
"github.com/cockroachdb/cockroach/pkg/sql/opt/idxconstraint"
"github.com/cockroachdb/cockroach/pkg/sql/opt/invertedexpr"
"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
"github.com/cockroachdb/cockroach/pkg/sql/opt/norm"
"github.com/cockroachdb/cockroach/pkg/sql/opt/props"
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
)
// NewDatumsToInvertedExpr returns a new DatumsToInvertedExpr.
func NewDatumsToInvertedExpr(
ctx context.Context,
evalCtx *eval.Context,
colTypes []*types.T,
expr tree.TypedExpr,
geoConfig geoindex.Config,
) (invertedexpr.DatumsToInvertedExpr, error) {
if !geoConfig.IsEmpty() {
return NewGeoDatumsToInvertedExpr(ctx, evalCtx, colTypes, expr, geoConfig)
}
return NewJSONOrArrayDatumsToInvertedExpr(ctx, evalCtx, colTypes, expr)
}
// NewBoundPreFilterer returns a PreFilterer for the given expr where the type
// of the bound param is specified by typ. Unlike the use of PreFilterer in an
// inverted join, where each left value is bound, this function is for the
// invertedFilterer where the param to be bound is already specified as a
// constant in the expr. The callee will bind this parameter and return the
// opaque pre-filtering state for that binding (the interface{} in the return
// values).
func NewBoundPreFilterer(typ *types.T, expr tree.TypedExpr) (*PreFilterer, interface{}, error) {
if !typ.Equivalent(types.Geometry) && !typ.Equivalent(types.Geography) {
return nil, nil, fmt.Errorf("pre-filtering not supported for type %s", typ)
}
return newGeoBoundPreFilterer(typ, expr)
}
// TryFilterInvertedIndex tries to derive an inverted filter condition for
// the given inverted index from the specified filters. If an inverted filter
// condition is derived, it is returned with ok=true. If no condition can be
// derived, then TryFilterInvertedIndex returns ok=false.
//
// In addition to the inverted filter condition (spanExpr), returns:
// - a constraint of the prefix columns if there are any,
// - remaining filters that must be applied if the span expression is not tight,
// and
// - pre-filterer state that can be used by the invertedFilterer operator to
// reduce the number of false positives returned by the span expression.
func TryFilterInvertedIndex(
ctx context.Context,
evalCtx *eval.Context,
factory *norm.Factory,
filters memo.FiltersExpr,
optionalFilters memo.FiltersExpr,
tabID opt.TableID,
index cat.Index,
computedColumns map[opt.ColumnID]opt.ScalarExpr,
checkCancellation func(),
) (
spanExpr *inverted.SpanExpression,
constraint *constraint.Constraint,
remainingFilters memo.FiltersExpr,
preFiltererState *invertedexpr.PreFiltererStateForInvertedFilterer,
ok bool,
) {
// Attempt to constrain the prefix columns, if there are any. If they cannot
// be constrained to single values, the index cannot be used.
constraint, filters, ok = constrainPrefixColumns(
evalCtx, factory, filters, optionalFilters, tabID, index, checkCancellation,
)
if !ok {
return nil, nil, nil, nil, false
}
config := index.GeoConfig()
var typ *types.T
var filterPlanner invertedFilterPlanner
if config.IsGeography() {
filterPlanner = &geoFilterPlanner{
factory: factory,
tabID: tabID,
index: index,
getSpanExpr: getSpanExprForGeographyIndex,
}
typ = types.Geography
} else if config.IsGeometry() {
filterPlanner = &geoFilterPlanner{
factory: factory,
tabID: tabID,
index: index,
getSpanExpr: getSpanExprForGeometryIndex,
}
typ = types.Geometry
} else {
col := index.InvertedColumn().InvertedSourceColumnOrdinal()
typ = factory.Metadata().Table(tabID).Column(col).DatumType()
switch typ.Family() {
case types.StringFamily:
filterPlanner = &trigramFilterPlanner{
tabID: tabID,
index: index,
computedColumns: computedColumns,
}
case types.TSVectorFamily:
filterPlanner = &tsqueryFilterPlanner{
tabID: tabID,
index: index,
computedColumns: computedColumns,
}
case types.JsonFamily, types.ArrayFamily:
filterPlanner = &jsonOrArrayFilterPlanner{
tabID: tabID,
index: index,
computedColumns: computedColumns,
}
default:
return nil, nil, nil, nil, false
}
}
var invertedExpr inverted.Expression
var pfState *invertedexpr.PreFiltererStateForInvertedFilterer
for i := range filters {
invertedExprLocal, remFiltersLocal, pfStateLocal := extractInvertedFilterCondition(
ctx, evalCtx, factory, filters[i].Condition, filterPlanner,
)
if invertedExpr == nil {
invertedExpr = invertedExprLocal
pfState = pfStateLocal
} else {
invertedExpr = inverted.And(invertedExpr, invertedExprLocal)
// Do pre-filtering using the first of the conjuncts that provided
// non-nil pre-filtering state.
if pfState == nil {
pfState = pfStateLocal
}
}
if remFiltersLocal != nil {
remainingFilters = append(remainingFilters, factory.ConstructFiltersItem(remFiltersLocal))
}
}
if invertedExpr == nil {
return nil, nil, nil, nil, false
}
spanExpr, ok = invertedExpr.(*inverted.SpanExpression)
if !ok {
return nil, nil, nil, nil, false
}
if pfState != nil {
pfState.Typ = typ
}
return spanExpr, constraint, remainingFilters, pfState, true
}
// TryJoinInvertedIndex tries to create an inverted join with the given input
// and inverted index from the specified filters. If a join is created, the
// inverted join condition is returned. If no join can be created, then
// TryJoinInvertedIndex returns nil.
func TryJoinInvertedIndex(
ctx context.Context,
factory *norm.Factory,
filters memo.FiltersExpr,
tabID opt.TableID,
index cat.Index,
inputCols opt.ColSet,
) opt.ScalarExpr {
if !index.IsInverted() {
return nil
}
config := index.GeoConfig()
var joinPlanner invertedJoinPlanner
if config.IsGeography() {
joinPlanner = &geoJoinPlanner{
factory: factory,
tabID: tabID,
index: index,
inputCols: inputCols,
getSpanExpr: getSpanExprForGeographyIndex,
}
} else if config.IsGeometry() {
joinPlanner = &geoJoinPlanner{
factory: factory,
tabID: tabID,
index: index,
inputCols: inputCols,
getSpanExpr: getSpanExprForGeometryIndex,
}
} else {
joinPlanner = &jsonOrArrayJoinPlanner{
factory: factory,
tabID: tabID,
index: index,
inputCols: inputCols,
}
}
var invertedExpr opt.ScalarExpr
for i := range filters {
invertedExprLocal := extractInvertedJoinCondition(
ctx, factory, filters[i].Condition, joinPlanner,
)
if invertedExprLocal == nil {
continue
}
if invertedExpr == nil {
invertedExpr = invertedExprLocal
} else {
invertedExpr = factory.ConstructAnd(invertedExpr, invertedExprLocal)
}
}
if invertedExpr == nil {
return nil
}
// The resulting expression must contain at least one column from the input.
var p props.Shared
memo.BuildSharedProps(invertedExpr, &p, factory.EvalContext())
if !p.OuterCols.Intersects(inputCols) {
return nil
}
return invertedExpr
}
type invertedJoinPlanner interface {
// extractInvertedJoinConditionFromLeaf extracts a join condition from the
// given expression, which represents a leaf of an expression tree in which
// the internal nodes are And and/or Or expressions. Returns nil if no join
// condition could be extracted.
extractInvertedJoinConditionFromLeaf(ctx context.Context, expr opt.ScalarExpr) opt.ScalarExpr
}
// extractInvertedJoinCondition extracts a scalar expression from the given
// filter condition, where the scalar expression represents a join condition
// between the input columns and inverted index. Returns nil if no join
// condition could be extracted.
//
// The filter condition should be an expression tree of And, Or, and leaf
// expressions. Extraction of the join condition from the leaves is delegated
// to the given invertedJoinPlanner.
func extractInvertedJoinCondition(
ctx context.Context,
factory *norm.Factory,
filterCond opt.ScalarExpr,
joinPlanner invertedJoinPlanner,
) opt.ScalarExpr {
switch t := filterCond.(type) {
case *memo.AndExpr:
leftExpr := extractInvertedJoinCondition(ctx, factory, t.Left, joinPlanner)
rightExpr := extractInvertedJoinCondition(ctx, factory, t.Right, joinPlanner)
if leftExpr == nil {
return rightExpr
}
if rightExpr == nil {
return leftExpr
}
return factory.ConstructAnd(leftExpr, rightExpr)
case *memo.OrExpr:
leftExpr := extractInvertedJoinCondition(ctx, factory, t.Left, joinPlanner)
rightExpr := extractInvertedJoinCondition(ctx, factory, t.Right, joinPlanner)
if leftExpr == nil || rightExpr == nil {
return nil
}
return factory.ConstructOr(leftExpr, rightExpr)
default:
return joinPlanner.extractInvertedJoinConditionFromLeaf(ctx, filterCond)
}
}
// getInvertedExpr takes a TypedExpr tree consisting of And, Or and leaf
// expressions, and constructs a new TypedExpr tree in which the leaves are
// replaced by the given getInvertedExprLeaf function.
func getInvertedExpr(
expr tree.TypedExpr, getInvertedExprLeaf func(expr tree.TypedExpr) (tree.TypedExpr, error),
) (tree.TypedExpr, error) {
switch t := expr.(type) {
case *tree.AndExpr:
leftExpr, err := getInvertedExpr(t.TypedLeft(), getInvertedExprLeaf)
if err != nil {
return nil, err
}
rightExpr, err := getInvertedExpr(t.TypedRight(), getInvertedExprLeaf)
if err != nil {
return nil, err
}
return tree.NewTypedAndExpr(leftExpr, rightExpr), nil
case *tree.OrExpr:
leftExpr, err := getInvertedExpr(t.TypedLeft(), getInvertedExprLeaf)
if err != nil {
return nil, err
}
rightExpr, err := getInvertedExpr(t.TypedRight(), getInvertedExprLeaf)
if err != nil {
return nil, err
}
return tree.NewTypedOrExpr(leftExpr, rightExpr), nil
default:
return getInvertedExprLeaf(expr)
}
}
// evalInvertedExpr evaluates a TypedExpr tree consisting of And, Or and leaf
// expressions, and returns the resulting inverted.Expression. Delegates
// evaluation of leaf expressions to the given evalInvertedExprLeaf function.
func evalInvertedExpr(
expr tree.TypedExpr, evalInvertedExprLeaf func(expr tree.TypedExpr) (inverted.Expression, error),
) (inverted.Expression, error) {
switch t := expr.(type) {
case *tree.AndExpr:
leftExpr, err := evalInvertedExpr(t.TypedLeft(), evalInvertedExprLeaf)
if err != nil {
return nil, err
}
rightExpr, err := evalInvertedExpr(t.TypedRight(), evalInvertedExprLeaf)
if err != nil {
return nil, err
}
if leftExpr == nil || rightExpr == nil {
return nil, nil
}
return inverted.And(leftExpr, rightExpr), nil
case *tree.OrExpr:
leftExpr, err := evalInvertedExpr(t.TypedLeft(), evalInvertedExprLeaf)
if err != nil {
return nil, err
}
rightExpr, err := evalInvertedExpr(t.TypedRight(), evalInvertedExprLeaf)
if err != nil {
return nil, err
}
if leftExpr == nil {
return rightExpr, nil
}
if rightExpr == nil {
return leftExpr, nil
}
return inverted.Or(leftExpr, rightExpr), nil
default:
return evalInvertedExprLeaf(expr)
}
}
// constrainPrefixColumns attempts to build a constraint for the non-inverted
// prefix columns of the given index. If a constraint is successfully built, it
// is returned along with remaining filters and ok=true. The function is only
// successful if it can generate a constraint where all spans have the same
// start and end keys for all non-inverted prefix columns. This is required for
// building spans for scanning multi-column inverted indexes (see
// span.Builder.SpansFromInvertedSpans).
//
// If the index is a single-column inverted index, there are no prefix columns
// to constrain, and ok=true is returned.
func constrainPrefixColumns(
evalCtx *eval.Context,
factory *norm.Factory,
filters memo.FiltersExpr,
optionalFilters memo.FiltersExpr,
tabID opt.TableID,
index cat.Index,
checkCancellation func(),
) (constraint *constraint.Constraint, remainingFilters memo.FiltersExpr, ok bool) {
tabMeta := factory.Metadata().TableMeta(tabID)
prefixColumnCount := index.NonInvertedPrefixColumnCount()
ps := tabMeta.IndexPartitionLocality(index.Ordinal())
// If this is a single-column inverted index, there are no prefix columns to
// constrain.
if prefixColumnCount == 0 {
return nil, filters, true
}
prefixColumns := make([]opt.OrderingColumn, prefixColumnCount)
var notNullCols opt.ColSet
for i := range prefixColumns {
col := index.Column(i)
colID := tabID.ColumnID(col.Ordinal())
prefixColumns[i] = opt.MakeOrderingColumn(colID, col.Descending)
if !col.IsNullable() {
notNullCols.Add(colID)
}
}
// Consolidation of a constraint converts contiguous spans into a single
// span. By definition, the consolidated span would have different start and
// end keys and could not be used for multi-column inverted index scans.
// Therefore, we only generate and check the unconsolidated constraint,
// allowing the optimizer to plan multi-column inverted index scans in more
// cases.
//
// For example, the consolidated constraint for (x IN (1, 2, 3)) is:
//
// /x: [/1 - /3]
// Prefix: 0
//
// The unconsolidated constraint for the same expression is:
//
// /x: [/1 - /1] [/2 - /2] [/3 - /3]
// Prefix: 1
//
var ic idxconstraint.Instance
ic.Init(
filters, optionalFilters,
prefixColumns, notNullCols, tabMeta.ComputedCols,
tabMeta.ColsInComputedColsExpressions,
false, /* consolidate */
evalCtx, factory, ps, checkCancellation,
)
constraint = ic.UnconsolidatedConstraint()
if constraint.Prefix(evalCtx) < prefixColumnCount {
// If all of the constraint spans do not have the same start and end keys
// for all columns, the index cannot be used.
return nil, nil, false
}
// Make a copy of constraint so that the idxconstraint.Instance is not
// referenced.
copy := *constraint
remainingFilters = ic.RemainingFilters()
	return &copy, remainingFilters, true
}
type invertedFilterPlanner interface {
// extractInvertedFilterConditionFromLeaf extracts an inverted filter
// condition from the given expression, which represents a leaf of an
// expression tree in which the internal nodes are And and/or Or expressions.
// Returns an empty inverted.Expression if no inverted filter condition could
// be extracted.
//
// Additionally, returns:
// - remaining filters that must be applied if the inverted expression is not
// tight, and
// - pre-filterer state that can be used to reduce false positives.
extractInvertedFilterConditionFromLeaf(ctx context.Context, evalCtx *eval.Context, expr opt.ScalarExpr) (
invertedExpr inverted.Expression,
remainingFilters opt.ScalarExpr,
_ *invertedexpr.PreFiltererStateForInvertedFilterer,
)
}
// extractInvertedFilterCondition extracts an inverted.Expression from the given
// filter condition, where the inverted.Expression represents an inverted filter
// over the given inverted index. Returns an empty inverted.Expression if no
// inverted filter condition could be extracted.
//
// The filter condition should be an expression tree of And, Or, and leaf
// expressions. Extraction of the inverted.Expression from the leaves is
// delegated to the given invertedFilterPlanner.
//
// In addition to the inverted.Expression, returns:
// - remaining filters that must be applied if the inverted expression is not
// tight, and
// - pre-filterer state that can be used to reduce false positives. This is
// only non-nil if filterCond is a leaf condition (i.e., has no ANDs or ORs).
func extractInvertedFilterCondition(
ctx context.Context,
evalCtx *eval.Context,
factory *norm.Factory,
filterCond opt.ScalarExpr,
filterPlanner invertedFilterPlanner,
) (
invertedExpr inverted.Expression,
remainingFilters opt.ScalarExpr,
_ *invertedexpr.PreFiltererStateForInvertedFilterer,
) {
switch t := filterCond.(type) {
case *memo.AndExpr:
l, remLeft, _ := extractInvertedFilterCondition(ctx, evalCtx, factory, t.Left, filterPlanner)
r, remRight, _ := extractInvertedFilterCondition(ctx, evalCtx, factory, t.Right, filterPlanner)
if remLeft == nil {
remainingFilters = remRight
} else if remRight == nil {
remainingFilters = remLeft
} else {
remainingFilters = factory.ConstructAnd(remLeft, remRight)
}
return inverted.And(l, r), remainingFilters, nil
case *memo.OrExpr:
l, remLeft, _ := extractInvertedFilterCondition(ctx, evalCtx, factory, t.Left, filterPlanner)
r, remRight, _ := extractInvertedFilterCondition(ctx, evalCtx, factory, t.Right, filterPlanner)
if remLeft != nil || remRight != nil {
// If either child has remaining filters, we must return the original
// condition as the remaining filter. It would be incorrect to return
// only part of the original condition.
remainingFilters = filterCond
}
return inverted.Or(l, r), remainingFilters, nil
default:
return filterPlanner.extractInvertedFilterConditionFromLeaf(ctx, evalCtx, filterCond)
}
}
// isIndexColumn returns true if e is an expression that corresponds to an
// inverted index column. The expression can be either:
// - a variable on the index column, or
// - an expression that matches the computed column expression (if the index
// column is computed).
func isIndexColumn(
tabID opt.TableID, index cat.Index, e opt.Expr, computedColumns map[opt.ColumnID]opt.ScalarExpr,
) bool {
invertedSourceCol := tabID.ColumnID(index.InvertedColumn().InvertedSourceColumnOrdinal())
if v, ok := e.(*memo.VariableExpr); ok && v.Col == invertedSourceCol {
return true
}
if computedColumns != nil && e == computedColumns[invertedSourceCol] {
return true
}
return false
}
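// Illustrative sketch, not part of the original file: getInvertedExpr and
// evalInvertedExpr are driven with small closures over the leaf expressions.
// rewriteLeaf below is a hypothetical stand-in for the geo/JSON-specific leaf
// rewrites implemented elsewhere in this package.
//
//	rewritten, err := getInvertedExpr(typedExpr, func(leaf tree.TypedExpr) (tree.TypedExpr, error) {
//		return rewriteLeaf(leaf) // hypothetical leaf-specific rewrite
//	})
//	if err != nil {
//		return nil, err
//	}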
| pkg/sql/opt/invertedidx/inverted_index_expr.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.001082471921108663,
0.0001994781196117401,
0.00016124171088449657,
0.00017484415729995817,
0.0001302780583500862
] |
{
"id": 4,
"code_window": [
"\ttc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{\n",
"\t\tServerArgs: base.TestServerArgs{\n",
"\t\t\tStoreSpecs: []base.StoreSpec{{InMemory: true}},\n",
"\t\t\tInsecure: true,\n",
"\t\t},\n",
"\t})\n",
"\ttc.Start(t)\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t// This logic is specific to the storage layer.\n",
"\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n"
],
"file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go",
"type": "add",
"edit_start_line_idx": 114
} | // Copyright 2022 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package cli
import (
"context"
"fmt"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestQueryForTable(t *testing.T) {
defer leaktest.AfterTest(t)()
reg := DebugZipTableRegistry{
"table_with_sensitive_cols": {
nonSensitiveCols: NonSensitiveColumns{"x", "y", "z"},
},
"table_with_empty_sensitive_cols": {
nonSensitiveCols: NonSensitiveColumns{},
},
"table_with_custom_queries": {
customQueryUnredacted: "SELECT * FROM table_with_custom_queries",
customQueryRedacted: "SELECT a, b, c FROM table_with_custom_queries",
},
}
t.Run("errors if no table config present in registry", func(t *testing.T) {
actual, err := reg.QueryForTable("does_not_exist", false /* redact */)
assert.Error(t, err)
assert.Contains(t, err.Error(), "no entry found")
assert.Empty(t, actual)
})
t.Run("produces `TABLE` query when unredacted with no custom query", func(t *testing.T) {
table := "table_with_sensitive_cols"
expected := "TABLE table_with_sensitive_cols"
actual, err := reg.QueryForTable(table, false /* redact */)
assert.NoError(t, err)
assert.Equal(t, expected, actual)
})
t.Run("produces custom query when unredacted and custom query supplied", func(t *testing.T) {
table := "table_with_custom_queries"
expected := "SELECT * FROM table_with_custom_queries"
actual, err := reg.QueryForTable(table, false /* redact */)
assert.NoError(t, err)
assert.Equal(t, expected, actual)
})
t.Run("produces query with only non-sensitive columns when redacted and no custom query", func(t *testing.T) {
table := "table_with_sensitive_cols"
expected := `SELECT x, y, z FROM table_with_sensitive_cols`
actual, err := reg.QueryForTable(table, true /* redact */)
assert.NoError(t, err)
assert.Equal(t, expected, actual)
})
t.Run("produces custom when redacted and custom query supplied", func(t *testing.T) {
table := "table_with_custom_queries"
expected := "SELECT a, b, c FROM table_with_custom_queries"
actual, err := reg.QueryForTable(table, true /* redact */)
assert.NoError(t, err)
assert.Equal(t, expected, actual)
})
t.Run("returns error when no custom queries and no non-sensitive columns supplied", func(t *testing.T) {
table := "table_with_empty_sensitive_cols"
actual, err := reg.QueryForTable(table, true /* redact */)
assert.Error(t, err)
assert.Contains(t, err.Error(), "no non-sensitive columns defined")
assert.Empty(t, actual)
})
}
func TestNoForbiddenSystemTablesInDebugZip(t *testing.T) {
defer leaktest.AfterTest(t)()
forbiddenSysTables := []string{
"system.users",
"system.web_sessions",
"system.join_tokens",
"system.comments",
"system.ui",
"system.zones",
"system.statement_bundle_chunks",
"system.statement_statistics",
"system.transaction_statistics",
"system.statement_activity",
"system.transaction_activity",
}
for _, forbiddenTable := range forbiddenSysTables {
query, err := zipSystemTables.QueryForTable(forbiddenTable, false /* redact */)
assert.Equal(t, "", query)
assert.Error(t, err)
assert.Equal(t, fmt.Sprintf("no entry found in table registry for: %s", forbiddenTable), err.Error())
}
}
func TestNoNonSensitiveColsAndCustomRedactedQueries(t *testing.T) {
defer leaktest.AfterTest(t)()
errFmtString := `FAILURE: The debug zip TableRegistryConfig for table %q
contains both a custom redacted query (customQueryRedacted) AND a list of
non sensitive columns (nonSensitiveCols). customQueryRedacted will ALWAYS
be used in place of nonSensitiveCols if defined, so please remove the
nonSensitiveCols. PLEASE be sure that NONE of the columns outside of those
listed in nonSensitiveCols have leaked into your customQueryRedacted, as
this would be a PCI leak. If any columns in your customQueryRedacted were
NOT already listed in nonSensitiveCols, you MUST confirm with the compliance
team that these columns are acceptable to reveal in an unredacted manner, or
you must redact them at the SQL level.`
for table, regConfig := range zipInternalTablesPerCluster {
if regConfig.customQueryRedacted != "" && len(regConfig.nonSensitiveCols) > 0 {
t.Fatalf(errFmtString, table)
}
}
for table, regConfig := range zipInternalTablesPerNode {
if regConfig.customQueryRedacted != "" && len(regConfig.nonSensitiveCols) > 0 {
t.Fatalf(errFmtString, table)
}
}
for table, regConfig := range zipSystemTables {
if regConfig.customQueryRedacted != "" && len(regConfig.nonSensitiveCols) > 0 {
t.Fatalf(errFmtString, table)
}
}
}
func executeAllCustomQueries(
t *testing.T, sqlDB *sqlutils.SQLRunner, tableRegistry DebugZipTableRegistry,
) {
for table, regConfig := range tableRegistry {
if regConfig.customQueryRedacted != "" {
rows := sqlDB.Query(t, regConfig.customQueryRedacted)
require.NoError(t, rows.Err(), "failed to select for table %s redacted", table)
}
if regConfig.customQueryUnredacted != "" {
rows := sqlDB.Query(t, regConfig.customQueryUnredacted)
require.NoError(t, rows.Err(), "failed to select for table %s unredacted", table)
}
}
}
func TestCustomQuery(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
cluster := serverutils.StartCluster(t, 1 /* numNodes */, base.TestClusterArgs{
ServerArgs: base.TestServerArgs{},
})
defer cluster.Stopper().Stop(context.Background())
testConn := cluster.ServerConn(0 /* idx */)
sqlDB := sqlutils.MakeSQLRunner(testConn)
	executeAllCustomQueries(t, sqlDB, zipInternalTablesPerCluster)
	executeAllCustomQueries(t, sqlDB, zipInternalTablesPerNode)
	executeAllCustomQueries(t, sqlDB, zipSystemTables)
}
func executeSelectOnNonSensitiveColumns(
t *testing.T, sqlDB *sqlutils.SQLRunner, tableRegistry DebugZipTableRegistry,
) {
for table, regConfig := range tableRegistry {
if len(regConfig.nonSensitiveCols) != 0 {
columns := strings.Join(regConfig.nonSensitiveCols[:], ",")
rows := sqlDB.Query(t, fmt.Sprintf("SELECT %s FROM %s", columns, table))
require.NoError(t, rows.Err(), "failed to select non sensitive columns on table %s", table)
}
}
}
func TestNonSensitiveColumns(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
cluster := serverutils.StartCluster(t, 1 /* numNodes */, base.TestClusterArgs{
ServerArgs: base.TestServerArgs{},
})
defer cluster.Stopper().Stop(context.Background())
testConn := cluster.ServerConn(0 /* idx */)
sqlDB := sqlutils.MakeSQLRunner(testConn)
executeSelectOnNonSensitiveColumns(t, sqlDB, zipInternalTablesPerCluster)
executeSelectOnNonSensitiveColumns(t, sqlDB, zipInternalTablesPerNode)
executeSelectOnNonSensitiveColumns(t, sqlDB, zipSystemTables)
}
| pkg/cli/zip_table_registry_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.01065543107688427,
0.0008458839147351682,
0.00016251651686616242,
0.00017120268603321165,
0.0023190146312117577
] |
{
"id": 4,
"code_window": [
"\ttc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{\n",
"\t\tServerArgs: base.TestServerArgs{\n",
"\t\t\tStoreSpecs: []base.StoreSpec{{InMemory: true}},\n",
"\t\t\tInsecure: true,\n",
"\t\t},\n",
"\t})\n",
"\ttc.Start(t)\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t// This logic is specific to the storage layer.\n",
"\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n"
],
"file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go",
"type": "add",
"edit_start_line_idx": 114
} | " Vim syntax file
" Language: Andy's optimizer DSL prototype
" Maintainer: Radu Berinde <[email protected]>
"
" To use, copy the file to ~/.vim/syntax/
" Recommended in .vimrc:
" autocmd BufNewFile,BufRead *.opt set filetype=cropt tw=0
if exists("b:current_syntax")
finish
endif
syn match Comment display '#.*$' contains=Todo
syn keyword Todo TODO XXX
syn region ruletags start='^\[' end='\]$' contains=ruletag
syn match ruletag contained display '[A-Za-z0-9_]*'
syn region list start='(' end=')' contains=list,func,var,operator
syn match func contained display '\((\)\@<=[A-Za-z0-9_]\+\( | [A-Za-z0-9_]\+\)*'
syn match var '\$[A-Za-z0-9_]*'
syn match Special '^=>$'
syn region def start='^define [A-Za-z0-9_]\+ {' end='}' contains=define,operator
hi def link ruletag Identifier
hi def link func Type
hi def link var Macro
syn keyword define contained define
syn match func contained '(A-Za-z0-9_)*'
syn keyword operator Subquery SubqueryPrivate Any Exists Variable Const Null True False Placeholder
syn keyword operator Tuple Projections ColPrivate Aggregations AggregationsItem Filters FiltersItem
syn keyword operator Zip ZipItem ZipItemPrivate And Or Not Eq Lt Gt Le Ge Ne In NotIn
syn keyword operator Like NotLike ILike NotILike SimilarTo NotSimilarTo RegMatch NotRegMatch
syn keyword operator RegIMatch NotRegIMatch Is IsNot Contains JsonExists JsonAllExists
syn keyword operator JsonSomeExists AnyScalar Bitand Bitor Bitxor Plus Minus Mult Div FloorDiv
syn keyword operator Mod Pow Concat LShift RShift FetchVal FetchText FetchValPath FetchTextPath
syn keyword operator UnaryMinus UnaryComplement Cast AssignmentCast Case When Array Indirection
syn keyword operator Function FunctionPrivate Coalesce ColumnAccess
syn keyword operator Avg BoolAnd BoolOr ConcatAgg Count CountRows Max Min SumInt Sum SqrDiff
syn keyword operator Variance StdDev XorAgg JsonAgg JsonbAgg ConstAgg ConstNotNullAgg
syn keyword operator AnyNotNullAgg FirstAgg AggDistinct ScalarList
syn keyword operator Scan ScanPrivate VirtualScan VirtualScanPrivate Values Select Project
syn keyword operator InnerJoin LeftJoin RightJoin FullJoin SemiJoin AntiJoin
syn keyword operator IndexJoin IndexJoinPrivate LookupJoin LookupJoinPrivate
syn keyword operator MergeJoin MergeJoinPrivate
syn keyword operator InnerJoinApply LeftJoinApply
syn keyword operator SemiJoinApply AntiJoinApply
syn keyword operator GroupBy GroupingPrivate ScalarGroupBy
syn keyword operator DistinctOn EnsureDistinctOn UpsertDistinctOn EnsureUpsertDistinctOn
syn keyword operator Union SetPrivate Intersect Except UnionAll IntersectAll ExceptAll
syn keyword operator Limit Offset Max1Row Explain ExplainPrivate
syn keyword operator ShowTraceForSession ShowTracePrivate RowNumber RowNumberPrivate ProjectSet
syn keyword operator Sort Insert Update Upsert Delete CreateTable OpName
let b:current_syntax = "cropt"
| pkg/sql/opt/optgen/lang/support/vim/cropt.vim | 0 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.00017417674825992435,
0.00016986952687148005,
0.00016500549099873751,
0.0001687603653408587,
0.000003512301418595598
] |
{
"id": 4,
"code_window": [
"\ttc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{\n",
"\t\tServerArgs: base.TestServerArgs{\n",
"\t\t\tStoreSpecs: []base.StoreSpec{{InMemory: true}},\n",
"\t\t\tInsecure: true,\n",
"\t\t},\n",
"\t})\n",
"\ttc.Start(t)\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t// This logic is specific to the storage layer.\n",
"\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n"
],
"file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go",
"type": "add",
"edit_start_line_idx": 114
} | // Copyright 2023 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package changefeedccl
import (
"context"
"fmt"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/cdctest"
"github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/changefeedbase"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/spanconfig"
"github.com/cockroachdb/cockroach/pkg/spanconfig/spanconfigptsreader"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/bootstrap"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils"
"github.com/cockroachdb/cockroach/pkg/sql/distsql"
"github.com/cockroachdb/cockroach/pkg/sql/isql"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
)
// TestChangefeedUpdateProtectedTimestamp tests that a running changefeed
// continuously advances the timestamp of its PTS record as its highwater
// advances.
func TestChangefeedUpdateProtectedTimestamp(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) {
ctx := context.Background()
ptsInterval := 50 * time.Millisecond
changefeedbase.ProtectTimestampInterval.Override(
context.Background(), &s.Server.ClusterSettings().SV, ptsInterval)
sqlDB := sqlutils.MakeSQLRunner(s.DB)
sysDB := sqlutils.MakeSQLRunner(s.SystemServer.SQLConn(t, ""))
sysDB.Exec(t, "SET CLUSTER SETTING kv.protectedts.poll_interval = '10ms'")
sysDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms'") // speeds up the test
sysDB.Exec(t, "ALTER TENANT ALL SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms'")
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`)
foo := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved = '20ms'`)
defer closeFeed(t, foo)
fooDesc := desctestutils.TestingGetPublicTableDescriptor(
s.SystemServer.DB(), s.Codec, "d", "foo")
ptp := s.Server.DistSQLServer().(*distsql.ServerImpl).ServerConfig.ProtectedTimestampProvider
store, err := s.SystemServer.GetStores().(*kvserver.Stores).GetStore(s.SystemServer.GetFirstStoreID())
require.NoError(t, err)
ptsReader := store.GetStoreConfig().ProtectedTimestampReader
// Wait and return the next resolved timestamp after the wait time
waitAndDrainResolved := func(ts time.Duration) hlc.Timestamp {
targetTs := timeutil.Now().Add(ts)
for {
resolvedTs, _ := expectResolvedTimestamp(t, foo)
if resolvedTs.GoTime().UnixNano() > targetTs.UnixNano() {
return resolvedTs
}
}
}
mkGetProtections := func(t *testing.T, ptp protectedts.Provider,
srv serverutils.ApplicationLayerInterface, ptsReader spanconfig.ProtectedTSReader,
span roachpb.Span) func() []hlc.Timestamp {
return func() (r []hlc.Timestamp) {
require.NoError(t,
spanconfigptsreader.TestingRefreshPTSState(ctx, t, ptsReader, srv.Clock().Now()))
protections, _, err := ptsReader.GetProtectionTimestamps(ctx, span)
require.NoError(t, err)
return protections
}
}
mkWaitForProtectionCond := func(t *testing.T, getProtection func() []hlc.Timestamp,
check func(protection []hlc.Timestamp) error) func() {
return func() {
t.Helper()
testutils.SucceedsSoon(t, func() error { return check(getProtection()) })
}
}
// Setup helpers on the system.descriptors table.
descriptorTableKey := s.Codec.TablePrefix(keys.DescriptorTableID)
descriptorTableSpan := roachpb.Span{
Key: descriptorTableKey, EndKey: descriptorTableKey.PrefixEnd(),
}
getDescriptorTableProtection := mkGetProtections(t, ptp, s.Server, ptsReader,
descriptorTableSpan)
// Setup helpers on the user table.
tableKey := s.Codec.TablePrefix(uint32(fooDesc.GetID()))
tableSpan := roachpb.Span{
Key: tableKey, EndKey: tableKey.PrefixEnd(),
}
getTableProtection := mkGetProtections(t, ptp, s.Server, ptsReader, tableSpan)
waitForProtectionAdvanced := func(ts hlc.Timestamp, getProtection func() []hlc.Timestamp) {
check := func(protections []hlc.Timestamp) error {
if len(protections) == 0 {
return errors.New("expected protection but found none")
}
for _, p := range protections {
if p.LessEq(ts) {
return errors.Errorf("expected protected timestamp to exceed %v, found %v", ts, p)
}
}
return nil
}
mkWaitForProtectionCond(t, getProtection, check)()
}
// Observe the protected timestamp advancing along with resolved timestamps
for i := 0; i < 5; i++ {
// Progress the changefeed and allow time for a pts record to be laid down
nextResolved := waitAndDrainResolved(100 * time.Millisecond)
waitForProtectionAdvanced(nextResolved, getTableProtection)
waitForProtectionAdvanced(nextResolved, getDescriptorTableProtection)
}
}
cdcTestWithSystem(t, testFn, feedTestEnterpriseSinks)
}
// TestChangefeedProtectedTimestamps asserts the state of changefeed PTS records
// in various scenarios
// - There is a protection during the initial scan which is advanced once it
// completes
// - There is a protection during a schema change backfill which is advanced
// once it completes
// - When a changefeed is cancelled the protection is removed.
func TestChangefeedProtectedTimestamps(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
var (
ctx = context.Background()
userSpan = roachpb.Span{
Key: bootstrap.TestingUserTableDataMin(),
EndKey: keys.TableDataMax,
}
done = make(chan struct{})
blockRequestCh = make(chan chan chan struct{}, 1)
requestBlockedScan = func() (waitForBlockedScan func() (unblockScan func())) {
blockRequest := make(chan chan struct{})
blockRequestCh <- blockRequest // test sends to filter to request a block
return func() (unblockScan func()) {
toClose := <-blockRequest // filter sends back to test to report blocked
return func() {
close(toClose) // test closes to unblock filter
}
}
}
requestFilter = kvserverbase.ReplicaRequestFilter(func(
ctx context.Context, ba *kvpb.BatchRequest,
) *kvpb.Error {
if ba.Txn == nil || ba.Txn.Name != "changefeed backfill" {
return nil
}
scanReq, ok := ba.GetArg(kvpb.Scan)
if !ok {
return nil
}
if !userSpan.Contains(scanReq.Header().Span()) {
return nil
}
select {
case notifyCh := <-blockRequestCh:
waitUntilClosed := make(chan struct{})
notifyCh <- waitUntilClosed
select {
case <-waitUntilClosed:
case <-done:
case <-ctx.Done():
}
default:
}
return nil
})
mkGetProtections = func(t *testing.T, ptp protectedts.Provider,
srv serverutils.ApplicationLayerInterface, ptsReader spanconfig.ProtectedTSReader,
span roachpb.Span) func() []hlc.Timestamp {
return func() (r []hlc.Timestamp) {
require.NoError(t,
spanconfigptsreader.TestingRefreshPTSState(ctx, t, ptsReader, srv.Clock().Now()))
protections, _, err := ptsReader.GetProtectionTimestamps(ctx, span)
require.NoError(t, err)
return protections
}
}
checkProtection = func(protections []hlc.Timestamp) error {
if len(protections) == 0 {
return errors.New("expected protected timestamp to exist")
}
return nil
}
checkNoProtection = func(protections []hlc.Timestamp) error {
if len(protections) != 0 {
return errors.Errorf("expected protected timestamp to not exist, found %v", protections)
}
return nil
}
mkWaitForProtectionCond = func(t *testing.T, getProtection func() []hlc.Timestamp,
check func(protection []hlc.Timestamp) error) func() {
return func() {
t.Helper()
testutils.SucceedsSoon(t, func() error { return check(getProtection()) })
}
}
)
testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(s.DB)
sysDB := sqlutils.MakeSQLRunner(s.SystemServer.SQLConn(t, ""))
sysDB.Exec(t, `SET CLUSTER SETTING kv.protectedts.poll_interval = '10ms'`)
sysDB.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms'`)
sysDB.Exec(t, `ALTER TENANT ALL SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms'`)
sqlDB.Exec(t, `ALTER RANGE default CONFIGURE ZONE USING gc.ttlseconds = 100`)
sqlDB.Exec(t, `ALTER RANGE system CONFIGURE ZONE USING gc.ttlseconds = 100`)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a'), (2, 'b'), (4, 'c'), (7, 'd'), (8, 'e')`)
var tableID int
sqlDB.QueryRow(t, `SELECT table_id FROM crdb_internal.tables `+
`WHERE name = 'foo' AND database_name = current_database()`).
Scan(&tableID)
changefeedbase.ProtectTimestampInterval.Override(
context.Background(), &s.Server.ClusterSettings().SV, 100*time.Millisecond)
ptp := s.Server.DistSQLServer().(*distsql.ServerImpl).ServerConfig.ProtectedTimestampProvider
store, err := s.SystemServer.GetStores().(*kvserver.Stores).GetStore(s.SystemServer.GetFirstStoreID())
require.NoError(t, err)
ptsReader := store.GetStoreConfig().ProtectedTimestampReader
// Setup helpers on the system.descriptors table.
descriptorTableKey := s.Codec.TablePrefix(keys.DescriptorTableID)
descriptorTableSpan := roachpb.Span{
Key: descriptorTableKey, EndKey: descriptorTableKey.PrefixEnd(),
}
getDescriptorTableProtection := mkGetProtections(t, ptp, s.Server, ptsReader,
descriptorTableSpan)
waitForDescriptorTableProtection := mkWaitForProtectionCond(t, getDescriptorTableProtection,
checkProtection)
waitForNoDescriptorTableProtection := mkWaitForProtectionCond(t, getDescriptorTableProtection,
checkNoProtection)
// Setup helpers on the user table.
tableKey := s.Codec.TablePrefix(uint32(tableID))
tableSpan := roachpb.Span{
Key: tableKey, EndKey: tableKey.PrefixEnd(),
}
getTableProtection := mkGetProtections(t, ptp, s.Server, ptsReader, tableSpan)
waitForTableProtection := mkWaitForProtectionCond(t, getTableProtection, checkProtection)
waitForNoTableProtection := mkWaitForProtectionCond(t, getTableProtection, checkNoProtection)
waitForBlocked := requestBlockedScan()
waitForProtectionAdvanced := func(ts hlc.Timestamp, getProtection func() []hlc.Timestamp) {
check := func(protections []hlc.Timestamp) error {
if len(protections) != 0 {
for _, p := range protections {
if p.LessEq(ts) {
return errors.Errorf("expected protected timestamp to exceed %v, found %v", ts, p)
}
}
}
return nil
}
mkWaitForProtectionCond(t, getProtection, check)()
}
foo := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved`)
defer closeFeed(t, foo)
{
// Ensure that there's a protected timestamp on startup that goes
// away after the initial scan.
unblock := waitForBlocked()
waitForTableProtection()
unblock()
assertPayloads(t, foo, []string{
`foo: [1]->{"after": {"a": 1, "b": "a"}}`,
`foo: [2]->{"after": {"a": 2, "b": "b"}}`,
`foo: [4]->{"after": {"a": 4, "b": "c"}}`,
`foo: [7]->{"after": {"a": 7, "b": "d"}}`,
`foo: [8]->{"after": {"a": 8, "b": "e"}}`,
})
resolved, _ := expectResolvedTimestamp(t, foo)
waitForProtectionAdvanced(resolved, getTableProtection)
}
{
// Ensure that a protected timestamp is created for a backfill due
// to a schema change and removed after.
waitForBlocked = requestBlockedScan()
sqlDB.Exec(t, `ALTER TABLE foo ADD COLUMN c INT NOT NULL DEFAULT 1`)
unblock := waitForBlocked()
waitForTableProtection()
waitForDescriptorTableProtection()
unblock()
assertPayloads(t, foo, []string{
`foo: [1]->{"after": {"a": 1, "b": "a", "c": 1}}`,
`foo: [2]->{"after": {"a": 2, "b": "b", "c": 1}}`,
`foo: [4]->{"after": {"a": 4, "b": "c", "c": 1}}`,
`foo: [7]->{"after": {"a": 7, "b": "d", "c": 1}}`,
`foo: [8]->{"after": {"a": 8, "b": "e", "c": 1}}`,
})
resolved, _ := expectResolvedTimestamp(t, foo)
waitForProtectionAdvanced(resolved, getTableProtection)
waitForProtectionAdvanced(resolved, getDescriptorTableProtection)
}
{
// Ensure that the protected timestamp is removed when the job is
// canceled.
waitForBlocked = requestBlockedScan()
sqlDB.Exec(t, `ALTER TABLE foo ADD COLUMN d INT NOT NULL DEFAULT 2`)
_ = waitForBlocked()
waitForTableProtection()
waitForDescriptorTableProtection()
sqlDB.Exec(t, `CANCEL JOB $1`, foo.(cdctest.EnterpriseTestFeed).JobID())
waitForNoTableProtection()
waitForNoDescriptorTableProtection()
}
}
cdcTestWithSystem(t, testFn, feedTestNoTenants, feedTestEnterpriseSinks, withArgsFn(func(args *base.TestServerArgs) {
storeKnobs := &kvserver.StoreTestingKnobs{}
storeKnobs.TestingRequestFilter = requestFilter
args.Knobs.Store = storeKnobs
}))
}
// TestChangefeedAlterPTS is a regression test for (#103855).
// It verifies that we do not lose track of existing PTS records nor create
// extraneous PTS records when altering a changefeed by adding a table.
func TestChangefeedAlterPTS(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(s.DB)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `CREATE TABLE foo2 (a INT PRIMARY KEY, b STRING)`)
f2 := feed(t, f, `CREATE CHANGEFEED FOR table foo with protect_data_from_gc_on_pause,
resolved='1s', min_checkpoint_frequency='1s'`)
defer closeFeed(t, f2)
getNumPTSRecords := func() int {
rows := sqlDB.Query(t, "SELECT * FROM system.protected_ts_records")
r, err := sqlutils.RowsToStrMatrix(rows)
if err != nil {
t.Fatalf("%v", err)
}
return len(r)
}
jobFeed := f2.(cdctest.EnterpriseTestFeed)
_, _ = expectResolvedTimestamp(t, f2)
require.Equal(t, 1, getNumPTSRecords())
require.NoError(t, jobFeed.Pause())
sqlDB.Exec(t, fmt.Sprintf("ALTER CHANGEFEED %d ADD TABLE foo2 with initial_scan='yes'", jobFeed.JobID()))
require.NoError(t, jobFeed.Resume())
_, _ = expectResolvedTimestamp(t, f2)
require.Equal(t, 1, getNumPTSRecords())
}
cdcTest(t, testFn, feedTestEnterpriseSinks)
}
// TestChangefeedCanceledWhenPTSIsOld is a test for the setting
// `kv.closed_timestamp.target_duration` which ensures that a paused changefeed
// job holding a PTS record gets canceled if paused for too long.
func TestChangefeedCanceledWhenPTSIsOld(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(s.DB)
sysDB := sqlutils.MakeSQLRunner(s.SystemServer.SQLConn(t, ""))
sysDB.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms'`)
sysDB.Exec(t, `ALTER TENANT ALL SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms'`)
sqlDB.Exec(t, `SET CLUSTER SETTING jobs.metrics.interval.poll = '100ms'`) // speed up metrics poller
// Create the data table; it will only contain a
// single row with multiple versions.
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b INT)`)
feed, err := f.Feed("CREATE CHANGEFEED FOR TABLE foo WITH protect_data_from_gc_on_pause, gc_protect_expires_after='24h'")
require.NoError(t, err)
defer func() {
closeFeed(t, feed)
}()
jobFeed := feed.(cdctest.EnterpriseTestFeed)
require.NoError(t, jobFeed.Pause())
// While the job is paused, take opportunity to test that alter changefeed
// works when setting gc_protect_expires_after option.
// Verify we can set it to 0 -- i.e. disable.
sqlDB.Exec(t, fmt.Sprintf("ALTER CHANGEFEED %d SET gc_protect_expires_after = '0s'", jobFeed.JobID()))
// Now, set it to something very small.
sqlDB.Exec(t, fmt.Sprintf("ALTER CHANGEFEED %d SET gc_protect_expires_after = '250ms'", jobFeed.JobID()))
// Stale PTS record should trigger job cancellation.
require.NoError(t, jobFeed.WaitForStatus(func(s jobs.Status) bool {
return s == jobs.StatusCanceled
}))
}
cdcTestWithSystem(t, testFn, feedTestEnterpriseSinks)
}
// TestPTSRecordProtectsTargetsAndDescriptorTable tests that descriptors are not
// GC'd when they are protected by a PTS record.
func TestPTSRecordProtectsTargetsAndDescriptorTable(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
s, db, stopServer := startTestFullServer(t, feedTestOptions{})
defer stopServer()
execCfg := s.ExecutorConfig().(sql.ExecutorConfig)
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, "CREATE TABLE foo (a INT, b STRING)")
ts := s.Clock().Now()
ctx := context.Background()
fooDescr := cdctest.GetHydratedTableDescriptor(t, s.ExecutorConfig(), "d", "foo")
var targets changefeedbase.Targets
targets.Add(changefeedbase.Target{
TableID: fooDescr.GetID(),
})
// Lay protected timestamp record.
ptr := createProtectedTimestampRecord(ctx, s.Codec(), 42, targets, ts)
require.NoError(t, execCfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error {
return execCfg.ProtectedTimestampProvider.WithTxn(txn).Protect(ctx, ptr)
}))
// Alter foo few times, then force GC at ts-1.
sqlDB.Exec(t, "ALTER TABLE foo ADD COLUMN c STRING")
sqlDB.Exec(t, "ALTER TABLE foo ADD COLUMN d STRING")
require.NoError(t, s.ForceTableGC(ctx, "system", "descriptor", ts.Add(-1, 0)))
// We can still fetch table descriptors because of protected timestamp record.
asOf := ts
_, err := fetchTableDescriptors(ctx, &execCfg, targets, asOf)
require.NoError(t, err)
}
| pkg/ccl/changefeedccl/protected_timestamps_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.0006125093786977232,
0.00018933875253424048,
0.000163345190230757,
0.00017003875109367073,
0.00007353787077590823
] |
{
"id": 4,
"code_window": [
"\ttc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{\n",
"\t\tServerArgs: base.TestServerArgs{\n",
"\t\t\tStoreSpecs: []base.StoreSpec{{InMemory: true}},\n",
"\t\t\tInsecure: true,\n",
"\t\t},\n",
"\t})\n",
"\ttc.Start(t)\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t// This logic is specific to the storage layer.\n",
"\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n"
],
"file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go",
"type": "add",
"edit_start_line_idx": 114
} | echo
----
db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
txn.SetIsoLevel(isolation.Serializable)
txn.Put(ctx, tk(1), sv(1)) // <nil>
txn.Del(ctx, tk(2) /* @s2 */) // <nil>
return nil
}) // result is ambiguous: boom
/Table/100/"0000000000000001"/0.000000001,0 @ s1 v1
/Table/100/"0000000000000002"/0.000000002,0 @ s2 <nil>
ambiguous serializable txn non-atomic timestamps: [w]/Table/100/"0000000000000001":0.000000001,0->v1@s1 [d]/Table/100/"0000000000000002":0.000000002,0-><nil>@s2
| pkg/kv/kvnemesis/testdata/TestValidate/ambiguous_put-del_transaction_committed_but_has_validation_error | 0 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.00016682242858223617,
0.0001657661923673004,
0.00016470997070427984,
0.0001657661923673004,
0.0000010562289389781654
] |
{
"id": 5,
"code_window": [
"\t// would not be able to progress, but we will apply recovery procedure and\n",
"\t// mark on replicas on node 1 as designated survivors. After that, starting\n",
"\t// single node should succeed.\n",
"\ttcBefore := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{\n",
"\t\tServerArgsPerNode: map[int]base.TestServerArgs{\n",
"\t\t\t0: {StoreSpecs: []base.StoreSpec{{Path: dir + \"/store-1\"}}},\n",
"\t\t},\n",
"\t})\n",
"\ttcBefore.Start(t)\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tServerArgs: base.TestServerArgs{\n",
"\t\t\t// This logic is specific to the storage layer.\n",
"\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n",
"\t\t},\n"
],
"file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go",
"type": "add",
"edit_start_line_idx": 182
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package cli
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery/loqrecoverypb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/server/serverpb"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/listenerutil"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
)
// TestCollectInfoFromMultipleStores performs basic sanity checks on replica info collection.
// This is done by running a three-node cluster with disk-backed storage,
// stopping it, and verifying the content of the collected replica info file.
// This check verifies that:
//
// we successfully iterate requested stores,
// data is written in expected location,
// data contains info only about stores requested.
func TestCollectInfoFromMultipleStores(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}},
1: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-2"}}},
2: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-3"}}},
},
})
tc.Start(t)
defer tc.Stopper().Stop(ctx)
	// Wait for up-replication.
require.NoError(t, tc.WaitForFullReplication())
// Shutdown.
tc.Stopper().Stop(ctx)
replicaInfoFileName := dir + "/node-1.json"
c.RunWithArgs([]string{"debug", "recover", "collect-info", "--store=" + dir + "/store-1",
"--store=" + dir + "/store-2", replicaInfoFileName})
replicas, err := readReplicaInfoData([]string{replicaInfoFileName})
require.NoError(t, err, "failed to read generated replica info")
stores := map[roachpb.StoreID]interface{}{}
for _, r := range replicas.LocalInfo[0].Replicas {
stores[r.StoreID] = struct{}{}
}
require.Equal(t, 2, len(stores), "collected replicas from stores")
require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), replicas.Version,
"collected version info from stores")
}
// TestCollectInfoFromOnlineCluster verifies that given a test cluster with
// one stopped node, we can collect replica info and metadata from remaining
// nodes using an admin recovery call.
func TestCollectInfoFromOnlineCluster(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
StoreSpecs: []base.StoreSpec{{InMemory: true}},
Insecure: true,
},
})
tc.Start(t)
defer tc.Stopper().Stop(ctx)
require.NoError(t, tc.WaitForFullReplication())
tc.ToggleReplicateQueues(false)
r := tc.ServerConn(0).QueryRow("select count(*) from crdb_internal.ranges_no_leases")
var totalRanges int
require.NoError(t, r.Scan(&totalRanges), "failed to query range count")
tc.StopServer(0)
replicaInfoFileName := dir + "/all-nodes.json"
c.RunWithArgs([]string{
"debug",
"recover",
"collect-info",
"--insecure",
"--host",
tc.Server(2).AdvRPCAddr(),
replicaInfoFileName,
})
replicas, err := readReplicaInfoData([]string{replicaInfoFileName})
require.NoError(t, err, "failed to read generated replica info")
stores := map[roachpb.StoreID]interface{}{}
totalReplicas := 0
for _, li := range replicas.LocalInfo {
for _, r := range li.Replicas {
stores[r.StoreID] = struct{}{}
}
totalReplicas += len(li.Replicas)
}
require.Equal(t, 2, len(stores), "collected replicas from stores")
require.Equal(t, 2, len(replicas.LocalInfo), "collected info is not split by node")
require.Equal(t, totalRanges*2, totalReplicas, "number of collected replicas")
require.Equal(t, totalRanges, len(replicas.Descriptors),
"number of collected descriptors from metadata")
require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), replicas.Version,
"collected version info from stores")
}
// TestLossOfQuorumRecovery performs a sanity check on the end-to-end recovery
// workflow. This test doesn't try to validate all possible test cases, but
// instead checks that artifacts are correctly produced and that overall
// cluster recovery can be performed where it would otherwise be completely
// broken.
func TestLossOfQuorumRecovery(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderDeadlock(t, "slow under deadlock")
ctx := context.Background()
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
// Test cluster contains 3 nodes that we would turn into a single node
// cluster using loss of quorum recovery. After it is stopped, single node
// would not be able to progress, but we will apply recovery procedure and
// mark on replicas on node 1 as designated survivors. After that, starting
// single node should succeed.
tcBefore := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}},
},
})
tcBefore.Start(t)
s := sqlutils.MakeSQLRunner(tcBefore.Conns[0])
s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'")
defer tcBefore.Stopper().Stop(ctx)
	// We use the scratch range to test the special case of a pending update on
	// the range descriptor, which has to be cleaned up before recovery can
	// proceed. For that we'll ensure it is not empty and then put an intent.
	// After recovery, we'll check that the range is still accessible for
	// writes as normal.
sk := tcBefore.ScratchRange(t)
require.NoError(t,
tcBefore.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value"),
"failed to write value to scratch range")
createIntentOnRangeDescriptor(ctx, t, tcBefore, sk)
node1ID := tcBefore.Servers[0].NodeID()
// Now that stores are prepared and replicated we can shut down cluster
// and perform store manipulations.
tcBefore.Stopper().Stop(ctx)
server1StoreDir := dir + "/store-1"
replicaInfoFileName := dir + "/node-1.json"
c.RunWithArgs(
[]string{"debug", "recover", "collect-info", "--store=" + server1StoreDir,
replicaInfoFileName})
// Generate recovery plan and try to verify that plan file was generated and contains
// meaningful data. This is not strictly necessary for verifying end-to-end flow, but
// having assertions on generated data helps to identify which stage of pipeline broke
// if test fails.
planFile := dir + "/recovery-plan.json"
out, err := c.RunWithCaptureArgs(
[]string{"debug", "recover", "make-plan", "--confirm=y", "--plan=" + planFile,
replicaInfoFileName})
require.NoError(t, err, "failed to run make-plan")
require.Contains(t, out, fmt.Sprintf("- node n%d", node1ID),
"planner didn't provide correct apply instructions")
require.FileExists(t, planFile, "generated plan file")
planFileContent, err := os.ReadFile(planFile)
require.NoError(t, err, "test infra failed, can't open created plan file")
plan := loqrecoverypb.ReplicaUpdatePlan{}
jsonpb := protoutil.JSONPb{}
require.NoError(t, jsonpb.Unmarshal(planFileContent, &plan),
"failed to deserialize replica recovery plan")
require.NotEmpty(t, plan.Updates, "resulting plan contains no updates")
out, err = c.RunWithCaptureArgs(
[]string{"debug", "recover", "apply-plan", "--confirm=y", "--store=" + server1StoreDir,
planFile})
require.NoError(t, err, "failed to run apply plan")
	// Check that there was at least one mention of a replica being promoted.
require.Contains(t, out, "will be updated", "no replica updates were recorded")
require.Contains(t, out, fmt.Sprintf("Updated store(s): s%d", node1ID),
"apply plan was not executed on requested node")
tcAfter := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ReplicationMode: base.ReplicationManual,
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}},
},
})
	// NB: If recovery is not performed, the new cluster will just hang on
	// startup. This is caused by the liveness range becoming unavailable and
	// preventing any progress. So it is likely that the test will time out if
	// the basic workflow fails.
tcAfter.Start(t)
defer tcAfter.Stopper().Stop(ctx)
	// In the new cluster, we will still have nodes 2 and 3 remaining from the
	// first attempt. That would increase the number of replicas on system
	// ranges to 5 and we would not be able to upreplicate properly. So we need
	// to decommission the old nodes first before proceeding.
adminClient := tcAfter.Server(0).GetAdminClient(t)
require.NoError(t, runDecommissionNodeImpl(
ctx, adminClient, nodeDecommissionWaitNone, nodeDecommissionChecksSkip, false,
[]roachpb.NodeID{roachpb.NodeID(2), roachpb.NodeID(3)}, tcAfter.Server(0).NodeID()),
"Failed to decommission removed nodes")
for i := 0; i < len(tcAfter.Servers); i++ {
require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error {
store.SetReplicateQueueActive(true)
return nil
}), "Failed to activate replication queue")
}
require.NoError(t, tcAfter.WaitForZoneConfigPropagation(),
"Failed to ensure zone configs are propagated")
require.NoError(t, tcAfter.WaitForFullReplication(), "Failed to perform full replication")
for i := 0; i < len(tcAfter.Servers); i++ {
require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error {
return store.ForceConsistencyQueueProcess()
}), "Failed to force replicas to consistency queue")
}
// As a validation step we will just pick one range and get its replicas to see
// if they were up-replicated to the new nodes.
s = sqlutils.MakeSQLRunner(tcAfter.Conns[0])
r := s.QueryRow(t, "select replicas from crdb_internal.ranges limit 1")
var replicas string
r.Scan(&replicas)
require.Equal(t, "{1,4,5}", replicas, "Replicas after loss of quorum recovery")
// Validate that rangelog is updated by recovery records after cluster restarts.
testutils.SucceedsSoon(t, func() error {
r := s.QueryRow(t,
`select count(*) from system.rangelog where "eventType" = 'unsafe_quorum_recovery'`)
var recoveries int
r.Scan(&recoveries)
if recoveries != len(plan.Updates) {
return errors.Errorf("found %d recovery events while expecting %d", recoveries,
len(plan.Updates))
}
return nil
})
	// We were using the scratch range to test cleanup of a pending transaction
	// on the range descriptor key. We want to verify that the range is still
	// writable after recovery, i.e. that the recovery succeeded.
require.NoError(t,
tcAfter.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value2"),
"failed to write value to scratch range after recovery")
}
// TestStageVersionCheck verifies that we can force plan with different internal
// version onto cluster. To do this, we create a plan with internal version
// above current but matching major and minor. Then we check that staging fails
// and that force flag will update plan version to match local node.
func TestStageVersionCheck(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderDeadlock(t, "slow under deadlock")
ctx := context.Background()
_, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
listenerReg := listenerutil.NewListenerRegistry()
defer listenerReg.Close()
storeReg := server.NewStickyVFSRegistry()
tc := testcluster.NewTestCluster(t, 4, base.TestClusterArgs{
ReplicationMode: base.ReplicationManual,
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
StickyVFSRegistry: storeReg,
},
},
StoreSpecs: []base.StoreSpec{
{InMemory: true, StickyVFSID: "1"},
},
},
},
ReusableListenerReg: listenerReg,
})
tc.Start(t)
defer tc.Stopper().Stop(ctx)
tc.StopServer(3)
adminClient := tc.Server(0).GetAdminClient(t)
v := clusterversion.ByKey(clusterversion.BinaryVersionKey)
v.Internal++
// To avoid crafting real replicas we use StaleLeaseholderNodeIDs to force
// node to stage plan for verification.
p := loqrecoverypb.ReplicaUpdatePlan{
PlanID: uuid.FastMakeV4(),
Version: v,
ClusterID: tc.Server(0).StorageClusterID().String(),
DecommissionedNodeIDs: []roachpb.NodeID{4},
StaleLeaseholderNodeIDs: []roachpb.NodeID{1},
}
// Attempts to stage plan with different internal version must fail.
_, err := adminClient.RecoveryStagePlan(ctx, &serverpb.RecoveryStagePlanRequest{
Plan: &p,
AllNodes: true,
ForcePlan: false,
ForceLocalInternalVersion: false,
})
require.ErrorContains(t, err, "doesn't match cluster active version")
// Enable "stuck upgrade bypass" to stage plan on the cluster.
_, err = adminClient.RecoveryStagePlan(ctx, &serverpb.RecoveryStagePlanRequest{
Plan: &p,
AllNodes: true,
ForcePlan: false,
ForceLocalInternalVersion: true,
})
require.NoError(t, err, "force local must fix incorrect version")
// Check that stored plan has version matching cluster version.
ps := loqrecovery.NewPlanStore("", storeReg.Get("1"))
p, ok, err := ps.LoadPlan()
require.NoError(t, err, "failed to read node 0 plan")
require.True(t, ok, "plan was not staged")
require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), p.Version,
"plan version was not updated")
}
func createIntentOnRangeDescriptor(
ctx context.Context, t *testing.T, tcBefore *testcluster.TestCluster, sk roachpb.Key,
) {
txn := kv.NewTxn(ctx, tcBefore.Servers[0].DB(), 1)
var desc roachpb.RangeDescriptor
// Pick one of the predefined split points.
rdKey := keys.RangeDescriptorKey(roachpb.RKey(sk))
if err := txn.GetProto(ctx, rdKey, &desc); err != nil {
t.Fatal(err)
}
desc.NextReplicaID++
if err := txn.Put(ctx, rdKey, &desc); err != nil {
t.Fatal(err)
}
// At this point the intent has been written to Pebble but this
// write was not synced (only the raft log append was synced). We
// need to force another sync, but we're far from the storage
// layer here so the easiest thing to do is simply perform a
// second write. This will force the first write to be persisted
// to disk (the second write may or may not make it to disk due to
// timing).
desc.NextReplicaID++
if err := txn.Put(ctx, rdKey, &desc); err != nil {
t.Fatal(err)
}
}
func TestHalfOnlineLossOfQuorumRecovery(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderDeadlock(t, "slow under deadlock")
ctx := context.Background()
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
c := NewCLITest(TestCLIParams{
NoServer: true,
})
defer c.Cleanup()
listenerReg := listenerutil.NewListenerRegistry()
defer listenerReg.Close()
// Test cluster contains 3 nodes that we would turn into a single node
// cluster using loss of quorum recovery. To do that, we will terminate
// two nodes and run recovery on remaining one. Restarting node should
// bring it back to healthy (but underreplicated) state.
// Note that we inject reusable listeners into all nodes to prevent tests
// running in parallel from taking over ports of stopped nodes and responding
// to gateway node with errors.
// TODO(oleg): Make test run with 7 nodes to exercise cases where multiple
// replicas survive. Current startup and allocator behaviour would make
// this test flaky.
sa := make(map[int]base.TestServerArgs)
for i := 0; i < 3; i++ {
sa[i] = base.TestServerArgs{
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
StickyVFSRegistry: server.NewStickyVFSRegistry(),
},
},
StoreSpecs: []base.StoreSpec{
{
InMemory: true,
},
},
}
}
tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,
},
ReusableListenerReg: listenerReg,
ServerArgsPerNode: sa,
})
tc.Start(t)
s := sqlutils.MakeSQLRunner(tc.Conns[0])
s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'")
defer tc.Stopper().Stop(ctx)
	// We use the scratch range to test the special case of a pending update on
	// the range descriptor, which has to be cleaned up before recovery can
	// proceed. For that we'll ensure it is not empty and then put an intent.
	// After recovery, we'll check that the range is still accessible for
	// writes as normal.
sk := tc.ScratchRange(t)
require.NoError(t,
tc.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value"),
"failed to write value to scratch range")
createIntentOnRangeDescriptor(ctx, t, tc, sk)
node1ID := tc.Servers[0].NodeID()
// Now that stores are prepared and replicated we can shut down cluster
// and perform store manipulations.
tc.StopServer(1)
tc.StopServer(2)
// Generate recovery plan and try to verify that plan file was generated and contains
// meaningful data. This is not strictly necessary for verifying end-to-end flow, but
// having assertions on generated data helps to identify which stage of pipeline broke
// if test fails.
planFile := dir + "/recovery-plan.json"
out, err := c.RunWithCaptureArgs(
[]string{
"debug",
"recover",
"make-plan",
"--confirm=y",
"--certs-dir=test_certs",
"--host=" + tc.Server(0).AdvRPCAddr(),
"--plan=" + planFile,
})
require.NoError(t, err, "failed to run make-plan")
require.Contains(t, out, fmt.Sprintf("- node n%d", node1ID),
"planner didn't provide correct apply instructions")
require.FileExists(t, planFile, "generated plan file")
planFileContent, err := os.ReadFile(planFile)
require.NoError(t, err, "test infra failed, can't open created plan file")
plan := loqrecoverypb.ReplicaUpdatePlan{}
jsonpb := protoutil.JSONPb{}
require.NoError(t, jsonpb.Unmarshal(planFileContent, &plan),
"failed to deserialize replica recovery plan")
require.NotEmpty(t, plan.Updates, "resulting plan contains no updates")
out, err = c.RunWithCaptureArgs(
[]string{
"debug", "recover", "apply-plan",
"--certs-dir=test_certs",
"--host=" + tc.Server(0).AdvRPCAddr(),
"--confirm=y", planFile,
})
require.NoError(t, err, "failed to run apply plan")
	// Check that there was at least one mention of a replica being promoted.
require.Contains(t, out, "updating replica", "no replica updates were recorded")
require.Contains(t, out,
fmt.Sprintf("Plan staged. To complete recovery restart nodes n%d.", node1ID),
"apply plan failed to stage on expected nodes")
// Verify plan is staged on nodes
out, err = c.RunWithCaptureArgs(
[]string{
"debug", "recover", "verify",
"--certs-dir=test_certs",
"--host=" + tc.Server(0).AdvRPCAddr(),
planFile,
})
require.NoError(t, err, "failed to run verify plan")
require.Contains(t, out, "ERROR: loss of quorum recovery is not finished yet")
tc.StopServer(0)
	// NB: If recovery is not performed, the server will just hang on startup.
	// This is caused by the liveness range becoming unavailable and preventing
	// any progress. So it is likely that the test will time out if the basic
	// workflow fails.
require.NoError(t, tc.RestartServer(0), "restart failed")
s = sqlutils.MakeSQLRunner(tc.Conns[0])
	// Verify that the post-start cleanup performed node decommissioning, which
	// prevents the old nodes from rejoining.
ac := tc.GetAdminClient(t, 0)
testutils.SucceedsSoon(t, func() error {
dr, err := ac.DecommissionStatus(ctx,
&serverpb.DecommissionStatusRequest{NodeIDs: []roachpb.NodeID{2, 3}})
if err != nil {
return err
}
for _, s := range dr.Status {
if s.Membership != livenesspb.MembershipStatus_DECOMMISSIONED {
return errors.Newf("expecting n%d to be decommissioned", s.NodeID)
}
}
return nil
})
// Validate that rangelog is updated by recovery records after cluster restarts.
testutils.SucceedsSoon(t, func() error {
r := s.QueryRow(t,
`select count(*) from system.rangelog where "eventType" = 'unsafe_quorum_recovery'`)
var recoveries int
r.Scan(&recoveries)
if recoveries != len(plan.Updates) {
return errors.Errorf("found %d recovery events while expecting %d", recoveries,
len(plan.Updates))
}
return nil
})
// Verify recovery complete.
out, err = c.RunWithCaptureArgs(
[]string{
"debug", "recover", "verify",
"--certs-dir=test_certs",
"--host=" + tc.Server(0).AdvRPCAddr(),
planFile,
})
require.NoError(t, err, "failed to run verify plan")
require.Contains(t, out, "Loss of quorum recovery is complete.")
	// We were using the scratch range to test cleanup of a pending transaction
	// on the range descriptor key. We want to verify that the range is still
	// writable after recovery, i.e. that the recovery succeeded.
require.NoError(t,
tc.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value2"),
"failed to write value to scratch range after recovery")
// Finally split scratch range to ensure metadata ranges are recovered.
_, _, err = tc.Server(0).SplitRange(testutils.MakeKey(sk, []byte{42}))
require.NoError(t, err, "failed to split range after recovery")
}
func TestUpdatePlanVsClusterDiff(t *testing.T) {
defer leaktest.AfterTest(t)()
var empty uuid.UUID
planID, _ := uuid.FromString("123e4567-e89b-12d3-a456-426614174000")
otherPlanID, _ := uuid.FromString("123e4567-e89b-12d3-a456-426614174001")
applyTime, _ := time.Parse(time.RFC3339, "2023-01-24T10:30:00Z")
status := func(id roachpb.NodeID, pending, applied uuid.UUID, err string) loqrecoverypb.NodeRecoveryStatus {
s := loqrecoverypb.NodeRecoveryStatus{
NodeID: id,
}
if !pending.Equal(empty) {
s.PendingPlanID = &pending
}
if !applied.Equal(empty) {
s.AppliedPlanID = &applied
s.ApplyTimestamp = &applyTime
}
s.Error = err
return s
}
for _, d := range []struct {
name string
updatedNodes []int
staleLeases []int
status []loqrecoverypb.NodeRecoveryStatus
pending int
errors int
report []string
}{
{
name: "after staging",
updatedNodes: []int{1, 2},
staleLeases: []int{3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, planID, empty, ""),
status(3, planID, empty, ""),
},
pending: 3,
report: []string{
" plan application pending on node n1",
" plan application pending on node n2",
" plan application pending on node n3",
},
},
{
name: "partially applied",
updatedNodes: []int{1, 2, 3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, empty, planID, ""),
status(3, planID, empty, ""),
},
pending: 2,
report: []string{
" plan application pending on node n1",
" plan applied successfully on node n2",
" plan application pending on node n3",
},
},
{
name: "fully applied",
updatedNodes: []int{1, 2, 3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, empty, planID, ""),
status(2, empty, planID, ""),
status(3, empty, planID, ""),
},
report: []string{
" plan applied successfully on node n1",
" plan applied successfully on node n2",
" plan applied successfully on node n3",
},
},
{
name: "staging lost no node",
updatedNodes: []int{1, 2, 3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(3, planID, empty, ""),
},
pending: 2,
errors: 1,
report: []string{
" plan application pending on node n1",
" plan application pending on node n3",
" failed to find node n2 where plan must be staged",
},
},
{
name: "staging lost no plan",
updatedNodes: []int{1, 2},
staleLeases: []int{3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, planID, empty, ""),
status(3, empty, empty, ""),
},
pending: 2,
errors: 1,
report: []string{
" plan application pending on node n1",
" plan application pending on node n2",
" failed to find staged plan on node n3",
},
},
{
name: "partial failure",
updatedNodes: []int{1, 2, 3},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, empty, planID, "found stale replica"),
status(3, planID, empty, ""),
},
pending: 2,
errors: 1,
report: []string{
" plan application pending on node n1",
" plan application failed on node n2: found stale replica",
" plan application pending on node n3",
},
},
{
name: "no plan",
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, empty, planID, "found stale replica"),
status(3, empty, otherPlanID, ""),
},
report: []string{
" node n1 staged plan: 123e4567-e89b-12d3-a456-426614174000",
" node n2 failed to apply plan 123e4567-e89b-12d3-a456-426614174000: found stale replica",
" node n3 applied plan: 123e4567-e89b-12d3-a456-426614174001 at 2023-01-24 10:30:00 +0000 UTC",
},
},
{
name: "wrong plan",
updatedNodes: []int{1, 2},
status: []loqrecoverypb.NodeRecoveryStatus{
status(1, planID, empty, ""),
status(2, otherPlanID, empty, ""),
status(3, otherPlanID, empty, ""),
},
pending: 1,
errors: 2,
report: []string{
" plan application pending on node n1",
" unexpected staged plan 123e4567-e89b-12d3-a456-426614174001 on node n2",
" unexpected staged plan 123e4567-e89b-12d3-a456-426614174001 on node n3",
},
},
} {
t.Run(d.name, func(t *testing.T) {
plan := loqrecoverypb.ReplicaUpdatePlan{
PlanID: planID,
}
// Plan will contain single replica update for each requested node.
rangeSeq := 1
for _, id := range d.updatedNodes {
plan.Updates = append(plan.Updates, loqrecoverypb.ReplicaUpdate{
RangeID: roachpb.RangeID(rangeSeq),
StartKey: nil,
OldReplicaID: roachpb.ReplicaID(1),
NewReplica: roachpb.ReplicaDescriptor{
NodeID: roachpb.NodeID(id),
StoreID: roachpb.StoreID(id),
ReplicaID: roachpb.ReplicaID(rangeSeq + 17),
},
NextReplicaID: roachpb.ReplicaID(rangeSeq + 18),
})
}
for _, id := range d.staleLeases {
plan.StaleLeaseholderNodeIDs = append(plan.StaleLeaseholderNodeIDs, roachpb.NodeID(id))
}
diff := diffPlanWithNodeStatus(plan, d.status)
require.Equal(t, d.pending, diff.pending, "number of pending changes")
require.Equal(t, d.errors, diff.errors, "number of node errors")
if d.report != nil {
require.Equal(t, len(d.report), len(diff.report), "number of lines in diff")
for i := range d.report {
require.Equal(t, d.report[i], diff.report[i], "wrong line %d of report", i)
}
}
})
}
}
func TestTruncateKeyOutput(t *testing.T) {
defer leaktest.AfterTest(t)()
for _, d := range []struct {
len uint
result string
}{
{
len: 13,
result: "/System/No...",
},
{
len: 30,
result: "/System/NodeLiveness",
},
{
len: 3,
result: "/Sy",
},
{
len: 4,
result: "/...",
},
} {
t.Run("", func(t *testing.T) {
helper := outputFormatHelper{
maxPrintedKeyLength: d.len,
}
require.Equal(t, d.result, helper.formatKey(keys.NodeLivenessPrefix))
})
}
}
func TestTruncateSpanOutput(t *testing.T) {
defer leaktest.AfterTest(t)()
for _, d := range []struct {
len uint
result string
}{
{
len: 30,
result: "/System/{NodeLiveness-Syste...",
},
{
len: 90,
result: "/System/{NodeLiveness-SystemSpanConfigKeys}",
},
{
len: 3,
result: "/Sy",
},
{
len: 4,
result: "/...",
},
} {
t.Run("", func(t *testing.T) {
helper := outputFormatHelper{
maxPrintedKeyLength: d.len,
}
require.Equal(t, d.result, helper.formatSpan(roachpb.Span{
Key: keys.NodeLivenessPrefix,
EndKey: keys.SystemSpanConfigPrefix,
}))
})
}
}
| pkg/cli/debug_recover_loss_of_quorum_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.9983720183372498,
0.05529750511050224,
0.0001618399692233652,
0.00017306723748333752,
0.22158542275428772
] |
{
"id": 5,
"code_window": [
"\t// would not be able to progress, but we will apply recovery procedure and\n",
"\t// mark on replicas on node 1 as designated survivors. After that, starting\n",
"\t// single node should succeed.\n",
"\ttcBefore := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{\n",
"\t\tServerArgsPerNode: map[int]base.TestServerArgs{\n",
"\t\t\t0: {StoreSpecs: []base.StoreSpec{{Path: dir + \"/store-1\"}}},\n",
"\t\t},\n",
"\t})\n",
"\ttcBefore.Start(t)\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tServerArgs: base.TestServerArgs{\n",
"\t\t\t// This logic is specific to the storage layer.\n",
"\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n",
"\t\t},\n"
],
"file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go",
"type": "add",
"edit_start_line_idx": 182
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import { cockroach } from "src/js/protos";
import { resetSQLStats } from "src/util/api";
import { all, call, put, takeEvery } from "redux-saga/effects";
import { RESET_SQL_STATS, resetSQLStatsFailedAction } from "./sqlStatsActions";
import {
invalidateAllStatementDetails,
invalidateStatements,
} from "src/redux/apiReducers";
import ResetSQLStatsRequest = cockroach.server.serverpb.ResetSQLStatsRequest;
export function* resetSQLStatsSaga() {
const resetSQLStatsRequest = new ResetSQLStatsRequest({
// reset_persisted_stats is set to true in order to clear both
// in-memory stats as well as persisted stats.
reset_persisted_stats: true,
});
try {
yield call(resetSQLStats, resetSQLStatsRequest);
yield all([
put(invalidateStatements()),
put(invalidateAllStatementDetails()),
]);
} catch (e) {
yield put(resetSQLStatsFailedAction());
}
}
export function* sqlStatsSaga() {
yield all([takeEvery(RESET_SQL_STATS, resetSQLStatsSaga)]);
}
| pkg/ui/workspaces/db-console/src/redux/sqlStats/sqlStatsSagas.ts | 0 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.0001796495635062456,
0.00017592762014828622,
0.00017372591537423432,
0.0001750915835145861,
0.000002014743586187251
] |
{
"id": 5,
"code_window": [
"\t// would not be able to progress, but we will apply recovery procedure and\n",
"\t// mark on replicas on node 1 as designated survivors. After that, starting\n",
"\t// single node should succeed.\n",
"\ttcBefore := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{\n",
"\t\tServerArgsPerNode: map[int]base.TestServerArgs{\n",
"\t\t\t0: {StoreSpecs: []base.StoreSpec{{Path: dir + \"/store-1\"}}},\n",
"\t\t},\n",
"\t})\n",
"\ttcBefore.Start(t)\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tServerArgs: base.TestServerArgs{\n",
"\t\t\t// This logic is specific to the storage layer.\n",
"\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n",
"\t\t},\n"
],
"file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go",
"type": "add",
"edit_start_line_idx": 182
} | // Code generated by execgen; DO NOT EDIT.
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexec
import (
"container/heap"
"context"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/col/typeconv"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecargs"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/colmem"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra/execopnode"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/errors"
)
// OrderedSynchronizer receives rows from multiple inputs and produces a single
// stream of rows, ordered according to a set of columns. The rows in each input
// stream are assumed to be ordered according to the same set of columns.
type OrderedSynchronizer struct {
colexecop.InitHelper
flowCtx *execinfra.FlowCtx
processorID int32
span *tracing.Span
accountingHelper colmem.SetAccountingHelper
inputs []colexecargs.OpWithMetaInfo
ordering colinfo.ColumnOrdering
typs []*types.T
canonicalTypeFamilies []types.Family
// tuplesToMerge (when positive) tracks the number of tuples that are still
// to be merged by synchronizer.
tuplesToMerge int64
// inputBatches stores the current batch for each input.
inputBatches []coldata.Batch
// inputIndices stores the current index into each input batch.
inputIndices []int
// advanceMinBatch, if true, indicates that the minimum input (according to
// heap) needs to be advanced by one row. This advancement is delayed in
// order to not fetch the next batch from the input too eagerly.
advanceMinBatch bool
// heap is a min heap which stores indices into inputBatches. The "current
// value" of ith input batch is the tuple at inputIndices[i] position of
// inputBatches[i] batch. If an input is fully exhausted, it will be removed
// from heap.
heap []int
// comparators stores one comparator per ordering column.
comparators []vecComparator
output coldata.Batch
outVecs coldata.TypedVecs
}
var (
_ colexecop.Operator = &OrderedSynchronizer{}
_ colexecop.Closer = &OrderedSynchronizer{}
)
// ChildCount implements the execinfrapb.OpNode interface.
func (o *OrderedSynchronizer) ChildCount(verbose bool) int {
return len(o.inputs)
}
// Child implements the execinfrapb.OpNode interface.
func (o *OrderedSynchronizer) Child(nth int, verbose bool) execopnode.OpNode {
return o.inputs[nth].Root
}
// NewOrderedSynchronizer creates a new OrderedSynchronizer.
// - memoryLimit will limit the size of batches produced by the synchronizer.
// - tuplesToMerge, if positive, indicates the total number of tuples that will
// be emitted by all inputs, use 0 if unknown.
func NewOrderedSynchronizer(
flowCtx *execinfra.FlowCtx,
processorID int32,
allocator *colmem.Allocator,
memoryLimit int64,
inputs []colexecargs.OpWithMetaInfo,
typs []*types.T,
ordering colinfo.ColumnOrdering,
tuplesToMerge int64,
) *OrderedSynchronizer {
os := &OrderedSynchronizer{
flowCtx: flowCtx,
processorID: processorID,
inputs: inputs,
ordering: ordering,
typs: typs,
canonicalTypeFamilies: typeconv.ToCanonicalTypeFamilies(typs),
tuplesToMerge: tuplesToMerge,
}
os.accountingHelper.Init(allocator, memoryLimit, typs, false /* alwaysReallocate */)
return os
}
// Next is part of the Operator interface.
func (o *OrderedSynchronizer) Next() coldata.Batch {
if o.inputBatches == nil {
o.inputBatches = make([]coldata.Batch, len(o.inputs))
o.heap = make([]int, 0, len(o.inputs))
for i := range o.inputs {
o.inputBatches[i] = o.inputs[i].Root.Next()
o.updateComparators(i)
if o.inputBatches[i].Length() > 0 {
o.heap = append(o.heap, i)
}
}
heap.Init(o)
}
o.resetOutput()
outputIdx := 0
for batchDone := false; !batchDone; {
if o.advanceMinBatch {
// Advance the minimum input batch, fetching a new batch if
// necessary.
minBatch := o.heap[0]
if o.inputIndices[minBatch]+1 < o.inputBatches[minBatch].Length() {
o.inputIndices[minBatch]++
} else {
o.inputBatches[minBatch] = o.inputs[minBatch].Root.Next()
o.inputIndices[minBatch] = 0
o.updateComparators(minBatch)
}
if o.inputBatches[minBatch].Length() == 0 {
heap.Remove(o, 0)
} else {
heap.Fix(o, 0)
}
}
if o.Len() == 0 {
// All inputs exhausted.
o.advanceMinBatch = false
break
}
minBatch := o.heap[0]
// Copy the min row into the output.
batch := o.inputBatches[minBatch]
srcRowIdx := o.inputIndices[minBatch]
if sel := batch.Selection(); sel != nil {
srcRowIdx = sel[srcRowIdx]
}
for i := range o.typs {
vec := batch.ColVec(i)
if vec.Nulls().MaybeHasNulls() && vec.Nulls().NullAt(srcRowIdx) {
o.outVecs.Nulls[i].SetNull(outputIdx)
} else {
switch o.canonicalTypeFamilies[i] {
case types.BoolFamily:
switch o.typs[i].Width() {
case -1:
default:
srcCol := vec.Bool()
outCol := o.outVecs.BoolCols[o.outVecs.ColsMap[i]]
v := srcCol.Get(srcRowIdx)
outCol.Set(outputIdx, v)
}
case types.BytesFamily:
switch o.typs[i].Width() {
case -1:
default:
srcCol := vec.Bytes()
outCol := o.outVecs.BytesCols[o.outVecs.ColsMap[i]]
outCol.Copy(srcCol, outputIdx, srcRowIdx)
}
case types.DecimalFamily:
switch o.typs[i].Width() {
case -1:
default:
srcCol := vec.Decimal()
outCol := o.outVecs.DecimalCols[o.outVecs.ColsMap[i]]
v := srcCol.Get(srcRowIdx)
outCol.Set(outputIdx, v)
}
case types.IntFamily:
switch o.typs[i].Width() {
case 16:
srcCol := vec.Int16()
outCol := o.outVecs.Int16Cols[o.outVecs.ColsMap[i]]
v := srcCol.Get(srcRowIdx)
outCol.Set(outputIdx, v)
case 32:
srcCol := vec.Int32()
outCol := o.outVecs.Int32Cols[o.outVecs.ColsMap[i]]
v := srcCol.Get(srcRowIdx)
outCol.Set(outputIdx, v)
case -1:
default:
srcCol := vec.Int64()
outCol := o.outVecs.Int64Cols[o.outVecs.ColsMap[i]]
v := srcCol.Get(srcRowIdx)
outCol.Set(outputIdx, v)
}
case types.FloatFamily:
switch o.typs[i].Width() {
case -1:
default:
srcCol := vec.Float64()
outCol := o.outVecs.Float64Cols[o.outVecs.ColsMap[i]]
v := srcCol.Get(srcRowIdx)
outCol.Set(outputIdx, v)
}
case types.TimestampTZFamily:
switch o.typs[i].Width() {
case -1:
default:
srcCol := vec.Timestamp()
outCol := o.outVecs.TimestampCols[o.outVecs.ColsMap[i]]
v := srcCol.Get(srcRowIdx)
outCol.Set(outputIdx, v)
}
case types.IntervalFamily:
switch o.typs[i].Width() {
case -1:
default:
srcCol := vec.Interval()
outCol := o.outVecs.IntervalCols[o.outVecs.ColsMap[i]]
v := srcCol.Get(srcRowIdx)
outCol.Set(outputIdx, v)
}
case types.JsonFamily:
switch o.typs[i].Width() {
case -1:
default:
srcCol := vec.JSON()
outCol := o.outVecs.JSONCols[o.outVecs.ColsMap[i]]
outCol.Copy(srcCol, outputIdx, srcRowIdx)
}
case typeconv.DatumVecCanonicalTypeFamily:
switch o.typs[i].Width() {
case -1:
default:
srcCol := vec.Datum()
outCol := o.outVecs.DatumCols[o.outVecs.ColsMap[i]]
v := srcCol.Get(srcRowIdx)
outCol.Set(outputIdx, v)
}
default:
colexecerror.InternalError(errors.AssertionFailedf("unhandled type %s", o.typs[i].String()))
}
}
}
// Delay the advancement of the min input batch until the next row is
// needed.
o.advanceMinBatch = true
// Account for the memory of the row we have just set.
batchDone = o.accountingHelper.AccountForSet(outputIdx)
outputIdx++
}
o.output.SetLength(outputIdx)
// Note that it's ok if this number becomes negative - the accounting helper
// will ignore it.
o.tuplesToMerge -= int64(outputIdx)
return o.output
}
func (o *OrderedSynchronizer) resetOutput() {
var reallocated bool
o.output, reallocated = o.accountingHelper.ResetMaybeReallocate(
o.typs, o.output, int(o.tuplesToMerge), /* tuplesToBeSet */
)
if reallocated {
o.outVecs.SetBatch(o.output)
}
}
// Init is part of the Operator interface.
func (o *OrderedSynchronizer) Init(ctx context.Context) {
if !o.InitHelper.Init(ctx) {
return
}
o.Ctx, o.span = execinfra.ProcessorSpan(o.Ctx, o.flowCtx, "ordered sync", o.processorID)
o.inputIndices = make([]int, len(o.inputs))
for i := range o.inputs {
o.inputs[i].Root.Init(o.Ctx)
}
o.comparators = make([]vecComparator, len(o.ordering))
for i := range o.ordering {
typ := o.typs[o.ordering[i].ColIdx]
o.comparators[i] = GetVecComparator(typ, len(o.inputs))
}
}
func (o *OrderedSynchronizer) DrainMeta() []execinfrapb.ProducerMetadata {
var bufferedMeta []execinfrapb.ProducerMetadata
if o.span != nil {
for i := range o.inputs {
for _, stats := range o.inputs[i].StatsCollectors {
o.span.RecordStructured(stats.GetStats())
}
}
if meta := execinfra.GetTraceDataAsMetadata(o.flowCtx, o.span); meta != nil {
bufferedMeta = append(bufferedMeta, *meta)
}
}
for _, input := range o.inputs {
bufferedMeta = append(bufferedMeta, input.MetadataSources.DrainMeta()...)
}
return bufferedMeta
}
func (o *OrderedSynchronizer) Close(context.Context) error {
o.accountingHelper.Release()
if o.span != nil {
o.span.Finish()
}
*o = OrderedSynchronizer{}
return nil
}
func (o *OrderedSynchronizer) compareRow(batchIdx1 int, batchIdx2 int) int {
batch1 := o.inputBatches[batchIdx1]
batch2 := o.inputBatches[batchIdx2]
valIdx1 := o.inputIndices[batchIdx1]
valIdx2 := o.inputIndices[batchIdx2]
if sel := batch1.Selection(); sel != nil {
valIdx1 = sel[valIdx1]
}
if sel := batch2.Selection(); sel != nil {
valIdx2 = sel[valIdx2]
}
for i := range o.ordering {
info := o.ordering[i]
res := o.comparators[i].compare(batchIdx1, batchIdx2, valIdx1, valIdx2)
if res != 0 {
switch d := info.Direction; d {
case encoding.Ascending:
return res
case encoding.Descending:
return -res
default:
colexecerror.InternalError(errors.AssertionFailedf("unexpected direction value %d", d))
}
}
}
return 0
}
// updateComparators should be run whenever a new batch is fetched. It updates
// all the relevant vectors in o.comparators.
func (o *OrderedSynchronizer) updateComparators(batchIdx int) {
batch := o.inputBatches[batchIdx]
if batch.Length() == 0 {
return
}
for i := range o.ordering {
vec := batch.ColVec(o.ordering[i].ColIdx)
o.comparators[i].setVec(batchIdx, vec)
}
}
// Len is part of heap.Interface and is only meant to be used internally.
func (o *OrderedSynchronizer) Len() int {
return len(o.heap)
}
// Less is part of heap.Interface and is only meant to be used internally.
func (o *OrderedSynchronizer) Less(i, j int) bool {
return o.compareRow(o.heap[i], o.heap[j]) < 0
}
// Swap is part of heap.Interface and is only meant to be used internally.
func (o *OrderedSynchronizer) Swap(i, j int) {
o.heap[i], o.heap[j] = o.heap[j], o.heap[i]
}
// Push is part of heap.Interface and is only meant to be used internally.
func (o *OrderedSynchronizer) Push(x interface{}) {
o.heap = append(o.heap, x.(int))
}
// Pop is part of heap.Interface and is only meant to be used internally.
func (o *OrderedSynchronizer) Pop() interface{} {
x := o.heap[len(o.heap)-1]
o.heap = o.heap[:len(o.heap)-1]
return x
}
| pkg/sql/colexec/ordered_synchronizer.eg.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.00017835444305092096,
0.00017300937906838953,
0.00016215631330851465,
0.0001740485313348472,
0.000003833707523881458
] |
{
"id": 5,
"code_window": [
"\t// would not be able to progress, but we will apply recovery procedure and\n",
"\t// mark on replicas on node 1 as designated survivors. After that, starting\n",
"\t// single node should succeed.\n",
"\ttcBefore := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{\n",
"\t\tServerArgsPerNode: map[int]base.TestServerArgs{\n",
"\t\t\t0: {StoreSpecs: []base.StoreSpec{{Path: dir + \"/store-1\"}}},\n",
"\t\t},\n",
"\t})\n",
"\ttcBefore.Start(t)\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tServerArgs: base.TestServerArgs{\n",
"\t\t\t// This logic is specific to the storage layer.\n",
"\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n",
"\t\t},\n"
],
"file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go",
"type": "add",
"edit_start_line_idx": 182
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "multitenantio",
srcs = ["cost_controlling_io.go"],
importpath = "github.com/cockroachdb/cockroach/pkg/multitenant/multitenantio",
visibility = ["//visibility:public"],
deps = [
"//pkg/cloud",
"//pkg/multitenant",
"//pkg/settings",
"//pkg/util/ioctx",
],
)
| pkg/multitenant/multitenantio/BUILD.bazel | 0 | https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de | [
0.00017708454106468707,
0.00017598268459551036,
0.00017488084267824888,
0.00017598268459551036,
0.0000011018491932190955
] |