hunk (dict) | file (string, length 0–11.8M) | file_path (string, length 2–234) | label (int64, 0 or 1) | commit_url (string, length 74–103) | dependency_score (sequence, length 5)
---|---|---|---|---|---|
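Each row below pairs the same candidate edit hunk with one repository file. Judging by the rows shown, the file the hunk actually belongs to (cmd/geth/dbcmd.go) carries label 1, the distractor files carry label 0, and dependency_score holds five per-row relevance values.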
{
"id": 3,
"code_window": [
"\t\tvalue []byte\n",
"\t\tdata []byte\n",
"\t\terr error\n",
"\t)\n",
"\tkey, err = parseHexOrString(ctx.Args().Get(0))\n",
"\tif err != nil {\n",
"\t\tlog.Info(\"Could not decode the key\", \"error\", err)\n",
"\t\treturn err\n",
"\t}\n",
"\tvalue, err = hexutil.Decode(ctx.Args().Get(1))\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tkey, err = common.ParseHexOrString(ctx.Args().Get(0))\n"
],
"file_path": "cmd/geth/dbcmd.go",
"type": "replace",
"edit_start_line_idx": 479
} | // Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"errors"
"fmt"
"os"
"os/signal"
"path/filepath"
"sort"
"strconv"
"strings"
"syscall"
"time"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/console/prompt"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie"
"github.com/olekukonko/tablewriter"
"gopkg.in/urfave/cli.v1"
)
var (
removedbCommand = cli.Command{
Action: utils.MigrateFlags(removeDB),
Name: "removedb",
Usage: "Remove blockchain and state databases",
ArgsUsage: "",
Flags: []cli.Flag{
utils.DataDirFlag,
},
Category: "DATABASE COMMANDS",
Description: `
Remove blockchain and state databases`,
}
dbCommand = cli.Command{
Name: "db",
Usage: "Low level database operations",
ArgsUsage: "",
Category: "DATABASE COMMANDS",
Subcommands: []cli.Command{
dbInspectCmd,
dbStatCmd,
dbCompactCmd,
dbGetCmd,
dbDeleteCmd,
dbPutCmd,
dbGetSlotsCmd,
dbDumpFreezerIndex,
dbImportCmd,
dbExportCmd,
dbMetadataCmd,
dbMigrateFreezerCmd,
},
}
dbInspectCmd = cli.Command{
Action: utils.MigrateFlags(inspect),
Name: "inspect",
ArgsUsage: "<prefix> <start>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.AncientFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Usage: "Inspect the storage size for each type of data in the database",
Description: `This command iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
}
dbStatCmd = cli.Command{
Action: utils.MigrateFlags(dbStats),
Name: "stats",
Usage: "Print leveldb statistics",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
}
dbCompactCmd = cli.Command{
Action: utils.MigrateFlags(dbCompact),
Name: "compact",
Usage: "Compact leveldb database. WARNING: May take a very long time",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
utils.CacheFlag,
utils.CacheDatabaseFlag,
},
Description: `This command performs a database compaction.
WARNING: This operation may take a very long time to finish, and may cause database
corruption if it is aborted during execution!`,
}
dbGetCmd = cli.Command{
Action: utils.MigrateFlags(dbGet),
Name: "get",
Usage: "Show the value of a database key",
ArgsUsage: "<hex-encoded key>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "This command looks up the specified database key from the database.",
}
dbDeleteCmd = cli.Command{
Action: utils.MigrateFlags(dbDelete),
Name: "delete",
Usage: "Delete a database key (WARNING: may corrupt your database)",
ArgsUsage: "<hex-encoded key>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: `This command deletes the specified database key from the database.
WARNING: This is a low-level operation which may cause database corruption!`,
}
dbPutCmd = cli.Command{
Action: utils.MigrateFlags(dbPut),
Name: "put",
Usage: "Set the value of a database key (WARNING: may corrupt your database)",
ArgsUsage: "<hex-encoded key> <hex-encoded value>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: `This command sets a given database key to the given value.
WARNING: This is a low-level operation which may cause database corruption!`,
}
dbGetSlotsCmd = cli.Command{
Action: utils.MigrateFlags(dbDumpTrie),
Name: "dumptrie",
Usage: "Show the storage key/values of a given storage trie",
ArgsUsage: "<hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "This command looks up the specified database key from the database.",
}
dbDumpFreezerIndex = cli.Command{
Action: utils.MigrateFlags(freezerInspect),
Name: "freezer-index",
Usage: "Dump out the index of a given freezer type",
ArgsUsage: "<type> <start (int)> <end (int)>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "This command displays information about the freezer index.",
}
dbImportCmd = cli.Command{
Action: utils.MigrateFlags(importLDBdata),
Name: "import",
Usage: "Imports leveldb-data from an exported RLP dump.",
ArgsUsage: "<dumpfile> <start (optional)",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "The import command imports the specific chain data from an RLP encoded stream.",
}
dbExportCmd = cli.Command{
Action: utils.MigrateFlags(exportChaindata),
Name: "export",
Usage: "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
ArgsUsage: "<type> <dumpfile>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
}
dbMetadataCmd = cli.Command{
Action: utils.MigrateFlags(showMetaData),
Name: "metadata",
Usage: "Shows metadata about the chain status.",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "Shows metadata about the chain status.",
}
dbMigrateFreezerCmd = cli.Command{
Action: utils.MigrateFlags(freezerMigrate),
Name: "freezer-migrate",
Usage: "Migrate legacy parts of the freezer. (WARNING: may take a long time)",
ArgsUsage: "",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: `The freezer-migrate command checks your database for receipts in a legacy format and updates those.
WARNING: please back up the receipt files in your ancients before running this command.`,
}
)
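// Illustrative invocations of the subcommands defined above (a hedged sketch:
// the hex keys, roots and file names are placeholders, not taken from this file):
//
//	geth db inspect                                    // iterate the entire database
//	geth db get 0x1234abcd                             // show the value of one key
//	geth db dumptrie 0x<storage-root> 0x<start> 100    // dump at most 100 storage slots
//	geth db export preimage dump.rlp.gz                // gzip-compressed RLP export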
func removeDB(ctx *cli.Context) error {
stack, config := makeConfigNode(ctx)
// Remove the full node state database
path := stack.ResolvePath("chaindata")
if common.FileExist(path) {
confirmAndRemoveDB(path, "full node state database")
} else {
log.Info("Full node state database missing", "path", path)
}
// Remove the full node ancient database
path = config.Eth.DatabaseFreezer
switch {
case path == "":
path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
case !filepath.IsAbs(path):
path = config.Node.ResolvePath(path)
}
if common.FileExist(path) {
confirmAndRemoveDB(path, "full node ancient database")
} else {
log.Info("Full node ancient database missing", "path", path)
}
// Remove the light node database
path = stack.ResolvePath("lightchaindata")
if common.FileExist(path) {
confirmAndRemoveDB(path, "light node database")
} else {
log.Info("Light node database missing", "path", path)
}
return nil
}
// confirmAndRemoveDB prompts the user for a last confirmation and removes the
// folder if accepted.
func confirmAndRemoveDB(database string, kind string) {
confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
switch {
case err != nil:
utils.Fatalf("%v", err)
case !confirm:
log.Info("Database deletion skipped", "path", database)
default:
start := time.Now()
filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
// If we're at the top level folder, recurse into it
if path == database {
return nil
}
// Delete all the files, but not subfolders
if !info.IsDir() {
os.Remove(path)
return nil
}
return filepath.SkipDir
})
log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
}
}
func inspect(ctx *cli.Context) error {
var (
prefix []byte
start []byte
)
if ctx.NArg() > 2 {
return fmt.Errorf("Max 2 arguments: %v", ctx.Command.ArgsUsage)
}
if ctx.NArg() >= 1 {
if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil {
return fmt.Errorf("failed to hex-decode 'prefix': %v", err)
} else {
prefix = d
}
}
if ctx.NArg() >= 2 {
if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil {
return fmt.Errorf("failed to hex-decode 'start': %v", err)
} else {
start = d
}
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
return rawdb.InspectDatabase(db, prefix, start)
}
func showLeveldbStats(db ethdb.Stater) {
if stats, err := db.Stat("leveldb.stats"); err != nil {
log.Warn("Failed to read database stats", "error", err)
} else {
fmt.Println(stats)
}
if ioStats, err := db.Stat("leveldb.iostats"); err != nil {
log.Warn("Failed to read database iostats", "error", err)
} else {
fmt.Println(ioStats)
}
}
func dbStats(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
showLeveldbStats(db)
return nil
}
func dbCompact(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
log.Info("Stats before compaction")
showLeveldbStats(db)
log.Info("Triggering compaction")
if err := db.Compact(nil, nil); err != nil {
log.Info("Compact err", "error", err)
return err
}
log.Info("Stats after compaction")
showLeveldbStats(db)
return nil
}
// dbGet shows the value of a given database key
func dbGet(ctx *cli.Context) error {
if ctx.NArg() != 1 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
key, err := parseHexOrString(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
}
data, err := db.Get(key)
if err != nil {
log.Info("Get operation failed", "key", fmt.Sprintf("0x%#x", key), "error", err)
return err
}
fmt.Printf("key %#x: %#x\n", key, data)
return nil
}
// dbDelete deletes a key from the database
func dbDelete(ctx *cli.Context) error {
if ctx.NArg() != 1 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
key, err := parseHexOrString(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
}
data, err := db.Get(key)
if err == nil {
fmt.Printf("Previous value: %#x\n", data)
}
if err = db.Delete(key); err != nil {
log.Info("Delete operation returned an error", "key", fmt.Sprintf("0x%#x", key), "error", err)
return err
}
return nil
}
// dbPut overwrites a value in the database
func dbPut(ctx *cli.Context) error {
if ctx.NArg() != 2 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
var (
key []byte
value []byte
data []byte
err error
)
key, err = parseHexOrString(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
}
value, err = hexutil.Decode(ctx.Args().Get(1))
if err != nil {
log.Info("Could not decode the value", "error", err)
return err
}
data, err = db.Get(key)
if err == nil {
fmt.Printf("Previous value: %#x\n", data)
}
return db.Put(key, value)
}
// dbDumpTrie shows the key-value slots of a given storage trie
func dbDumpTrie(ctx *cli.Context) error {
if ctx.NArg() < 1 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
var (
root []byte
start []byte
max = int64(-1)
err error
)
if root, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
log.Info("Could not decode the root", "error", err)
return err
}
stRoot := common.BytesToHash(root)
if ctx.NArg() >= 2 {
if start, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
log.Info("Could not decode the seek position", "error", err)
return err
}
}
if ctx.NArg() >= 3 {
if max, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
log.Info("Could not decode the max count", "error", err)
return err
}
}
theTrie, err := trie.New(stRoot, trie.NewDatabase(db))
if err != nil {
return err
}
var count int64
it := trie.NewIterator(theTrie.NodeIterator(start))
for it.Next() {
if max > 0 && count == max {
fmt.Printf("Exiting after %d values\n", count)
break
}
fmt.Printf(" %d. key %#x: %#x\n", count, it.Key, it.Value)
count++
}
return it.Err
}
func freezerInspect(ctx *cli.Context) error {
var (
start, end int64
disableSnappy bool
err error
)
if ctx.NArg() < 3 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
kind := ctx.Args().Get(0)
if noSnap, ok := rawdb.FreezerNoSnappy[kind]; !ok {
var options []string
for opt := range rawdb.FreezerNoSnappy {
options = append(options, opt)
}
sort.Strings(options)
return fmt.Errorf("Could read freezer-type '%v'. Available options: %v", kind, options)
} else {
disableSnappy = noSnap
}
if start, err = strconv.ParseInt(ctx.Args().Get(1), 10, 64); err != nil {
log.Info("Could read start-param", "error", err)
return err
}
if end, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
log.Info("Could read count param", "error", err)
return err
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
path := filepath.Join(stack.ResolvePath("chaindata"), "ancient")
log.Info("Opening freezer", "location", path, "name", kind)
if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy, true); err != nil {
return err
} else {
f.DumpIndex(start, end)
}
return nil
}
// parseHexOrString tries to hex-decode str, but if the 0x prefix is missing it instead just returns the raw bytes.
func parseHexOrString(str string) ([]byte, error) {
b, err := hexutil.Decode(str)
if errors.Is(err, hexutil.ErrMissingPrefix) {
return []byte(str), nil
}
return b, err
}
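// Behaviour sketch for parseHexOrString (illustrative inputs, not from the
// original file):
//
//	parseHexOrString("0x6b6579") // -> []byte{0x6b, 0x65, 0x79}, nil (hex-decoded)
//	parseHexOrString("key")      // -> []byte("key"), nil (no 0x prefix: raw bytes)
//	parseHexOrString("0xzz")     // -> nil, err (0x prefix present but invalid hex)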
func importLDBdata(ctx *cli.Context) error {
start := 0
switch ctx.NArg() {
case 1:
break
case 2:
s, err := strconv.Atoi(ctx.Args().Get(1))
if err != nil {
return fmt.Errorf("second arg must be an integer: %v", err)
}
start = s
default:
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
var (
fName = ctx.Args().Get(0)
stack, _ = makeConfigNode(ctx)
interrupt = make(chan os.Signal, 1)
stop = make(chan struct{})
)
defer stack.Close()
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(interrupt)
defer close(interrupt)
go func() {
if _, ok := <-interrupt; ok {
log.Info("Interrupted during ldb import, stopping at next batch")
}
close(stop)
}()
db := utils.MakeChainDatabase(ctx, stack, false)
return utils.ImportLDBData(db, fName, int64(start), stop)
}
type preimageIterator struct {
iter ethdb.Iterator
}
func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
for iter.iter.Next() {
key := iter.iter.Key()
if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
return utils.OpBatchAdd, key, iter.iter.Value(), true
}
}
return 0, nil, nil, false
}
func (iter *preimageIterator) Release() {
iter.iter.Release()
}
type snapshotIterator struct {
init bool
account ethdb.Iterator
storage ethdb.Iterator
}
func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
if !iter.init {
iter.init = true
return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
}
for iter.account.Next() {
key := iter.account.Key()
if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
return utils.OpBatchAdd, key, iter.account.Value(), true
}
}
for iter.storage.Next() {
key := iter.storage.Key()
if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
return utils.OpBatchAdd, key, iter.storage.Value(), true
}
}
return 0, nil, nil, false
}
func (iter *snapshotIterator) Release() {
iter.account.Release()
iter.storage.Release()
}
// chainExporters defines the export scheme for all exportable chain data.
var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
"preimage": func(db ethdb.Database) utils.ChainDataIterator {
iter := db.NewIterator(rawdb.PreimagePrefix, nil)
return &preimageIterator{iter: iter}
},
"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
return &snapshotIterator{account: account, storage: storage}
},
}
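// Sketch of how a further exporter would plug in above (the "mydata" kind,
// myDataIterator and myDataPrefix are hypothetical, not part of this file):
// implement utils.ChainDataIterator, i.e. a Next() (byte, []byte, []byte, bool)
// and a Release() method as the iterators above do, and register a constructor:
//
//	"mydata": func(db ethdb.Database) utils.ChainDataIterator {
//	        return &myDataIterator{iter: db.NewIterator(myDataPrefix, nil)}
//	},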
func exportChaindata(ctx *cli.Context) error {
if ctx.NArg() < 2 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
// Parse the required chain data type, make sure it's supported.
kind := ctx.Args().Get(0)
kind = strings.ToLower(strings.Trim(kind, " "))
exporter, ok := chainExporters[kind]
if !ok {
var kinds []string
for kind := range chainExporters {
kinds = append(kinds, kind)
}
return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
}
var (
stack, _ = makeConfigNode(ctx)
interrupt = make(chan os.Signal, 1)
stop = make(chan struct{})
)
defer stack.Close()
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(interrupt)
defer close(interrupt)
go func() {
if _, ok := <-interrupt; ok {
log.Info("Interrupted during db export, stopping at next batch")
}
close(stop)
}()
db := utils.MakeChainDatabase(ctx, stack, true)
return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
}
func showMetaData(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
ancients, err := db.Ancients()
if err != nil {
fmt.Fprintf(os.Stderr, "Error accessing ancients: %v\n", err)
}
pp := func(val *uint64) string {
if val == nil {
return "<nil>"
}
return fmt.Sprintf("%d (0x%x)", *val, *val)
}
data := [][]string{
{"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
{"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
{"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
{"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
if b := rawdb.ReadHeadBlock(db); b != nil {
data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (0x%x)", b.Number(), b.Number())})
}
if b := rawdb.ReadSkeletonSyncStatus(db); b != nil {
data = append(data, []string{"SkeletonSyncStatus", string(b)})
}
if h := rawdb.ReadHeadHeader(db); h != nil {
data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (0x%x)", h.Number, h.Number)})
}
data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
{"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
{"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
{"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
{"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
{"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
{"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
{"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
}...)
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Field", "Value"})
table.AppendBulk(data)
table.Render()
return nil
}
func freezerMigrate(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
// Check first block for legacy receipt format
numAncients, err := db.Ancients()
if err != nil {
return err
}
if numAncients < 1 {
log.Info("No receipts in freezer to migrate")
return nil
}
isFirstLegacy, firstIdx, err := dbHasLegacyReceipts(db, 0)
if err != nil {
return err
}
if !isFirstLegacy {
log.Info("No legacy receipts to migrate")
return nil
}
log.Info("Starting migration", "ancients", numAncients, "firstLegacy", firstIdx)
start := time.Now()
if err := db.MigrateTable("receipts", types.ConvertLegacyStoredReceipts); err != nil {
return err
}
if err := db.Close(); err != nil {
return err
}
log.Info("Migration finished", "duration", time.Since(start))
return nil
}
// dbHasLegacyReceipts checks freezer entries for legacy receipts. It stops at the first
// non-empty receipt and checks its format. The index of this first non-empty element is
// the second return parameter.
func dbHasLegacyReceipts(db ethdb.Database, firstIdx uint64) (bool, uint64, error) {
// Check first block for legacy receipt format
numAncients, err := db.Ancients()
if err != nil {
return false, 0, err
}
if numAncients < 1 {
return false, 0, nil
}
if firstIdx >= numAncients {
return false, firstIdx, nil
}
var (
legacy bool
blob []byte
emptyRLPList = []byte{192} // 0xc0, the RLP encoding of an empty list
)
// Find first block with non-empty receipt, only if
// the index is not already provided.
if firstIdx == 0 {
for i := uint64(0); i < numAncients; i++ {
blob, err = db.Ancient("receipts", i)
if err != nil {
return false, 0, err
}
if len(blob) == 0 {
continue
}
if !bytes.Equal(blob, emptyRLPList) {
firstIdx = i
break
}
}
}
// Is first non-empty receipt legacy?
first, err := db.Ancient("receipts", firstIdx)
if err != nil {
return false, 0, err
}
legacy, err = types.IsLegacyStoredReceipts(first)
return legacy, firstIdx, err
}
| cmd/geth/dbcmd.go | 1 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.9973195195198059,
0.08075562119483948,
0.00016234074428211898,
0.00017193585517816246,
0.26925262808799744
] |
{
"id": 3,
"code_window": [
"\t\tvalue []byte\n",
"\t\tdata []byte\n",
"\t\terr error\n",
"\t)\n",
"\tkey, err = parseHexOrString(ctx.Args().Get(0))\n",
"\tif err != nil {\n",
"\t\tlog.Info(\"Could not decode the key\", \"error\", err)\n",
"\t\treturn err\n",
"\t}\n",
"\tvalue, err = hexutil.Decode(ctx.Args().Get(1))\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tkey, err = common.ParseHexOrString(ctx.Args().Get(0))\n"
],
"file_path": "cmd/geth/dbcmd.go",
"type": "replace",
"edit_start_line_idx": 479
} | // Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package downloader
import (
"fmt"
"sync"
"sync/atomic"
"github.com/ethereum/go-ethereum/core/types"
)
// resultStore implements a structure for maintaining fetchResults, tracking their
// download-progress and delivering (finished) results.
type resultStore struct {
items []*fetchResult // Downloaded but not yet delivered fetch results
resultOffset uint64 // Offset of the first cached fetch result in the block chain
// Internal index of first non-completed entry, updated atomically when needed.
// If all items are complete, this will equal length(items), so
// *important* : is not safe to use for indexing without checking against length
indexIncomplete int32 // atomic access
// throttleThreshold is the limit up to which we _want_ to fill the
// results. If blocks are large, we want to limit the results to less
// than the number of available slots, and maybe only fill 1024 out of
// 8192 possible places. The queue will, at certain times, recalibrate
// this index.
throttleThreshold uint64
lock sync.RWMutex
}
func newResultStore(size int) *resultStore {
return &resultStore{
resultOffset: 0,
items: make([]*fetchResult, size),
throttleThreshold: uint64(size),
}
}
// SetThrottleThreshold updates the throttling threshold based on the requested
// limit and the total queue capacity. It returns the (possibly capped) threshold.
func (r *resultStore) SetThrottleThreshold(threshold uint64) uint64 {
r.lock.Lock()
defer r.lock.Unlock()
limit := uint64(len(r.items))
if threshold >= limit {
threshold = limit
}
r.throttleThreshold = threshold
return r.throttleThreshold
}
// AddFetch adds a header for body/receipt fetching. This is used when the queue
// wants to reserve headers for fetching.
//
// It returns the following:
// stale - if true, this item is already passed, and should not be requested again
// throttled - if true, the store is at capacity, this particular header is not prio now
// item - the result to store data into
// err - any error that occurred
func (r *resultStore) AddFetch(header *types.Header, fastSync bool) (stale, throttled bool, item *fetchResult, err error) {
r.lock.Lock()
defer r.lock.Unlock()
var index int
item, index, stale, throttled, err = r.getFetchResult(header.Number.Uint64())
if err != nil || stale || throttled {
return stale, throttled, item, err
}
if item == nil {
item = newFetchResult(header, fastSync)
r.items[index] = item
}
return stale, throttled, item, err
}
// GetDeliverySlot returns the fetchResult for the given header. If the 'stale' flag
// is true, that means the header has already been delivered 'upstream'. This method
// does not bubble up the 'throttle' flag, since it's moot at the point in time when
// the item is downloaded and ready for delivery
func (r *resultStore) GetDeliverySlot(headerNumber uint64) (*fetchResult, bool, error) {
r.lock.RLock()
defer r.lock.RUnlock()
res, _, stale, _, err := r.getFetchResult(headerNumber)
return res, stale, err
}
// getFetchResult returns the fetchResult corresponding to the given item, and
// the index where the result is stored.
func (r *resultStore) getFetchResult(headerNumber uint64) (item *fetchResult, index int, stale, throttle bool, err error) {
index = int(int64(headerNumber) - int64(r.resultOffset))
throttle = index >= int(r.throttleThreshold)
stale = index < 0
if index >= len(r.items) {
err = fmt.Errorf("%w: index allocation went beyond available resultStore space "+
"(index [%d] = header [%d] - resultOffset [%d], len(resultStore) = %d", errInvalidChain,
index, headerNumber, r.resultOffset, len(r.items))
return nil, index, stale, throttle, err
}
if stale {
return nil, index, stale, throttle, nil
}
item = r.items[index]
return item, index, stale, throttle, nil
}
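// Worked example of the index arithmetic above (illustrative numbers, assuming
// resultOffset = 100, len(items) = 8 and throttleThreshold = 4):
//
//	headerNumber =  99 -> index = -1 -> stale
//	headerNumber = 102 -> index =  2 -> usable slot
//	headerNumber = 105 -> index =  5 -> throttled (index >= 4)
//	headerNumber = 109 -> index =  9 -> errInvalidChain (index >= len(items))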
// HasCompletedItems returns true if there are processable items available.
// This method is cheaper than countCompleted.
func (r *resultStore) HasCompletedItems() bool {
r.lock.RLock()
defer r.lock.RUnlock()
if len(r.items) == 0 {
return false
}
if item := r.items[0]; item != nil && item.AllDone() {
return true
}
return false
}
// countCompleted returns the number of items ready for delivery, stopping at
// the first non-complete item.
//
// The method assumes that (at least) the read lock is held.
func (r *resultStore) countCompleted() int {
// We iterate from the already known complete point, and see
// if any more have completed since the last count
index := atomic.LoadInt32(&r.indexIncomplete)
for ; ; index++ {
if index >= int32(len(r.items)) {
break
}
result := r.items[index]
if result == nil || !result.AllDone() {
break
}
}
atomic.StoreInt32(&r.indexIncomplete, index)
return int(index)
}
// GetCompleted returns the next batch of completed fetchResults
func (r *resultStore) GetCompleted(limit int) []*fetchResult {
r.lock.Lock()
defer r.lock.Unlock()
completed := r.countCompleted()
if limit > completed {
limit = completed
}
results := make([]*fetchResult, limit)
copy(results, r.items[:limit])
// Delete the results from the cache and clear the tail.
copy(r.items, r.items[limit:])
for i := len(r.items) - limit; i < len(r.items); i++ {
r.items[i] = nil
}
// Advance the expected block number of the first cache entry
r.resultOffset += uint64(limit)
atomic.AddInt32(&r.indexIncomplete, int32(-limit))
return results
}
// Prepare initialises the offset with the given block number
func (r *resultStore) Prepare(offset uint64) {
r.lock.Lock()
defer r.lock.Unlock()
if r.resultOffset < offset {
r.resultOffset = offset
}
}
| eth/downloader/resultstore.go | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.00019516210886649787,
0.00017041734827216715,
0.00016281958960462362,
0.00016880368639249355,
0.000006832471171946963
] |
{
"id": 3,
"code_window": [
"\t\tvalue []byte\n",
"\t\tdata []byte\n",
"\t\terr error\n",
"\t)\n",
"\tkey, err = parseHexOrString(ctx.Args().Get(0))\n",
"\tif err != nil {\n",
"\t\tlog.Info(\"Could not decode the key\", \"error\", err)\n",
"\t\treturn err\n",
"\t}\n",
"\tvalue, err = hexutil.Decode(ctx.Args().Get(1))\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tkey, err = common.ParseHexOrString(ctx.Args().Get(0))\n"
],
"file_path": "cmd/geth/dbcmd.go",
"type": "replace",
"edit_start_line_idx": 479
} | // Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"sort"
"sync"
"sync/atomic"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/common/prque"
)
// servingQueue allows running tasks in a limited number of threads and puts the
// waiting tasks in a priority queue
type servingQueue struct {
recentTime, queuedTime, servingTimeDiff uint64
burstLimit, burstDropLimit uint64
burstDecRate float64
lastUpdate mclock.AbsTime
queueAddCh, queueBestCh chan *servingTask
stopThreadCh, quit chan struct{}
setThreadsCh chan int
wg sync.WaitGroup
threadCount int // number of currently running threads
queue *prque.Prque // priority queue for waiting or suspended tasks
best *servingTask // the highest priority task (not included in the queue)
suspendBias int64 // priority bias against suspending an already running task
}
// servingTask represents a request serving task. Tasks can be implemented to
// run in multiple steps, allowing the serving queue to suspend execution between
// steps if higher priority tasks are entered. The creator of the task should
// set the following fields:
//
// - priority: greater value means higher priority; values can wrap around the int64 range
// - run: execute a single step; return true if finished
// - after: executed after run finishes or returns an error, receives the total serving time
type servingTask struct {
sq *servingQueue
servingTime, timeAdded, maxTime, expTime uint64
peer *clientPeer
priority int64
biasAdded bool
token runToken
tokenCh chan runToken
}
// runToken received by servingTask.start allows the task to run. Closing the
// channel by servingTask.stop signals the thread controller to allow a new task
// to start running.
type runToken chan struct{}
// start blocks until the task can start and returns true if it is allowed to run.
// Returning false means that the task should be cancelled.
func (t *servingTask) start() bool {
if t.peer.isFrozen() {
return false
}
t.tokenCh = make(chan runToken, 1)
select {
case t.sq.queueAddCh <- t:
case <-t.sq.quit:
return false
}
select {
case t.token = <-t.tokenCh:
case <-t.sq.quit:
return false
}
if t.token == nil {
return false
}
t.servingTime -= uint64(mclock.Now())
return true
}
// done signals the thread controller about the task being finished and returns
// the total serving time of the task in nanoseconds.
func (t *servingTask) done() uint64 {
t.servingTime += uint64(mclock.Now())
close(t.token)
diff := t.servingTime - t.timeAdded
t.timeAdded = t.servingTime
if t.expTime > diff {
t.expTime -= diff
atomic.AddUint64(&t.sq.servingTimeDiff, t.expTime)
} else {
t.expTime = 0
}
return t.servingTime
}
// waitOrStop can be called during the execution of the task. It blocks if there
// is a higher priority task waiting (a bias is applied in favor of the currently
// running task). Returning true means that the execution can be resumed. False
// means the task should be cancelled.
func (t *servingTask) waitOrStop() bool {
t.done()
if !t.biasAdded {
t.priority += t.sq.suspendBias
t.biasAdded = true
}
return t.start()
}
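// Typical lifecycle of a servingTask (a minimal sketch; moreSteps is a
// hypothetical caller-side condition, not part of this file):
//
//	task := sq.newTask(peer, maxTime, priority)
//	if !task.start() {
//	        return // cancelled before it was allowed to run
//	}
//	for moreSteps() {
//	        if !task.waitOrStop() {
//	                return // suspended, then cancelled
//	        }
//	}
//	task.done() // release the run token and report the total serving time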
// newServingQueue returns a new servingQueue
func newServingQueue(suspendBias int64, utilTarget float64) *servingQueue {
sq := &servingQueue{
queue: prque.NewWrapAround(nil),
suspendBias: suspendBias,
queueAddCh: make(chan *servingTask, 100),
queueBestCh: make(chan *servingTask),
stopThreadCh: make(chan struct{}),
quit: make(chan struct{}),
setThreadsCh: make(chan int, 10),
burstLimit: uint64(utilTarget * bufLimitRatio * 1200000),
burstDropLimit: uint64(utilTarget * bufLimitRatio * 1000000),
burstDecRate: utilTarget,
lastUpdate: mclock.Now(),
}
sq.wg.Add(2)
go sq.queueLoop()
go sq.threadCountLoop()
return sq
}
// newTask creates a new task with the given priority
func (sq *servingQueue) newTask(peer *clientPeer, maxTime uint64, priority int64) *servingTask {
return &servingTask{
sq: sq,
peer: peer,
maxTime: maxTime,
expTime: maxTime,
priority: priority,
}
}
// threadController is started in multiple goroutines and controls the execution
// of tasks. The number of active thread controllers equals the allowed number of
// concurrently running threads. It tries to fetch the highest priority queued
// task first. If there are no queued tasks waiting then it can directly catch
// run tokens from the token channel and allow the corresponding tasks to run
// without entering the priority queue.
func (sq *servingQueue) threadController() {
defer sq.wg.Done()
for {
token := make(runToken)
select {
case best := <-sq.queueBestCh:
best.tokenCh <- token
case <-sq.stopThreadCh:
return
case <-sq.quit:
return
}
select {
case <-sq.stopThreadCh:
return
case <-sq.quit:
return
case <-token:
}
}
}
type (
// peerTasks lists the tasks received from a given peer when selecting peers to freeze
peerTasks struct {
peer *clientPeer
list []*servingTask
sumTime uint64
priority float64
}
// peerList is a sortable list of peerTasks
peerList []*peerTasks
)
func (l peerList) Len() int {
return len(l)
}
func (l peerList) Less(i, j int) bool {
return l[i].priority < l[j].priority
}
func (l peerList) Swap(i, j int) {
l[i], l[j] = l[j], l[i]
}
// freezePeers selects the peers with the worst priority queued tasks and freezes
// them until burstTime goes under burstDropLimit or all peers are frozen
func (sq *servingQueue) freezePeers() {
peerMap := make(map[*clientPeer]*peerTasks)
var peerList peerList
if sq.best != nil {
sq.queue.Push(sq.best, sq.best.priority)
}
sq.best = nil
for sq.queue.Size() > 0 {
task := sq.queue.PopItem().(*servingTask)
tasks := peerMap[task.peer]
if tasks == nil {
bufValue, bufLimit := task.peer.fcClient.BufferStatus()
if bufLimit < 1 {
bufLimit = 1
}
tasks = &peerTasks{
peer: task.peer,
priority: float64(bufValue) / float64(bufLimit), // lower value comes first
}
peerMap[task.peer] = tasks
peerList = append(peerList, tasks)
}
tasks.list = append(tasks.list, task)
tasks.sumTime += task.expTime
}
sort.Sort(peerList)
drop := true
for _, tasks := range peerList {
if drop {
tasks.peer.freeze()
tasks.peer.fcClient.Freeze()
sq.queuedTime -= tasks.sumTime
sqQueuedGauge.Update(int64(sq.queuedTime))
clientFreezeMeter.Mark(1)
drop = sq.recentTime+sq.queuedTime > sq.burstDropLimit
for _, task := range tasks.list {
task.tokenCh <- nil
}
} else {
for _, task := range tasks.list {
sq.queue.Push(task, task.priority)
}
}
}
if sq.queue.Size() > 0 {
sq.best = sq.queue.PopItem().(*servingTask)
}
}
// updateRecentTime recalculates the recent serving time value
func (sq *servingQueue) updateRecentTime() {
subTime := atomic.SwapUint64(&sq.servingTimeDiff, 0)
now := mclock.Now()
dt := now - sq.lastUpdate
sq.lastUpdate = now
if dt > 0 {
subTime += uint64(float64(dt) * sq.burstDecRate)
}
if sq.recentTime > subTime {
sq.recentTime -= subTime
} else {
sq.recentTime = 0
}
}
// addTask inserts a task into the priority queue
func (sq *servingQueue) addTask(task *servingTask) {
if sq.best == nil {
sq.best = task
} else if task.priority-sq.best.priority > 0 {
sq.queue.Push(sq.best, sq.best.priority)
sq.best = task
} else {
sq.queue.Push(task, task.priority)
}
sq.updateRecentTime()
sq.queuedTime += task.expTime
sqServedGauge.Update(int64(sq.recentTime))
sqQueuedGauge.Update(int64(sq.queuedTime))
if sq.recentTime+sq.queuedTime > sq.burstLimit {
sq.freezePeers()
}
}
// queueLoop is an event loop running in a goroutine. It receives tasks from queueAddCh
// and always tries to send the highest priority task to queueBestCh. Successfully sent
// tasks are removed from the queue.
func (sq *servingQueue) queueLoop() {
defer sq.wg.Done()
for {
if sq.best != nil {
expTime := sq.best.expTime
select {
case task := <-sq.queueAddCh:
sq.addTask(task)
case sq.queueBestCh <- sq.best:
sq.updateRecentTime()
sq.queuedTime -= expTime
sq.recentTime += expTime
sqServedGauge.Update(int64(sq.recentTime))
sqQueuedGauge.Update(int64(sq.queuedTime))
if sq.queue.Size() == 0 {
sq.best = nil
} else {
sq.best, _ = sq.queue.PopItem().(*servingTask)
}
case <-sq.quit:
return
}
} else {
select {
case task := <-sq.queueAddCh:
sq.addTask(task)
case <-sq.quit:
return
}
}
}
}
// threadCountLoop is an event loop running in a goroutine. It adjusts the number
// of active thread controller goroutines.
func (sq *servingQueue) threadCountLoop() {
var threadCountTarget int
defer sq.wg.Done()
for {
for threadCountTarget > sq.threadCount {
sq.wg.Add(1)
go sq.threadController()
sq.threadCount++
}
if threadCountTarget < sq.threadCount {
select {
case threadCountTarget = <-sq.setThreadsCh:
case sq.stopThreadCh <- struct{}{}:
sq.threadCount--
case <-sq.quit:
return
}
} else {
select {
case threadCountTarget = <-sq.setThreadsCh:
case <-sq.quit:
return
}
}
}
}
// setThreads sets the allowed processing thread count, suspending tasks as soon as
// possible if necessary.
func (sq *servingQueue) setThreads(threadCount int) {
select {
case sq.setThreadsCh <- threadCount:
case <-sq.quit:
return
}
}
// stop stops task processing as soon as possible and shuts down the serving queue.
func (sq *servingQueue) stop() {
close(sq.quit)
sq.wg.Wait()
}
| les/servingqueue.go | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.00017590819334145635,
0.00017010832380037755,
0.00016342215531039983,
0.0001700006250757724,
0.000003605875463108532
] |
{
"id": 3,
"code_window": [
"\t\tvalue []byte\n",
"\t\tdata []byte\n",
"\t\terr error\n",
"\t)\n",
"\tkey, err = parseHexOrString(ctx.Args().Get(0))\n",
"\tif err != nil {\n",
"\t\tlog.Info(\"Could not decode the key\", \"error\", err)\n",
"\t\treturn err\n",
"\t}\n",
"\tvalue, err = hexutil.Decode(ctx.Args().Get(1))\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tkey, err = common.ParseHexOrString(ctx.Args().Get(0))\n"
],
"file_path": "cmd/geth/dbcmd.go",
"type": "replace",
"edit_start_line_idx": 479
} | // Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package snap
import (
"bytes"
"crypto/rand"
"encoding/binary"
"fmt"
"math/big"
"sort"
"sync"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"golang.org/x/crypto/sha3"
)
func TestHashing(t *testing.T) {
t.Parallel()
var bytecodes = make([][]byte, 10)
for i := 0; i < len(bytecodes); i++ {
buf := make([]byte, 100)
rand.Read(buf)
bytecodes[i] = buf
}
var want, got string
var old = func() {
hasher := sha3.NewLegacyKeccak256()
for i := 0; i < len(bytecodes); i++ {
hasher.Reset()
hasher.Write(bytecodes[i])
hash := hasher.Sum(nil)
got = fmt.Sprintf("%v\n%v", got, hash)
}
}
var new = func() {
hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
var hash = make([]byte, 32)
for i := 0; i < len(bytecodes); i++ {
hasher.Reset()
hasher.Write(bytecodes[i])
hasher.Read(hash)
want = fmt.Sprintf("%v\n%v", want, hash)
}
}
old()
new()
if want != got {
t.Errorf("want\n%v\ngot\n%v\n", want, got)
}
}
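// Why the two variants above differ (a brief note, not part of the original
// file): the hasher from sha3.NewLegacyKeccak256 also satisfies
// crypto.KeccakState, whose Read method squeezes the digest into a
// caller-provided buffer. Unlike Sum(nil), Read does not allocate a fresh
// slice per digest, which is what BenchmarkHashing below measures.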
func BenchmarkHashing(b *testing.B) {
var bytecodes = make([][]byte, 10000)
for i := 0; i < len(bytecodes); i++ {
buf := make([]byte, 100)
rand.Read(buf)
bytecodes[i] = buf
}
var old = func() {
hasher := sha3.NewLegacyKeccak256()
for i := 0; i < len(bytecodes); i++ {
hasher.Reset()
hasher.Write(bytecodes[i])
hasher.Sum(nil)
}
}
var new = func() {
hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
var hash = make([]byte, 32)
for i := 0; i < len(bytecodes); i++ {
hasher.Reset()
hasher.Write(bytecodes[i])
hasher.Read(hash)
}
}
b.Run("old", func(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
old()
}
})
b.Run("new", func(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
new()
}
})
}
type (
accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
trieHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
codeHandlerFunc func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
)
type testPeer struct {
id string
test *testing.T
remote *Syncer
logger log.Logger
accountTrie *trie.Trie
accountValues entrySlice
storageTries map[common.Hash]*trie.Trie
storageValues map[common.Hash]entrySlice
accountRequestHandler accountHandlerFunc
storageRequestHandler storageHandlerFunc
trieRequestHandler trieHandlerFunc
codeRequestHandler codeHandlerFunc
term func()
// counters
nAccountRequests int
nStorageRequests int
nBytecodeRequests int
nTrienodeRequests int
}
func newTestPeer(id string, t *testing.T, term func()) *testPeer {
peer := &testPeer{
id: id,
test: t,
logger: log.New("id", id),
accountRequestHandler: defaultAccountRequestHandler,
trieRequestHandler: defaultTrieRequestHandler,
storageRequestHandler: defaultStorageRequestHandler,
codeRequestHandler: defaultCodeRequestHandler,
term: term,
}
//stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
//peer.logger.SetHandler(stderrHandler)
return peer
}
func (t *testPeer) ID() string { return t.id }
func (t *testPeer) Log() log.Logger { return t.logger }
func (t *testPeer) Stats() string {
return fmt.Sprintf(`Account requests: %d
Storage requests: %d
Bytecode requests: %d
Trienode requests: %d
`, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests)
}
func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
t.nAccountRequests++
go t.accountRequestHandler(t, id, root, origin, limit, bytes)
return nil
}
func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
t.nTrienodeRequests++
go t.trieRequestHandler(t, id, root, paths, bytes)
return nil
}
func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
t.nStorageRequests++
if len(accounts) == 1 && origin != nil {
t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
} else {
t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
}
go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
return nil
}
func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
t.nBytecodeRequests++
t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
go t.codeRequestHandler(t, id, hashes, bytes)
return nil
}
// defaultTrieRequestHandler is a well-behaved handler for trie healing requests
func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
// Pass the response
var nodes [][]byte
for _, pathset := range paths {
switch len(pathset) {
case 1:
blob, _, err := t.accountTrie.TryGetNode(pathset[0])
if err != nil {
t.logger.Info("Error handling req", "error", err)
break
}
nodes = append(nodes, blob)
default:
account := t.storageTries[(common.BytesToHash(pathset[0]))]
for _, path := range pathset[1:] {
blob, _, err := account.TryGetNode(path)
if err != nil {
t.logger.Info("Error handling req", "error", err)
break
}
nodes = append(nodes, blob)
}
}
}
t.remote.OnTrieNodes(t, requestId, nodes)
return nil
}
// defaultAccountRequestHandler is a well-behaved handler for AccountRangeRequests
func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
t.test.Errorf("Remote side rejected our delivery: %v", err)
t.term()
return err
}
return nil
}
func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
var size uint64
if limit == (common.Hash{}) {
limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
}
for _, entry := range t.accountValues {
if size > cap {
break
}
if bytes.Compare(origin[:], entry.k) <= 0 {
keys = append(keys, common.BytesToHash(entry.k))
vals = append(vals, entry.v)
size += uint64(32 + len(entry.v))
}
// If we've exceeded the request threshold, abort
if bytes.Compare(entry.k, limit[:]) >= 0 {
break
}
}
// Unless we send the entire trie, we need to supply proofs
// Actually, we need to supply proofs either way! This seems to be an implementation
// quirk in go-ethereum
proof := light.NewNodeSet()
if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
}
if len(keys) > 0 {
lastK := (keys[len(keys)-1])[:]
if err := t.accountTrie.Prove(lastK, 0, proof); err != nil {
t.logger.Error("Could not prove last item", "error", err)
}
}
for _, blob := range proof.NodeList() {
proofs = append(proofs, blob)
}
return keys, vals, proofs
}
// defaultStorageRequestHandler is a well-behaved storage request handler
func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
t.test.Errorf("Remote side rejected our delivery: %v", err)
t.term()
}
return nil
}
func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
var bytecodes [][]byte
for _, h := range hashes {
bytecodes = append(bytecodes, getCodeByHash(h))
}
if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
t.test.Errorf("Remote side rejected our delivery: %v", err)
t.term()
}
return nil
}
func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
var size uint64
for _, account := range accounts {
// The first account might start from a different origin and end sooner
var originHash common.Hash
if len(origin) > 0 {
originHash = common.BytesToHash(origin)
}
var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
if len(limit) > 0 {
limitHash = common.BytesToHash(limit)
}
var (
keys []common.Hash
vals [][]byte
abort bool
)
for _, entry := range t.storageValues[account] {
if size >= max {
abort = true
break
}
if bytes.Compare(entry.k, originHash[:]) < 0 {
continue
}
keys = append(keys, common.BytesToHash(entry.k))
vals = append(vals, entry.v)
size += uint64(32 + len(entry.v))
if bytes.Compare(entry.k, limitHash[:]) >= 0 {
break
}
}
hashes = append(hashes, keys)
slots = append(slots, vals)
// Generate the Merkle proofs for the first and last storage slot, but
// only if the response was capped. If the entire storage trie included
// in the response, no need for any proofs.
if originHash != (common.Hash{}) || abort {
// If we're aborting, we need to prove the first and last item
// This terminates the response (and thus the loop)
proof := light.NewNodeSet()
stTrie := t.storageTries[account]
// Here's a potential gotcha: when constructing the proof, we cannot
// use the 'origin' slice directly, but must use the full 32-byte
// hash form.
if err := stTrie.Prove(originHash[:], 0, proof); err != nil {
t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
}
if len(keys) > 0 {
lastK := (keys[len(keys)-1])[:]
if err := stTrie.Prove(lastK, 0, proof); err != nil {
t.logger.Error("Could not prove last item", "error", err)
}
}
for _, blob := range proof.NodeList() {
proofs = append(proofs, blob)
}
break
}
}
return hashes, slots, proofs
}
// createStorageRequestResponseAlwaysProve tests a corner case, where it always
// supplies the proof for the last account, even if it is 'complete'.
func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
var size uint64
max = max * 3 / 4
var origin common.Hash
if len(bOrigin) > 0 {
origin = common.BytesToHash(bOrigin)
}
var exit bool
for i, account := range accounts {
var keys []common.Hash
var vals [][]byte
for _, entry := range t.storageValues[account] {
if bytes.Compare(entry.k, origin[:]) < 0 {
exit = true
}
keys = append(keys, common.BytesToHash(entry.k))
vals = append(vals, entry.v)
size += uint64(32 + len(entry.v))
if size > max {
exit = true
}
}
if i == len(accounts)-1 {
exit = true
}
hashes = append(hashes, keys)
slots = append(slots, vals)
if exit {
// If we're aborting, we need to prove the first and last item
// This terminates the response (and thus the loop)
proof := light.NewNodeSet()
stTrie := t.storageTries[account]
// Here's a potential gotcha: when constructing the proof, we cannot
// use the 'origin' slice directly, but must use the full 32-byte
// hash form.
if err := stTrie.Prove(origin[:], 0, proof); err != nil {
t.logger.Error("Could not prove inexistence of origin", "origin", origin,
"error", err)
}
if len(keys) > 0 {
lastK := (keys[len(keys)-1])[:]
if err := stTrie.Prove(lastK, 0, proof); err != nil {
t.logger.Error("Could not prove last item", "error", err)
}
}
for _, blob := range proof.NodeList() {
proofs = append(proofs, blob)
}
break
}
}
return hashes, slots, proofs
}
// emptyRequestAccountRangeFn answers AccountRangeRequests with an empty response
func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
t.remote.OnAccounts(t, requestId, nil, nil, nil)
return nil
}
func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
return nil
}
func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
t.remote.OnTrieNodes(t, requestId, nil)
return nil
}
func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
return nil
}
func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
t.remote.OnStorage(t, requestId, nil, nil, nil)
return nil
}
func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
return nil
}
func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
t.test.Errorf("Remote side rejected our delivery: %v", err)
t.term()
}
return nil
}
//func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
// var bytecodes [][]byte
// t.remote.OnByteCodes(t, id, bytecodes)
// return nil
//}
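// corruptCodeRequestHandler echoes the requested hashes back as the bytecodes
// themselves, which cannot pass the hash check on the receiving side.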
func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
var bytecodes [][]byte
for _, h := range hashes {
// Send back the hashes
bytecodes = append(bytecodes, h[:])
}
if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
t.logger.Info("remote error on delivery (as expected)", "error", err)
// Mimic the real-life handler, which drops a peer on errors
t.remote.Unregister(t.id)
}
return nil
}
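// cappedCodeRequestHandler delivers only the first requested bytecode per
// batch, forcing the syncer to re-request the remainder.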
func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
var bytecodes [][]byte
for _, h := range hashes[:1] {
bytecodes = append(bytecodes, getCodeByHash(h))
}
// Missing bytecode can be retrieved again, no error expected
if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
t.test.Errorf("Remote side rejected our delivery: %v", err)
t.term()
}
return nil
}
// starvingStorageRequestHandler is a somewhat well-behaved storage handler, but it caps the returned results to a very small size
func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
}
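// starvingAccountRequestHandler is a somewhat well-behaved account handler,
// but it caps the returned results to a very small size.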
func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
}
//func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
// return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
//}
func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
if len(proofs) > 0 {
proofs = proofs[1:]
}
if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
t.logger.Info("remote error on delivery (as expected)", "error", err)
// Mimic the real-life handler, which drops a peer on errors
t.remote.Unregister(t.id)
}
return nil
}
// corruptStorageRequestHandler doesn't provide good proofs
func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
if len(proofs) > 0 {
proofs = proofs[1:]
}
if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
t.logger.Info("remote error on delivery (as expected)", "error", err)
// Mimic the real-life handler, which drops a peer on errors
t.remote.Unregister(t.id)
}
return nil
}
func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
t.logger.Info("remote error on delivery (as expected)", "error", err)
// Mimic the real-life handler, which drops a peer on errors
t.remote.Unregister(t.id)
}
return nil
}
// TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
// also ship the entire trie inside the proof. If the attack is successful,
// the remote side does not do any follow-up requests
func TestSyncBloatedProof(t *testing.T) {
t.Parallel()
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
source := newTestPeer("source", t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
var (
proofs [][]byte
keys []common.Hash
vals [][]byte
)
// The values
for _, entry := range t.accountValues {
if bytes.Compare(entry.k, origin[:]) < 0 {
continue
}
if bytes.Compare(entry.k, limit[:]) > 0 {
continue
}
keys = append(keys, common.BytesToHash(entry.k))
vals = append(vals, entry.v)
}
// The proofs
proof := light.NewNodeSet()
if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
t.logger.Error("Could not prove origin", "origin", origin, "error", err)
}
// The bloat: add proof of every single element
for _, entry := range t.accountValues {
if err := t.accountTrie.Prove(entry.k, 0, proof); err != nil {
t.logger.Error("Could not prove item", "error", err)
}
}
// And remove one item from the elements
if len(keys) > 2 {
keys = append(keys[:1], keys[2:]...)
vals = append(vals[:1], vals[2:]...)
}
for _, blob := range proof.NodeList() {
proofs = append(proofs, blob)
}
if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
t.logger.Info("remote error on delivery (as expected)", "error", err)
t.term()
// This is actually correct, signal to exit the test successfully
}
return nil
}
syncer := setupSyncer(source)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
t.Fatal("No error returned from incomplete/cancelled sync")
}
}
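// setupSyncer creates a Syncer backed by a fresh in-memory database and
// registers the given test peers with it.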
func setupSyncer(peers ...*testPeer) *Syncer {
stateDb := rawdb.NewMemoryDatabase()
syncer := NewSyncer(stateDb)
for _, peer := range peers {
syncer.Register(peer)
peer.remote = syncer
}
return syncer
}
// TestSync tests a basic sync with one peer
func TestSync(t *testing.T) {
t.Parallel()
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
return source
}
syncer := setupSyncer(mkSource("source"))
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
// panic within the prover
func TestSyncTinyTriePanic(t *testing.T) {
t.Parallel()
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
sourceAccountTrie, elems := makeAccountTrieNoStorage(1)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
return source
}
syncer := setupSyncer(mkSource("source"))
done := checkStall(t, term)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSync tests a basic sync with multiple peers
func TestMultiSync(t *testing.T) {
t.Parallel()
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
return source
}
syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB"))
done := checkStall(t, term)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithStorage tests basic sync using accounts + storage + code
func TestSyncWithStorage(t *testing.T) {
t.Parallel()
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
source.storageTries = storageTries
source.storageValues = storageElems
return source
}
syncer := setupSyncer(mkSource("sourceA"))
done := checkStall(t, term)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSyncManyUseless contains one good peer, and many which don't return anything valuable at all
func TestMultiSyncManyUseless(t *testing.T) {
t.Parallel()
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
source := newTestPeer(name, t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
source.storageTries = storageTries
source.storageValues = storageElems
if !noAccount {
source.accountRequestHandler = emptyRequestAccountRangeFn
}
if !noStorage {
source.storageRequestHandler = emptyStorageRequestHandler
}
if !noTrieNode {
source.trieRequestHandler = emptyTrieRequestHandler
}
return source
}
syncer := setupSyncer(
mkSource("full", true, true, true),
mkSource("noAccounts", false, true, true),
mkSource("noStorage", true, false, true),
mkSource("noTrie", true, true, false),
)
done := checkStall(t, term)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSyncManyUselessWithLowTimeout contains one good peer, and many
// which don't return anything valuable at all, combined with a very low
// request timeout.
func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
source := newTestPeer(name, t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
source.storageTries = storageTries
source.storageValues = storageElems
if !noAccount {
source.accountRequestHandler = emptyRequestAccountRangeFn
}
if !noStorage {
source.storageRequestHandler = emptyStorageRequestHandler
}
if !noTrieNode {
source.trieRequestHandler = emptyTrieRequestHandler
}
return source
}
syncer := setupSyncer(
mkSource("full", true, true, true),
mkSource("noAccounts", false, true, true),
mkSource("noStorage", true, false, true),
mkSource("noTrie", true, true, false),
)
// We're setting the timeout to very low, to increase the chance of the timeout
// being triggered. This was previously a cause of panic, when a response
// arrived simultaneously as a timeout was triggered.
syncer.rates.OverrideTTLLimit = time.Millisecond
done := checkStall(t, term)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSyncManyUnresponsive contains one good peer, and many which don't respond at all
func TestMultiSyncManyUnresponsive(t *testing.T) {
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
source := newTestPeer(name, t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
source.storageTries = storageTries
source.storageValues = storageElems
if !noAccount {
source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
}
if !noStorage {
source.storageRequestHandler = nonResponsiveStorageRequestHandler
}
if !noTrieNode {
source.trieRequestHandler = nonResponsiveTrieRequestHandler
}
return source
}
syncer := setupSyncer(
mkSource("full", true, true, true),
mkSource("noAccounts", false, true, true),
mkSource("noStorage", true, false, true),
mkSource("noTrie", true, true, false),
)
// We're setting the timeout to very low, to make the test run a bit faster
syncer.rates.OverrideTTLLimit = time.Millisecond
done := checkStall(t, term)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
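// checkStall spawns a watchdog which aborts the sync via term() if the test
// has not completed within one minute. Closing the returned channel disarms
// the watchdog.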
func checkStall(t *testing.T, term func()) chan struct{} {
testDone := make(chan struct{})
go func() {
select {
case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
t.Log("Sync stalled")
term()
case <-testDone:
return
}
}()
return testDone
}
// TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
// account trie has a few boundary elements.
func TestSyncBoundaryAccountTrie(t *testing.T) {
t.Parallel()
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
return source
}
syncer := setupSyncer(
mkSource("peer-a"),
mkSource("peer-b"),
)
done := checkStall(t, term)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
// consistently returning very small results
func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
t.Parallel()
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
mkSource := func(name string, slow bool) *testPeer {
source := newTestPeer(name, t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
if slow {
source.accountRequestHandler = starvingAccountRequestHandler
}
return source
}
syncer := setupSyncer(
mkSource("nice-a", false),
mkSource("nice-b", false),
mkSource("nice-c", false),
mkSource("capped", true),
)
done := checkStall(t, term)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
// code requests properly.
func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
t.Parallel()
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
source.codeRequestHandler = codeFn
return source
}
// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
	// chance that the full set of codes requested is sent only to the
// non-corrupt peer, which delivers everything in one go, and makes the
// test moot
syncer := setupSyncer(
mkSource("capped", cappedCodeRequestHandler),
mkSource("corrupt", corruptCodeRequestHandler),
)
done := checkStall(t, term)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
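// TestSyncNoStorageAndOneAccountCorruptPeer has one peer which delivers
// account ranges with truncated (invalid) proofs.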
func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
t.Parallel()
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
source.accountRequestHandler = accFn
return source
}
// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
	// chance that the full set of accounts requested is sent only to the
// non-corrupt peer, which delivers everything in one go, and makes the
// test moot
syncer := setupSyncer(
mkSource("capped", defaultAccountRequestHandler),
mkSource("corrupt", corruptAccountRequestHandler),
)
done := checkStall(t, term)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers bytecodes
// one at a time
func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
t.Parallel()
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
source.codeRequestHandler = codeFn
return source
}
	// Count how many times the code handler is invoked. There are only 8
	// unique hashes, but deduplication is per request batch, so the count can
	// exceed 8 (see the threshold check below).
var counter int
syncer := setupSyncer(
mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
counter++
return cappedCodeRequestHandler(t, id, hashes, max)
}),
)
done := checkStall(t, term)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
// There are only 8 unique hashes, and 3K accounts. However, the code
// deduplication is per request batch. If it were a perfect global dedup,
// we would expect only 8 requests. If there were no dedup, there would be
// 3k requests.
// We expect somewhere below 100 requests for these 8 unique hashes.
if threshold := 100; counter > threshold {
t.Fatalf("Error, expected < %d invocations, got %d", threshold, counter)
}
verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
// storage trie has a few boundary elements.
func TestSyncBoundaryStorageTrie(t *testing.T) {
t.Parallel()
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
source.storageTries = storageTries
source.storageValues = storageElems
return source
}
syncer := setupSyncer(
mkSource("peer-a"),
mkSource("peer-b"),
)
done := checkStall(t, term)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
// consistently returning very small results
func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
t.Parallel()
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)
mkSource := func(name string, slow bool) *testPeer {
source := newTestPeer(name, t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
source.storageTries = storageTries
source.storageValues = storageElems
if slow {
source.storageRequestHandler = starvingStorageRequestHandler
}
return source
}
syncer := setupSyncer(
mkSource("nice-a", false),
mkSource("slow", true),
)
done := checkStall(t, term)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
// sometimes sending bad proofs
func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
t.Parallel()
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
mkSource := func(name string, handler storageHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
source.storageTries = storageTries
source.storageValues = storageElems
source.storageRequestHandler = handler
return source
}
syncer := setupSyncer(
mkSource("nice-a", defaultStorageRequestHandler),
mkSource("nice-b", defaultStorageRequestHandler),
mkSource("nice-c", defaultStorageRequestHandler),
mkSource("corrupt", corruptStorageRequestHandler),
)
done := checkStall(t, term)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
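// TestSyncWithStorageAndNonProvingPeer tests sync using accounts + storage,
// where one peer omits the storage range proofs entirely.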
func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
t.Parallel()
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
mkSource := func(name string, handler storageHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
source.storageTries = storageTries
source.storageValues = storageElems
source.storageRequestHandler = handler
return source
}
syncer := setupSyncer(
mkSource("nice-a", defaultStorageRequestHandler),
mkSource("nice-b", defaultStorageRequestHandler),
mkSource("nice-c", defaultStorageRequestHandler),
mkSource("corrupt", noProofStorageRequestHandler),
)
done := checkStall(t, term)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithStorageMisbehavingProve tests basic sync using accounts + storage + code, against
// a peer who insists on delivering full storage sets _and_ proofs. This triggered
// an error, where the recipient erroneously clipped the boundary nodes, but
// did not mark the account for healing.
func TestSyncWithStorageMisbehavingProve(t *testing.T) {
t.Parallel()
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
source.storageTries = storageTries
source.storageValues = storageElems
source.storageRequestHandler = proofHappyStorageRequestHandler
return source
}
syncer := setupSyncer(mkSource("sourceA"))
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
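// kv is a raw key/value pair representing a single trie leaf, used by the
// test generators below.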
type kv struct {
k, v []byte
}
// Some helpers for sorting
type entrySlice []*kv
func (p entrySlice) Len() int { return len(p) }
func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 }
func (p entrySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
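// key32 returns a 32-byte key whose first 8 bytes hold the little-endian
// encoding of i.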
func key32(i uint64) []byte {
key := make([]byte, 32)
binary.LittleEndian.PutUint64(key, i)
return key
}
var (
codehashes = []common.Hash{
crypto.Keccak256Hash([]byte{0}),
crypto.Keccak256Hash([]byte{1}),
crypto.Keccak256Hash([]byte{2}),
crypto.Keccak256Hash([]byte{3}),
crypto.Keccak256Hash([]byte{4}),
crypto.Keccak256Hash([]byte{5}),
crypto.Keccak256Hash([]byte{6}),
crypto.Keccak256Hash([]byte{7}),
}
)
// getCodeHash returns a pseudo-random code hash
func getCodeHash(i uint64) []byte {
h := codehashes[int(i)%len(codehashes)]
return common.CopyBytes(h[:])
}
// getCodeByHash is a convenience function to look up the code for a given code hash
func getCodeByHash(hash common.Hash) []byte {
if hash == emptyCode {
return nil
}
for i, h := range codehashes {
if h == hash {
return []byte{byte(i)}
}
}
return nil
}
// makeAccountTrieNoStorage spits out a trie, along with the leaves
func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
db := trie.NewDatabase(rawdb.NewMemoryDatabase())
accTrie, _ := trie.New(common.Hash{}, db)
var entries entrySlice
for i := uint64(1); i <= uint64(n); i++ {
value, _ := rlp.EncodeToBytes(&types.StateAccount{
Nonce: i,
Balance: big.NewInt(int64(i)),
Root: emptyRoot,
CodeHash: getCodeHash(i),
})
key := key32(i)
elem := &kv{key, value}
accTrie.Update(elem.k, elem.v)
entries = append(entries, elem)
}
sort.Sort(entries)
accTrie.Commit(nil)
return accTrie, entries
}
// makeBoundaryAccountTrie constructs an account trie. Instead of filling
// accounts normally, this function will fill a few accounts which have
// boundary hashes.
func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
var (
entries entrySlice
boundaries []common.Hash
db = trie.NewDatabase(rawdb.NewMemoryDatabase())
trie, _ = trie.New(common.Hash{}, db)
)
// Initialize boundaries
var next common.Hash
step := new(big.Int).Sub(
new(big.Int).Div(
new(big.Int).Exp(common.Big2, common.Big256, nil),
big.NewInt(int64(accountConcurrency)),
), common.Big1,
)
for i := 0; i < accountConcurrency; i++ {
last := common.BigToHash(new(big.Int).Add(next.Big(), step))
if i == accountConcurrency-1 {
last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
}
boundaries = append(boundaries, last)
next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
}
// Fill boundary accounts
for i := 0; i < len(boundaries); i++ {
value, _ := rlp.EncodeToBytes(&types.StateAccount{
Nonce: uint64(0),
Balance: big.NewInt(int64(i)),
Root: emptyRoot,
CodeHash: getCodeHash(uint64(i)),
})
elem := &kv{boundaries[i].Bytes(), value}
trie.Update(elem.k, elem.v)
entries = append(entries, elem)
}
// Fill other accounts if required
for i := uint64(1); i <= uint64(n); i++ {
value, _ := rlp.EncodeToBytes(&types.StateAccount{
Nonce: i,
Balance: big.NewInt(int64(i)),
Root: emptyRoot,
CodeHash: getCodeHash(i),
})
elem := &kv{key32(i), value}
trie.Update(elem.k, elem.v)
entries = append(entries, elem)
}
sort.Sort(entries)
trie.Commit(nil)
return trie, entries
}
// makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account
// has a unique storage set.
func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
var (
db = trie.NewDatabase(rawdb.NewMemoryDatabase())
accTrie, _ = trie.New(common.Hash{}, db)
entries entrySlice
storageTries = make(map[common.Hash]*trie.Trie)
storageEntries = make(map[common.Hash]entrySlice)
)
// Create n accounts in the trie
for i := uint64(1); i <= uint64(accounts); i++ {
key := key32(i)
codehash := emptyCode[:]
if code {
codehash = getCodeHash(i)
}
// Create a storage trie
stTrie, stEntries := makeStorageTrieWithSeed(uint64(slots), i, db)
stRoot := stTrie.Hash()
stTrie.Commit(nil)
value, _ := rlp.EncodeToBytes(&types.StateAccount{
Nonce: i,
Balance: big.NewInt(int64(i)),
Root: stRoot,
CodeHash: codehash,
})
elem := &kv{key, value}
accTrie.Update(elem.k, elem.v)
entries = append(entries, elem)
storageTries[common.BytesToHash(key)] = stTrie
storageEntries[common.BytesToHash(key)] = stEntries
}
sort.Sort(entries)
accTrie.Commit(nil)
return accTrie, entries, storageTries, storageEntries
}
// makeAccountTrieWithStorage spits out a trie, along with the leaves
func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
var (
db = trie.NewDatabase(rawdb.NewMemoryDatabase())
accTrie, _ = trie.New(common.Hash{}, db)
entries entrySlice
storageTries = make(map[common.Hash]*trie.Trie)
storageEntries = make(map[common.Hash]entrySlice)
)
// Make a storage trie which we reuse for the whole lot
var (
stTrie *trie.Trie
stEntries entrySlice
)
if boundary {
stTrie, stEntries = makeBoundaryStorageTrie(slots, db)
} else {
stTrie, stEntries = makeStorageTrieWithSeed(uint64(slots), 0, db)
}
stRoot := stTrie.Hash()
// Create n accounts in the trie
for i := uint64(1); i <= uint64(accounts); i++ {
key := key32(i)
codehash := emptyCode[:]
if code {
codehash = getCodeHash(i)
}
value, _ := rlp.EncodeToBytes(&types.StateAccount{
Nonce: i,
Balance: big.NewInt(int64(i)),
Root: stRoot,
CodeHash: codehash,
})
elem := &kv{key, value}
accTrie.Update(elem.k, elem.v)
entries = append(entries, elem)
// we reuse the same one for all accounts
storageTries[common.BytesToHash(key)] = stTrie
storageEntries[common.BytesToHash(key)] = stEntries
}
sort.Sort(entries)
stTrie.Commit(nil)
accTrie.Commit(nil)
return accTrie, entries, storageTries, storageEntries
}
// makeStorageTrieWithSeed fills a storage trie with n items, returning the
// committed trie and the sorted entries. The seed can be used to ensure that
// tries are unique.
func makeStorageTrieWithSeed(n, seed uint64, db *trie.Database) (*trie.Trie, entrySlice) {
trie, _ := trie.New(common.Hash{}, db)
var entries entrySlice
for i := uint64(1); i <= n; i++ {
// store 'x' at slot 'x'
slotValue := key32(i + seed)
rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
slotKey := key32(i)
key := crypto.Keccak256Hash(slotKey[:])
elem := &kv{key[:], rlpSlotValue}
trie.Update(elem.k, elem.v)
entries = append(entries, elem)
}
sort.Sort(entries)
trie.Commit(nil)
return trie, entries
}
// makeBoundaryStorageTrie constructs a storage trie. Instead of filling
// storage slots normally, this function will fill a few slots which have
// boundary hashes.
func makeBoundaryStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) {
var (
entries entrySlice
boundaries []common.Hash
trie, _ = trie.New(common.Hash{}, db)
)
// Initialize boundaries
var next common.Hash
step := new(big.Int).Sub(
new(big.Int).Div(
new(big.Int).Exp(common.Big2, common.Big256, nil),
big.NewInt(int64(accountConcurrency)),
), common.Big1,
)
for i := 0; i < accountConcurrency; i++ {
last := common.BigToHash(new(big.Int).Add(next.Big(), step))
if i == accountConcurrency-1 {
last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
}
boundaries = append(boundaries, last)
next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
}
// Fill boundary slots
for i := 0; i < len(boundaries); i++ {
key := boundaries[i]
val := []byte{0xde, 0xad, 0xbe, 0xef}
elem := &kv{key[:], val}
trie.Update(elem.k, elem.v)
entries = append(entries, elem)
}
// Fill other slots if required
for i := uint64(1); i <= uint64(n); i++ {
slotKey := key32(i)
key := crypto.Keccak256Hash(slotKey[:])
slotValue := key32(i)
rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
elem := &kv{key[:], rlpSlotValue}
trie.Update(elem.k, elem.v)
entries = append(entries, elem)
}
sort.Sort(entries)
trie.Commit(nil)
return trie, entries
}
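// verifyTrie opens the account trie at the given root and iterates all
// accounts and their storage slots, failing the test if any node is missing.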
func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
t.Helper()
triedb := trie.NewDatabase(db)
accTrie, err := trie.New(root, triedb)
if err != nil {
t.Fatal(err)
}
accounts, slots := 0, 0
accIt := trie.NewIterator(accTrie.NodeIterator(nil))
for accIt.Next() {
var acc struct {
Nonce uint64
Balance *big.Int
Root common.Hash
CodeHash []byte
}
if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
log.Crit("Invalid account encountered during snapshot creation", "err", err)
}
accounts++
if acc.Root != emptyRoot {
storeTrie, err := trie.NewSecure(acc.Root, triedb)
if err != nil {
t.Fatal(err)
}
storeIt := trie.NewIterator(storeTrie.NodeIterator(nil))
for storeIt.Next() {
slots++
}
if err := storeIt.Err; err != nil {
t.Fatal(err)
}
}
}
if err := accIt.Err; err != nil {
t.Fatal(err)
}
t.Logf("accounts: %d, slots: %d", accounts, slots)
}
// TestSyncAccountPerformance tests how efficient the snap algo is at minimizing
// state healing
func TestSyncAccountPerformance(t *testing.T) {
	// Set the account concurrency to 1. This _should_ result in the
	// range root becoming correct, and there should be no healing needed
defer func(old int) { accountConcurrency = old }(accountConcurrency)
accountConcurrency = 1
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
return source
}
src := mkSource("source")
syncer := setupSyncer(src)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
// The trie root will always be requested, since it is added when the snap
// sync cycle starts. When popping the queue, we do not look it up again.
// Doing so would bring this number down to zero in this artificial testcase,
// but only add extra IO for no reason in practice.
if have, want := src.nTrienodeRequests, 1; have != want {
		fmt.Print(src.Stats())
t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
}
}
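// TestSlotEstimation exercises estimateRemainingSlots, which extrapolates the
// number of remaining storage slots from the portion of the key space covered
// by the slots delivered so far; edge cases near zero coverage are expected
// to error.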
func TestSlotEstimation(t *testing.T) {
for i, tc := range []struct {
last common.Hash
count int
want uint64
}{
{
// Half the space
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
100,
100,
},
{
// 1 / 16th
common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
100,
1500,
},
{
// Bit more than 1 / 16th
common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"),
100,
1499,
},
{
// Almost everything
common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"),
100,
6,
},
{
// Almost nothing -- should lead to error
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
1,
0,
},
{
// Nothing -- should lead to error
common.Hash{},
100,
0,
},
} {
have, _ := estimateRemainingSlots(tc.count, tc.last)
if want := tc.want; have != want {
t.Errorf("test %d: have %d want %d", i, have, want)
}
}
}
| eth/protocols/snap/sync_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.004002972971647978,
0.00029814281151629984,
0.0001623091520741582,
0.00017201455193571746,
0.00048342804075218737
] |
{
"id": 4,
"code_window": [
"\treturn nil\n",
"}\n",
"\n",
"// ParseHexOrString tries to hexdecode b, but if the prefix is missing, it instead just returns the raw bytes\n",
"func parseHexOrString(str string) ([]byte, error) {\n",
"\tb, err := hexutil.Decode(str)\n",
"\tif errors.Is(err, hexutil.ErrMissingPrefix) {\n",
"\t\treturn []byte(str), nil\n",
"\t}\n",
"\treturn b, err\n",
"}\n",
"\n",
"func importLDBdata(ctx *cli.Context) error {\n",
"\tstart := 0\n",
"\tswitch ctx.NArg() {\n",
"\tcase 1:\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/geth/dbcmd.go",
"type": "replace",
"edit_start_line_idx": 586
} | // Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package common contains various helper functions.
package common
import (
"encoding/hex"
)
// FromHex returns the bytes represented by the hexadecimal string s.
// s may be prefixed with "0x".
func FromHex(s string) []byte {
if has0xPrefix(s) {
s = s[2:]
}
if len(s)%2 == 1 {
s = "0" + s
}
return Hex2Bytes(s)
}
// CopyBytes returns an exact copy of the provided bytes.
func CopyBytes(b []byte) (copiedBytes []byte) {
if b == nil {
return nil
}
copiedBytes = make([]byte, len(b))
copy(copiedBytes, b)
return
}
// has0xPrefix validates str begins with '0x' or '0X'.
func has0xPrefix(str string) bool {
return len(str) >= 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X')
}
// isHexCharacter returns bool of c being a valid hexadecimal.
func isHexCharacter(c byte) bool {
return ('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F')
}
// isHex validates whether each byte is valid hexadecimal string.
func isHex(str string) bool {
if len(str)%2 != 0 {
return false
}
for _, c := range []byte(str) {
if !isHexCharacter(c) {
return false
}
}
return true
}
// Bytes2Hex returns the hexadecimal encoding of d.
func Bytes2Hex(d []byte) string {
return hex.EncodeToString(d)
}
// Hex2Bytes returns the bytes represented by the hexadecimal string str.
func Hex2Bytes(str string) []byte {
h, _ := hex.DecodeString(str)
return h
}
// Hex2BytesFixed returns bytes of a specified fixed length flen.
func Hex2BytesFixed(str string, flen int) []byte {
h, _ := hex.DecodeString(str)
if len(h) == flen {
return h
}
if len(h) > flen {
return h[len(h)-flen:]
}
hh := make([]byte, flen)
copy(hh[flen-len(h):flen], h)
return hh
}
// RightPadBytes zero-pads slice to the right up to length l.
func RightPadBytes(slice []byte, l int) []byte {
if l <= len(slice) {
return slice
}
padded := make([]byte, l)
copy(padded, slice)
return padded
}
// LeftPadBytes zero-pads slice to the left up to length l.
func LeftPadBytes(slice []byte, l int) []byte {
if l <= len(slice) {
return slice
}
padded := make([]byte, l)
copy(padded[l-len(slice):], slice)
return padded
}
// TrimLeftZeroes returns a subslice of s without leading zeroes
func TrimLeftZeroes(s []byte) []byte {
idx := 0
for ; idx < len(s); idx++ {
if s[idx] != 0 {
break
}
}
return s[idx:]
}
// TrimRightZeroes returns a subslice of s without trailing zeroes
func TrimRightZeroes(s []byte) []byte {
idx := len(s)
for ; idx > 0; idx-- {
if s[idx-1] != 0 {
break
}
}
return s[:idx]
}
| common/bytes.go | 1 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.03593215346336365,
0.008445209823548794,
0.00016730779316276312,
0.0035023479722440243,
0.010280735790729523
] |
{
"id": 4,
"code_window": [
"\treturn nil\n",
"}\n",
"\n",
"// ParseHexOrString tries to hexdecode b, but if the prefix is missing, it instead just returns the raw bytes\n",
"func parseHexOrString(str string) ([]byte, error) {\n",
"\tb, err := hexutil.Decode(str)\n",
"\tif errors.Is(err, hexutil.ErrMissingPrefix) {\n",
"\t\treturn []byte(str), nil\n",
"\t}\n",
"\treturn b, err\n",
"}\n",
"\n",
"func importLDBdata(ctx *cli.Context) error {\n",
"\tstart := 0\n",
"\tswitch ctx.NArg() {\n",
"\tcase 1:\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/geth/dbcmd.go",
"type": "replace",
"edit_start_line_idx": 586
} | # Swarm
https://swarm.ethereum.org
Swarm is a distributed storage platform and content distribution service, a native base layer service of the ethereum web3 stack. The primary objective of Swarm is to provide a decentralized and redundant store for dapp code and data as well as block chain and state data. Swarm is also set out to provide various base layer services for web3, including node-to-node messaging, media streaming, decentralised database services and scalable state-channel infrastructure for decentralised service economies.
**Note**: The codebase has been moved to [ethersphere/swarm](https://github.com/ethersphere/swarm)
| swarm/README.md | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.0001675982348388061,
0.0001675982348388061,
0.0001675982348388061,
0.0001675982348388061,
0
] |
{
"id": 4,
"code_window": [
"\treturn nil\n",
"}\n",
"\n",
"// ParseHexOrString tries to hexdecode b, but if the prefix is missing, it instead just returns the raw bytes\n",
"func parseHexOrString(str string) ([]byte, error) {\n",
"\tb, err := hexutil.Decode(str)\n",
"\tif errors.Is(err, hexutil.ErrMissingPrefix) {\n",
"\t\treturn []byte(str), nil\n",
"\t}\n",
"\treturn b, err\n",
"}\n",
"\n",
"func importLDBdata(ctx *cli.Context) error {\n",
"\tstart := 0\n",
"\tswitch ctx.NArg() {\n",
"\tcase 1:\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/geth/dbcmd.go",
"type": "replace",
"edit_start_line_idx": 586
} | // Copyright 2019 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"fmt"
"net"
"strings"
"time"
"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params"
"gopkg.in/urfave/cli.v1"
)
var (
discv4Command = cli.Command{
Name: "discv4",
Usage: "Node Discovery v4 tools",
Subcommands: []cli.Command{
discv4PingCommand,
discv4RequestRecordCommand,
discv4ResolveCommand,
discv4ResolveJSONCommand,
discv4CrawlCommand,
discv4TestCommand,
},
}
discv4PingCommand = cli.Command{
Name: "ping",
Usage: "Sends ping to a node",
Action: discv4Ping,
ArgsUsage: "<node>",
}
discv4RequestRecordCommand = cli.Command{
Name: "requestenr",
Usage: "Requests a node record using EIP-868 enrRequest",
Action: discv4RequestRecord,
ArgsUsage: "<node>",
}
discv4ResolveCommand = cli.Command{
Name: "resolve",
Usage: "Finds a node in the DHT",
Action: discv4Resolve,
ArgsUsage: "<node>",
Flags: []cli.Flag{bootnodesFlag},
}
discv4ResolveJSONCommand = cli.Command{
Name: "resolve-json",
Usage: "Re-resolves nodes in a nodes.json file",
Action: discv4ResolveJSON,
Flags: []cli.Flag{bootnodesFlag},
ArgsUsage: "<nodes.json file>",
}
discv4CrawlCommand = cli.Command{
Name: "crawl",
Usage: "Updates a nodes.json file with random nodes found in the DHT",
Action: discv4Crawl,
Flags: []cli.Flag{bootnodesFlag, crawlTimeoutFlag},
}
discv4TestCommand = cli.Command{
Name: "test",
Usage: "Runs tests against a node",
Action: discv4Test,
Flags: []cli.Flag{
remoteEnodeFlag,
testPatternFlag,
testTAPFlag,
testListen1Flag,
testListen2Flag,
},
}
)
var (
bootnodesFlag = cli.StringFlag{
Name: "bootnodes",
Usage: "Comma separated nodes used for bootstrapping",
}
nodekeyFlag = cli.StringFlag{
Name: "nodekey",
Usage: "Hex-encoded node key",
}
nodedbFlag = cli.StringFlag{
Name: "nodedb",
Usage: "Nodes database location",
}
listenAddrFlag = cli.StringFlag{
Name: "addr",
Usage: "Listening address",
}
crawlTimeoutFlag = cli.DurationFlag{
Name: "timeout",
Usage: "Time limit for the crawl.",
Value: 30 * time.Minute,
}
remoteEnodeFlag = cli.StringFlag{
Name: "remote",
Usage: "Enode of the remote node under test",
EnvVar: "REMOTE_ENODE",
}
)
func discv4Ping(ctx *cli.Context) error {
n := getNodeArg(ctx)
disc := startV4(ctx)
defer disc.Close()
start := time.Now()
if err := disc.Ping(n); err != nil {
return fmt.Errorf("node didn't respond: %v", err)
}
fmt.Printf("node responded to ping (RTT %v).\n", time.Since(start))
return nil
}
func discv4RequestRecord(ctx *cli.Context) error {
n := getNodeArg(ctx)
disc := startV4(ctx)
defer disc.Close()
respN, err := disc.RequestENR(n)
if err != nil {
return fmt.Errorf("can't retrieve record: %v", err)
}
fmt.Println(respN.String())
return nil
}
func discv4Resolve(ctx *cli.Context) error {
n := getNodeArg(ctx)
disc := startV4(ctx)
defer disc.Close()
fmt.Println(disc.Resolve(n).String())
return nil
}
func discv4ResolveJSON(ctx *cli.Context) error {
if ctx.NArg() < 1 {
return fmt.Errorf("need nodes file as argument")
}
nodesFile := ctx.Args().Get(0)
inputSet := make(nodeSet)
if common.FileExist(nodesFile) {
inputSet = loadNodesJSON(nodesFile)
}
// Add extra nodes from command line arguments.
var nodeargs []*enode.Node
for i := 1; i < ctx.NArg(); i++ {
n, err := parseNode(ctx.Args().Get(i))
if err != nil {
exit(err)
}
nodeargs = append(nodeargs, n)
}
// Run the crawler.
disc := startV4(ctx)
defer disc.Close()
c := newCrawler(inputSet, disc, enode.IterNodes(nodeargs))
c.revalidateInterval = 0
output := c.run(0)
writeNodesJSON(nodesFile, output)
return nil
}
func discv4Crawl(ctx *cli.Context) error {
if ctx.NArg() < 1 {
return fmt.Errorf("need nodes file as argument")
}
nodesFile := ctx.Args().First()
var inputSet nodeSet
if common.FileExist(nodesFile) {
inputSet = loadNodesJSON(nodesFile)
}
disc := startV4(ctx)
defer disc.Close()
c := newCrawler(inputSet, disc, disc.RandomNodes())
c.revalidateInterval = 10 * time.Minute
output := c.run(ctx.Duration(crawlTimeoutFlag.Name))
writeNodesJSON(nodesFile, output)
return nil
}
// discv4Test runs the protocol test suite.
func discv4Test(ctx *cli.Context) error {
// Configure test package globals.
if !ctx.IsSet(remoteEnodeFlag.Name) {
return fmt.Errorf("Missing -%v", remoteEnodeFlag.Name)
}
v4test.Remote = ctx.String(remoteEnodeFlag.Name)
v4test.Listen1 = ctx.String(testListen1Flag.Name)
v4test.Listen2 = ctx.String(testListen2Flag.Name)
return runTests(ctx, v4test.AllTests)
}
// startV4 starts an ephemeral discovery V4 node.
func startV4(ctx *cli.Context) *discover.UDPv4 {
ln, config := makeDiscoveryConfig(ctx)
socket := listen(ln, ctx.String(listenAddrFlag.Name))
disc, err := discover.ListenV4(socket, ln, config)
if err != nil {
exit(err)
}
return disc
}
func makeDiscoveryConfig(ctx *cli.Context) (*enode.LocalNode, discover.Config) {
var cfg discover.Config
if ctx.IsSet(nodekeyFlag.Name) {
key, err := crypto.HexToECDSA(ctx.String(nodekeyFlag.Name))
if err != nil {
exit(fmt.Errorf("-%s: %v", nodekeyFlag.Name, err))
}
cfg.PrivateKey = key
} else {
cfg.PrivateKey, _ = crypto.GenerateKey()
}
if commandHasFlag(ctx, bootnodesFlag) {
bn, err := parseBootnodes(ctx)
if err != nil {
exit(err)
}
cfg.Bootnodes = bn
}
dbpath := ctx.String(nodedbFlag.Name)
db, err := enode.OpenDB(dbpath)
if err != nil {
exit(err)
}
ln := enode.NewLocalNode(db, cfg.PrivateKey)
return ln, cfg
}
func listen(ln *enode.LocalNode, addr string) *net.UDPConn {
if addr == "" {
addr = "0.0.0.0:0"
}
socket, err := net.ListenPacket("udp4", addr)
if err != nil {
exit(err)
}
usocket := socket.(*net.UDPConn)
uaddr := socket.LocalAddr().(*net.UDPAddr)
if uaddr.IP.IsUnspecified() {
ln.SetFallbackIP(net.IP{127, 0, 0, 1})
} else {
ln.SetFallbackIP(uaddr.IP)
}
ln.SetFallbackUDP(uaddr.Port)
return usocket
}
func parseBootnodes(ctx *cli.Context) ([]*enode.Node, error) {
s := params.RinkebyBootnodes
if ctx.IsSet(bootnodesFlag.Name) {
input := ctx.String(bootnodesFlag.Name)
if input == "" {
return nil, nil
}
s = strings.Split(input, ",")
}
nodes := make([]*enode.Node, len(s))
var err error
for i, record := range s {
nodes[i], err = parseNode(record)
if err != nil {
return nil, fmt.Errorf("invalid bootstrap node: %v", err)
}
}
return nodes, nil
}
| cmd/devp2p/discv4cmd.go | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.0008591928635723889,
0.00022517694742418826,
0.00016513722948729992,
0.00016905996017158031,
0.0001438531035091728
] |
{
"id": 4,
"code_window": [
"\treturn nil\n",
"}\n",
"\n",
"// ParseHexOrString tries to hexdecode b, but if the prefix is missing, it instead just returns the raw bytes\n",
"func parseHexOrString(str string) ([]byte, error) {\n",
"\tb, err := hexutil.Decode(str)\n",
"\tif errors.Is(err, hexutil.ErrMissingPrefix) {\n",
"\t\treturn []byte(str), nil\n",
"\t}\n",
"\treturn b, err\n",
"}\n",
"\n",
"func importLDBdata(ctx *cli.Context) error {\n",
"\tstart := 0\n",
"\tswitch ctx.NArg() {\n",
"\tcase 1:\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/geth/dbcmd.go",
"type": "replace",
"edit_start_line_idx": 586
} | module github.com/ethereum/go-ethereum
go 1.16
require (
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
github.com/VictoriaMetrics/fastcache v1.6.0
github.com/aws/aws-sdk-go-v2 v1.2.0
github.com/aws/aws-sdk-go-v2/config v1.1.1
github.com/aws/aws-sdk-go-v2/credentials v1.1.1
github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1
github.com/btcsuite/btcd/btcec/v2 v2.1.3
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 // indirect
github.com/cespare/cp v0.1.0
github.com/cloudflare/cloudflare-go v0.14.0
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f
github.com/davecgh/go-spew v1.1.1
github.com/deckarep/golang-set v1.8.0
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48
github.com/edsrzf/mmap-go v1.0.0
github.com/fatih/color v1.7.0
github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
github.com/go-ole/go-ole v1.2.1 // indirect
github.com/go-stack/stack v1.8.0
github.com/golang-jwt/jwt/v4 v4.3.0
github.com/golang/protobuf v1.4.3
github.com/golang/snappy v0.0.4
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa
github.com/google/uuid v1.2.0
github.com/gorilla/websocket v1.4.2
github.com/graph-gophers/graphql-go v1.3.0
github.com/hashicorp/go-bexpr v0.1.10
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
github.com/holiman/bloomfilter/v2 v2.0.3
github.com/holiman/uint256 v1.2.0
github.com/huin/goupnp v1.0.3
github.com/influxdata/influxdb v1.8.3
github.com/influxdata/influxdb-client-go/v2 v2.4.0
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/jackpal/go-nat-pmp v1.0.2
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e
github.com/julienschmidt/httprouter v1.2.0
github.com/karalabe/usb v0.0.2
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mattn/go-colorable v0.1.8
github.com/mattn/go-isatty v0.0.12
github.com/naoina/go-stringutil v0.1.0 // indirect
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416
github.com/olekukonko/tablewriter v0.0.5
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7
github.com/prometheus/tsdb v0.7.1
github.com/rjeczalik/notify v0.9.1
github.com/rs/cors v1.7.0
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4
github.com/stretchr/testify v1.7.0
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
github.com/tklauser/go-sysconf v0.3.5 // indirect
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912
golang.org/x/text v0.3.7
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba
golang.org/x/tools v0.1.0
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6
gopkg.in/urfave/cli.v1 v1.20.0
gotest.tools v2.2.0+incompatible // indirect
)
| go.mod | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.000169706778251566,
0.00016703084111213684,
0.00016507096006534994,
0.00016661285189911723,
0.0000016193699821087648
] |
{
"id": 5,
"code_window": [
"// Package common contains various helper functions.\n",
"package common\n",
"\n",
"import (\n",
"\t\"encoding/hex\"\n",
")\n",
"\n",
"// FromHex returns the bytes represented by the hexadecimal string s.\n",
"// s may be prefixed with \"0x\".\n",
"func FromHex(s string) []byte {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"errors\"\n",
"\n",
"\t\"github.com/ethereum/go-ethereum/common/hexutil\"\n"
],
"file_path": "common/bytes.go",
"type": "add",
"edit_start_line_idx": 21
} | // Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"errors"
"fmt"
"os"
"os/signal"
"path/filepath"
"sort"
"strconv"
"strings"
"syscall"
"time"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/console/prompt"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie"
"github.com/olekukonko/tablewriter"
"gopkg.in/urfave/cli.v1"
)
var (
removedbCommand = cli.Command{
Action: utils.MigrateFlags(removeDB),
Name: "removedb",
Usage: "Remove blockchain and state databases",
ArgsUsage: "",
Flags: []cli.Flag{
utils.DataDirFlag,
},
Category: "DATABASE COMMANDS",
Description: `
Remove blockchain and state databases`,
}
dbCommand = cli.Command{
Name: "db",
Usage: "Low level database operations",
ArgsUsage: "",
Category: "DATABASE COMMANDS",
Subcommands: []cli.Command{
dbInspectCmd,
dbStatCmd,
dbCompactCmd,
dbGetCmd,
dbDeleteCmd,
dbPutCmd,
dbGetSlotsCmd,
dbDumpFreezerIndex,
dbImportCmd,
dbExportCmd,
dbMetadataCmd,
dbMigrateFreezerCmd,
},
}
dbInspectCmd = cli.Command{
Action: utils.MigrateFlags(inspect),
Name: "inspect",
ArgsUsage: "<prefix> <start>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.AncientFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Usage: "Inspect the storage size for each type of data in the database",
		Description: `This command iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
}
dbStatCmd = cli.Command{
Action: utils.MigrateFlags(dbStats),
Name: "stats",
Usage: "Print leveldb statistics",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
}
dbCompactCmd = cli.Command{
Action: utils.MigrateFlags(dbCompact),
Name: "compact",
Usage: "Compact leveldb database. WARNING: May take a very long time",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
utils.CacheFlag,
utils.CacheDatabaseFlag,
},
Description: `This command performs a database compaction.
WARNING: This operation may take a very long time to finish, and may cause database
corruption if it is aborted during execution!`,
}
dbGetCmd = cli.Command{
Action: utils.MigrateFlags(dbGet),
Name: "get",
Usage: "Show the value of a database key",
ArgsUsage: "<hex-encoded key>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "This command looks up the specified database key from the database.",
}
dbDeleteCmd = cli.Command{
Action: utils.MigrateFlags(dbDelete),
Name: "delete",
Usage: "Delete a database key (WARNING: may corrupt your database)",
ArgsUsage: "<hex-encoded key>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: `This command deletes the specified database key from the database.
WARNING: This is a low-level operation which may cause database corruption!`,
}
dbPutCmd = cli.Command{
Action: utils.MigrateFlags(dbPut),
Name: "put",
Usage: "Set the value of a database key (WARNING: may corrupt your database)",
ArgsUsage: "<hex-encoded key> <hex-encoded value>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: `This command sets a given database key to the given value.
WARNING: This is a low-level operation which may cause database corruption!`,
}
dbGetSlotsCmd = cli.Command{
Action: utils.MigrateFlags(dbDumpTrie),
Name: "dumptrie",
Usage: "Show the storage key/values of a given storage trie",
ArgsUsage: "<hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "This command looks up the specified database key from the database.",
}
dbDumpFreezerIndex = cli.Command{
Action: utils.MigrateFlags(freezerInspect),
Name: "freezer-index",
Usage: "Dump out the index of a given freezer type",
ArgsUsage: "<type> <start (int)> <end (int)>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "This command displays information about the freezer index.",
}
dbImportCmd = cli.Command{
Action: utils.MigrateFlags(importLDBdata),
Name: "import",
Usage: "Imports leveldb-data from an exported RLP dump.",
ArgsUsage: "<dumpfile> <start (optional)",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "The import command imports the specific chain data from an RLP encoded stream.",
}
dbExportCmd = cli.Command{
Action: utils.MigrateFlags(exportChaindata),
Name: "export",
Usage: "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
ArgsUsage: "<type> <dumpfile>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
}
dbMetadataCmd = cli.Command{
Action: utils.MigrateFlags(showMetaData),
Name: "metadata",
Usage: "Shows metadata about the chain status.",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "Shows metadata about the chain status.",
}
dbMigrateFreezerCmd = cli.Command{
Action: utils.MigrateFlags(freezerMigrate),
Name: "freezer-migrate",
Usage: "Migrate legacy parts of the freezer. (WARNING: may take a long time)",
ArgsUsage: "",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: `The freezer-migrate command checks your database for receipts in a legacy format and updates those.
WARNING: please back-up the receipt files in your ancients before running this command.`,
}
)
func removeDB(ctx *cli.Context) error {
stack, config := makeConfigNode(ctx)
// Remove the full node state database
path := stack.ResolvePath("chaindata")
if common.FileExist(path) {
confirmAndRemoveDB(path, "full node state database")
} else {
log.Info("Full node state database missing", "path", path)
}
// Remove the full node ancient database
path = config.Eth.DatabaseFreezer
switch {
case path == "":
path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
case !filepath.IsAbs(path):
path = config.Node.ResolvePath(path)
}
if common.FileExist(path) {
confirmAndRemoveDB(path, "full node ancient database")
} else {
log.Info("Full node ancient database missing", "path", path)
}
// Remove the light node database
path = stack.ResolvePath("lightchaindata")
if common.FileExist(path) {
confirmAndRemoveDB(path, "light node database")
} else {
log.Info("Light node database missing", "path", path)
}
return nil
}
// confirmAndRemoveDB prompts the user for a last confirmation and removes the
// folder if accepted.
func confirmAndRemoveDB(database string, kind string) {
confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
switch {
case err != nil:
utils.Fatalf("%v", err)
case !confirm:
log.Info("Database deletion skipped", "path", database)
default:
start := time.Now()
filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
		// If we're at the top level folder, recurse into it
if path == database {
return nil
}
// Delete all the files, but not subfolders
if !info.IsDir() {
os.Remove(path)
return nil
}
return filepath.SkipDir
})
log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
}
}
func inspect(ctx *cli.Context) error {
var (
prefix []byte
start []byte
)
if ctx.NArg() > 2 {
return fmt.Errorf("Max 2 arguments: %v", ctx.Command.ArgsUsage)
}
if ctx.NArg() >= 1 {
if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil {
return fmt.Errorf("failed to hex-decode 'prefix': %v", err)
} else {
prefix = d
}
}
if ctx.NArg() >= 2 {
if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil {
return fmt.Errorf("failed to hex-decode 'start': %v", err)
} else {
start = d
}
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
return rawdb.InspectDatabase(db, prefix, start)
}
func showLeveldbStats(db ethdb.Stater) {
if stats, err := db.Stat("leveldb.stats"); err != nil {
log.Warn("Failed to read database stats", "error", err)
} else {
fmt.Println(stats)
}
if ioStats, err := db.Stat("leveldb.iostats"); err != nil {
log.Warn("Failed to read database iostats", "error", err)
} else {
fmt.Println(ioStats)
}
}
func dbStats(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
showLeveldbStats(db)
return nil
}
func dbCompact(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
log.Info("Stats before compaction")
showLeveldbStats(db)
log.Info("Triggering compaction")
if err := db.Compact(nil, nil); err != nil {
log.Info("Compact err", "error", err)
return err
}
log.Info("Stats after compaction")
showLeveldbStats(db)
return nil
}
// dbGet shows the value of a given database key
func dbGet(ctx *cli.Context) error {
if ctx.NArg() != 1 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
key, err := parseHexOrString(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
}
data, err := db.Get(key)
if err != nil {
log.Info("Get operation failed", "key", fmt.Sprintf("0x%#x", key), "error", err)
return err
}
fmt.Printf("key %#x: %#x\n", key, data)
return nil
}
// dbDelete deletes a key from the database
func dbDelete(ctx *cli.Context) error {
if ctx.NArg() != 1 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
key, err := parseHexOrString(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
}
data, err := db.Get(key)
if err == nil {
fmt.Printf("Previous value: %#x\n", data)
}
if err = db.Delete(key); err != nil {
log.Info("Delete operation returned an error", "key", fmt.Sprintf("0x%#x", key), "error", err)
return err
}
return nil
}
// dbPut overwrites a value in the database
func dbPut(ctx *cli.Context) error {
if ctx.NArg() != 2 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
var (
key []byte
value []byte
data []byte
err error
)
key, err = parseHexOrString(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
}
value, err = hexutil.Decode(ctx.Args().Get(1))
if err != nil {
log.Info("Could not decode the value", "error", err)
return err
}
data, err = db.Get(key)
if err == nil {
fmt.Printf("Previous value: %#x\n", data)
}
return db.Put(key, value)
}
// dbDumpTrie shows the key-value slots of a given storage trie
func dbDumpTrie(ctx *cli.Context) error {
if ctx.NArg() < 1 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
var (
root []byte
start []byte
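		// A negative max means there is no limit on the number of dumped slots.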
max = int64(-1)
err error
)
if root, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
log.Info("Could not decode the root", "error", err)
return err
}
stRoot := common.BytesToHash(root)
if ctx.NArg() >= 2 {
if start, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
log.Info("Could not decode the seek position", "error", err)
return err
}
}
if ctx.NArg() >= 3 {
if max, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
log.Info("Could not decode the max count", "error", err)
return err
}
}
theTrie, err := trie.New(stRoot, trie.NewDatabase(db))
if err != nil {
return err
}
var count int64
it := trie.NewIterator(theTrie.NodeIterator(start))
for it.Next() {
if max > 0 && count == max {
fmt.Printf("Exiting after %d values\n", count)
break
}
fmt.Printf(" %d. key %#x: %#x\n", count, it.Key, it.Value)
count++
}
return it.Err
}
func freezerInspect(ctx *cli.Context) error {
var (
start, end int64
disableSnappy bool
err error
)
if ctx.NArg() < 3 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
kind := ctx.Args().Get(0)
if noSnap, ok := rawdb.FreezerNoSnappy[kind]; !ok {
var options []string
for opt := range rawdb.FreezerNoSnappy {
options = append(options, opt)
}
sort.Strings(options)
return fmt.Errorf("Could read freezer-type '%v'. Available options: %v", kind, options)
} else {
disableSnappy = noSnap
}
if start, err = strconv.ParseInt(ctx.Args().Get(1), 10, 64); err != nil {
log.Info("Could read start-param", "error", err)
return err
}
if end, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
log.Info("Could read count param", "error", err)
return err
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
path := filepath.Join(stack.ResolvePath("chaindata"), "ancient")
log.Info("Opening freezer", "location", path, "name", kind)
if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy, true); err != nil {
return err
} else {
f.DumpIndex(start, end)
}
return nil
}
// parseHexOrString tries to hex-decode str, but if the 0x prefix is missing, it instead just returns the raw bytes of str
func parseHexOrString(str string) ([]byte, error) {
b, err := hexutil.Decode(str)
if errors.Is(err, hexutil.ErrMissingPrefix) {
return []byte(str), nil
}
return b, err
}
func importLDBdata(ctx *cli.Context) error {
start := 0
switch ctx.NArg() {
case 1:
break
case 2:
s, err := strconv.Atoi(ctx.Args().Get(1))
if err != nil {
return fmt.Errorf("second arg must be an integer: %v", err)
}
start = s
default:
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
var (
fName = ctx.Args().Get(0)
stack, _ = makeConfigNode(ctx)
interrupt = make(chan os.Signal, 1)
stop = make(chan struct{})
)
defer stack.Close()
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(interrupt)
defer close(interrupt)
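	// On SIGINT/SIGTERM, close stop so the importer finishes its current batch and returns cleanly.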
go func() {
if _, ok := <-interrupt; ok {
log.Info("Interrupted during ldb import, stopping at next batch")
}
close(stop)
}()
db := utils.MakeChainDatabase(ctx, stack, false)
return utils.ImportLDBData(db, fName, int64(start), stop)
}
type preimageIterator struct {
iter ethdb.Iterator
}
func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
for iter.iter.Next() {
key := iter.iter.Key()
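		// Only forward well-formed entries: the preimage prefix followed by a 32-byte hash.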
if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
return utils.OpBatchAdd, key, iter.iter.Value(), true
}
}
return 0, nil, nil, false
}
func (iter *preimageIterator) Release() {
iter.iter.Release()
}
type snapshotIterator struct {
init bool
account ethdb.Iterator
storage ethdb.Iterator
}
func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
if !iter.init {
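		// Emit a deletion of the snapshot root key first, so that an import
		// invalidates any existing snapshot before new entries are written.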
iter.init = true
return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
}
for iter.account.Next() {
key := iter.account.Key()
if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
return utils.OpBatchAdd, key, iter.account.Value(), true
}
}
for iter.storage.Next() {
key := iter.storage.Key()
if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
return utils.OpBatchAdd, key, iter.storage.Value(), true
}
}
return 0, nil, nil, false
}
func (iter *snapshotIterator) Release() {
iter.account.Release()
iter.storage.Release()
}
// chainExporters defines the export scheme for all exportable chain data.
var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
"preimage": func(db ethdb.Database) utils.ChainDataIterator {
iter := db.NewIterator(rawdb.PreimagePrefix, nil)
return &preimageIterator{iter: iter}
},
"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
return &snapshotIterator{account: account, storage: storage}
},
}
func exportChaindata(ctx *cli.Context) error {
if ctx.NArg() < 2 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
// Parse the required chain data type, make sure it's supported.
kind := ctx.Args().Get(0)
kind = strings.ToLower(strings.Trim(kind, " "))
exporter, ok := chainExporters[kind]
if !ok {
var kinds []string
for kind := range chainExporters {
kinds = append(kinds, kind)
}
return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
}
var (
stack, _ = makeConfigNode(ctx)
interrupt = make(chan os.Signal, 1)
stop = make(chan struct{})
)
defer stack.Close()
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(interrupt)
defer close(interrupt)
go func() {
if _, ok := <-interrupt; ok {
log.Info("Interrupted during db export, stopping at next batch")
}
close(stop)
}()
db := utils.MakeChainDatabase(ctx, stack, true)
return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
}
func showMetaData(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
ancients, err := db.Ancients()
if err != nil {
fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
}
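	// pp renders an optional uint64 in both decimal and hex, or <nil> if unset.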
pp := func(val *uint64) string {
if val == nil {
return "<nil>"
}
return fmt.Sprintf("%d (0x%x)", *val, *val)
}
data := [][]string{
{"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
{"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
{"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
{"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
if b := rawdb.ReadHeadBlock(db); b != nil {
data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (0x%x)", b.Number(), b.Number())})
}
if b := rawdb.ReadSkeletonSyncStatus(db); b != nil {
data = append(data, []string{"SkeletonSyncStatus", string(b)})
}
if h := rawdb.ReadHeadHeader(db); h != nil {
data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (0x%x)", h.Number, h.Number)})
}
data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
{"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
{"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
{"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
{"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
{"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
{"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
{"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
}...)
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Field", "Value"})
table.AppendBulk(data)
table.Render()
return nil
}
func freezerMigrate(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
// Check first block for legacy receipt format
numAncients, err := db.Ancients()
if err != nil {
return err
}
if numAncients < 1 {
log.Info("No receipts in freezer to migrate")
return nil
}
isFirstLegacy, firstIdx, err := dbHasLegacyReceipts(db, 0)
if err != nil {
return err
}
if !isFirstLegacy {
log.Info("No legacy receipts to migrate")
return nil
}
log.Info("Starting migration", "ancients", numAncients, "firstLegacy", firstIdx)
start := time.Now()
if err := db.MigrateTable("receipts", types.ConvertLegacyStoredReceipts); err != nil {
return err
}
if err := db.Close(); err != nil {
return err
}
log.Info("Migration finished", "duration", time.Since(start))
return nil
}
// dbHasLegacyReceipts checks freezer entries for legacy receipts. It stops at the first
// non-empty receipt and checks its format. The index of this first non-empty element is
// the second return parameter.
func dbHasLegacyReceipts(db ethdb.Database, firstIdx uint64) (bool, uint64, error) {
// Check first block for legacy receipt format
numAncients, err := db.Ancients()
if err != nil {
return false, 0, err
}
if numAncients < 1 {
return false, 0, nil
}
if firstIdx >= numAncients {
return false, firstIdx, nil
}
var (
legacy bool
blob []byte
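		// 0xc0 is the RLP encoding of an empty list, i.e. a block without receipts.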
emptyRLPList = []byte{192}
)
// Find first block with non-empty receipt, only if
// the index is not already provided.
if firstIdx == 0 {
for i := uint64(0); i < numAncients; i++ {
blob, err = db.Ancient("receipts", i)
if err != nil {
return false, 0, err
}
if len(blob) == 0 {
continue
}
if !bytes.Equal(blob, emptyRLPList) {
firstIdx = i
break
}
}
}
// Is first non-empty receipt legacy?
first, err := db.Ancient("receipts", firstIdx)
if err != nil {
return false, 0, err
}
legacy, err = types.IsLegacyStoredReceipts(first)
return legacy, firstIdx, err
}
| cmd/geth/dbcmd.go | 1 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.0009352441411465406,
0.00019655434880405664,
0.00016352036618627608,
0.0001704034802969545,
0.0001055930188158527
] |
{
"id": 5,
"code_window": [
"// Package common contains various helper functions.\n",
"package common\n",
"\n",
"import (\n",
"\t\"encoding/hex\"\n",
")\n",
"\n",
"// FromHex returns the bytes represented by the hexadecimal string s.\n",
"// s may be prefixed with \"0x\".\n",
"func FromHex(s string) []byte {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"errors\"\n",
"\n",
"\t\"github.com/ethereum/go-ethereum/common/hexutil\"\n"
],
"file_path": "common/bytes.go",
"type": "add",
"edit_start_line_idx": 21
} | // Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package geth contains the simplified mobile APIs to go-ethereum.
//
// The scope of this package is *not* to allow writing a custom Ethereum client
// with pieces plucked from go-ethereum, rather to allow writing native dapps on
// mobile platforms. Keep this in mind when using or extending this package!
//
// API limitations
//
// Since gomobile cannot bridge arbitrary types between Go and Android/iOS, the
// exposed APIs need to be manually wrapped into simplified types, with custom
// constructors and getters/setters to ensure that they can be meaningfully used
// from Java/ObjC too.
//
// With this in mind, please try to limit the scope of this package and only add
// essentials without which mobile support cannot work, especially since manually
// syncing the code will be unwieldy otherwise. In the long term we might consider
// writing custom library generators, but those are out of scope now.
//
// Content wise each file in this package corresponds to an entire Go package
// from the go-ethereum repository. Please adhere to this scoping to prevent this
// package getting unmaintainable.
//
// Wrapping guidelines:
//
// Every type that is to be exposed should be wrapped into its own plain struct,
// which internally contains a single field: the original go-ethereum version.
// This is needed because gomobile cannot expose named types for now.
//
// Whenever a method argument or a return type is a custom struct, the pointer
// variant should always be used as value types crossing over between language
// boundaries might have strange behaviors.
//
// Slices of types should be converted into a single multiplicative type wrapping
// a go slice with the methods `Size`, `Get` and `Set`. Further slice operations
// should not be provided to limit the remote code complexity. Arrays should be
// avoided as much as possible since they complicate bounds checking.
//
// If a method has multiple return values (e.g. some return + an error), those
// are generated as output arguments in ObjC. To avoid weird generated names like
// ret_0 for them, please always assign names to output variables if tuples.
//
// Note, a panic *cannot* cross over language boundaries, instead will result in
// an undebuggable SEGFAULT in the process. For error handling only ever use error
// returns, which may be the only or the second return.
package geth
| mobile/doc.go | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.00017774377192836255,
0.00016757829871494323,
0.00016274959489237517,
0.00016652568592689931,
0.000004494715085456846
] |
{
"id": 5,
"code_window": [
"// Package common contains various helper functions.\n",
"package common\n",
"\n",
"import (\n",
"\t\"encoding/hex\"\n",
")\n",
"\n",
"// FromHex returns the bytes represented by the hexadecimal string s.\n",
"// s may be prefixed with \"0x\".\n",
"func FromHex(s string) []byte {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"errors\"\n",
"\n",
"\t\"github.com/ethereum/go-ethereum/common/hexutil\"\n"
],
"file_path": "common/bytes.go",
"type": "add",
"edit_start_line_idx": 21
} | // Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bn256
// For details of the algorithms used, see "Multiplication and Squaring on
// Pairing-Friendly Fields, Devegili et al.
// http://eprint.iacr.org/2006/471.pdf.
import (
"math/big"
)
// gfP2 implements a field of size p² as a quadratic extension of the base
// field where i²=-1.
type gfP2 struct {
x, y *big.Int // value is xi+y.
}
func newGFp2(pool *bnPool) *gfP2 {
return &gfP2{pool.Get(), pool.Get()}
}
func (e *gfP2) String() string {
x := new(big.Int).Mod(e.x, P)
y := new(big.Int).Mod(e.y, P)
return "(" + x.String() + "," + y.String() + ")"
}
func (e *gfP2) Put(pool *bnPool) {
pool.Put(e.x)
pool.Put(e.y)
}
func (e *gfP2) Set(a *gfP2) *gfP2 {
e.x.Set(a.x)
e.y.Set(a.y)
return e
}
func (e *gfP2) SetZero() *gfP2 {
e.x.SetInt64(0)
e.y.SetInt64(0)
return e
}
func (e *gfP2) SetOne() *gfP2 {
e.x.SetInt64(0)
e.y.SetInt64(1)
return e
}
func (e *gfP2) Minimal() {
if e.x.Sign() < 0 || e.x.Cmp(P) >= 0 {
e.x.Mod(e.x, P)
}
if e.y.Sign() < 0 || e.y.Cmp(P) >= 0 {
e.y.Mod(e.y, P)
}
}
func (e *gfP2) IsZero() bool {
return e.x.Sign() == 0 && e.y.Sign() == 0
}
func (e *gfP2) IsOne() bool {
if e.x.Sign() != 0 {
return false
}
words := e.y.Bits()
return len(words) == 1 && words[0] == 1
}
func (e *gfP2) Conjugate(a *gfP2) *gfP2 {
e.y.Set(a.y)
e.x.Neg(a.x)
return e
}
func (e *gfP2) Negative(a *gfP2) *gfP2 {
e.x.Neg(a.x)
e.y.Neg(a.y)
return e
}
func (e *gfP2) Add(a, b *gfP2) *gfP2 {
e.x.Add(a.x, b.x)
e.y.Add(a.y, b.y)
return e
}
func (e *gfP2) Sub(a, b *gfP2) *gfP2 {
e.x.Sub(a.x, b.x)
e.y.Sub(a.y, b.y)
return e
}
func (e *gfP2) Double(a *gfP2) *gfP2 {
e.x.Lsh(a.x, 1)
e.y.Lsh(a.y, 1)
return e
}
func (c *gfP2) Exp(a *gfP2, power *big.Int, pool *bnPool) *gfP2 {
sum := newGFp2(pool)
sum.SetOne()
t := newGFp2(pool)
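	// Square-and-multiply over the exponent bits, most significant bit first.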
for i := power.BitLen() - 1; i >= 0; i-- {
t.Square(sum, pool)
if power.Bit(i) != 0 {
sum.Mul(t, a, pool)
} else {
sum.Set(t)
}
}
c.Set(sum)
sum.Put(pool)
t.Put(pool)
return c
}
// See "Multiplication and Squaring in Pairing-Friendly Fields",
// http://eprint.iacr.org/2006/471.pdf
func (e *gfP2) Mul(a, b *gfP2, pool *bnPool) *gfP2 {
tx := pool.Get().Mul(a.x, b.y)
t := pool.Get().Mul(b.x, a.y)
tx.Add(tx, t)
tx.Mod(tx, P)
ty := pool.Get().Mul(a.y, b.y)
t.Mul(a.x, b.x)
ty.Sub(ty, t)
e.y.Mod(ty, P)
e.x.Set(tx)
pool.Put(tx)
pool.Put(ty)
pool.Put(t)
return e
}
func (e *gfP2) MulScalar(a *gfP2, b *big.Int) *gfP2 {
e.x.Mul(a.x, b)
e.y.Mul(a.y, b)
return e
}
// MulXi sets e=ξa where ξ=i+9 and then returns e.
func (e *gfP2) MulXi(a *gfP2, pool *bnPool) *gfP2 {
	// (xi+y)(i+9) = (9x+y)i+(9y-x)
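	// 9x is computed cheaply as (x<<3)+x; 9y is built the same way below.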
tx := pool.Get().Lsh(a.x, 3)
tx.Add(tx, a.x)
tx.Add(tx, a.y)
ty := pool.Get().Lsh(a.y, 3)
ty.Add(ty, a.y)
ty.Sub(ty, a.x)
e.x.Set(tx)
e.y.Set(ty)
pool.Put(tx)
pool.Put(ty)
return e
}
func (e *gfP2) Square(a *gfP2, pool *bnPool) *gfP2 {
// Complex squaring algorithm:
	// (xi+y)² = (x+y)(y-x) + 2*i*x*y
t1 := pool.Get().Sub(a.y, a.x)
t2 := pool.Get().Add(a.x, a.y)
ty := pool.Get().Mul(t1, t2)
ty.Mod(ty, P)
t1.Mul(a.x, a.y)
t1.Lsh(t1, 1)
e.x.Mod(t1, P)
e.y.Set(ty)
pool.Put(t1)
pool.Put(t2)
pool.Put(ty)
return e
}
func (e *gfP2) Invert(a *gfP2, pool *bnPool) *gfP2 {
// See "Implementing cryptographic pairings", M. Scott, section 3.2.
// ftp://136.206.11.249/pub/crypto/pairings.pdf
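	// For a = y + xi, the inverse is (y - xi)/(x² + y²); t accumulates x² + y².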
t := pool.Get()
t.Mul(a.y, a.y)
t2 := pool.Get()
t2.Mul(a.x, a.x)
t.Add(t, t2)
inv := pool.Get()
inv.ModInverse(t, P)
e.x.Neg(a.x)
e.x.Mul(e.x, inv)
e.x.Mod(e.x, P)
e.y.Mul(a.y, inv)
e.y.Mod(e.y, P)
pool.Put(t)
pool.Put(t2)
pool.Put(inv)
return e
}
func (e *gfP2) Real() *big.Int {
return e.x
}
func (e *gfP2) Imag() *big.Int {
return e.y
}
| crypto/bn256/google/gfp2.go | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.00017741766350809485,
0.00017122221470344812,
0.00016208675515372306,
0.0001713426609057933,
0.000004170005468040472
] |
{
"id": 5,
"code_window": [
"// Package common contains various helper functions.\n",
"package common\n",
"\n",
"import (\n",
"\t\"encoding/hex\"\n",
")\n",
"\n",
"// FromHex returns the bytes represented by the hexadecimal string s.\n",
"// s may be prefixed with \"0x\".\n",
"func FromHex(s string) []byte {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"errors\"\n",
"\n",
"\t\"github.com/ethereum/go-ethereum/common/hexutil\"\n"
],
"file_path": "common/bytes.go",
"type": "add",
"edit_start_line_idx": 21
} | package secp256k1
import "testing"
func TestFuzzer(t *testing.T) {
test := "00000000N0000000/R00000000000000000U0000S0000000mkhP000000000000000U"
Fuzz([]byte(test))
}
| tests/fuzzers/secp256k1/secp_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.00017381092766299844,
0.00017381092766299844,
0.00017381092766299844,
0.00017381092766299844,
0
] |
{
"id": 7,
"code_window": [
"func (api *PrivateDebugAPI) SetHead(number hexutil.Uint64) {\n",
"\tapi.b.SetHead(uint64(number))\n",
"}\n",
"\n",
"// PublicNetAPI offers network related RPC methods\n",
"type PublicNetAPI struct {\n",
"\tnet *p2p.Server\n",
"\tnetworkVersion uint64\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// DbGet returns the raw value of a key stored in the database.\n",
"func (api *PrivateDebugAPI) DbGet(key string) (hexutil.Bytes, error) {\n",
"\tblob, err := common.ParseHexOrString(key)\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n",
"\t}\n",
"\treturn api.b.ChainDb().Get(blob)\n",
"}\n",
"\n"
],
"file_path": "internal/ethapi/api.go",
"type": "add",
"edit_start_line_idx": 1000
} | // Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package web3ext contains geth specific web3.js extensions.
package web3ext
var Modules = map[string]string{
"admin": AdminJs,
"clique": CliqueJs,
"ethash": EthashJs,
"debug": DebugJs,
"eth": EthJs,
"miner": MinerJs,
"net": NetJs,
"personal": PersonalJs,
"rpc": RpcJs,
"txpool": TxpoolJs,
"les": LESJs,
"vflux": VfluxJs,
}
const CliqueJs = `
web3._extend({
property: 'clique',
methods: [
new web3._extend.Method({
name: 'getSnapshot',
call: 'clique_getSnapshot',
params: 1,
inputFormatter: [web3._extend.formatters.inputBlockNumberFormatter]
}),
new web3._extend.Method({
name: 'getSnapshotAtHash',
call: 'clique_getSnapshotAtHash',
params: 1
}),
new web3._extend.Method({
name: 'getSigners',
call: 'clique_getSigners',
params: 1,
inputFormatter: [web3._extend.formatters.inputBlockNumberFormatter]
}),
new web3._extend.Method({
name: 'getSignersAtHash',
call: 'clique_getSignersAtHash',
params: 1
}),
new web3._extend.Method({
name: 'propose',
call: 'clique_propose',
params: 2
}),
new web3._extend.Method({
name: 'discard',
call: 'clique_discard',
params: 1
}),
new web3._extend.Method({
name: 'status',
call: 'clique_status',
params: 0
}),
new web3._extend.Method({
name: 'getSigner',
call: 'clique_getSigner',
params: 1,
inputFormatter: [null]
}),
],
properties: [
new web3._extend.Property({
name: 'proposals',
getter: 'clique_proposals'
}),
]
});
`
const EthashJs = `
web3._extend({
property: 'ethash',
methods: [
new web3._extend.Method({
name: 'getWork',
call: 'ethash_getWork',
params: 0
}),
new web3._extend.Method({
name: 'getHashrate',
call: 'ethash_getHashrate',
params: 0
}),
new web3._extend.Method({
name: 'submitWork',
call: 'ethash_submitWork',
params: 3,
}),
new web3._extend.Method({
name: 'submitHashrate',
call: 'ethash_submitHashrate',
params: 2,
}),
]
});
`
const AdminJs = `
web3._extend({
property: 'admin',
methods: [
new web3._extend.Method({
name: 'addPeer',
call: 'admin_addPeer',
params: 1
}),
new web3._extend.Method({
name: 'removePeer',
call: 'admin_removePeer',
params: 1
}),
new web3._extend.Method({
name: 'addTrustedPeer',
call: 'admin_addTrustedPeer',
params: 1
}),
new web3._extend.Method({
name: 'removeTrustedPeer',
call: 'admin_removeTrustedPeer',
params: 1
}),
new web3._extend.Method({
name: 'exportChain',
call: 'admin_exportChain',
params: 3,
inputFormatter: [null, null, null]
}),
new web3._extend.Method({
name: 'importChain',
call: 'admin_importChain',
params: 1
}),
new web3._extend.Method({
name: 'sleepBlocks',
call: 'admin_sleepBlocks',
params: 2
}),
new web3._extend.Method({
name: 'startHTTP',
call: 'admin_startHTTP',
params: 5,
inputFormatter: [null, null, null, null, null]
}),
new web3._extend.Method({
name: 'stopHTTP',
call: 'admin_stopHTTP'
}),
// This method is deprecated.
new web3._extend.Method({
name: 'startRPC',
call: 'admin_startRPC',
params: 5,
inputFormatter: [null, null, null, null, null]
}),
// This method is deprecated.
new web3._extend.Method({
name: 'stopRPC',
call: 'admin_stopRPC'
}),
new web3._extend.Method({
name: 'startWS',
call: 'admin_startWS',
params: 4,
inputFormatter: [null, null, null, null]
}),
new web3._extend.Method({
name: 'stopWS',
call: 'admin_stopWS'
}),
],
properties: [
new web3._extend.Property({
name: 'nodeInfo',
getter: 'admin_nodeInfo'
}),
new web3._extend.Property({
name: 'peers',
getter: 'admin_peers'
}),
new web3._extend.Property({
name: 'datadir',
getter: 'admin_datadir'
}),
]
});
`
const DebugJs = `
web3._extend({
property: 'debug',
methods: [
new web3._extend.Method({
name: 'accountRange',
call: 'debug_accountRange',
params: 6,
inputFormatter: [web3._extend.formatters.inputDefaultBlockNumberFormatter, null, null, null, null, null],
}),
new web3._extend.Method({
name: 'printBlock',
call: 'debug_printBlock',
params: 1,
outputFormatter: console.log
}),
new web3._extend.Method({
name: 'getHeaderRlp',
call: 'debug_getHeaderRlp',
params: 1
}),
new web3._extend.Method({
name: 'getBlockRlp',
call: 'debug_getBlockRlp',
params: 1
}),
new web3._extend.Method({
name: 'testSignCliqueBlock',
call: 'debug_testSignCliqueBlock',
params: 2,
inputFormatter: [web3._extend.formatters.inputAddressFormatter, null],
}),
new web3._extend.Method({
name: 'setHead',
call: 'debug_setHead',
params: 1
}),
new web3._extend.Method({
name: 'seedHash',
call: 'debug_seedHash',
params: 1
}),
new web3._extend.Method({
name: 'dumpBlock',
call: 'debug_dumpBlock',
params: 1,
inputFormatter: [web3._extend.formatters.inputBlockNumberFormatter]
}),
new web3._extend.Method({
name: 'chaindbProperty',
call: 'debug_chaindbProperty',
params: 1,
outputFormatter: console.log
}),
new web3._extend.Method({
name: 'chaindbCompact',
call: 'debug_chaindbCompact',
}),
new web3._extend.Method({
name: 'verbosity',
call: 'debug_verbosity',
params: 1
}),
new web3._extend.Method({
name: 'vmodule',
call: 'debug_vmodule',
params: 1
}),
new web3._extend.Method({
name: 'backtraceAt',
call: 'debug_backtraceAt',
params: 1,
}),
new web3._extend.Method({
name: 'stacks',
call: 'debug_stacks',
params: 1,
inputFormatter: [null],
outputFormatter: console.log
}),
new web3._extend.Method({
name: 'freeOSMemory',
call: 'debug_freeOSMemory',
params: 0,
}),
new web3._extend.Method({
name: 'setGCPercent',
call: 'debug_setGCPercent',
params: 1,
}),
new web3._extend.Method({
name: 'memStats',
call: 'debug_memStats',
params: 0,
}),
new web3._extend.Method({
name: 'gcStats',
call: 'debug_gcStats',
params: 0,
}),
new web3._extend.Method({
name: 'cpuProfile',
call: 'debug_cpuProfile',
params: 2
}),
new web3._extend.Method({
name: 'startCPUProfile',
call: 'debug_startCPUProfile',
params: 1
}),
new web3._extend.Method({
name: 'stopCPUProfile',
call: 'debug_stopCPUProfile',
params: 0
}),
new web3._extend.Method({
name: 'goTrace',
call: 'debug_goTrace',
params: 2
}),
new web3._extend.Method({
name: 'startGoTrace',
call: 'debug_startGoTrace',
params: 1
}),
new web3._extend.Method({
name: 'stopGoTrace',
call: 'debug_stopGoTrace',
params: 0
}),
new web3._extend.Method({
name: 'blockProfile',
call: 'debug_blockProfile',
params: 2
}),
new web3._extend.Method({
name: 'setBlockProfileRate',
call: 'debug_setBlockProfileRate',
params: 1
}),
new web3._extend.Method({
name: 'writeBlockProfile',
call: 'debug_writeBlockProfile',
params: 1
}),
new web3._extend.Method({
name: 'mutexProfile',
call: 'debug_mutexProfile',
params: 2
}),
new web3._extend.Method({
name: 'setMutexProfileFraction',
call: 'debug_setMutexProfileFraction',
params: 1
}),
new web3._extend.Method({
name: 'writeMutexProfile',
call: 'debug_writeMutexProfile',
params: 1
}),
new web3._extend.Method({
name: 'writeMemProfile',
call: 'debug_writeMemProfile',
params: 1
}),
new web3._extend.Method({
name: 'traceBlock',
call: 'debug_traceBlock',
params: 2,
inputFormatter: [null, null]
}),
new web3._extend.Method({
name: 'traceBlockFromFile',
call: 'debug_traceBlockFromFile',
params: 2,
inputFormatter: [null, null]
}),
new web3._extend.Method({
name: 'traceBadBlock',
call: 'debug_traceBadBlock',
params: 1,
inputFormatter: [null]
}),
new web3._extend.Method({
name: 'standardTraceBadBlockToFile',
call: 'debug_standardTraceBadBlockToFile',
params: 2,
inputFormatter: [null, null]
}),
new web3._extend.Method({
name: 'intermediateRoots',
call: 'debug_intermediateRoots',
params: 2,
inputFormatter: [null, null]
}),
new web3._extend.Method({
name: 'standardTraceBlockToFile',
call: 'debug_standardTraceBlockToFile',
params: 2,
inputFormatter: [null, null]
}),
new web3._extend.Method({
name: 'traceBlockByNumber',
call: 'debug_traceBlockByNumber',
params: 2,
inputFormatter: [web3._extend.formatters.inputBlockNumberFormatter, null]
}),
new web3._extend.Method({
name: 'traceBlockByHash',
call: 'debug_traceBlockByHash',
params: 2,
inputFormatter: [null, null]
}),
new web3._extend.Method({
name: 'traceTransaction',
call: 'debug_traceTransaction',
params: 2,
inputFormatter: [null, null]
}),
new web3._extend.Method({
name: 'traceCall',
call: 'debug_traceCall',
params: 3,
inputFormatter: [null, null, null]
}),
new web3._extend.Method({
name: 'preimage',
call: 'debug_preimage',
params: 1,
inputFormatter: [null]
}),
new web3._extend.Method({
name: 'getBadBlocks',
call: 'debug_getBadBlocks',
params: 0,
}),
new web3._extend.Method({
name: 'storageRangeAt',
call: 'debug_storageRangeAt',
params: 5,
}),
new web3._extend.Method({
name: 'getModifiedAccountsByNumber',
call: 'debug_getModifiedAccountsByNumber',
params: 2,
inputFormatter: [null, null],
}),
new web3._extend.Method({
name: 'getModifiedAccountsByHash',
call: 'debug_getModifiedAccountsByHash',
params: 2,
inputFormatter:[null, null],
}),
new web3._extend.Method({
name: 'freezeClient',
call: 'debug_freezeClient',
params: 1,
}),
new web3._extend.Method({
name: 'getAccessibleState',
call: 'debug_getAccessibleState',
params: 2,
inputFormatter:[web3._extend.formatters.inputBlockNumberFormatter, web3._extend.formatters.inputBlockNumberFormatter],
}),
],
properties: []
});
`
const EthJs = `
web3._extend({
property: 'eth',
methods: [
new web3._extend.Method({
name: 'chainId',
call: 'eth_chainId',
params: 0
}),
new web3._extend.Method({
name: 'sign',
call: 'eth_sign',
params: 2,
inputFormatter: [web3._extend.formatters.inputAddressFormatter, null]
}),
new web3._extend.Method({
name: 'resend',
call: 'eth_resend',
params: 3,
inputFormatter: [web3._extend.formatters.inputTransactionFormatter, web3._extend.utils.fromDecimal, web3._extend.utils.fromDecimal]
}),
new web3._extend.Method({
name: 'signTransaction',
call: 'eth_signTransaction',
params: 1,
inputFormatter: [web3._extend.formatters.inputTransactionFormatter]
}),
new web3._extend.Method({
name: 'estimateGas',
call: 'eth_estimateGas',
params: 2,
inputFormatter: [web3._extend.formatters.inputCallFormatter, web3._extend.formatters.inputBlockNumberFormatter],
outputFormatter: web3._extend.utils.toDecimal
}),
new web3._extend.Method({
name: 'submitTransaction',
call: 'eth_submitTransaction',
params: 1,
inputFormatter: [web3._extend.formatters.inputTransactionFormatter]
}),
new web3._extend.Method({
name: 'fillTransaction',
call: 'eth_fillTransaction',
params: 1,
inputFormatter: [web3._extend.formatters.inputTransactionFormatter]
}),
new web3._extend.Method({
name: 'getHeaderByNumber',
call: 'eth_getHeaderByNumber',
params: 1,
inputFormatter: [web3._extend.formatters.inputBlockNumberFormatter]
}),
new web3._extend.Method({
name: 'getHeaderByHash',
call: 'eth_getHeaderByHash',
params: 1
}),
new web3._extend.Method({
name: 'getBlockByNumber',
call: 'eth_getBlockByNumber',
params: 2,
inputFormatter: [web3._extend.formatters.inputBlockNumberFormatter, function (val) { return !!val; }]
}),
new web3._extend.Method({
name: 'getBlockByHash',
call: 'eth_getBlockByHash',
params: 2,
inputFormatter: [null, function (val) { return !!val; }]
}),
new web3._extend.Method({
name: 'getRawTransaction',
call: 'eth_getRawTransactionByHash',
params: 1
}),
new web3._extend.Method({
name: 'getRawTransactionFromBlock',
call: function(args) {
return (web3._extend.utils.isString(args[0]) && args[0].indexOf('0x') === 0) ? 'eth_getRawTransactionByBlockHashAndIndex' : 'eth_getRawTransactionByBlockNumberAndIndex';
},
params: 2,
inputFormatter: [web3._extend.formatters.inputBlockNumberFormatter, web3._extend.utils.toHex]
}),
new web3._extend.Method({
name: 'getProof',
call: 'eth_getProof',
params: 3,
inputFormatter: [web3._extend.formatters.inputAddressFormatter, null, web3._extend.formatters.inputBlockNumberFormatter]
}),
new web3._extend.Method({
name: 'createAccessList',
call: 'eth_createAccessList',
params: 2,
inputFormatter: [null, web3._extend.formatters.inputBlockNumberFormatter],
}),
new web3._extend.Method({
name: 'feeHistory',
call: 'eth_feeHistory',
params: 3,
inputFormatter: [null, web3._extend.formatters.inputBlockNumberFormatter, null]
}),
new web3._extend.Method({
name: 'getLogs',
call: 'eth_getLogs',
params: 1,
}),
],
properties: [
new web3._extend.Property({
name: 'pendingTransactions',
getter: 'eth_pendingTransactions',
outputFormatter: function(txs) {
var formatted = [];
for (var i = 0; i < txs.length; i++) {
formatted.push(web3._extend.formatters.outputTransactionFormatter(txs[i]));
formatted[i].blockHash = null;
}
return formatted;
}
}),
new web3._extend.Property({
name: 'maxPriorityFeePerGas',
getter: 'eth_maxPriorityFeePerGas',
outputFormatter: web3._extend.utils.toBigNumber
}),
]
});
`
const MinerJs = `
web3._extend({
property: 'miner',
methods: [
new web3._extend.Method({
name: 'start',
call: 'miner_start',
params: 1,
inputFormatter: [null]
}),
new web3._extend.Method({
name: 'stop',
call: 'miner_stop'
}),
new web3._extend.Method({
name: 'setEtherbase',
call: 'miner_setEtherbase',
params: 1,
inputFormatter: [web3._extend.formatters.inputAddressFormatter]
}),
new web3._extend.Method({
name: 'setExtra',
call: 'miner_setExtra',
params: 1
}),
new web3._extend.Method({
name: 'setGasPrice',
call: 'miner_setGasPrice',
params: 1,
inputFormatter: [web3._extend.utils.fromDecimal]
}),
new web3._extend.Method({
name: 'setGasLimit',
call: 'miner_setGasLimit',
params: 1,
inputFormatter: [web3._extend.utils.fromDecimal]
}),
new web3._extend.Method({
name: 'setRecommitInterval',
call: 'miner_setRecommitInterval',
params: 1,
}),
new web3._extend.Method({
name: 'getHashrate',
call: 'miner_getHashrate'
}),
],
properties: []
});
`
const NetJs = `
web3._extend({
property: 'net',
methods: [],
properties: [
new web3._extend.Property({
name: 'version',
getter: 'net_version'
}),
]
});
`
const PersonalJs = `
web3._extend({
property: 'personal',
methods: [
new web3._extend.Method({
name: 'importRawKey',
call: 'personal_importRawKey',
params: 2
}),
new web3._extend.Method({
name: 'sign',
call: 'personal_sign',
params: 3,
inputFormatter: [null, web3._extend.formatters.inputAddressFormatter, null]
}),
new web3._extend.Method({
name: 'ecRecover',
call: 'personal_ecRecover',
params: 2
}),
new web3._extend.Method({
name: 'openWallet',
call: 'personal_openWallet',
params: 2
}),
new web3._extend.Method({
name: 'deriveAccount',
call: 'personal_deriveAccount',
params: 3
}),
new web3._extend.Method({
name: 'signTransaction',
call: 'personal_signTransaction',
params: 2,
inputFormatter: [web3._extend.formatters.inputTransactionFormatter, null]
}),
new web3._extend.Method({
name: 'unpair',
call: 'personal_unpair',
params: 2
}),
new web3._extend.Method({
name: 'initializeWallet',
call: 'personal_initializeWallet',
params: 1
})
],
properties: [
new web3._extend.Property({
name: 'listWallets',
getter: 'personal_listWallets'
}),
]
})
`
const RpcJs = `
web3._extend({
property: 'rpc',
methods: [],
properties: [
new web3._extend.Property({
name: 'modules',
getter: 'rpc_modules'
}),
]
});
`
const TxpoolJs = `
web3._extend({
property: 'txpool',
methods: [],
properties:
[
new web3._extend.Property({
name: 'content',
getter: 'txpool_content'
}),
new web3._extend.Property({
name: 'inspect',
getter: 'txpool_inspect'
}),
new web3._extend.Property({
name: 'status',
getter: 'txpool_status',
outputFormatter: function(status) {
status.pending = web3._extend.utils.toDecimal(status.pending);
status.queued = web3._extend.utils.toDecimal(status.queued);
return status;
}
}),
new web3._extend.Method({
name: 'contentFrom',
call: 'txpool_contentFrom',
params: 1,
}),
]
});
`
const LESJs = `
web3._extend({
property: 'les',
methods:
[
new web3._extend.Method({
name: 'getCheckpoint',
call: 'les_getCheckpoint',
params: 1
}),
new web3._extend.Method({
name: 'clientInfo',
call: 'les_clientInfo',
params: 1
}),
new web3._extend.Method({
name: 'priorityClientInfo',
call: 'les_priorityClientInfo',
params: 3
}),
new web3._extend.Method({
name: 'setClientParams',
call: 'les_setClientParams',
params: 2
}),
new web3._extend.Method({
name: 'setDefaultParams',
call: 'les_setDefaultParams',
params: 1
}),
new web3._extend.Method({
name: 'addBalance',
call: 'les_addBalance',
params: 2
}),
],
properties:
[
new web3._extend.Property({
name: 'latestCheckpoint',
getter: 'les_latestCheckpoint'
}),
new web3._extend.Property({
name: 'checkpointContractAddress',
getter: 'les_getCheckpointContractAddress'
}),
new web3._extend.Property({
name: 'serverInfo',
getter: 'les_serverInfo'
}),
]
});
`
const VfluxJs = `
web3._extend({
property: 'vflux',
methods:
[
new web3._extend.Method({
name: 'distribution',
call: 'vflux_distribution',
params: 2
}),
new web3._extend.Method({
name: 'timeout',
call: 'vflux_timeout',
params: 2
}),
new web3._extend.Method({
name: 'value',
call: 'vflux_value',
params: 2
}),
],
properties:
[
new web3._extend.Property({
name: 'requestStats',
getter: 'vflux_requestStats'
}),
]
});
`
| internal/web3ext/web3ext.go | 1 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.00043174612801522017,
0.00017861471860669553,
0.00016569164290558547,
0.0001729873038129881,
0.00003450157237239182
] |
{
"id": 7,
"code_window": [
"func (api *PrivateDebugAPI) SetHead(number hexutil.Uint64) {\n",
"\tapi.b.SetHead(uint64(number))\n",
"}\n",
"\n",
"// PublicNetAPI offers network related RPC methods\n",
"type PublicNetAPI struct {\n",
"\tnet *p2p.Server\n",
"\tnetworkVersion uint64\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// DbGet returns the raw value of a key stored in the database.\n",
"func (api *PrivateDebugAPI) DbGet(key string) (hexutil.Bytes, error) {\n",
"\tblob, err := common.ParseHexOrString(key)\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n",
"\t}\n",
"\treturn api.b.ChainDb().Get(blob)\n",
"}\n",
"\n"
],
"file_path": "internal/ethapi/api.go",
"type": "add",
"edit_start_line_idx": 1000
} | // Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bloombits
import (
"sync"
)
// request represents a bloom retrieval task to prioritize and pull from the local
// database or remotely from the network.
type request struct {
	section uint64 // Section index to retrieve the bit-vector from
bit uint // Bit index within the section to retrieve the vector of
}
// response represents the state of a requested bit-vector through a scheduler.
type response struct {
cached []byte // Cached bits to dedup multiple requests
done chan struct{} // Channel to allow waiting for completion
}
// scheduler handles the scheduling of bloom-filter retrieval operations for
// entire section-batches belonging to a single bloom bit. Beside scheduling the
// retrieval operations, this struct also deduplicates the requests and caches
// the results to minimize network/database overhead even in complex filtering
// scenarios.
type scheduler struct {
bit uint // Index of the bit in the bloom filter this scheduler is responsible for
responses map[uint64]*response // Currently pending retrieval requests or already cached responses
lock sync.Mutex // Lock protecting the responses from concurrent access
}
// newScheduler creates a new bloom-filter retrieval scheduler for a specific
// bit index.
func newScheduler(idx uint) *scheduler {
return &scheduler{
bit: idx,
responses: make(map[uint64]*response),
}
}
// run creates a retrieval pipeline, receiving section indexes from sections and
// returning the results in the same order through the done channel. Concurrent
// runs of the same scheduler are allowed, leading to retrieval task deduplication.
func (s *scheduler) run(sections chan uint64, dist chan *request, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
// Create a forwarder channel between requests and responses of the same size as
// the distribution channel (since that will block the pipeline anyway).
pend := make(chan uint64, cap(dist))
// Start the pipeline schedulers to forward between user -> distributor -> user
wg.Add(2)
go s.scheduleRequests(sections, dist, pend, quit, wg)
go s.scheduleDeliveries(pend, done, quit, wg)
}
// reset cleans up any leftovers from previous runs. This is required before a
// restart to ensure the no previously requested but never delivered state will
// cause a lockup.
func (s *scheduler) reset() {
s.lock.Lock()
defer s.lock.Unlock()
for section, res := range s.responses {
if res.cached == nil {
delete(s.responses, section)
}
}
}
// scheduleRequests reads section retrieval requests from the input channel,
// deduplicates the stream and pushes unique retrieval tasks into the distribution
// channel for a database or network layer to honour.
func (s *scheduler) scheduleRequests(reqs chan uint64, dist chan *request, pend chan uint64, quit chan struct{}, wg *sync.WaitGroup) {
// Clean up the goroutine and pipeline when done
defer wg.Done()
defer close(pend)
// Keep reading and scheduling section requests
for {
select {
case <-quit:
return
case section, ok := <-reqs:
// New section retrieval requested
if !ok {
return
}
// Deduplicate retrieval requests
unique := false
s.lock.Lock()
if s.responses[section] == nil {
s.responses[section] = &response{
done: make(chan struct{}),
}
unique = true
}
s.lock.Unlock()
// Schedule the section for retrieval and notify the deliverer to expect this section
if unique {
select {
case <-quit:
return
case dist <- &request{bit: s.bit, section: section}:
}
}
select {
case <-quit:
return
case pend <- section:
}
}
}
}
// scheduleDeliveries reads section acceptance notifications and waits for them
// to be delivered, pushing them into the output data buffer.
func (s *scheduler) scheduleDeliveries(pend chan uint64, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
// Clean up the goroutine and pipeline when done
defer wg.Done()
defer close(done)
// Keep reading notifications and scheduling deliveries
for {
select {
case <-quit:
return
case idx, ok := <-pend:
// New section retrieval pending
if !ok {
return
}
// Wait until the request is honoured
s.lock.Lock()
res := s.responses[idx]
s.lock.Unlock()
select {
case <-quit:
return
case <-res.done:
}
// Deliver the result
select {
case <-quit:
return
case done <- res.cached:
}
}
}
}
// deliver is called by the request distributor when a reply to a request arrives.
func (s *scheduler) deliver(sections []uint64, data [][]byte) {
s.lock.Lock()
defer s.lock.Unlock()
for i, section := range sections {
if res := s.responses[section]; res != nil && res.cached == nil { // Avoid non-requests and double deliveries
res.cached = data[i]
close(res.done)
}
}
}
| core/bloombits/scheduler.go | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.00046609193668700755,
0.00020674712141044438,
0.00016753336240071803,
0.0001732207601889968,
0.000077563185186591
] |
{
"id": 7,
"code_window": [
"func (api *PrivateDebugAPI) SetHead(number hexutil.Uint64) {\n",
"\tapi.b.SetHead(uint64(number))\n",
"}\n",
"\n",
"// PublicNetAPI offers network related RPC methods\n",
"type PublicNetAPI struct {\n",
"\tnet *p2p.Server\n",
"\tnetworkVersion uint64\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// DbGet returns the raw value of a key stored in the database.\n",
"func (api *PrivateDebugAPI) DbGet(key string) (hexutil.Bytes, error) {\n",
"\tblob, err := common.ParseHexOrString(key)\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n",
"\t}\n",
"\treturn api.b.ChainDb().Get(blob)\n",
"}\n",
"\n"
],
"file_path": "internal/ethapi/api.go",
"type": "add",
"edit_start_line_idx": 1000
} | // Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rawdb
import (
"math/big"
"reflect"
"sort"
"sync"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
func TestChainIterator(t *testing.T) {
// Construct test chain db
chainDb := NewMemoryDatabase()
var block *types.Block
var txs []*types.Transaction
to := common.BytesToAddress([]byte{0x11})
block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher()) // Empty genesis block
WriteBlock(chainDb, block)
WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
for i := uint64(1); i <= 10; i++ {
var tx *types.Transaction
if i%2 == 0 {
tx = types.NewTx(&types.LegacyTx{
Nonce: i,
GasPrice: big.NewInt(11111),
Gas: 1111,
To: &to,
Value: big.NewInt(111),
Data: []byte{0x11, 0x11, 0x11},
})
} else {
tx = types.NewTx(&types.AccessListTx{
ChainID: big.NewInt(1337),
Nonce: i,
GasPrice: big.NewInt(11111),
Gas: 1111,
To: &to,
Value: big.NewInt(111),
Data: []byte{0x11, 0x11, 0x11},
})
}
txs = append(txs, tx)
block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher())
WriteBlock(chainDb, block)
WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
}
var cases = []struct {
from, to uint64
reverse bool
expect []int
}{
{0, 11, true, []int{10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}},
{0, 0, true, nil},
{0, 5, true, []int{4, 3, 2, 1, 0}},
{10, 11, true, []int{10}},
{0, 11, false, []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
{0, 0, false, nil},
{10, 11, false, []int{10}},
}
for i, c := range cases {
var numbers []int
hashCh := iterateTransactions(chainDb, c.from, c.to, c.reverse, nil)
if hashCh != nil {
for h := range hashCh {
numbers = append(numbers, int(h.number))
if len(h.hashes) > 0 {
if got, exp := h.hashes[0], txs[h.number-1].Hash(); got != exp {
t.Fatalf("block %d: hash wrong, got %x exp %x", h.number, got, exp)
}
}
}
}
if !c.reverse {
sort.Ints(numbers)
} else {
sort.Sort(sort.Reverse(sort.IntSlice(numbers)))
}
if !reflect.DeepEqual(numbers, c.expect) {
t.Fatalf("Case %d failed, visit element mismatch, want %v, got %v", i, c.expect, numbers)
}
}
}
func TestIndexTransactions(t *testing.T) {
// Construct test chain db
chainDb := NewMemoryDatabase()
var block *types.Block
var txs []*types.Transaction
to := common.BytesToAddress([]byte{0x11})
// Write empty genesis block
block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher())
WriteBlock(chainDb, block)
WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
for i := uint64(1); i <= 10; i++ {
var tx *types.Transaction
if i%2 == 0 {
tx = types.NewTx(&types.LegacyTx{
Nonce: i,
GasPrice: big.NewInt(11111),
Gas: 1111,
To: &to,
Value: big.NewInt(111),
Data: []byte{0x11, 0x11, 0x11},
})
} else {
tx = types.NewTx(&types.AccessListTx{
ChainID: big.NewInt(1337),
Nonce: i,
GasPrice: big.NewInt(11111),
Gas: 1111,
To: &to,
Value: big.NewInt(111),
Data: []byte{0x11, 0x11, 0x11},
})
}
txs = append(txs, tx)
block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher())
WriteBlock(chainDb, block)
WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
}
// verify checks whether the tx indices in the range [from, to)
	// are as expected.
verify := func(from, to int, exist bool, tail uint64) {
for i := from; i < to; i++ {
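			// The genesis block carries no transactions, so there is nothing to check.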
if i == 0 {
continue
}
number := ReadTxLookupEntry(chainDb, txs[i-1].Hash())
if exist && number == nil {
t.Fatalf("Transaction index %d missing", i)
}
if !exist && number != nil {
t.Fatalf("Transaction index %d is not deleted", i)
}
}
number := ReadTxIndexTail(chainDb)
if number == nil || *number != tail {
t.Fatalf("Transaction tail mismatch")
}
}
IndexTransactions(chainDb, 5, 11, nil)
verify(5, 11, true, 5)
verify(0, 5, false, 5)
IndexTransactions(chainDb, 0, 5, nil)
verify(0, 11, true, 0)
UnindexTransactions(chainDb, 0, 5, nil)
verify(5, 11, true, 5)
verify(0, 5, false, 5)
UnindexTransactions(chainDb, 5, 11, nil)
verify(0, 11, false, 11)
// Testing corner cases
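	// The signal channel lets the hook below abort the indexer mid-run,
	// emulating an interrupted indexing session that must stay resumable.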
signal := make(chan struct{})
var once sync.Once
indexTransactionsForTesting(chainDb, 5, 11, signal, func(n uint64) bool {
if n <= 8 {
once.Do(func() {
close(signal)
})
return false
}
return true
})
verify(9, 11, true, 9)
verify(0, 9, false, 9)
IndexTransactions(chainDb, 0, 9, nil)
signal = make(chan struct{})
var once2 sync.Once
unindexTransactionsForTesting(chainDb, 0, 11, signal, func(n uint64) bool {
if n >= 8 {
once2.Do(func() {
close(signal)
})
return false
}
return true
})
verify(8, 11, true, 8)
verify(0, 8, false, 8)
}
| core/rawdb/chain_iterator_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.0020453191827982664,
0.0002687568194232881,
0.0001631352206459269,
0.00017212719831150025,
0.0003979588800575584
] |
{
"id": 7,
"code_window": [
"func (api *PrivateDebugAPI) SetHead(number hexutil.Uint64) {\n",
"\tapi.b.SetHead(uint64(number))\n",
"}\n",
"\n",
"// PublicNetAPI offers network related RPC methods\n",
"type PublicNetAPI struct {\n",
"\tnet *p2p.Server\n",
"\tnetworkVersion uint64\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// DbGet returns the raw value of a key stored in the database.\n",
"func (api *PrivateDebugAPI) DbGet(key string) (hexutil.Bytes, error) {\n",
"\tblob, err := common.ParseHexOrString(key)\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n",
"\t}\n",
"\treturn api.b.ChainDb().Get(blob)\n",
"}\n",
"\n"
],
"file_path": "internal/ethapi/api.go",
"type": "add",
"edit_start_line_idx": 1000
} | // Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package flowcontrol
import (
"fmt"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
)
// logger collects events in string format and discards events older than the
// "keep" parameter
type logger struct {
events map[uint64]logEvent
writePtr, delPtr uint64
keep time.Duration
}
// logEvent describes a single event
type logEvent struct {
time mclock.AbsTime
event string
}
// newLogger creates a new logger
func newLogger(keep time.Duration) *logger {
return &logger{
events: make(map[uint64]logEvent),
keep: keep,
}
}
// add adds a new event and discards old events if possible
func (l *logger) add(now mclock.AbsTime, event string) {
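	// The map plus two monotonically increasing pointers behaves like a ring
	// buffer: delPtr chases writePtr, evicting entries older than keep.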
keepAfter := now - mclock.AbsTime(l.keep)
for l.delPtr < l.writePtr && l.events[l.delPtr].time <= keepAfter {
delete(l.events, l.delPtr)
l.delPtr++
}
l.events[l.writePtr] = logEvent{now, event}
l.writePtr++
}
// dump prints all stored events
func (l *logger) dump(now mclock.AbsTime) {
for i := l.delPtr; i < l.writePtr; i++ {
e := l.events[i]
fmt.Println(time.Duration(e.time-now), e.event)
}
}
| les/flowcontrol/logger.go | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.00017723150085657835,
0.000173125765286386,
0.00016690722259227186,
0.00017576735990587622,
0.000004031210210087011
] |
{
"id": 8,
"code_window": [
"\t\t\tcall: 'debug_getAccessibleState',\n",
"\t\t\tparams: 2,\n",
"\t\t\tinputFormatter:[web3._extend.formatters.inputBlockNumberFormatter, web3._extend.formatters.inputBlockNumberFormatter],\n",
"\t\t}),\n",
"\t],\n",
"\tproperties: []\n",
"});\n",
"`\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tnew web3._extend.Method({\n",
"\t\t\tname: 'dbGet',\n",
"\t\t\tcall: 'debug_dbGet',\n",
"\t\t\tparams: 1\n",
"\t\t}),\n"
],
"file_path": "internal/web3ext/web3ext.go",
"type": "add",
"edit_start_line_idx": 473
} | // Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"errors"
"fmt"
"os"
"os/signal"
"path/filepath"
"sort"
"strconv"
"strings"
"syscall"
"time"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/console/prompt"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie"
"github.com/olekukonko/tablewriter"
"gopkg.in/urfave/cli.v1"
)
var (
removedbCommand = cli.Command{
Action: utils.MigrateFlags(removeDB),
Name: "removedb",
Usage: "Remove blockchain and state databases",
ArgsUsage: "",
Flags: []cli.Flag{
utils.DataDirFlag,
},
Category: "DATABASE COMMANDS",
Description: `
Remove blockchain and state databases`,
}
dbCommand = cli.Command{
Name: "db",
Usage: "Low level database operations",
ArgsUsage: "",
Category: "DATABASE COMMANDS",
Subcommands: []cli.Command{
dbInspectCmd,
dbStatCmd,
dbCompactCmd,
dbGetCmd,
dbDeleteCmd,
dbPutCmd,
dbGetSlotsCmd,
dbDumpFreezerIndex,
dbImportCmd,
dbExportCmd,
dbMetadataCmd,
dbMigrateFreezerCmd,
},
}
dbInspectCmd = cli.Command{
Action: utils.MigrateFlags(inspect),
Name: "inspect",
ArgsUsage: "<prefix> <start>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.AncientFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Usage: "Inspect the storage size for each type of data in the database",
	Description: `This command iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
}
dbStatCmd = cli.Command{
Action: utils.MigrateFlags(dbStats),
Name: "stats",
Usage: "Print leveldb statistics",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
}
dbCompactCmd = cli.Command{
Action: utils.MigrateFlags(dbCompact),
Name: "compact",
Usage: "Compact leveldb database. WARNING: May take a very long time",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
utils.CacheFlag,
utils.CacheDatabaseFlag,
},
Description: `This command performs a database compaction.
WARNING: This operation may take a very long time to finish, and may cause database
corruption if it is aborted during execution!`,
}
dbGetCmd = cli.Command{
Action: utils.MigrateFlags(dbGet),
Name: "get",
Usage: "Show the value of a database key",
ArgsUsage: "<hex-encoded key>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "This command looks up the specified database key from the database.",
}
dbDeleteCmd = cli.Command{
Action: utils.MigrateFlags(dbDelete),
Name: "delete",
Usage: "Delete a database key (WARNING: may corrupt your database)",
ArgsUsage: "<hex-encoded key>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: `This command deletes the specified database key from the database.
WARNING: This is a low-level operation which may cause database corruption!`,
}
dbPutCmd = cli.Command{
Action: utils.MigrateFlags(dbPut),
Name: "put",
Usage: "Set the value of a database key (WARNING: may corrupt your database)",
ArgsUsage: "<hex-encoded key> <hex-encoded value>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: `This command sets a given database key to the given value.
WARNING: This is a low-level operation which may cause database corruption!`,
}
dbGetSlotsCmd = cli.Command{
Action: utils.MigrateFlags(dbDumpTrie),
Name: "dumptrie",
Usage: "Show the storage key/values of a given storage trie",
ArgsUsage: "<hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "This command looks up the specified database key from the database.",
}
dbDumpFreezerIndex = cli.Command{
Action: utils.MigrateFlags(freezerInspect),
Name: "freezer-index",
Usage: "Dump out the index of a given freezer type",
ArgsUsage: "<type> <start (int)> <end (int)>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "This command displays information about the freezer index.",
}
dbImportCmd = cli.Command{
Action: utils.MigrateFlags(importLDBdata),
Name: "import",
Usage: "Imports leveldb-data from an exported RLP dump.",
ArgsUsage: "<dumpfile> <start (optional)",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "The import command imports the specific chain data from an RLP encoded stream.",
}
dbExportCmd = cli.Command{
Action: utils.MigrateFlags(exportChaindata),
Name: "export",
Usage: "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
ArgsUsage: "<type> <dumpfile>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
}
dbMetadataCmd = cli.Command{
Action: utils.MigrateFlags(showMetaData),
Name: "metadata",
Usage: "Shows metadata about the chain status.",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "Shows metadata about the chain status.",
}
dbMigrateFreezerCmd = cli.Command{
Action: utils.MigrateFlags(freezerMigrate),
Name: "freezer-migrate",
Usage: "Migrate legacy parts of the freezer. (WARNING: may take a long time)",
ArgsUsage: "",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: `The freezer-migrate command checks your database for receipts in a legacy format and updates those.
WARNING: please back-up the receipt files in your ancients before running this command.`,
}
)
func removeDB(ctx *cli.Context) error {
stack, config := makeConfigNode(ctx)
// Remove the full node state database
path := stack.ResolvePath("chaindata")
if common.FileExist(path) {
confirmAndRemoveDB(path, "full node state database")
} else {
log.Info("Full node state database missing", "path", path)
}
// Remove the full node ancient database
path = config.Eth.DatabaseFreezer
switch {
case path == "":
path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
case !filepath.IsAbs(path):
path = config.Node.ResolvePath(path)
}
if common.FileExist(path) {
confirmAndRemoveDB(path, "full node ancient database")
} else {
log.Info("Full node ancient database missing", "path", path)
}
// Remove the light node database
path = stack.ResolvePath("lightchaindata")
if common.FileExist(path) {
confirmAndRemoveDB(path, "light node database")
} else {
log.Info("Light node database missing", "path", path)
}
return nil
}
// confirmAndRemoveDB prompts the user for a last confirmation and removes the
// folder if accepted.
func confirmAndRemoveDB(database string, kind string) {
confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
switch {
case err != nil:
utils.Fatalf("%v", err)
case !confirm:
log.Info("Database deletion skipped", "path", database)
default:
start := time.Now()
filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
			// If we're at the top level folder, recurse into it
if path == database {
return nil
}
// Delete all the files, but not subfolders
if !info.IsDir() {
os.Remove(path)
return nil
}
return filepath.SkipDir
})
log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
}
}
func inspect(ctx *cli.Context) error {
var (
prefix []byte
start []byte
)
if ctx.NArg() > 2 {
return fmt.Errorf("Max 2 arguments: %v", ctx.Command.ArgsUsage)
}
if ctx.NArg() >= 1 {
if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil {
return fmt.Errorf("failed to hex-decode 'prefix': %v", err)
} else {
prefix = d
}
}
if ctx.NArg() >= 2 {
if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil {
return fmt.Errorf("failed to hex-decode 'start': %v", err)
} else {
start = d
}
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
return rawdb.InspectDatabase(db, prefix, start)
}
func showLeveldbStats(db ethdb.Stater) {
if stats, err := db.Stat("leveldb.stats"); err != nil {
log.Warn("Failed to read database stats", "error", err)
} else {
fmt.Println(stats)
}
if ioStats, err := db.Stat("leveldb.iostats"); err != nil {
log.Warn("Failed to read database iostats", "error", err)
} else {
fmt.Println(ioStats)
}
}
func dbStats(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
showLeveldbStats(db)
return nil
}
func dbCompact(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
log.Info("Stats before compaction")
showLeveldbStats(db)
log.Info("Triggering compaction")
if err := db.Compact(nil, nil); err != nil {
log.Info("Compact err", "error", err)
return err
}
log.Info("Stats after compaction")
showLeveldbStats(db)
return nil
}
// dbGet shows the value of a given database key
func dbGet(ctx *cli.Context) error {
if ctx.NArg() != 1 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
key, err := parseHexOrString(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
}
data, err := db.Get(key)
if err != nil {
log.Info("Get operation failed", "key", fmt.Sprintf("0x%#x", key), "error", err)
return err
}
fmt.Printf("key %#x: %#x\n", key, data)
return nil
}
// dbDelete deletes a key from the database
func dbDelete(ctx *cli.Context) error {
if ctx.NArg() != 1 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
key, err := parseHexOrString(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
}
data, err := db.Get(key)
if err == nil {
fmt.Printf("Previous value: %#x\n", data)
}
if err = db.Delete(key); err != nil {
log.Info("Delete operation returned an error", "key", fmt.Sprintf("0x%#x", key), "error", err)
return err
}
return nil
}
// dbPut overwrites a value in the database
func dbPut(ctx *cli.Context) error {
if ctx.NArg() != 2 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
var (
key []byte
value []byte
data []byte
err error
)
key, err = parseHexOrString(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
}
value, err = hexutil.Decode(ctx.Args().Get(1))
if err != nil {
log.Info("Could not decode the value", "error", err)
return err
}
data, err = db.Get(key)
if err == nil {
fmt.Printf("Previous value: %#x\n", data)
}
return db.Put(key, value)
}
// dbDumpTrie shows the key-value slots of a given storage trie
func dbDumpTrie(ctx *cli.Context) error {
if ctx.NArg() < 1 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
var (
root []byte
start []byte
max = int64(-1)
err error
)
if root, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
log.Info("Could not decode the root", "error", err)
return err
}
stRoot := common.BytesToHash(root)
if ctx.NArg() >= 2 {
if start, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
log.Info("Could not decode the seek position", "error", err)
return err
}
}
if ctx.NArg() >= 3 {
if max, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
log.Info("Could not decode the max count", "error", err)
return err
}
}
theTrie, err := trie.New(stRoot, trie.NewDatabase(db))
if err != nil {
return err
}
var count int64
it := trie.NewIterator(theTrie.NodeIterator(start))
for it.Next() {
if max > 0 && count == max {
fmt.Printf("Exiting after %d values\n", count)
break
}
fmt.Printf(" %d. key %#x: %#x\n", count, it.Key, it.Value)
count++
}
return it.Err
}
func freezerInspect(ctx *cli.Context) error {
var (
start, end int64
disableSnappy bool
err error
)
if ctx.NArg() < 3 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
kind := ctx.Args().Get(0)
if noSnap, ok := rawdb.FreezerNoSnappy[kind]; !ok {
var options []string
for opt := range rawdb.FreezerNoSnappy {
options = append(options, opt)
}
sort.Strings(options)
return fmt.Errorf("Could read freezer-type '%v'. Available options: %v", kind, options)
} else {
disableSnappy = noSnap
}
if start, err = strconv.ParseInt(ctx.Args().Get(1), 10, 64); err != nil {
log.Info("Could read start-param", "error", err)
return err
}
if end, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
log.Info("Could read count param", "error", err)
return err
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
path := filepath.Join(stack.ResolvePath("chaindata"), "ancient")
log.Info("Opening freezer", "location", path, "name", kind)
if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy, true); err != nil {
return err
} else {
f.DumpIndex(start, end)
}
return nil
}
// parseHexOrString tries to hex-decode str, but if the 0x prefix is missing, it instead just returns the raw bytes
func parseHexOrString(str string) ([]byte, error) {
b, err := hexutil.Decode(str)
if errors.Is(err, hexutil.ErrMissingPrefix) {
return []byte(str), nil
}
return b, err
}
func importLDBdata(ctx *cli.Context) error {
start := 0
switch ctx.NArg() {
case 1:
break
case 2:
s, err := strconv.Atoi(ctx.Args().Get(1))
if err != nil {
return fmt.Errorf("second arg must be an integer: %v", err)
}
start = s
default:
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
var (
fName = ctx.Args().Get(0)
stack, _ = makeConfigNode(ctx)
interrupt = make(chan os.Signal, 1)
stop = make(chan struct{})
)
defer stack.Close()
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(interrupt)
defer close(interrupt)
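	// Closing the interrupt channel on return also unblocks the goroutine
	// below: a real signal logs a notice, the shutdown close does not.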
go func() {
if _, ok := <-interrupt; ok {
log.Info("Interrupted during ldb import, stopping at next batch")
}
close(stop)
}()
db := utils.MakeChainDatabase(ctx, stack, false)
return utils.ImportLDBData(db, fName, int64(start), stop)
}
type preimageIterator struct {
iter ethdb.Iterator
}
func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
for iter.iter.Next() {
key := iter.iter.Key()
if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
return utils.OpBatchAdd, key, iter.iter.Value(), true
}
}
return 0, nil, nil, false
}
func (iter *preimageIterator) Release() {
iter.iter.Release()
}
type snapshotIterator struct {
init bool
account ethdb.Iterator
storage ethdb.Iterator
}
func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
if !iter.init {
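		// Delete the snapshot root marker first, so a partially imported
		// snapshot is never mistaken for a complete one.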
iter.init = true
return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
}
for iter.account.Next() {
key := iter.account.Key()
if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
return utils.OpBatchAdd, key, iter.account.Value(), true
}
}
for iter.storage.Next() {
key := iter.storage.Key()
if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
return utils.OpBatchAdd, key, iter.storage.Value(), true
}
}
return 0, nil, nil, false
}
func (iter *snapshotIterator) Release() {
iter.account.Release()
iter.storage.Release()
}
// chainExporters defines the export scheme for all exportable chain data.
var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
"preimage": func(db ethdb.Database) utils.ChainDataIterator {
iter := db.NewIterator(rawdb.PreimagePrefix, nil)
return &preimageIterator{iter: iter}
},
"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
return &snapshotIterator{account: account, storage: storage}
},
}
func exportChaindata(ctx *cli.Context) error {
if ctx.NArg() < 2 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
// Parse the required chain data type, make sure it's supported.
kind := ctx.Args().Get(0)
kind = strings.ToLower(strings.Trim(kind, " "))
exporter, ok := chainExporters[kind]
if !ok {
var kinds []string
for kind := range chainExporters {
kinds = append(kinds, kind)
}
return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
}
var (
stack, _ = makeConfigNode(ctx)
interrupt = make(chan os.Signal, 1)
stop = make(chan struct{})
)
defer stack.Close()
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(interrupt)
defer close(interrupt)
go func() {
if _, ok := <-interrupt; ok {
log.Info("Interrupted during db export, stopping at next batch")
}
close(stop)
}()
db := utils.MakeChainDatabase(ctx, stack, true)
return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
}
func showMetaData(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
ancients, err := db.Ancients()
if err != nil {
fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
}
pp := func(val *uint64) string {
if val == nil {
return "<nil>"
}
return fmt.Sprintf("%d (0x%x)", *val, *val)
}
data := [][]string{
{"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
{"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
{"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
{"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
if b := rawdb.ReadHeadBlock(db); b != nil {
data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (0x%x)", b.Number(), b.Number())})
}
if b := rawdb.ReadSkeletonSyncStatus(db); b != nil {
data = append(data, []string{"SkeletonSyncStatus", string(b)})
}
if h := rawdb.ReadHeadHeader(db); h != nil {
data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (0x%x)", h.Number, h.Number)})
}
data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
{"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
{"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
{"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
{"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
{"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
{"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
{"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
}...)
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Field", "Value"})
table.AppendBulk(data)
table.Render()
return nil
}
func freezerMigrate(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
// Check first block for legacy receipt format
numAncients, err := db.Ancients()
if err != nil {
return err
}
if numAncients < 1 {
log.Info("No receipts in freezer to migrate")
return nil
}
isFirstLegacy, firstIdx, err := dbHasLegacyReceipts(db, 0)
if err != nil {
return err
}
if !isFirstLegacy {
log.Info("No legacy receipts to migrate")
return nil
}
log.Info("Starting migration", "ancients", numAncients, "firstLegacy", firstIdx)
start := time.Now()
if err := db.MigrateTable("receipts", types.ConvertLegacyStoredReceipts); err != nil {
return err
}
if err := db.Close(); err != nil {
return err
}
log.Info("Migration finished", "duration", time.Since(start))
return nil
}
// dbHasLegacyReceipts checks freezer entries for legacy receipts. It stops at the first
// non-empty receipt and checks its format. The index of this first non-empty element is
// the second return parameter.
func dbHasLegacyReceipts(db ethdb.Database, firstIdx uint64) (bool, uint64, error) {
// Check first block for legacy receipt format
numAncients, err := db.Ancients()
if err != nil {
return false, 0, err
}
if numAncients < 1 {
return false, 0, nil
}
if firstIdx >= numAncients {
return false, firstIdx, nil
}
var (
legacy bool
blob []byte
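		// 0xc0 is the RLP encoding of an empty list, marking blocks that
		// contain no receipts at all.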
emptyRLPList = []byte{192}
)
// Find first block with non-empty receipt, only if
// the index is not already provided.
if firstIdx == 0 {
for i := uint64(0); i < numAncients; i++ {
blob, err = db.Ancient("receipts", i)
if err != nil {
return false, 0, err
}
if len(blob) == 0 {
continue
}
if !bytes.Equal(blob, emptyRLPList) {
firstIdx = i
break
}
}
}
// Is first non-empty receipt legacy?
first, err := db.Ancient("receipts", firstIdx)
if err != nil {
return false, 0, err
}
legacy, err = types.IsLegacyStoredReceipts(first)
return legacy, firstIdx, err
}
| cmd/geth/dbcmd.go | 1 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.00035042723175138235,
0.00017402952653355896,
0.00016606679128017277,
0.00017193252278957516,
0.000019316808902658522
] |
{
"id": 8,
"code_window": [
"\t\t\tcall: 'debug_getAccessibleState',\n",
"\t\t\tparams: 2,\n",
"\t\t\tinputFormatter:[web3._extend.formatters.inputBlockNumberFormatter, web3._extend.formatters.inputBlockNumberFormatter],\n",
"\t\t}),\n",
"\t],\n",
"\tproperties: []\n",
"});\n",
"`\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tnew web3._extend.Method({\n",
"\t\t\tname: 'dbGet',\n",
"\t\t\tcall: 'debug_dbGet',\n",
"\t\t\tparams: 1\n",
"\t\t}),\n"
],
"file_path": "internal/web3ext/web3ext.go",
"type": "add",
"edit_start_line_idx": 473
} | // Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build js
// +build js
package rpc
import (
"context"
"errors"
"net"
)
var errNotSupported = errors.New("rpc: not supported")
// ipcListen will create a named pipe on the given endpoint.
func ipcListen(endpoint string) (net.Listener, error) {
return nil, errNotSupported
}
// newIPCConnection will connect to a named pipe with the given endpoint as name.
func newIPCConnection(ctx context.Context, endpoint string) (net.Conn, error) {
return nil, errNotSupported
}
| rpc/ipc_js.go | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.00017690667300485075,
0.00017305283108726144,
0.00017085274157579988,
0.00017222596216015518,
0.000002295449348821421
] |
{
"id": 8,
"code_window": [
"\t\t\tcall: 'debug_getAccessibleState',\n",
"\t\t\tparams: 2,\n",
"\t\t\tinputFormatter:[web3._extend.formatters.inputBlockNumberFormatter, web3._extend.formatters.inputBlockNumberFormatter],\n",
"\t\t}),\n",
"\t],\n",
"\tproperties: []\n",
"});\n",
"`\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tnew web3._extend.Method({\n",
"\t\t\tname: 'dbGet',\n",
"\t\t\tcall: 'debug_dbGet',\n",
"\t\t\tparams: 1\n",
"\t\t}),\n"
],
"file_path": "internal/web3ext/web3ext.go",
"type": "add",
"edit_start_line_idx": 473
} | [
{
"Input": "",
"ExpectedError": "invalid input length",
"Name": "bls_pairing_empty_input"
},
{
"Input": "0000000000000000000000000000000017f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb0000000000000000000000000000000008b3f481e3aaa0f1a09e30ed741d8ae4fcf5e095d5d00af600db18cb2c04b3edd03cc744a2888ae40caa232946c5e7e100000000000000000000000000000000024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb80000000000000000000000000000000013e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7e000000000000000000000000000000000ce5d527727d6e118cc9cdc6da2e351aadfd9baa8cbdd3a76d429a695160d12c923ac9cc3baca289e193548608b82801000000000000000000000000000000000606c4a02ea734cc32acd2b02bc28b99cb3e287e85a763af267492ab572e99ab3f370d275cec1da1aaa9075ff05f79be0000000000000000000000000000000017f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb0000000000000000000000000000000008b3f481e3aaa0f1a09e30ed741d8ae4fcf5e095d5d00af600db18cb2c04b3edd03cc744a2888ae40caa232946c5e7e100000000000000000000000000000000024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb80000000000000000000000000000000013e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7e000000000000000000000000000000000ce5d527727d6e118cc9cdc6da2e351aadfd9baa8cbdd3a76d429a695160d12c923ac9cc3baca289e193548608b8280100000000000000000000000000000000000606c4a02ea734cc32acd2b02bc28b99cb3e287e85a763af267492ab572e99ab3f370d275cec1da1aaa9075ff05f79be",
"ExpectedError": "invalid input length",
"Name": "bls_pairing_extra_data"
},
{
"Input": "0000000000000000000000000000000017f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb0000000000000000000000000000000008b3f481e3aaa0f1a09e30ed741d8ae4fcf5e095d5d00af600db18cb2c04b3edd03cc744a2888ae40caa232946c5e7e100000000000000000000000000000000024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb80000000000000000000000000000000013e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7e000000000000000000000000000000000ce5d527727d6e118cc9cdc6da2e351aadfd9baa8cbdd3a76d429a695160d12c923ac9cc3baca289e193548608b82801000000000000000000000000000000000606c4a02ea734cc32acd2b02bc28b99cb3e287e85a763af267492ab572e99ab3f370d275cec1da1aaa9075ff05f79be0000000000000000000000000000000017f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb0000000000000000000000000000000008b3f481e3aaa0f1a09e30ed741d8ae4fcf5e095d5d00af600db18cb2c04b3edd03cc744a2888ae40caa232946c5e7e100000000000000000000000000000000024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb80000000000000000000000000000000013e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7e000000000000000000000000000000000ce5d527727d6e118cc9cdc6da2e351aadfd9baa8cbdd3a76d429a695160d12c923ac9cc3baca289e193548608b82801000000000000000000000000000000001a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaac",
"ExpectedError": "must be less than modulus",
"Name": "bls_pairing_invalid_field_element"
},
{
"Input": "0000000000000000000000000000000017f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb0000000000000000000000000000000008b3f481e3aaa0f1a09e30ed741d8ae4fcf5e095d5d00af600db18cb2c04b3edd03cc744a2888ae40caa232946c5e7e100000000000000000000000000000000024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb80000000000000000000000000000000013e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7e000000000000000000000000000000000ce5d527727d6e118cc9cdc6da2e351aadfd9baa8cbdd3a76d429a695160d12c923ac9cc3baca289e193548608b82801000000000000000000000000000000000606c4a02ea734cc32acd2b02bc28b99cb3e287e85a763af267492ab572e99ab3f370d275cec1da1aaa9075ff05f79be0000000000000000000000000000000017f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb0000000000000000000000000000000008b3f481e3aaa0f1a09e30ed741d8ae4fcf5e095d5d00af600db18cb2c04b3edd03cc744a2888ae40caa232946c5e7e100000000000000000000000000000000024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb80000000000000000000000000000000013e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7e000000000000000000000000000000000ce5d527727d6e118cc9cdc6da2e351aadfd9baa8cbdd3a76d429a695160d12c923ac9cc3baca289e193548608b82801000000000000000000000000000000010606c4a02ea734cc32acd2b02bc28b99cb3e287e85a763af267492ab572e99ab3f370d275cec1da1aaa9075ff05f79be",
"ExpectedError": "invalid field element top bytes",
"Name": "bls_pairing_top_bytes"
},
{
"Input": "0000000000000000000000000000000017f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb0000000000000000000000000000000008b3f481e3aaa0f1a09e30ed741d8ae4fcf5e095d5d00af600db18cb2c04b3edd03cc744a2888ae40caa232946c5e7e100000000000000000000000000000000024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb80000000000000000000000000000000013e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7e000000000000000000000000000000000ce5d527727d6e118cc9cdc6da2e351aadfd9baa8cbdd3a76d429a695160d12c923ac9cc3baca289e193548608b82801000000000000000000000000000000000606c4a02ea734cc32acd2b02bc28b99cb3e287e85a763af267492ab572e99ab3f370d275cec1da1aaa9075ff05f79be000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb80000000000000000000000000000000013e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7e000000000000000000000000000000000ce5d527727d6e118cc9cdc6da2e351aadfd9baa8cbdd3a76d429a695160d12c923ac9cc3baca289e193548608b82801000000000000000000000000000000000606c4a02ea734cc32acd2b02bc28b99cb3e287e85a763af267492ab572e99ab3f370d275cec1da1aaa9075ff05f79be",
"ExpectedError": "point is not on curve",
"Name": "bls_pairing_g1_not_on_curve"
},
{
"Input": "0000000000000000000000000000000017f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb0000000000000000000000000000000008b3f481e3aaa0f1a09e30ed741d8ae4fcf5e095d5d00af600db18cb2c04b3edd03cc744a2888ae40caa232946c5e7e100000000000000000000000000000000024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb80000000000000000000000000000000013e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7e000000000000000000000000000000000ce5d527727d6e118cc9cdc6da2e351aadfd9baa8cbdd3a76d429a695160d12c923ac9cc3baca289e193548608b82801000000000000000000000000000000000606c4a02ea734cc32acd2b02bc28b99cb3e287e85a763af267492ab572e99ab3f370d275cec1da1aaa9075ff05f79be0000000000000000000000000000000017f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb0000000000000000000000000000000008b3f481e3aaa0f1a09e30ed741d8ae4fcf5e095d5d00af600db18cb2c04b3edd03cc744a2888ae40caa232946c5e7e100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001",
"ExpectedError": "point is not on curve",
"Name": "bls_pairing_g2_not_on_curve"
},
{
"Input": "0000000000000000000000000000000017f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb0000000000000000000000000000000008b3f481e3aaa0f1a09e30ed741d8ae4fcf5e095d5d00af600db18cb2c04b3edd03cc744a2888ae40caa232946c5e7e100000000000000000000000000000000024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb80000000000000000000000000000000013e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7e000000000000000000000000000000000ce5d527727d6e118cc9cdc6da2e351aadfd9baa8cbdd3a76d429a695160d12c923ac9cc3baca289e193548608b82801000000000000000000000000000000000606c4a02ea734cc32acd2b02bc28b99cb3e287e85a763af267492ab572e99ab3f370d275cec1da1aaa9075ff05f79be00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000a989badd40d6212b33cffc3f3763e9bc760f988c9926b26da9dd85e928483446346b8ed00e1de5d5ea93e354abe706c00000000000000000000000000000000024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb80000000000000000000000000000000013e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7e000000000000000000000000000000000ce5d527727d6e118cc9cdc6da2e351aadfd9baa8cbdd3a76d429a695160d12c923ac9cc3baca289e193548608b82801000000000000000000000000000000000606c4a02ea734cc32acd2b02bc28b99cb3e287e85a763af267492ab572e99ab3f370d275cec1da1aaa9075ff05f79be",
"ExpectedError": "g1 point is not on correct subgroup",
"Name": "bls_pairing_g1_not_in_correct_subgroup"
},
{
"Input": "0000000000000000000000000000000017f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb0000000000000000000000000000000008b3f481e3aaa0f1a09e30ed741d8ae4fcf5e095d5d00af600db18cb2c04b3edd03cc744a2888ae40caa232946c5e7e100000000000000000000000000000000024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb80000000000000000000000000000000013e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7e000000000000000000000000000000000ce5d527727d6e118cc9cdc6da2e351aadfd9baa8cbdd3a76d429a695160d12c923ac9cc3baca289e193548608b82801000000000000000000000000000000000606c4a02ea734cc32acd2b02bc28b99cb3e287e85a763af267492ab572e99ab3f370d275cec1da1aaa9075ff05f79be0000000000000000000000000000000017f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb0000000000000000000000000000000008b3f481e3aaa0f1a09e30ed741d8ae4fcf5e095d5d00af600db18cb2c04b3edd03cc744a2888ae40caa232946c5e7e1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000013a59858b6809fca4d9a3b6539246a70051a3c88899964a42bc9a69cf9acdd9dd387cfa9086b894185b9a46a402be730000000000000000000000000000000002d27e0ec3356299a346a09ad7dc4ef68a483c3aed53f9139d2f929a3eecebf72082e5e58c6da24ee32e03040c406d4f",
"ExpectedError": "g2 point is not on correct subgroup",
"Name": "bls_pairing_g2_not_in_correct_subgroup"
}
] | core/vm/testdata/precompiles/fail-blsPairing.json | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.029841069132089615,
0.01057393103837967,
0.0001762537140166387,
0.0072916653007268906,
0.010107170790433884
] |
{
"id": 8,
"code_window": [
"\t\t\tcall: 'debug_getAccessibleState',\n",
"\t\t\tparams: 2,\n",
"\t\t\tinputFormatter:[web3._extend.formatters.inputBlockNumberFormatter, web3._extend.formatters.inputBlockNumberFormatter],\n",
"\t\t}),\n",
"\t],\n",
"\tproperties: []\n",
"});\n",
"`\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tnew web3._extend.Method({\n",
"\t\t\tname: 'dbGet',\n",
"\t\t\tcall: 'debug_dbGet',\n",
"\t\t\tparams: 1\n",
"\t\t}),\n"
],
"file_path": "internal/web3ext/web3ext.go",
"type": "add",
"edit_start_line_idx": 473
} | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7,amd64,!gccgo,!appengine
#include "textflag.h"
DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b
DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32
DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b
DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179
GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32
DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403
DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32
DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302
DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32
DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16
#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39
#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93
#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e
#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93
#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39
#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \
VPADDQ m0, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFD $-79, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPSHUFB c40, Y1, Y1; \
VPADDQ m1, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFB c48, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPADDQ Y1, Y1, t; \
VPSRLQ $63, Y1, Y1; \
VPXOR t, Y1, Y1; \
VPERMQ_0x39_Y1_Y1; \
VPERMQ_0x4E_Y2_Y2; \
VPERMQ_0x93_Y3_Y3; \
VPADDQ m2, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFD $-79, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPSHUFB c40, Y1, Y1; \
VPADDQ m3, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFB c48, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPADDQ Y1, Y1, t; \
VPSRLQ $63, Y1, Y1; \
VPXOR t, Y1, Y1; \
VPERMQ_0x39_Y3_Y3; \
VPERMQ_0x4E_Y2_Y2; \
VPERMQ_0x93_Y1_Y1
#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E
#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26
#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E
#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36
#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E
#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n
#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n
#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n
#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n
#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n
#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01
#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01
#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01
#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01
#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01
#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01
#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8
#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01
// load msg: Y12 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
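// (index 0 requires the displacement-free encodings provided by the *_0 macro variants above)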
#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \
VMOVQ_SI_X12(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X12(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y12, Y12
// load msg: Y13 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \
VMOVQ_SI_X13(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X13(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y13, Y13
// load msg: Y14 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \
VMOVQ_SI_X14(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X14(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y14, Y14
// load msg: Y15 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \
VMOVQ_SI_X15(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X15(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y15, Y15
#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \
VMOVQ_SI_X12_0; \
VMOVQ_SI_X11(4*8); \
VPINSRQ_1_SI_X12(2*8); \
VPINSRQ_1_SI_X11(6*8); \
VINSERTI128 $1, X11, Y12, Y12; \
LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \
LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \
LOAD_MSG_AVX2_Y15(9, 11, 13, 15)
#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \
LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \
LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \
VMOVQ_SI_X11(11*8); \
VPSHUFD $0x4E, 0*8(SI), X14; \
VPINSRQ_1_SI_X11(5*8); \
VINSERTI128 $1, X11, Y14, Y14; \
LOAD_MSG_AVX2_Y15(12, 2, 7, 3)
#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \
VMOVQ_SI_X11(5*8); \
VMOVDQU 11*8(SI), X12; \
VPINSRQ_1_SI_X11(15*8); \
VINSERTI128 $1, X11, Y12, Y12; \
VMOVQ_SI_X13(8*8); \
VMOVQ_SI_X11(2*8); \
VPINSRQ_1_SI_X13_0; \
VPINSRQ_1_SI_X11(13*8); \
VINSERTI128 $1, X11, Y13, Y13; \
LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \
LOAD_MSG_AVX2_Y15(14, 6, 1, 4)
#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \
LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \
LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \
LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \
VMOVQ_SI_X15(6*8); \
VMOVQ_SI_X11_0; \
VPINSRQ_1_SI_X15(10*8); \
VPINSRQ_1_SI_X11(8*8); \
VINSERTI128 $1, X11, Y15, Y15
#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \
LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \
VMOVQ_SI_X13_0; \
VMOVQ_SI_X11(4*8); \
VPINSRQ_1_SI_X13(7*8); \
VPINSRQ_1_SI_X11(15*8); \
VINSERTI128 $1, X11, Y13, Y13; \
LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \
LOAD_MSG_AVX2_Y15(1, 12, 8, 13)
#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \
VMOVQ_SI_X12(2*8); \
VMOVQ_SI_X11_0; \
VPINSRQ_1_SI_X12(6*8); \
VPINSRQ_1_SI_X11(8*8); \
VINSERTI128 $1, X11, Y12, Y12; \
LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \
LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \
LOAD_MSG_AVX2_Y15(13, 5, 14, 9)
#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \
LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \
LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \
VMOVQ_SI_X14_0; \
VPSHUFD $0x4E, 8*8(SI), X11; \
VPINSRQ_1_SI_X14(6*8); \
VINSERTI128 $1, X11, Y14, Y14; \
LOAD_MSG_AVX2_Y15(7, 3, 2, 11)
#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \
LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \
LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \
LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \
VMOVQ_SI_X15_0; \
VMOVQ_SI_X11(6*8); \
VPINSRQ_1_SI_X15(4*8); \
VPINSRQ_1_SI_X11(10*8); \
VINSERTI128 $1, X11, Y15, Y15
#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \
VMOVQ_SI_X12(6*8); \
VMOVQ_SI_X11(11*8); \
VPINSRQ_1_SI_X12(14*8); \
VPINSRQ_1_SI_X11_0; \
VINSERTI128 $1, X11, Y12, Y12; \
LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \
VMOVQ_SI_X11(1*8); \
VMOVDQU 12*8(SI), X14; \
VPINSRQ_1_SI_X11(10*8); \
VINSERTI128 $1, X11, Y14, Y14; \
VMOVQ_SI_X15(2*8); \
VMOVDQU 4*8(SI), X11; \
VPINSRQ_1_SI_X15(7*8); \
VINSERTI128 $1, X11, Y15, Y15
#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \
LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \
VMOVQ_SI_X13(2*8); \
VPSHUFD $0x4E, 5*8(SI), X11; \
VPINSRQ_1_SI_X13(4*8); \
VINSERTI128 $1, X11, Y13, Y13; \
LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \
VMOVQ_SI_X15(11*8); \
VMOVQ_SI_X11(12*8); \
VPINSRQ_1_SI_X15(14*8); \
VPINSRQ_1_SI_X11_0; \
VINSERTI128 $1, X11, Y15, Y15
// func fAVX2(h *[8]uint64, m *[16]uint64, c0, c1 uint64, flag uint64, rounds uint64)
TEXT ·fAVX2(SB), 4, $64-48 // frame size = 32 + 32 byte alignment
MOVQ h+0(FP), AX
MOVQ m+8(FP), SI
MOVQ c0+16(FP), R8
MOVQ c1+24(FP), R9
MOVQ flag+32(FP), CX
MOVQ rounds+40(FP), BX
MOVQ SP, DX
MOVQ SP, R10
ADDQ $31, R10
ANDQ $~31, R10
MOVQ R10, SP
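	// SP is now aligned to a 32-byte boundary for the AVX2 scratch area below.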
MOVQ CX, 16(SP)
XORQ CX, CX
MOVQ CX, 24(SP)
VMOVDQU ·AVX2_c40<>(SB), Y4
VMOVDQU ·AVX2_c48<>(SB), Y5
VMOVDQU 0(AX), Y8
VMOVDQU 32(AX), Y9
VMOVDQU ·AVX2_iv0<>(SB), Y6
VMOVDQU ·AVX2_iv1<>(SB), Y7
MOVQ R8, 0(SP)
MOVQ R9, 8(SP)
VMOVDQA Y8, Y0
VMOVDQA Y9, Y1
VMOVDQA Y6, Y2
VPXOR 0(SP), Y7, Y3
loop:
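	// BX holds the remaining round count; it is decremented before each
	// scheduled round, so the permutation can stop after any number of rounds.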
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
JMP loop
done:
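// Fold the working state back into h: h[i] ^= v[i] ^ v[i+8]; Y8 and Y9
// still hold the original h loaded in the prologue.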
VPXOR Y0, Y8, Y8
VPXOR Y1, Y9, Y9
VPXOR Y2, Y8, Y8
VPXOR Y3, Y9, Y9
VMOVDQU Y8, 0(AX)
VMOVDQU Y9, 32(AX)
VZEROUPPER
MOVQ DX, SP
RET
#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA
#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB
#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF
#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD
#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE
#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7
#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF
#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7
#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF
#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7
#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7
#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF
#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF
#define SHUFFLE_AVX() \
VMOVDQA X6, X13; \
VMOVDQA X2, X14; \
VMOVDQA X4, X6; \
VPUNPCKLQDQ_X13_X13_X15; \
VMOVDQA X5, X4; \
VMOVDQA X6, X5; \
VPUNPCKHQDQ_X15_X7_X6; \
VPUNPCKLQDQ_X7_X7_X15; \
VPUNPCKHQDQ_X15_X13_X7; \
VPUNPCKLQDQ_X3_X3_X15; \
VPUNPCKHQDQ_X15_X2_X2; \
VPUNPCKLQDQ_X14_X14_X15; \
VPUNPCKHQDQ_X15_X3_X3
#define SHUFFLE_AVX_INV() \
VMOVDQA X2, X13; \
VMOVDQA X4, X14; \
VPUNPCKLQDQ_X2_X2_X15; \
VMOVDQA X5, X4; \
VPUNPCKHQDQ_X15_X3_X2; \
VMOVDQA X14, X5; \
VPUNPCKLQDQ_X3_X3_X15; \
VMOVDQA X6, X14; \
VPUNPCKHQDQ_X15_X13_X3; \
VPUNPCKLQDQ_X7_X7_X15; \
VPUNPCKHQDQ_X15_X6_X6; \
VPUNPCKLQDQ_X14_X14_X15; \
VPUNPCKHQDQ_X15_X7_X7
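// HALF_ROUND_AVX applies the BLAKE2b G function to two column/diagonal pairs
// at once: VPSHUFD $-79 performs the 32-bit rotate, VPSHUFB with the c40/c48
// constants performs the 24- and 16-bit rotates, and the closing
// add/shift/xor sequence performs the 63-bit rotate.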
#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
VPADDQ m0, v0, v0; \
VPADDQ v2, v0, v0; \
VPADDQ m1, v1, v1; \
VPADDQ v3, v1, v1; \
VPXOR v0, v6, v6; \
VPXOR v1, v7, v7; \
VPSHUFD $-79, v6, v6; \
VPSHUFD $-79, v7, v7; \
VPADDQ v6, v4, v4; \
VPADDQ v7, v5, v5; \
VPXOR v4, v2, v2; \
VPXOR v5, v3, v3; \
VPSHUFB c40, v2, v2; \
VPSHUFB c40, v3, v3; \
VPADDQ m2, v0, v0; \
VPADDQ v2, v0, v0; \
VPADDQ m3, v1, v1; \
VPADDQ v3, v1, v1; \
VPXOR v0, v6, v6; \
VPXOR v1, v7, v7; \
VPSHUFB c48, v6, v6; \
VPSHUFB c48, v7, v7; \
VPADDQ v6, v4, v4; \
VPADDQ v7, v5, v5; \
VPXOR v4, v2, v2; \
VPXOR v5, v3, v3; \
VPADDQ v2, v2, t0; \
VPSRLQ $63, v2, v2; \
VPXOR t0, v2, v2; \
VPADDQ v3, v3, t0; \
VPSRLQ $63, v3, v3; \
VPXOR t0, v3, v3
// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7)
// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0
#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \
VMOVQ_SI_X12(i0*8); \
VMOVQ_SI_X13(i2*8); \
VMOVQ_SI_X14(i4*8); \
VMOVQ_SI_X15(i6*8); \
VPINSRQ_1_SI_X12(i1*8); \
VPINSRQ_1_SI_X13(i3*8); \
VPINSRQ_1_SI_X14(i5*8); \
VPINSRQ_1_SI_X15(i7*8)
// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7)
#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \
VMOVQ_SI_X12_0; \
VMOVQ_SI_X13(4*8); \
VMOVQ_SI_X14(1*8); \
VMOVQ_SI_X15(5*8); \
VPINSRQ_1_SI_X12(2*8); \
VPINSRQ_1_SI_X13(6*8); \
VPINSRQ_1_SI_X14(3*8); \
VPINSRQ_1_SI_X15(7*8)
// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3)
#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \
VPSHUFD $0x4E, 0*8(SI), X12; \
VMOVQ_SI_X13(11*8); \
VMOVQ_SI_X14(12*8); \
VMOVQ_SI_X15(7*8); \
VPINSRQ_1_SI_X13(5*8); \
VPINSRQ_1_SI_X14(2*8); \
VPINSRQ_1_SI_X15(3*8)
// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13)
#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \
VMOVDQU 11*8(SI), X12; \
VMOVQ_SI_X13(5*8); \
VMOVQ_SI_X14(8*8); \
VMOVQ_SI_X15(2*8); \
VPINSRQ_1_SI_X13(15*8); \
VPINSRQ_1_SI_X14_0; \
VPINSRQ_1_SI_X15(13*8)
// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8)
#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \
VMOVQ_SI_X12(2*8); \
VMOVQ_SI_X13(4*8); \
VMOVQ_SI_X14(6*8); \
VMOVQ_SI_X15_0; \
VPINSRQ_1_SI_X12(5*8); \
VPINSRQ_1_SI_X13(15*8); \
VPINSRQ_1_SI_X14(10*8); \
VPINSRQ_1_SI_X15(8*8)
// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15)
#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \
VMOVQ_SI_X12(9*8); \
VMOVQ_SI_X13(2*8); \
VMOVQ_SI_X14_0; \
VMOVQ_SI_X15(4*8); \
VPINSRQ_1_SI_X12(5*8); \
VPINSRQ_1_SI_X13(10*8); \
VPINSRQ_1_SI_X14(7*8); \
VPINSRQ_1_SI_X15(15*8)
// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3)
#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \
VMOVQ_SI_X12(2*8); \
VMOVQ_SI_X13_0; \
VMOVQ_SI_X14(12*8); \
VMOVQ_SI_X15(11*8); \
VPINSRQ_1_SI_X12(6*8); \
VPINSRQ_1_SI_X13(8*8); \
VPINSRQ_1_SI_X14(10*8); \
VPINSRQ_1_SI_X15(3*8)
// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11)
#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \
MOVQ 0*8(SI), X12; \
VPSHUFD $0x4E, 8*8(SI), X13; \
MOVQ 7*8(SI), X14; \
MOVQ 2*8(SI), X15; \
VPINSRQ_1_SI_X12(6*8); \
VPINSRQ_1_SI_X14(3*8); \
VPINSRQ_1_SI_X15(11*8)
// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8)
#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \
MOVQ 6*8(SI), X12; \
MOVQ 11*8(SI), X13; \
MOVQ 15*8(SI), X14; \
MOVQ 3*8(SI), X15; \
VPINSRQ_1_SI_X12(14*8); \
VPINSRQ_1_SI_X13_0; \
VPINSRQ_1_SI_X14(9*8); \
VPINSRQ_1_SI_X15(8*8)
// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10)
#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \
MOVQ 5*8(SI), X12; \
MOVQ 8*8(SI), X13; \
MOVQ 0*8(SI), X14; \
MOVQ 6*8(SI), X15; \
VPINSRQ_1_SI_X12(15*8); \
VPINSRQ_1_SI_X13(2*8); \
VPINSRQ_1_SI_X14(4*8); \
VPINSRQ_1_SI_X15(10*8)
// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5)
#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \
VMOVDQU 12*8(SI), X12; \
MOVQ 1*8(SI), X13; \
MOVQ 2*8(SI), X14; \
VPINSRQ_1_SI_X13(10*8); \
VPINSRQ_1_SI_X14(7*8); \
VMOVDQU 4*8(SI), X15
// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0)
#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \
MOVQ 15*8(SI), X12; \
MOVQ 3*8(SI), X13; \
MOVQ 11*8(SI), X14; \
MOVQ 12*8(SI), X15; \
VPINSRQ_1_SI_X12(9*8); \
VPINSRQ_1_SI_X13(13*8); \
VPINSRQ_1_SI_X14(14*8); \
VPINSRQ_1_SI_X15_0
// func fAVX(h *[8]uint64, m *[16]uint64, c0, c1 uint64, flag uint64, rounds uint64)
TEXT ·fAVX(SB), 4, $24-48 // frame size = 8 + 16 byte alignment
MOVQ h+0(FP), AX
MOVQ m+8(FP), SI
MOVQ c0+16(FP), R8
MOVQ c1+24(FP), R9
MOVQ flag+32(FP), CX
MOVQ rounds+40(FP), BX
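// BP preserves the caller's SP; SP is then rounded up to a 16-byte boundary
// so the VMOVDQA spill of the IV at 0(SP) is aligned.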
MOVQ SP, BP
MOVQ SP, R10
ADDQ $15, R10
ANDQ $~15, R10
MOVQ R10, SP
VMOVDQU ·AVX_c40<>(SB), X0
VMOVDQU ·AVX_c48<>(SB), X1
VMOVDQA X0, X8
VMOVDQA X1, X9
VMOVDQU ·AVX_iv3<>(SB), X0
VMOVDQA X0, 0(SP)
XORQ CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0)
VMOVDQU 0(AX), X10
VMOVDQU 16(AX), X11
VMOVDQU 32(AX), X2
VMOVDQU 48(AX), X3
VMOVQ_R8_X15
VPINSRQ_1_R9_X15
VMOVDQA X10, X0
VMOVDQA X11, X1
VMOVDQU ·AVX_iv0<>(SB), X4
VMOVDQU ·AVX_iv1<>(SB), X5
VMOVDQU ·AVX_iv2<>(SB), X6
VPXOR X15, X6, X6
VMOVDQA 0(SP), X7
loop:
SUBQ $1, BX; JCS done
LOAD_MSG_AVX_0_2_4_6_1_3_5_7()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_1_0_11_5_12_2_7_3()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX_11_12_5_15_8_0_2_13()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_2_5_4_15_6_10_0_8()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX_9_5_2_10_0_7_4_15()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX_2_6_0_8_12_10_11_3()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_0_6_9_8_7_3_2_11()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_5_15_8_2_0_4_6_10()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX_6_14_11_0_15_9_3_8()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_12_13_1_10_2_7_4_5()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_15_9_3_13_11_14_12_0()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
JMP loop
done:
VMOVDQU 32(AX), X14
VMOVDQU 48(AX), X15
VPXOR X0, X10, X10
VPXOR X1, X11, X11
VPXOR X2, X14, X14
VPXOR X3, X15, X15
VPXOR X4, X10, X10
VPXOR X5, X11, X11
VPXOR X6, X14, X2
VPXOR X7, X15, X3
VMOVDQU X2, 32(AX)
VMOVDQU X3, 48(AX)
VMOVDQU X10, 0(AX)
VMOVDQU X11, 16(AX)
VZEROUPPER
MOVQ BP, SP
RET
| crypto/blake2b/blake2bAVX2_amd64.s | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.00023607526964042336,
0.00017105354345403612,
0.00016084781964309514,
0.00016779251745902002,
0.000011822890883195214
] |
{
"id": 0,
"code_window": [
"\tcastRe := makeFunctionRegex(\"_CAST\", 4)\n",
"\ts = castRe.ReplaceAllString(s, makeTemplateFunctionCall(\"Right.Cast\", 4))\n",
"\n",
"\ts = strings.ReplaceAll(s, \"_L_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Left\", s)\n",
"\n",
"\ts = strings.ReplaceAll(s, \"_R_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Right\", s)\n",
"\n",
"\ttmpl, err := template.New(\"cast\").Funcs(template.FuncMap{\"buildDict\": buildDict}).Parse(s)\n",
"\tif err != nil {\n",
"\t\treturn err\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/colexec/execgen/cmd/execgen/cast_gen.go",
"type": "replace",
"edit_start_line_idx": 39
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"io"
"strings"
"text/template"
)
const castTmpl = "pkg/sql/colexec/colexecbase/cast_tmpl.go"
func genCastOperators(inputFileContents string, wr io.Writer) error {
r := strings.NewReplacer(
"_LEFT_CANONICAL_TYPE_FAMILY", "{{.LeftCanonicalFamilyStr}}",
"_LEFT_TYPE_WIDTH", typeWidthReplacement,
"_RIGHT_CANONICAL_TYPE_FAMILY", "{{.RightCanonicalFamilyStr}}",
"_RIGHT_TYPE_WIDTH", typeWidthReplacement,
"_R_GO_TYPE", "{{.Right.GoType}}",
"_L_TYP", "{{.Left.VecMethod}}",
"_R_TYP", "{{.Right.VecMethod}}",
"_NAME", "{{.Left.VecMethod}}{{.Right.VecMethod}}",
)
s := r.Replace(inputFileContents)
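// Expand the remaining pseudo-calls into Go template invocations.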
setValues := makeFunctionRegex("_CAST_TUPLES", 2)
s = setValues.ReplaceAllString(s, `{{template "castTuples" buildDict "Global" . "HasNulls" $1 "HasSel" $2}}`)
castRe := makeFunctionRegex("_CAST", 4)
s = castRe.ReplaceAllString(s, makeTemplateFunctionCall("Right.Cast", 4))
s = strings.ReplaceAll(s, "_L_UNSAFEGET", "execgen.UNSAFEGET")
s = replaceManipulationFuncsAmbiguous(".Left", s)
s = strings.ReplaceAll(s, "_R_UNSAFEGET", "execgen.UNSAFEGET")
s = replaceManipulationFuncsAmbiguous(".Right", s)
tmpl, err := template.New("cast").Funcs(template.FuncMap{"buildDict": buildDict}).Parse(s)
if err != nil {
return err
}
return tmpl.Execute(wr, twoArgsResolvedOverloadsInfo.CastOverloads)
}
func init() {
registerGenerator(genCastOperators, "cast.eg.go", castTmpl)
}
| pkg/sql/colexec/execgen/cmd/execgen/cast_gen.go | 1 | https://github.com/cockroachdb/cockroach/commit/5015f06f107c92eab8fa180ea28c09c68e294e94 | [
0.9970502853393555,
0.3720506429672241,
0.00017825770191848278,
0.11981308460235596,
0.448988676071167
] |
{
"id": 0,
"code_window": [
"\tcastRe := makeFunctionRegex(\"_CAST\", 4)\n",
"\ts = castRe.ReplaceAllString(s, makeTemplateFunctionCall(\"Right.Cast\", 4))\n",
"\n",
"\ts = strings.ReplaceAll(s, \"_L_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Left\", s)\n",
"\n",
"\ts = strings.ReplaceAll(s, \"_R_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Right\", s)\n",
"\n",
"\ttmpl, err := template.New(\"cast\").Funcs(template.FuncMap{\"buildDict\": buildDict}).Parse(s)\n",
"\tif err != nil {\n",
"\t\treturn err\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/colexec/execgen/cmd/execgen/cast_gen.go",
"type": "replace",
"edit_start_line_idx": 39
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package catalog
import (
"testing"
"github.com/cockroachdb/cockroach/pkg/testutils/buildutil"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
)
func TestNoLinkForbidden(t *testing.T) {
defer leaktest.AfterTest(t)()
buildutil.VerifyNoImports(t,
"github.com/cockroachdb/cockroach/pkg/sql/catalog", true, []string{"c-deps"}, nil,
)
}
| pkg/sql/catalog/dep_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/5015f06f107c92eab8fa180ea28c09c68e294e94 | [
0.00017858092905953526,
0.00017560595006216317,
0.00017402252706233412,
0.00017421443772036582,
0.0000021050761915830662
] |
{
"id": 0,
"code_window": [
"\tcastRe := makeFunctionRegex(\"_CAST\", 4)\n",
"\ts = castRe.ReplaceAllString(s, makeTemplateFunctionCall(\"Right.Cast\", 4))\n",
"\n",
"\ts = strings.ReplaceAll(s, \"_L_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Left\", s)\n",
"\n",
"\ts = strings.ReplaceAll(s, \"_R_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Right\", s)\n",
"\n",
"\ttmpl, err := template.New(\"cast\").Funcs(template.FuncMap{\"buildDict\": buildDict}).Parse(s)\n",
"\tif err != nil {\n",
"\t\treturn err\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/colexec/execgen/cmd/execgen/cast_gen.go",
"type": "replace",
"edit_start_line_idx": 39
} | // Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package rowexec
import (
"context"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/errors"
)
// We define a group to be a set of rows from a given source with the same
// group key, in this case the set of ordered columns. streamMerger emits
// batches of rows that are the cross-product of matching groups from each
// stream.
type streamMerger struct {
left streamGroupAccumulator
right streamGroupAccumulator
leftGroup []rowenc.EncDatumRow
rightGroup []rowenc.EncDatumRow
// nullEquality indicates whether NULL = NULL should evaluate to true. This
// is helpful when we want NULL to be meaningful during equality checks, for
// example during SCRUB secondary index checks.
nullEquality bool
datumAlloc rowenc.DatumAlloc
}
func (sm *streamMerger) start(ctx context.Context) {
sm.left.start(ctx)
sm.right.start(ctx)
}
// NextBatch returns a set of rows from the left stream and a set of rows from
// the right stream, all matching on the equality columns. One of the sets can
// be empty.
func (sm *streamMerger) NextBatch(
ctx context.Context, evalCtx *tree.EvalContext,
) ([]rowenc.EncDatumRow, []rowenc.EncDatumRow, *execinfrapb.ProducerMetadata) {
if sm.leftGroup == nil {
var meta *execinfrapb.ProducerMetadata
sm.leftGroup, meta = sm.left.nextGroup(ctx, evalCtx)
if meta != nil {
return nil, nil, meta
}
}
if sm.rightGroup == nil {
var meta *execinfrapb.ProducerMetadata
sm.rightGroup, meta = sm.right.nextGroup(ctx, evalCtx)
if meta != nil {
return nil, nil, meta
}
}
if sm.leftGroup == nil && sm.rightGroup == nil {
return nil, nil, nil
}
var lrow, rrow rowenc.EncDatumRow
if len(sm.leftGroup) > 0 {
lrow = sm.leftGroup[0]
}
if len(sm.rightGroup) > 0 {
rrow = sm.rightGroup[0]
}
cmp, err := CompareEncDatumRowForMerge(
sm.left.types, lrow, rrow, sm.left.ordering, sm.right.ordering,
sm.nullEquality, &sm.datumAlloc, evalCtx,
)
if err != nil {
return nil, nil, &execinfrapb.ProducerMetadata{Err: err}
}
var leftGroup, rightGroup []rowenc.EncDatumRow
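// When cmp == 0 both groups are consumed, so the caller sees the full
// cross-product; otherwise only the group that sorts first is emitted.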
if cmp <= 0 {
leftGroup = sm.leftGroup
sm.leftGroup = nil
}
if cmp >= 0 {
rightGroup = sm.rightGroup
sm.rightGroup = nil
}
return leftGroup, rightGroup, nil
}
// CompareEncDatumRowForMerge compares two EncDatumRows for merging.
// When merging two streams while preserving the order (as in a MergeSort or
// a MergeJoin), compare the heads of the streams, emitting the one that sorts
// first. It allows an EncDatumRow to be nil if one of the streams is
// exhausted (and hence nil). CompareEncDatumRowForMerge returns 0 when both
// rows are nil, and a nil row is considered greater than any non-nil row.
// CompareEncDatumRowForMerge assumes that the two rows have the same columns
// in the same orders, but can handle different ordering directions. It takes
// a DatumAlloc which is used for decoding if any underlying EncDatum is not
// yet decoded.
func CompareEncDatumRowForMerge(
lhsTypes []*types.T,
lhs, rhs rowenc.EncDatumRow,
leftOrdering, rightOrdering colinfo.ColumnOrdering,
nullEquality bool,
da *rowenc.DatumAlloc,
evalCtx *tree.EvalContext,
) (int, error) {
if lhs == nil && rhs == nil {
return 0, nil
}
if lhs == nil {
return 1, nil
}
if rhs == nil {
return -1, nil
}
if len(leftOrdering) != len(rightOrdering) {
return 0, errors.Errorf(
"cannot compare two EncDatumRow types that have different length ColumnOrderings",
)
}
for i, ord := range leftOrdering {
lIdx := ord.ColIdx
rIdx := rightOrdering[i].ColIdx
// If both datums are NULL, we need to follow SQL semantics where
// they are not equal. This differs from our datum semantics where
// they are equal. In the case where we want to consider NULLs to be
// equal, we continue and skip to the next datums in the row.
if lhs[lIdx].IsNull() && rhs[rIdx].IsNull() {
if !nullEquality {
// We can return either -1 or 1, it does not change the behavior.
return -1, nil
}
continue
}
cmp, err := lhs[lIdx].Compare(lhsTypes[lIdx], da, evalCtx, &rhs[rIdx])
if err != nil {
return 0, err
}
if cmp != 0 {
if leftOrdering[i].Direction == encoding.Descending {
cmp = -cmp
}
return cmp, nil
}
}
return 0, nil
}
func (sm *streamMerger) close(ctx context.Context) {
sm.left.close(ctx)
sm.right.close(ctx)
}
// makeStreamMerger creates a streamMerger, joining rows from leftSource with
// rows from rightSource.
//
// All metadata from the sources is forwarded to metadataSink.
func makeStreamMerger(
leftSource execinfra.RowSource,
leftOrdering colinfo.ColumnOrdering,
rightSource execinfra.RowSource,
rightOrdering colinfo.ColumnOrdering,
nullEquality bool,
memMonitor *mon.BytesMonitor,
) (streamMerger, error) {
if len(leftOrdering) != len(rightOrdering) {
return streamMerger{}, errors.Errorf(
"ordering lengths don't match: %d and %d", len(leftOrdering), len(rightOrdering))
}
for i, ord := range leftOrdering {
if ord.Direction != rightOrdering[i].Direction {
return streamMerger{}, errors.New("ordering mismatch")
}
}
return streamMerger{
left: makeStreamGroupAccumulator(leftSource, leftOrdering, memMonitor),
right: makeStreamGroupAccumulator(rightSource, rightOrdering, memMonitor),
nullEquality: nullEquality,
}, nil
}
| pkg/sql/rowexec/stream_merger.go | 0 | https://github.com/cockroachdb/cockroach/commit/5015f06f107c92eab8fa180ea28c09c68e294e94 | [
0.00018146808724850416,
0.00017314197611995041,
0.00016240932745859027,
0.00017371209105476737,
0.000004092446488357382
] |
{
"id": 0,
"code_window": [
"\tcastRe := makeFunctionRegex(\"_CAST\", 4)\n",
"\ts = castRe.ReplaceAllString(s, makeTemplateFunctionCall(\"Right.Cast\", 4))\n",
"\n",
"\ts = strings.ReplaceAll(s, \"_L_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Left\", s)\n",
"\n",
"\ts = strings.ReplaceAll(s, \"_R_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Right\", s)\n",
"\n",
"\ttmpl, err := template.New(\"cast\").Funcs(template.FuncMap{\"buildDict\": buildDict}).Parse(s)\n",
"\tif err != nil {\n",
"\t\treturn err\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/colexec/execgen/cmd/execgen/cast_gen.go",
"type": "replace",
"edit_start_line_idx": 39
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import React, {
forwardRef,
useCallback,
useImperativeHandle,
useState,
} from "react";
import { ICancelSessionRequest } from "src/store/terminateQuery";
import { Modal } from "../modal";
import { Text } from "../text";
export interface TerminateSessionModalRef {
showModalFor: (req: ICancelSessionRequest) => void;
}
interface TerminateSessionModalProps {
cancel: (payload: ICancelSessionRequest) => void;
}
const TerminateSessionModal = (
props: TerminateSessionModalProps,
ref: React.RefObject<TerminateSessionModalRef>,
) => {
const { cancel } = props;
const [visible, setVisible] = useState(false);
const [req, setReq] = useState<ICancelSessionRequest>();
const onOkHandler = useCallback(() => {
cancel(req);
setVisible(false);
}, [req, cancel]);
const onCancelHandler = useCallback(() => setVisible(false), []);
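// Expose showModalFor through the ref so parents can open the modal
// imperatively for a given cancel request.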
useImperativeHandle(ref, () => {
return {
showModalFor: (r: ICancelSessionRequest) => {
setReq(r);
setVisible(true);
},
};
});
return (
<Modal
visible={visible}
onOk={onOkHandler}
onCancel={onCancelHandler}
okText="Yes"
cancelText="No"
title="Terminate the Session?"
>
<Text>
Terminating a session ends the session, terminating its associated
connection. The client that holds this session will receive a
"connection terminated" event.
</Text>
</Modal>
);
};
export default forwardRef(TerminateSessionModal);
| pkg/ui/cluster-ui/src/sessions/terminateSessionModal.tsx | 0 | https://github.com/cockroachdb/cockroach/commit/5015f06f107c92eab8fa180ea28c09c68e294e94 | [
0.00017875111370813102,
0.00017389343702234328,
0.0001693965314188972,
0.00017376485629938543,
0.0000024645632947795093
] |
{
"id": 1,
"code_window": [
"\t)\n",
"\ts := r.Replace(inputFileContents)\n",
"\n",
"\ts = strings.ReplaceAll(s, \"_L_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Global.Left\", s)\n",
"\ts = strings.ReplaceAll(s, \"_R_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Global.Right\", s)\n",
"\n",
"\tassignNeRe := makeFunctionRegex(\"_ASSIGN_NE\", 6)\n",
"\ts = assignNeRe.ReplaceAllString(s, makeTemplateFunctionCall(\"Global.Right.Assign\", 6))\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/colexec/execgen/cmd/execgen/hashtable_gen.go",
"type": "replace",
"edit_start_line_idx": 90
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package execgen
import (
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/errors"
)
const nonTemplatePanic = "do not call from non-template code"
// Remove unused warnings.
var (
_ = COPYVAL
_ = APPENDSLICE
_ = APPENDVAL
_ = SETVARIABLESIZE
)
// COPYVAL is a template function that can be used to set a scalar to the value
// of another scalar in such a way that the destination won't be modified if the
// source is. You must use this on the result of UNSAFEGET if you wish to store
// that result past the lifetime of the batch you UNSAFEGET'd from.
func COPYVAL(dest, src interface{}) {
colexecerror.InternalError(errors.AssertionFailedf(nonTemplatePanic))
}
// APPENDSLICE is a template function.
func APPENDSLICE(target, src, destIdx, srcStartIdx, srcEndIdx interface{}) {
colexecerror.InternalError(errors.AssertionFailedf(nonTemplatePanic))
}
// APPENDVAL is a template function.
func APPENDVAL(target, v interface{}) {
colexecerror.InternalError(errors.AssertionFailedf(nonTemplatePanic))
}
// SETVARIABLESIZE is a template function.
func SETVARIABLESIZE(target, value interface{}) interface{} {
colexecerror.InternalError(errors.AssertionFailedf(nonTemplatePanic))
return nil
}
| pkg/sql/colexec/execgen/placeholders.go | 1 | https://github.com/cockroachdb/cockroach/commit/5015f06f107c92eab8fa180ea28c09c68e294e94 | [
0.0045450786128640175,
0.0011354218004271388,
0.00016757681441958994,
0.0005255092401057482,
0.0015567562077194452
] |
{
"id": 1,
"code_window": [
"\t)\n",
"\ts := r.Replace(inputFileContents)\n",
"\n",
"\ts = strings.ReplaceAll(s, \"_L_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Global.Left\", s)\n",
"\ts = strings.ReplaceAll(s, \"_R_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Global.Right\", s)\n",
"\n",
"\tassignNeRe := makeFunctionRegex(\"_ASSIGN_NE\", 6)\n",
"\ts = assignNeRe.ReplaceAllString(s, makeTemplateFunctionCall(\"Global.Right.Assign\", 6))\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/colexec/execgen/cmd/execgen/hashtable_gen.go",
"type": "replace",
"edit_start_line_idx": 90
} | load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "bank",
srcs = ["bank.go"],
importpath = "github.com/cockroachdb/cockroach/pkg/workload/bank",
visibility = ["//visibility:public"],
deps = [
"//pkg/col/coldata",
"//pkg/sql/types",
"//pkg/util/bufalloc",
"//pkg/util/timeutil",
"//pkg/workload",
"//pkg/workload/histogram",
"@com_github_cockroachdb_errors//:errors",
"@com_github_spf13_pflag//:pflag",
"@org_golang_x_exp//rand",
],
)
go_test(
name = "bank_test",
size = "small",
srcs = [
"bank_test.go",
"main_test.go",
],
embed = [":bank"],
deps = [
"//pkg/base",
"//pkg/security",
"//pkg/security/securitytest",
"//pkg/server",
"//pkg/testutils/serverutils",
"//pkg/testutils/sqlutils",
"//pkg/testutils/testcluster",
"//pkg/util/leaktest",
"//pkg/util/randutil",
"//pkg/workload/workloadsql",
],
)
| pkg/workload/bank/BUILD.bazel | 0 | https://github.com/cockroachdb/cockroach/commit/5015f06f107c92eab8fa180ea28c09c68e294e94 | [
0.00017564439622219652,
0.0001743143075145781,
0.00017230617231689394,
0.00017489335732534528,
0.0000011858630841743434
] |
{
"id": 1,
"code_window": [
"\t)\n",
"\ts := r.Replace(inputFileContents)\n",
"\n",
"\ts = strings.ReplaceAll(s, \"_L_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Global.Left\", s)\n",
"\ts = strings.ReplaceAll(s, \"_R_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Global.Right\", s)\n",
"\n",
"\tassignNeRe := makeFunctionRegex(\"_ASSIGN_NE\", 6)\n",
"\ts = assignNeRe.ReplaceAllString(s, makeTemplateFunctionCall(\"Global.Right.Assign\", 6))\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/colexec/execgen/cmd/execgen/hashtable_gen.go",
"type": "replace",
"edit_start_line_idx": 90
} | # Hexadecimal numbers.
eval
0xa
----
10
eval
0xcafe1111
----
3405648145
# Hexadecimal bytes literals.
eval
x'636174'
----
'\x636174'
eval
X'636174'
----
'\x636174'
eval
pg_typeof(x'636174')
----
'bytea'
eval
'\x636174'::bytes
----
'\x636174'
eval
x'636174'::string
----
'cat'
| pkg/sql/sem/tree/testdata/eval/hex | 0 | https://github.com/cockroachdb/cockroach/commit/5015f06f107c92eab8fa180ea28c09c68e294e94 | [
0.00017581060819793493,
0.00017206939810421318,
0.00016620985115878284,
0.00017312858835794032,
0.0000036403480407898314
] |
{
"id": 1,
"code_window": [
"\t)\n",
"\ts := r.Replace(inputFileContents)\n",
"\n",
"\ts = strings.ReplaceAll(s, \"_L_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Global.Left\", s)\n",
"\ts = strings.ReplaceAll(s, \"_R_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Global.Right\", s)\n",
"\n",
"\tassignNeRe := makeFunctionRegex(\"_ASSIGN_NE\", 6)\n",
"\ts = assignNeRe.ReplaceAllString(s, makeTemplateFunctionCall(\"Global.Right.Assign\", 6))\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/colexec/execgen/cmd/execgen/hashtable_gen.go",
"type": "replace",
"edit_start_line_idx": 90
} | #! /usr/bin/env expect -f
source [file join [file dirname $argv0] common.tcl]
start_test "Ensure demo commands are not available in the sql shell"
# Set up the initial cluster.
start_server $argv
# Spawn a sql shell.
spawn $argv sql
set client_spawn_id $spawn_id
eexpect root@
# Ensure the demo command does not work.
send "\\demo shutdown 2\n"
eexpect "\\demo can only be run with cockroach demo"
# Exit the shell.
interrupt
eexpect eof
# Have good manners and clean up.
stop_server $argv
end_test
| pkg/cli/interactive_tests/test_sql_demo_node_cmds.tcl | 0 | https://github.com/cockroachdb/cockroach/commit/5015f06f107c92eab8fa180ea28c09c68e294e94 | [
0.00017587516049388796,
0.00017281070176977664,
0.00017122752615250647,
0.00017132943321485072,
0.0000021672954062523786
] |
{
"id": 2,
"code_window": [
"// Note that not all template variables can be present in the template, and it\n",
"// is ok - such replacements will be noops.\n",
"func replaceProjTmplVariables(tmpl string) string {\n",
"\ttmpl = strings.ReplaceAll(tmpl, \"_L_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ttmpl = replaceManipulationFuncsAmbiguous(\".Left\", tmpl)\n",
"\ttmpl = strings.ReplaceAll(tmpl, \"_R_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ttmpl = replaceManipulationFuncsAmbiguous(\".Right\", tmpl)\n",
"\ttmpl = strings.ReplaceAll(tmpl, \"_RETURN_UNSAFEGET\", \"execgen.RETURNUNSAFEGET\")\n",
"\ttmpl = replaceManipulationFuncsAmbiguous(\".Right\", tmpl)\n",
"\n",
"\tr := strings.NewReplacer(\n",
"\t\t\"_LEFT_CANONICAL_TYPE_FAMILY\", \"{{.LeftCanonicalFamilyStr}}\",\n",
"\t\t\"_LEFT_TYPE_WIDTH\", typeWidthReplacement,\n",
"\t\t\"_RIGHT_CANONICAL_TYPE_FAMILY\", \"{{.RightCanonicalFamilyStr}}\",\n",
"\t\t\"_RIGHT_TYPE_WIDTH\", typeWidthReplacement,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/colexec/execgen/cmd/execgen/projection_ops_gen.go",
"type": "replace",
"edit_start_line_idx": 25
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"fmt"
"io"
"strings"
"text/template"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/errors"
)
// hashTableMode describes the mode in which the hash table can operate.
type hashTableMode int
const (
// hashTableFullBuildDefaultProbe is the hashTableMode in which the hash
// table is built fully (i.e. not distinct) and is being probed by
// default (i.e. not deleting). This mode is used by the hash joiner
// for non-set operation joins.
hashTableFullBuildDefaultProbe hashTableMode = iota
// hashTableDistinctBuildDefaultProbe is the hashTableMode in which the
// hash table is built distinctly (i.e. duplicates are not added into it)
// and is being probed by default (i.e. not deleting). This mode is used by
// the unordered distinct.
hashTableDistinctBuildDefaultProbe
// hashTableFullBuildDeletingProbe is the hashTableMode in which the hash
// table is built fully (i.e. not distinct) and is being probed in deleting
// mode (i.e. tuples are deleted from the hash table once they are matched
// with a probe tuple). This mode is used by the hash joiner for set
// operation joins.
hashTableFullBuildDeletingProbe
)
func (m hashTableMode) String() string {
switch m {
case hashTableFullBuildDefaultProbe:
return "full_default"
case hashTableDistinctBuildDefaultProbe:
return "distinct"
case hashTableFullBuildDeletingProbe:
return "full_deleting"
default:
colexecerror.InternalError(errors.AssertionFailedf("unexpected hashTableMode"))
// This code is unreachable, but the compiler cannot infer that.
return ""
}
}
func (m hashTableMode) IsDistinctBuild() bool {
return m == hashTableDistinctBuildDefaultProbe
}
func (m hashTableMode) IsDeletingProbe() bool {
return m == hashTableFullBuildDeletingProbe
}
// Remove unused warnings.
var _ = hashTableMode.IsDistinctBuild
var _ = hashTableMode.IsDeletingProbe
const hashTableTmpl = "pkg/sql/colexec/colexechash/hashtable_tmpl.go"
func genHashTable(inputFileContents string, wr io.Writer, htm hashTableMode) error {
r := strings.NewReplacer(
"_LEFT_CANONICAL_TYPE_FAMILY", "{{.LeftCanonicalFamilyStr}}",
"_LEFT_TYPE_WIDTH", typeWidthReplacement,
"_RIGHT_CANONICAL_TYPE_FAMILY", "{{.RightCanonicalFamilyStr}}",
"_RIGHT_TYPE_WIDTH", typeWidthReplacement,
"_ProbeType", "{{.Left.VecMethod}}",
"_BuildType", "{{.Right.VecMethod}}",
"_USE_PROBE_SEL", ".UseProbeSel",
"_PROBING_AGAINST_ITSELF", "$probingAgainstItself",
"_DELETING_PROBE_MODE", "$deletingProbeMode",
"_OVERLOADS", ".Overloads",
)
s := r.Replace(inputFileContents)
s = strings.ReplaceAll(s, "_L_UNSAFEGET", "execgen.UNSAFEGET")
s = replaceManipulationFuncsAmbiguous(".Global.Left", s)
s = strings.ReplaceAll(s, "_R_UNSAFEGET", "execgen.UNSAFEGET")
s = replaceManipulationFuncsAmbiguous(".Global.Right", s)
assignNeRe := makeFunctionRegex("_ASSIGN_NE", 6)
s = assignNeRe.ReplaceAllString(s, makeTemplateFunctionCall("Global.Right.Assign", 6))
checkColBody := makeFunctionRegex("_CHECK_COL_BODY", 6)
s = checkColBody.ReplaceAllString(s,
`{{template "checkColBody" buildDict "Global" .Global "ProbeHasNulls" $1 "BuildHasNulls" $2 "SelectDistinct" $3 "UseProbeSel" $4 "ProbingAgainstItself" $5 "DeletingProbeMode" $6}}`,
)
checkColWithNulls := makeFunctionRegex("_CHECK_COL_WITH_NULLS", 3)
s = checkColWithNulls.ReplaceAllString(s,
`{{template "checkColWithNulls" buildDict "Global" . "UseProbeSel" $1 "ProbingAgainstItself" $2 "DeletingProbeMode" $3}}`,
)
checkColFunctionTemplate := makeFunctionRegex("_CHECK_COL_FUNCTION_TEMPLATE", 2)
s = checkColFunctionTemplate.ReplaceAllString(s,
`{{template "checkColFunctionTemplate" buildDict "Global" . "ProbingAgainstItself" $1 "DeletingProbeMode" $2}}`,
)
checkColForDistinctWithNulls := makeFunctionRegex("_CHECK_COL_FOR_DISTINCT_WITH_NULLS", 1)
s = checkColForDistinctWithNulls.ReplaceAllString(s,
`{{template "checkColForDistinctWithNulls" buildDict "Global" . "UseProbeSel" $1}}`,
)
checkBody := makeFunctionRegex("_CHECK_BODY", 3)
s = checkBody.ReplaceAllString(s,
`{{template "checkBody" buildDict "Global" . "SelectSameTuples" $1 "DeletingProbeMode" $2 "SelectDistinct" $3}}`,
)
updateSelBody := makeFunctionRegex("_UPDATE_SEL_BODY", 1)
s = updateSelBody.ReplaceAllString(s,
`{{template "updateSelBody" buildDict "Global" . "UseSel" $1}}`,
)
tmpl, err := template.New("hashtable").Funcs(template.FuncMap{"buildDict": buildDict}).Parse(s)
if err != nil {
return err
}
var data *twoArgsResolvedOverloadInfo
for _, ov := range twoArgsResolvedOverloadsInfo.CmpOps {
if ov.Name == execgen.ComparisonOpName[tree.NE] {
data = ov
break
}
}
if data == nil {
colexecerror.InternalError(errors.AssertionFailedf("unexpectedly didn't find overload for tree.NE"))
}
return tmpl.Execute(wr, struct {
Overloads interface{}
HashTableMode hashTableMode
}{
Overloads: data,
HashTableMode: htm,
})
}
func init() {
hashTableGenerator := func(htm hashTableMode) generator {
return func(inputFileContents string, wr io.Writer) error {
return genHashTable(inputFileContents, wr, htm)
}
}
for _, mode := range []hashTableMode{
hashTableFullBuildDefaultProbe,
hashTableDistinctBuildDefaultProbe,
hashTableFullBuildDeletingProbe,
} {
registerGenerator(hashTableGenerator(mode), fmt.Sprintf("hashtable_%s.eg.go", mode), hashTableTmpl)
}
}
| pkg/sql/colexec/execgen/cmd/execgen/hashtable_gen.go | 1 | https://github.com/cockroachdb/cockroach/commit/5015f06f107c92eab8fa180ea28c09c68e294e94 | [
0.053121667355298996,
0.005969678517431021,
0.0001665595336817205,
0.00017907422443386167,
0.013730121776461601
] |
{
"id": 2,
"code_window": [
"// Note that not all template variables can be present in the template, and it\n",
"// is ok - such replacements will be noops.\n",
"func replaceProjTmplVariables(tmpl string) string {\n",
"\ttmpl = strings.ReplaceAll(tmpl, \"_L_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ttmpl = replaceManipulationFuncsAmbiguous(\".Left\", tmpl)\n",
"\ttmpl = strings.ReplaceAll(tmpl, \"_R_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ttmpl = replaceManipulationFuncsAmbiguous(\".Right\", tmpl)\n",
"\ttmpl = strings.ReplaceAll(tmpl, \"_RETURN_UNSAFEGET\", \"execgen.RETURNUNSAFEGET\")\n",
"\ttmpl = replaceManipulationFuncsAmbiguous(\".Right\", tmpl)\n",
"\n",
"\tr := strings.NewReplacer(\n",
"\t\t\"_LEFT_CANONICAL_TYPE_FAMILY\", \"{{.LeftCanonicalFamilyStr}}\",\n",
"\t\t\"_LEFT_TYPE_WIDTH\", typeWidthReplacement,\n",
"\t\t\"_RIGHT_CANONICAL_TYPE_FAMILY\", \"{{.RightCanonicalFamilyStr}}\",\n",
"\t\t\"_RIGHT_TYPE_WIDTH\", typeWidthReplacement,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/colexec/execgen/cmd/execgen/projection_ops_gen.go",
"type": "replace",
"edit_start_line_idx": 25
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/privilege"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/util/log/eventpb"
)
type commentOnColumnNode struct {
n *tree.CommentOnColumn
tableDesc catalog.TableDescriptor
}
// CommentOnColumn adds a comment on a column.
// Privileges: CREATE on table.
func (p *planner) CommentOnColumn(ctx context.Context, n *tree.CommentOnColumn) (planNode, error) {
if err := checkSchemaChangeEnabled(
ctx,
p.ExecCfg(),
"COMMENT ON COLUMN",
); err != nil {
return nil, err
}
var tableName tree.TableName
if n.ColumnItem.TableName != nil {
tableName = n.ColumnItem.TableName.ToTableName()
}
tableDesc, err := p.resolveUncachedTableDescriptor(ctx, &tableName, true, tree.ResolveRequireTableDesc)
if err != nil {
return nil, err
}
if err := p.CheckPrivilege(ctx, tableDesc, privilege.CREATE); err != nil {
return nil, err
}
return &commentOnColumnNode{n: n, tableDesc: tableDesc}, nil
}
func (n *commentOnColumnNode) startExec(params runParams) error {
col, err := n.tableDesc.FindColumnWithName(n.n.ColumnItem.ColumnName)
if err != nil {
return err
}
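// A non-nil comment upserts the row in system.comments; a nil comment
// deletes any existing row.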
if n.n.Comment != nil {
_, err := params.p.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx(
params.ctx,
"set-column-comment",
params.p.Txn(),
sessiondata.InternalExecutorOverride{User: security.RootUserName()},
"UPSERT INTO system.comments VALUES ($1, $2, $3, $4)",
keys.ColumnCommentType,
n.tableDesc.GetID(),
col.GetPGAttributeNum(),
*n.n.Comment)
if err != nil {
return err
}
} else {
_, err := params.p.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx(
params.ctx,
"delete-column-comment",
params.p.Txn(),
sessiondata.InternalExecutorOverride{User: security.RootUserName()},
"DELETE FROM system.comments WHERE type=$1 AND object_id=$2 AND sub_id=$3",
keys.ColumnCommentType,
n.tableDesc.GetID(),
col.GetPGAttributeNum())
if err != nil {
return err
}
}
comment := ""
if n.n.Comment != nil {
comment = *n.n.Comment
}
tn, err := params.p.getQualifiedTableName(params.ctx, n.tableDesc)
if err != nil {
return err
}
return params.p.logEvent(params.ctx,
n.tableDesc.GetID(),
&eventpb.CommentOnColumn{
TableName: tn.FQString(),
ColumnName: string(n.n.ColumnItem.ColumnName),
Comment: comment,
NullComment: n.n.Comment == nil,
})
}
func (n *commentOnColumnNode) Next(runParams) (bool, error) { return false, nil }
func (n *commentOnColumnNode) Values() tree.Datums { return tree.Datums{} }
func (n *commentOnColumnNode) Close(context.Context) {}
| pkg/sql/comment_on_column.go | 0 | https://github.com/cockroachdb/cockroach/commit/5015f06f107c92eab8fa180ea28c09c68e294e94 | [
0.00017920881509780884,
0.0001688758347881958,
0.00016479463374707848,
0.00016797042917460203,
0.0000037792633520439267
] |
{
"id": 2,
"code_window": [
"// Note that not all template variables can be present in the template, and it\n",
"// is ok - such replacements will be noops.\n",
"func replaceProjTmplVariables(tmpl string) string {\n",
"\ttmpl = strings.ReplaceAll(tmpl, \"_L_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ttmpl = replaceManipulationFuncsAmbiguous(\".Left\", tmpl)\n",
"\ttmpl = strings.ReplaceAll(tmpl, \"_R_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ttmpl = replaceManipulationFuncsAmbiguous(\".Right\", tmpl)\n",
"\ttmpl = strings.ReplaceAll(tmpl, \"_RETURN_UNSAFEGET\", \"execgen.RETURNUNSAFEGET\")\n",
"\ttmpl = replaceManipulationFuncsAmbiguous(\".Right\", tmpl)\n",
"\n",
"\tr := strings.NewReplacer(\n",
"\t\t\"_LEFT_CANONICAL_TYPE_FAMILY\", \"{{.LeftCanonicalFamilyStr}}\",\n",
"\t\t\"_LEFT_TYPE_WIDTH\", typeWidthReplacement,\n",
"\t\t\"_RIGHT_CANONICAL_TYPE_FAMILY\", \"{{.RightCanonicalFamilyStr}}\",\n",
"\t\t\"_RIGHT_TYPE_WIDTH\", typeWidthReplacement,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/colexec/execgen/cmd/execgen/projection_ops_gen.go",
"type": "replace",
"edit_start_line_idx": 25
} | # LogicTest: 5node
statement ok
CREATE TABLE ab (a INT PRIMARY KEY, b INT); INSERT INTO ab (a, b) VALUES (1, 10)
## Table index change: Add/remove index that query depends on, and ensure that
## the plan is recomputed each time.
statement ok
PREPARE change_index AS SELECT * FROM [EXPLAIN SELECT * FROM ab WHERE b=10]
query T
EXECUTE change_index
----
distribution: full
vectorized: true
·
• filter
│ filter: b = 10
│
└── • scan
missing stats
table: ab@primary
spans: FULL SCAN
statement ok
CREATE INDEX bindex ON ab (b)
query T
EXECUTE change_index
----
distribution: full
vectorized: true
·
• scan
missing stats
table: ab@bindex
spans: [/10 - /10]
statement ok
DROP INDEX bindex
query T
EXECUTE change_index
----
distribution: full
vectorized: true
·
• filter
│ filter: b = 10
│
└── • scan
missing stats
table: ab@primary
spans: FULL SCAN
## Statistics change: Create statistics and ensure that the plan is recalculated.
statement ok
CREATE TABLE cd (c INT PRIMARY KEY, d INT)
statement ok
PREPARE change_stats AS SELECT * FROM [EXPLAIN SELECT * FROM ab JOIN cd ON a=c]
query T
EXECUTE change_stats
----
distribution: full
vectorized: true
·
• merge join
│ equality: (a) = (c)
│ left cols are key
│ right cols are key
│
├── • scan
│ missing stats
│ table: ab@primary
│ spans: FULL SCAN
│
└── • scan
missing stats
table: cd@primary
spans: FULL SCAN
statement ok
CREATE STATISTICS s FROM ab
# Now that the optimizer knows table ab has one row (and it assumes a much
# higher number of rows for cd), it should choose lookup join.
# We allow retry because stat cache invalidation happens asynchronously.
query T retry
EXECUTE change_stats
----
distribution: full
vectorized: true
·
• lookup join
│ table: cd@primary
│ equality: (a) = (c)
│ equality cols are key
│
└── • scan
estimated row count: 1 (100% of the table; stats collected <hidden> ago)
table: ab@primary
spans: FULL SCAN
# Verify the plan of a very simple query which should be using the placeholder
# fast path.
statement ok
PREPARE pklookup AS SELECT b FROM ab WHERE a = $1
query T
EXPLAIN ANALYZE EXECUTE pklookup(1)
----
planning time: 10µs
execution time: 100µs
distribution: <hidden>
vectorized: <hidden>
rows read from KV: 1 (8 B)
maximum memory usage: <hidden>
network usage: <hidden>
regions: <hidden>
·
• scan
nodes: <hidden>
regions: <hidden>
actual row count: 1
KV time: 0µs
KV contention time: 0µs
KV rows read: 1
KV bytes read: 8 B
estimated row count: 0
table: ab@primary
spans: [/1 - /1]
query T
EXPLAIN ANALYZE EXECUTE pklookup(2)
----
planning time: 10µs
execution time: 100µs
distribution: <hidden>
vectorized: <hidden>
maximum memory usage: <hidden>
network usage: <hidden>
regions: <hidden>
·
• scan
nodes: <hidden>
regions: <hidden>
actual row count: 0
KV time: 0µs
KV contention time: 0µs
KV rows read: 0
KV bytes read: 0 B
estimated row count: 0
table: ab@primary
spans: [/2 - /2]
| pkg/sql/opt/exec/execbuilder/testdata/prepare | 0 | https://github.com/cockroachdb/cockroach/commit/5015f06f107c92eab8fa180ea28c09c68e294e94 | [
0.00017302118067163974,
0.0001667973556322977,
0.00016435539873782545,
0.00016657874220982194,
0.0000018860428099287674
] |
{
"id": 2,
"code_window": [
"// Note that not all template variables can be present in the template, and it\n",
"// is ok - such replacements will be noops.\n",
"func replaceProjTmplVariables(tmpl string) string {\n",
"\ttmpl = strings.ReplaceAll(tmpl, \"_L_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ttmpl = replaceManipulationFuncsAmbiguous(\".Left\", tmpl)\n",
"\ttmpl = strings.ReplaceAll(tmpl, \"_R_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ttmpl = replaceManipulationFuncsAmbiguous(\".Right\", tmpl)\n",
"\ttmpl = strings.ReplaceAll(tmpl, \"_RETURN_UNSAFEGET\", \"execgen.RETURNUNSAFEGET\")\n",
"\ttmpl = replaceManipulationFuncsAmbiguous(\".Right\", tmpl)\n",
"\n",
"\tr := strings.NewReplacer(\n",
"\t\t\"_LEFT_CANONICAL_TYPE_FAMILY\", \"{{.LeftCanonicalFamilyStr}}\",\n",
"\t\t\"_LEFT_TYPE_WIDTH\", typeWidthReplacement,\n",
"\t\t\"_RIGHT_CANONICAL_TYPE_FAMILY\", \"{{.RightCanonicalFamilyStr}}\",\n",
"\t\t\"_RIGHT_TYPE_WIDTH\", typeWidthReplacement,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/colexec/execgen/cmd/execgen/projection_ops_gen.go",
"type": "replace",
"edit_start_line_idx": 25
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package slstorage
import (
"context"
"math/rand"
"time"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqlliveness"
"github.com/cockroachdb/cockroach/pkg/util/cache"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/syncutil/singleflight"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/logtags"
"github.com/cockroachdb/redact"
)
// GCInterval specifies the duration between attempts to delete extant
// sessions that have expired.
var GCInterval = settings.RegisterDurationSetting(
"server.sqlliveness.gc_interval",
"duration between attempts to delete extant sessions that have expired",
20*time.Second,
settings.NonNegativeDuration,
)
// GCJitter specifies the jitter fraction on the interval between attempts to
// delete extant sessions that have expired.
//
// [(1-GCJitter) * GCInterval, (1+GCJitter) * GCInterval]
var GCJitter = settings.RegisterFloatSetting(
"server.sqlliveness.gc_jitter",
"jitter fraction on the duration between attempts to delete extant sessions that have expired",
.15,
func(f float64) error {
if f < 0 || f > 1 {
return errors.Errorf("%f is not in [0, 1]", f)
}
return nil
},
)
// CacheSize is the size of the entries to store in the cache.
// In general this should be larger than the number of nodes in the cluster.
//
// TODO(ajwerner): thread memory monitoring to this level and consider
// increasing the cache size dynamically. The entries are just bytes each so
// this should not be a big deal.
var CacheSize = settings.RegisterIntSetting(
"server.sqlliveness.storage_session_cache_size",
"number of session entries to store in the LRU",
1024)
// Storage implements sqlliveness.Storage.
type Storage struct {
settings *cluster.Settings
stopper *stop.Stopper
clock *hlc.Clock
db *kv.DB
codec keys.SQLCodec
metrics Metrics
gcInterval func() time.Duration
g singleflight.Group
newTimer func() timeutil.TimerI
tableID descpb.ID
mu struct {
syncutil.Mutex
started bool
// liveSessions caches the current view of expirations of live sessions.
liveSessions *cache.UnorderedCache
// deadSessions caches the IDs of sessions which have not been found. This
// package makes an assumption that a session which is queried at some
// point was alive (otherwise, how would one know the ID to query?).
// Furthermore, this package assumes that once a sessions no longer exists,
// it will never exist again in the future.
deadSessions *cache.UnorderedCache
}
}
// NewTestingStorage constructs a new storage with control for the database
// in which the `sqlliveness` table should exist.
func NewTestingStorage(
stopper *stop.Stopper,
clock *hlc.Clock,
db *kv.DB,
codec keys.SQLCodec,
settings *cluster.Settings,
sqllivenessTableID descpb.ID,
newTimer func() timeutil.TimerI,
) *Storage {
s := &Storage{
settings: settings,
stopper: stopper,
clock: clock,
db: db,
codec: codec,
tableID: sqllivenessTableID,
newTimer: newTimer,
gcInterval: func() time.Duration {
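// Jitter the interval uniformly within [(1-jitter), (1+jitter)] of the base.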
baseInterval := GCInterval.Get(&settings.SV)
jitter := GCJitter.Get(&settings.SV)
frac := 1 + (2*rand.Float64()-1)*jitter
return time.Duration(frac * float64(baseInterval.Nanoseconds()))
},
metrics: makeMetrics(),
}
cacheConfig := cache.Config{
Policy: cache.CacheLRU,
ShouldEvict: func(size int, key, value interface{}) bool {
return size > int(CacheSize.Get(&settings.SV))
},
}
s.mu.liveSessions = cache.NewUnorderedCache(cacheConfig)
s.mu.deadSessions = cache.NewUnorderedCache(cacheConfig)
return s
}
// NewStorage creates a new storage struct.
func NewStorage(
stopper *stop.Stopper,
clock *hlc.Clock,
db *kv.DB,
codec keys.SQLCodec,
settings *cluster.Settings,
) *Storage {
return NewTestingStorage(stopper, clock, db, codec, settings, keys.SqllivenessID,
timeutil.DefaultTimeSource{}.NewTimer)
}
// Metrics returns the associated metrics struct.
func (s *Storage) Metrics() *Metrics {
return &s.metrics
}
// Start runs the delete sessions loop.
func (s *Storage) Start(ctx context.Context) {
s.mu.Lock()
defer s.mu.Unlock()
if s.mu.started {
return
}
_ = s.stopper.RunAsyncTask(ctx, "slstorage", s.deleteSessionsLoop)
s.mu.started = true
}
// IsAlive determines whether a given session is alive. If this method returns
// true, the session may no longer be alive, but if it returns false, the
// session definitely is not alive.
func (s *Storage) IsAlive(ctx context.Context, sid sqlliveness.SessionID) (alive bool, err error) {
return s.isAlive(ctx, sid, sync)
}
type readType byte
const (
_ readType = iota
sync
async
)
func (s *Storage) isAlive(
ctx context.Context, sid sqlliveness.SessionID, syncOrAsync readType,
) (alive bool, _ error) {
s.mu.Lock()
if !s.mu.started {
s.mu.Unlock()
return false, sqlliveness.NotStartedError
}
if _, ok := s.mu.deadSessions.Get(sid); ok {
s.mu.Unlock()
s.metrics.IsAliveCacheHits.Inc(1)
return false, nil
}
var prevExpiration hlc.Timestamp
if expiration, ok := s.mu.liveSessions.Get(sid); ok {
expiration := expiration.(hlc.Timestamp)
// The record exists and is valid.
if s.clock.Now().Less(expiration) {
s.mu.Unlock()
s.metrics.IsAliveCacheHits.Inc(1)
return true, nil
}
// The record exists in the cache but seems expired according to our clock.
// If we returned that the session was alive regardless of the expiration
// then we'd never update the cache. Go fetch the session and pass in the
// current view of the expiration. If the expiration has not changed, then
// the session is expired and should be deleted. If it has, get the new
// expiration for the cache.
prevExpiration = expiration
}
// Launch singleflight to go read from the database and maybe delete the
// entry. If it is found, we can add it and its expiration to the liveSessions
// cache. If it isn't found, we know it's dead and we can add that to the
// deadSessions cache.
resChan, _ := s.g.DoChan(string(sid), func() (interface{}, error) {
// Note that we use a new `context` here to avoid a situation where a cancellation
// of the first context cancels other callers to the `acquireNodeLease()` method,
// because of its use of `singleflight.Group`. See issue #41780 for how this has
// happened.
newCtx, cancel := s.stopper.WithCancelOnQuiesce(
logtags.WithTags(context.Background(), logtags.FromContext(ctx)),
)
defer cancel()
// store the result underneath the singleflight to avoid the need
// for additional synchronization.
live, expiration, err := s.deleteOrFetchSession(newCtx, sid, prevExpiration)
if err != nil {
return nil, err
}
s.mu.Lock()
defer s.mu.Unlock()
if live {
s.mu.liveSessions.Add(sid, expiration)
} else {
			s.mu.liveSessions.Del(sid)
s.mu.deadSessions.Add(sid, nil)
}
return live, nil
})
s.mu.Unlock()
s.metrics.IsAliveCacheMisses.Inc(1)
// If we do not want to wait for the result, assume that the session is
// indeed alive.
if syncOrAsync == async {
return true, nil
}
select {
case res := <-resChan:
if res.Err != nil {
return false, res.Err
}
return res.Val.(bool), nil
case <-ctx.Done():
return false, ctx.Err()
}
}
// deleteOrFetchSession returns whether the queried session currently exists by
// reading from the database. If the passed expiration is non-zero and the
// existing record has the same expiration, the record will be deleted and
// false will be returned, indicating that it no longer exists. If the record
// exists and has a differing expiration timestamp, true and the associated
// expiration will be returned.
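//
// For example (illustrative): if IsAlive has a cached expiration e0 which is
// in the past, it calls this method with prevExpiration = e0. If the stored
// record still carries e0, the session has genuinely expired and the record
// is deleted; if the record now carries a newer e1, the session was extended
// and (true, e1) is returned so the cache can be refreshed.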
func (s *Storage) deleteOrFetchSession(
ctx context.Context, sid sqlliveness.SessionID, prevExpiration hlc.Timestamp,
) (alive bool, expiration hlc.Timestamp, err error) {
if err := s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
k := s.makeSessionKey(sid)
kv, err := txn.Get(ctx, k)
if err != nil {
return err
}
// The session is not alive.
if kv.Value == nil {
return nil
}
expiration, err = decodeValue(kv)
if err != nil {
return errors.Wrapf(err, "failed to decode expiration for %s",
redact.SafeString(sid.String()))
}
if !expiration.Equal(prevExpiration) {
alive = true
return nil
}
// The session is expired and needs to be deleted.
expiration = hlc.Timestamp{}
return txn.Del(ctx, k)
}); err != nil {
return false, hlc.Timestamp{}, errors.Wrapf(err,
"could not query session id: %s", sid)
}
return alive, expiration, nil
}
// deleteSessionsLoop is launched in Start and periodically deletes sessions.
func (s *Storage) deleteSessionsLoop(ctx context.Context) {
ctx, cancel := s.stopper.WithCancelOnQuiesce(ctx)
defer cancel()
t := s.newTimer()
t.Reset(s.gcInterval())
for {
select {
case <-ctx.Done():
return
case <-t.Ch():
t.MarkRead()
s.deleteExpiredSessions(ctx)
t.Reset(s.gcInterval())
}
}
}
// TODO(ajwerner): find a way to utilize this table scan to update the
// expirations stored in the in-memory cache or remove it altogether. As it
// stands, this scan will run more frequently than sessions expire but it won't
// propagate that fact to IsAlive. It seems like the lazy session deletion
// which has been added should be sufficient to delete the expired sessions
// which matter. This would align more closely with the behavior in
// node-liveness.
func (s *Storage) deleteExpiredSessions(ctx context.Context) {
now := s.clock.Now()
var deleted int64
if err := s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
deleted = 0 // reset for restarts
start := s.makeTablePrefix()
end := start.PrefixEnd()
const maxRows = 1024 // arbitrary but plenty
for {
rows, err := txn.Scan(ctx, start, end, maxRows)
if err != nil {
return err
}
if len(rows) == 0 {
return nil
}
var toDel []interface{}
for i := range rows {
exp, err := decodeValue(rows[i])
if err != nil {
log.Warningf(ctx, "failed to decode row %s: %v", rows[i].Key.String(), err)
}
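				// Note: on a decode failure exp remains the zero timestamp,
				// which is Less(now), so an undecodable row is deleted below.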
if exp.Less(now) {
toDel = append(toDel, rows[i].Key)
deleted++
}
}
if err := txn.Del(ctx, toDel...); err != nil {
return err
}
start = rows[len(rows)-1].Key.Next()
}
}); err != nil {
if ctx.Err() == nil {
log.Errorf(ctx, "could not delete expired sessions: %+v", err)
}
return
}
s.metrics.SessionDeletionsRuns.Inc(1)
s.metrics.SessionsDeleted.Inc(deleted)
if log.V(2) || deleted > 0 {
log.Infof(ctx, "deleted %d expired SQL liveness sessions", deleted)
}
}
// Insert inserts the input Session in table `system.sqlliveness`.
// A client must never call this method with a session which was previously
// used! The contract of IsAlive is that once a session becomes not alive, it
// must never become alive again.
func (s *Storage) Insert(
ctx context.Context, sid sqlliveness.SessionID, expiration hlc.Timestamp,
) (err error) {
k := s.makeSessionKey(sid)
v := encodeValue(expiration)
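	// InitPut only succeeds if the key is absent or already holds this exact
	// value; with failOnTombstones=true it also fails if the key was previously
	// deleted. This enforces the contract above that a session ID is never
	// reused (a note on kv.DB.InitPut semantics, added for clarity).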
if err := s.db.InitPut(ctx, k, &v, true); err != nil {
s.metrics.WriteFailures.Inc(1)
return errors.Wrapf(err, "could not insert session %s", sid)
}
log.Infof(ctx, "inserted sqlliveness session %s", sid)
s.metrics.WriteSuccesses.Inc(1)
return nil
}
// Update updates the row in table `system.sqlliveness` with the given input
// if the row exists, and in that case returns true. Otherwise it returns
// false.
func (s *Storage) Update(
ctx context.Context, sid sqlliveness.SessionID, expiration hlc.Timestamp,
) (sessionExists bool, err error) {
err = s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
k := s.makeSessionKey(sid)
		kv, err := txn.Get(ctx, k)
if err != nil {
return err
}
if sessionExists = kv.Value != nil; !sessionExists {
return nil
}
v := encodeValue(expiration)
		return txn.Put(ctx, k, &v)
})
if err != nil || !sessionExists {
s.metrics.WriteFailures.Inc(1)
}
if err != nil {
return false, errors.Wrapf(err, "could not update session %s", sid)
}
s.metrics.WriteSuccesses.Inc(1)
return sessionExists, nil
}
// CachedReader returns an implementation of sqlliveness.Reader which does
// not synchronously read from the store. Calls to IsAlive will return the
// currently known state of the session, but will trigger an asynchronous
// refresh of the state of the session if it is not known.
func (s *Storage) CachedReader() sqlliveness.Reader {
return (*cachedStorage)(s)
}
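// A sketch of the intended use (illustrative, not from the original source):
// hot paths that cannot afford a synchronous KV read would hold the
// CachedReader, while correctness-critical paths use the Storage itself:
//
//	reader := storage.CachedReader()
//	alive, _ := reader.IsAlive(ctx, sessionID) // never blocks on KV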
// cachedStorage implements sqlliveness.Storage but does not read from the
// underlying store synchronously during IsAlive.
type cachedStorage Storage
func (s *cachedStorage) IsAlive(
ctx context.Context, sid sqlliveness.SessionID,
) (alive bool, err error) {
return (*Storage)(s).isAlive(ctx, sid, async)
}
func (s *Storage) makeTablePrefix() roachpb.Key {
return s.codec.IndexPrefix(uint32(s.tableID), 1)
}
func (s *Storage) makeSessionKey(id sqlliveness.SessionID) roachpb.Key {
return keys.MakeFamilyKey(encoding.EncodeBytesAscending(s.makeTablePrefix(), id.UnsafeBytes()), 0)
}
func decodeValue(kv kv.KeyValue) (hlc.Timestamp, error) {
tup, err := kv.Value.GetTuple()
if err != nil {
return hlc.Timestamp{},
errors.Wrapf(err, "failed to decode tuple from key %v", kv.Key)
}
_, dec, err := encoding.DecodeDecimalValue(tup)
if err != nil {
return hlc.Timestamp{},
errors.Wrapf(err, "failed to decode decimal from key %v", kv.Key)
}
return tree.DecimalToHLC(&dec)
}
func encodeValue(expiration hlc.Timestamp) roachpb.Value {
var v roachpb.Value
dec := tree.TimestampToDecimal(expiration)
v.SetTuple(encoding.EncodeDecimalValue(nil, 2, &dec))
return v
}
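// Round-trip note (illustrative): encodeValue stores the expiration as a
// decimal inside a tuple value, and decodeValue recovers it, so writing
// encodeValue(ts) and then decoding the stored KV returns the original
// timestamp's wall and logical components.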
| pkg/sql/sqlliveness/slstorage/slstorage.go | 0 | https://github.com/cockroachdb/cockroach/commit/5015f06f107c92eab8fa180ea28c09c68e294e94 | [
0.0002649863890837878,
0.0001704048627289012,
0.00016225731815211475,
0.00016570664593018591,
0.0000164166594913695
] |
{
"id": 3,
"code_window": [
"\ts := r.Replace(inputFileContents)\n",
"\n",
"\tassignCmpRe := makeFunctionRegex(\"_ASSIGN_CMP\", 6)\n",
"\ts = assignCmpRe.ReplaceAllString(s, makeTemplateFunctionCall(\"Right.Assign\", 6))\n",
"\n",
"\ts = strings.ReplaceAll(s, \"_L_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Left\", s)\n",
"\ts = strings.ReplaceAll(s, \"_R_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Right\", s)\n",
"\n",
"\ts = strings.ReplaceAll(s, \"_HAS_NULLS\", \"$hasNulls\")\n",
"\tselConstLoop := makeFunctionRegex(\"_SEL_CONST_LOOP\", 1)\n",
"\ts = selConstLoop.ReplaceAllString(s, `{{template \"selConstLoop\" buildDict \"Global\" $ \"HasNulls\" $1 \"Overload\" .}}`)\n",
"\tselLoop := makeFunctionRegex(\"_SEL_LOOP\", 1)\n",
"\ts = selLoop.ReplaceAllString(s, `{{template \"selLoop\" buildDict \"Global\" $ \"HasNulls\" $1 \"Overload\" .}}`)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/colexec/execgen/cmd/execgen/selection_ops_gen.go",
"type": "replace",
"edit_start_line_idx": 39
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"io"
"strings"
"text/template"
)
const projConstOpsTmpl = "pkg/sql/colexec/colexecproj/proj_const_ops_tmpl.go"
// replaceProjTmplVariables replaces template variables used in the templates
// for projection operators. It should only be used within this file.
// Note that not all template variables need to be present in the template, and
// that is OK: such replacements will be no-ops.
func replaceProjTmplVariables(tmpl string) string {
tmpl = strings.ReplaceAll(tmpl, "_L_UNSAFEGET", "execgen.UNSAFEGET")
tmpl = replaceManipulationFuncsAmbiguous(".Left", tmpl)
tmpl = strings.ReplaceAll(tmpl, "_R_UNSAFEGET", "execgen.UNSAFEGET")
tmpl = replaceManipulationFuncsAmbiguous(".Right", tmpl)
tmpl = strings.ReplaceAll(tmpl, "_RETURN_UNSAFEGET", "execgen.RETURNUNSAFEGET")
tmpl = replaceManipulationFuncsAmbiguous(".Right", tmpl)
r := strings.NewReplacer(
"_LEFT_CANONICAL_TYPE_FAMILY", "{{.LeftCanonicalFamilyStr}}",
"_LEFT_TYPE_WIDTH", typeWidthReplacement,
"_RIGHT_CANONICAL_TYPE_FAMILY", "{{.RightCanonicalFamilyStr}}",
"_RIGHT_TYPE_WIDTH", typeWidthReplacement,
"_OP_NAME", "proj{{.Name}}{{.Left.VecMethod}}{{.Right.VecMethod}}Op",
"_NAME", "{{.Name}}",
"_L_GO_TYPE", "{{.Left.GoType}}",
"_R_GO_TYPE", "{{.Right.GoType}}",
"_L_TYP", "{{.Left.VecMethod}}",
"_R_TYP", "{{.Right.VecMethod}}",
"_RET_TYP", "{{.Right.RetVecMethod}}",
)
tmpl = r.Replace(tmpl)
assignRe := makeFunctionRegex("_ASSIGN", 6)
tmpl = assignRe.ReplaceAllString(tmpl, makeTemplateFunctionCall("Right.Assign", 6))
tmpl = strings.ReplaceAll(tmpl, "_HAS_NULLS", "$hasNulls")
tmpl = strings.ReplaceAll(tmpl, "_HAS_SEL", "$hasSel")
setProjectionRe := makeFunctionRegex("_SET_PROJECTION", 1)
tmpl = setProjectionRe.ReplaceAllString(tmpl, `{{template "setProjection" buildDict "Global" $ "HasNulls" $1 "Overload" .}}`)
setSingleTupleProjectionRe := makeFunctionRegex("_SET_SINGLE_TUPLE_PROJECTION", 2)
tmpl = setSingleTupleProjectionRe.ReplaceAllString(tmpl, `{{template "setSingleTupleProjection" buildDict "Global" $ "HasNulls" $1 "HasSel" $2 "Overload" .}}`)
return tmpl
}
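// For example (illustrative): given the replacer above, a template fragment
// such as
//
//	func _OP_NAME(left _L_GO_TYPE, right _R_GO_TYPE)
//
// is rewritten to
//
//	func proj{{.Name}}{{.Left.VecMethod}}{{.Right.VecMethod}}Op(left {{.Left.GoType}}, right {{.Right.GoType}})
//
// before the result is parsed as a text/template.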
// replaceProjConstTmplVariables replaces template variables that are specific
// to projection operators with a constant argument. isConstLeft is true when
// the constant is on the left side. It should only be used within this file.
func replaceProjConstTmplVariables(tmpl string, isConstLeft bool) string {
if isConstLeft {
tmpl = strings.ReplaceAll(tmpl, "_CONST_SIDE", "L")
tmpl = strings.ReplaceAll(tmpl, "_IS_CONST_LEFT", "true")
tmpl = strings.ReplaceAll(tmpl, "_OP_CONST_NAME", "proj{{.Name}}{{.Left.VecMethod}}Const{{.Right.VecMethod}}Op")
tmpl = strings.ReplaceAll(tmpl, "_NON_CONST_GOTYPESLICE", "{{.Right.GoTypeSliceName}}")
tmpl = replaceManipulationFuncsAmbiguous(".Right", tmpl)
} else {
tmpl = strings.ReplaceAll(tmpl, "_CONST_SIDE", "R")
tmpl = strings.ReplaceAll(tmpl, "_IS_CONST_LEFT", "false")
tmpl = strings.ReplaceAll(tmpl, "_OP_CONST_NAME", "proj{{.Name}}{{.Left.VecMethod}}{{.Right.VecMethod}}ConstOp")
tmpl = strings.ReplaceAll(tmpl, "_NON_CONST_GOTYPESLICE", "{{.Left.GoTypeSliceName}}")
tmpl = replaceManipulationFuncsAmbiguous(".Left", tmpl)
}
return replaceProjTmplVariables(tmpl)
}
const projNonConstOpsTmpl = "pkg/sql/colexec/colexecproj/proj_non_const_ops_tmpl.go"
// genProjNonConstOps is the generator for projection operators on two vectors.
func genProjNonConstOps(inputFileContents string, wr io.Writer) error {
s := replaceProjTmplVariables(inputFileContents)
tmpl, err := template.New("proj_non_const_ops").Funcs(template.FuncMap{"buildDict": buildDict}).Parse(s)
if err != nil {
return err
}
return tmpl.Execute(wr, twoArgsResolvedOverloadsInfo)
}
func init() {
projConstOpsGenerator := func(isConstLeft bool) generator {
return func(inputFileContents string, wr io.Writer) error {
tmplString := replaceProjConstTmplVariables(inputFileContents, isConstLeft)
tmpl, err := template.New("proj_const_ops").Funcs(template.FuncMap{"buildDict": buildDict}).Parse(tmplString)
if err != nil {
return err
}
return tmpl.Execute(wr, twoArgsResolvedOverloadsInfo)
}
}
registerGenerator(projConstOpsGenerator(true /* isConstLeft */), "proj_const_left_ops.eg.go", projConstOpsTmpl)
registerGenerator(projConstOpsGenerator(false /* isConstLeft */), "proj_const_right_ops.eg.go", projConstOpsTmpl)
registerGenerator(genProjNonConstOps, "proj_non_const_ops.eg.go", projNonConstOpsTmpl)
}
| pkg/sql/colexec/execgen/cmd/execgen/projection_ops_gen.go | 1 | https://github.com/cockroachdb/cockroach/commit/5015f06f107c92eab8fa180ea28c09c68e294e94 | [
0.01587505079805851,
0.0027733128517866135,
0.00016451517876703292,
0.0007515176548622549,
0.004431195557117462
] |
{
"id": 3,
"code_window": [
"\ts := r.Replace(inputFileContents)\n",
"\n",
"\tassignCmpRe := makeFunctionRegex(\"_ASSIGN_CMP\", 6)\n",
"\ts = assignCmpRe.ReplaceAllString(s, makeTemplateFunctionCall(\"Right.Assign\", 6))\n",
"\n",
"\ts = strings.ReplaceAll(s, \"_L_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Left\", s)\n",
"\ts = strings.ReplaceAll(s, \"_R_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Right\", s)\n",
"\n",
"\ts = strings.ReplaceAll(s, \"_HAS_NULLS\", \"$hasNulls\")\n",
"\tselConstLoop := makeFunctionRegex(\"_SEL_CONST_LOOP\", 1)\n",
"\ts = selConstLoop.ReplaceAllString(s, `{{template \"selConstLoop\" buildDict \"Global\" $ \"HasNulls\" $1 \"Overload\" .}}`)\n",
"\tselLoop := makeFunctionRegex(\"_SEL_LOOP\", 1)\n",
"\ts = selLoop.ReplaceAllString(s, `{{template \"selLoop\" buildDict \"Global\" $ \"HasNulls\" $1 \"Overload\" .}}`)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/colexec/execgen/cmd/execgen/selection_ops_gen.go",
"type": "replace",
"edit_start_line_idx": 39
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// Package ptreconcile provides logic to reconcile protected timestamp records
// with state associated with their metadata.
package ptreconcile
import (
"context"
"math/rand"
"time"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/errors"
)
// ReconcileInterval is the interval between two reconciliation runs. When set
// to zero, reconciliation is disabled.
var ReconcileInterval = settings.RegisterDurationSetting(
"kv.protectedts.reconciliation.interval",
"the frequency for reconciling jobs with protected timestamp records",
5*time.Minute,
settings.NonNegativeDuration,
).WithPublic()
// StatusFunc is used to check on the status of a Record based on its Meta
// field.
type StatusFunc func(
ctx context.Context, txn *kv.Txn, meta []byte,
) (shouldRemove bool, _ error)
// StatusFuncs maps from MetaType to a StatusFunc.
type StatusFuncs map[string]StatusFunc
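// An illustrative StatusFunc (a sketch, not from the original source) might
// decode a job ID from the record's meta and report the record as removable
// once that job no longer exists (decodeJobID and jobExists are hypothetical
// helpers):
//
//	func(ctx context.Context, txn *kv.Txn, meta []byte) (bool, error) {
//		jobID, err := decodeJobID(meta)
//		if err != nil {
//			return false, err
//		}
//		exists, err := jobExists(ctx, txn, jobID)
//		return !exists, err
//	}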
// Config configures a Reconciler.
type Config struct {
Settings *cluster.Settings
	// Stores is used to ensure that we only run the reconciliation loop on the
	// node that holds the meta1 range lease.
Stores *kvserver.Stores
DB *kv.DB
Storage protectedts.Storage
Cache protectedts.Cache
	// StatusFuncs maps from meta type to a function which determines whether
	// the corresponding record should be cleaned up.
StatusFuncs StatusFuncs
}
// Reconciler runs a loop to reconcile the protected timestamps with external
// state. Each record's status is determined using the record's meta type and
// meta in conjunction with the configured StatusFunc.
type Reconciler struct {
settings *cluster.Settings
localStores *kvserver.Stores
db *kv.DB
cache protectedts.Cache
pts protectedts.Storage
metrics Metrics
statusFuncs StatusFuncs
}
// NewReconciler constructs a Reconciler.
func NewReconciler(cfg Config) *Reconciler {
return &Reconciler{
settings: cfg.Settings,
localStores: cfg.Stores,
db: cfg.DB,
cache: cfg.Cache,
pts: cfg.Storage,
metrics: makeMetrics(),
statusFuncs: cfg.StatusFuncs,
}
}
// Metrics returns the Reconciler's metrics.
func (r *Reconciler) Metrics() *Metrics {
return &r.metrics
}
// Start will start the Reconciler.
func (r *Reconciler) Start(ctx context.Context, stopper *stop.Stopper) error {
return stopper.RunAsyncTask(ctx, "protectedts-reconciliation", func(ctx context.Context) {
r.run(ctx, stopper)
})
}
func (r *Reconciler) run(ctx context.Context, stopper *stop.Stopper) {
reconcileIntervalChanged := make(chan struct{}, 1)
ReconcileInterval.SetOnChange(&r.settings.SV, func(ctx context.Context) {
select {
case reconcileIntervalChanged <- struct{}{}:
default:
}
})
lastReconciled := time.Time{}
getInterval := func() time.Duration {
interval := ReconcileInterval.Get(&r.settings.SV)
const jitterFrac = .1
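		// With the default 5-minute interval, the jittered value is uniform
		// in [4.75m, 5.25m).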
return time.Duration(float64(interval) * (1 + (rand.Float64()-.5)*jitterFrac))
}
timer := timeutil.NewTimer()
for {
timer.Reset(timeutil.Until(lastReconciled.Add(getInterval())))
select {
case <-timer.C:
timer.Read = true
r.reconcile(ctx)
lastReconciled = timeutil.Now()
case <-reconcileIntervalChanged:
// Go back around again.
case <-stopper.ShouldQuiesce():
return
case <-ctx.Done():
return
}
}
}
func (r *Reconciler) isMeta1Leaseholder(ctx context.Context, now hlc.ClockTimestamp) (bool, error) {
return r.localStores.IsMeta1Leaseholder(ctx, now)
}
func (r *Reconciler) reconcile(ctx context.Context) {
now := r.db.Clock().NowAsClockTimestamp()
isLeaseholder, err := r.isMeta1Leaseholder(ctx, now)
if err != nil {
log.Errorf(ctx, "failed to determine whether the local store contains the meta1 lease: %v", err)
return
}
if !isLeaseholder {
return
}
if err := r.cache.Refresh(ctx, now.ToTimestamp()); err != nil {
log.Errorf(ctx, "failed to refresh the protected timestamp cache to %v: %v", now, err)
return
}
r.cache.Iterate(ctx, keys.MinKey, keys.MaxKey, func(rec *ptpb.Record) (wantMore bool) {
task, ok := r.statusFuncs[rec.MetaType]
if !ok {
// NB: We don't expect to ever hit this case outside of testing.
log.Infof(ctx, "found protected timestamp record with unknown meta type %q, skipping", rec.MetaType)
return true
}
var didRemove bool
if err := r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) {
didRemove = false // reset for retries
shouldRemove, err := task(ctx, txn, rec.Meta)
if err != nil {
return err
}
if !shouldRemove {
return nil
}
err = r.pts.Release(ctx, txn, rec.ID)
if err != nil && !errors.Is(err, protectedts.ErrNotExists) {
return err
}
didRemove = true
return nil
}); err != nil {
r.metrics.ReconciliationErrors.Inc(1)
log.Errorf(ctx, "failed to reconcile protected timestamp with id %s: %v",
rec.ID.String(), err)
} else {
r.metrics.RecordsProcessed.Inc(1)
if didRemove {
r.metrics.RecordsRemoved.Inc(1)
}
}
return true
})
r.metrics.ReconcilationRuns.Inc(1)
}
| pkg/kv/kvserver/protectedts/ptreconcile/reconciler.go | 0 | https://github.com/cockroachdb/cockroach/commit/5015f06f107c92eab8fa180ea28c09c68e294e94 | [
0.0001777507277438417,
0.0001687809417489916,
0.00015836644161026925,
0.00016934280574787408,
0.000004707952939497773
] |
{
"id": 3,
"code_window": [
"\ts := r.Replace(inputFileContents)\n",
"\n",
"\tassignCmpRe := makeFunctionRegex(\"_ASSIGN_CMP\", 6)\n",
"\ts = assignCmpRe.ReplaceAllString(s, makeTemplateFunctionCall(\"Right.Assign\", 6))\n",
"\n",
"\ts = strings.ReplaceAll(s, \"_L_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Left\", s)\n",
"\ts = strings.ReplaceAll(s, \"_R_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Right\", s)\n",
"\n",
"\ts = strings.ReplaceAll(s, \"_HAS_NULLS\", \"$hasNulls\")\n",
"\tselConstLoop := makeFunctionRegex(\"_SEL_CONST_LOOP\", 1)\n",
"\ts = selConstLoop.ReplaceAllString(s, `{{template \"selConstLoop\" buildDict \"Global\" $ \"HasNulls\" $1 \"Overload\" .}}`)\n",
"\tselLoop := makeFunctionRegex(\"_SEL_LOOP\", 1)\n",
"\ts = selLoop.ReplaceAllString(s, `{{template \"selLoop\" buildDict \"Global\" $ \"HasNulls\" $1 \"Overload\" .}}`)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/colexec/execgen/cmd/execgen/selection_ops_gen.go",
"type": "replace",
"edit_start_line_idx": 39
} | #!/usr/bin/env bash
set -eu -o pipefail
if [ $# -ne 2 ]; then
echo "Usage: $0 <type> <package>"
exit 1
fi
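# Example invocation (illustrative; packages typically run this via
# go:generate):
#   gen.sh "*latch" spanlatch
# which emits latch_interval_btree.go and latch_interval_btree_test.go for the
# spanlatch package.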
type=$1
pkg=$2
internal_pkg=$(dirname "$0")/internal
templates=(
interval_btree_tmpl.go
interval_btree_tmpl_test.go
)
# Make filename prefix from type. Lower case and strip pointer.
dst_prefix=$(echo ${type} | awk '{print tolower($0)}' | sed 's/*//')
# Add code generation comment to beginning of files.
gen_header_comment="// Code generated by go_generics. DO NOT EDIT.\n"
# Generate files:
# 1. strip internal/contract.go of its comments and package declaration. This
# script originally used github.com/mmatczuk/go_generics/cmd/go_merge to
# merge files, but that passes the AST through go/ast.MergePackageFiles,
# which causes all "unassociated" comments (those not tied to AST nodes)
# to be stripped due to https://github.com/golang/go/issues/20744.
# 2. for each template file, concatenate the code gen comment, the template
# file, and the stripped contract file, remove any build tags, and pass
# this all to go_generics.
# 3. crlfmt the result because go_generics might re-order imports.
STRIPPED=$(grep -vE '(//|package)' ${internal_pkg}/contract.go)
for template in "${templates[@]}" ; do
dst=${dst_prefix}_${template//_tmpl/}
echo -e ${gen_header_comment} \
| cat - ${internal_pkg}/${template} <(echo "$STRIPPED") \
| grep -v '// +build ignore' \
| go_generics -i /dev/stdin -t T=${type} -p ${pkg} -o ${dst}
crlfmt -w -diff=false ${dst}
done
| pkg/util/interval/generic/gen.sh | 0 | https://github.com/cockroachdb/cockroach/commit/5015f06f107c92eab8fa180ea28c09c68e294e94 | [
0.00017252154066227376,
0.00016923891962505877,
0.00016528720152564347,
0.0001702773297438398,
0.000002652478087838972
] |
{
"id": 3,
"code_window": [
"\ts := r.Replace(inputFileContents)\n",
"\n",
"\tassignCmpRe := makeFunctionRegex(\"_ASSIGN_CMP\", 6)\n",
"\ts = assignCmpRe.ReplaceAllString(s, makeTemplateFunctionCall(\"Right.Assign\", 6))\n",
"\n",
"\ts = strings.ReplaceAll(s, \"_L_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Left\", s)\n",
"\ts = strings.ReplaceAll(s, \"_R_UNSAFEGET\", \"execgen.UNSAFEGET\")\n",
"\ts = replaceManipulationFuncsAmbiguous(\".Right\", s)\n",
"\n",
"\ts = strings.ReplaceAll(s, \"_HAS_NULLS\", \"$hasNulls\")\n",
"\tselConstLoop := makeFunctionRegex(\"_SEL_CONST_LOOP\", 1)\n",
"\ts = selConstLoop.ReplaceAllString(s, `{{template \"selConstLoop\" buildDict \"Global\" $ \"HasNulls\" $1 \"Overload\" .}}`)\n",
"\tselLoop := makeFunctionRegex(\"_SEL_LOOP\", 1)\n",
"\ts = selLoop.ReplaceAllString(s, `{{template \"selLoop\" buildDict \"Global\" $ \"HasNulls\" $1 \"Overload\" .}}`)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/colexec/execgen/cmd/execgen/selection_ops_gen.go",
"type": "replace",
"edit_start_line_idx": 39
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package lease
import (
"context"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/metric"
)
// StorageTestingKnobs contains testing knobs.
type StorageTestingKnobs struct {
// Called after a lease is removed from the store, with any operation error.
// See LeaseRemovalTracker.
LeaseReleasedEvent func(id descpb.ID, version descpb.DescriptorVersion, err error)
// Called after a lease is acquired, with any operation error.
LeaseAcquiredEvent func(desc catalog.Descriptor, err error)
	// Called before waiting on the result from a DoChan call of acquireNodeLease
// in descriptorState.acquire() and descriptorState.acquireFreshestFromStore().
LeaseAcquireResultBlockEvent func(leaseBlockType AcquireBlockType)
// RemoveOnceDereferenced forces leases to be removed
// as soon as they are dereferenced.
RemoveOnceDereferenced bool
}
// ModuleTestingKnobs is part of the base.ModuleTestingKnobs interface.
func (*StorageTestingKnobs) ModuleTestingKnobs() {}
var _ base.ModuleTestingKnobs = &StorageTestingKnobs{}
// ManagerTestingKnobs contains test knobs.
type ManagerTestingKnobs struct {
// A callback called after the leases are refreshed as a result of a gossip update.
TestingDescriptorRefreshedEvent func(descriptor *descpb.Descriptor)
// TestingDescriptorUpdateEvent is a callback when an update is received, before
// the leases are refreshed. If a non-nil error is returned, the update is
// ignored.
TestingDescriptorUpdateEvent func(descriptor *descpb.Descriptor) error
// To disable the deletion of orphaned leases at server startup.
DisableDeleteOrphanedLeases bool
// VersionPollIntervalForRangefeeds controls the polling interval for the
// check whether the requisite version for rangefeed-based notifications has
// been finalized.
//
// TODO(ajwerner): Remove this and replace it with a callback.
VersionPollIntervalForRangefeeds time.Duration
LeaseStoreTestingKnobs StorageTestingKnobs
}
var _ base.ModuleTestingKnobs = &ManagerTestingKnobs{}
// ModuleTestingKnobs is part of the base.ModuleTestingKnobs interface.
func (*ManagerTestingKnobs) ModuleTestingKnobs() {}
// TestingAcquireAndAssertMinVersion acquires a read lease for the specified
// ID. The lease is grabbed on the latest version if >= specified version.
// It returns a descriptor and an expiration time valid for the timestamp.
// This method is useful for testing and is only intended to be used in that
// context.
func (m *Manager) TestingAcquireAndAssertMinVersion(
ctx context.Context, timestamp hlc.Timestamp, id descpb.ID, minVersion descpb.DescriptorVersion,
) (LeasedDescriptor, error) {
t := m.findDescriptorState(id, true)
if err := ensureVersion(ctx, id, minVersion, m); err != nil {
return nil, err
}
desc, _, err := t.findForTimestamp(ctx, timestamp)
if err != nil {
return nil, err
}
return desc, nil
}
// TestingOutstandingLeasesGauge returns the outstanding leases gauge that is
// used by this lease manager.
func (m *Manager) TestingOutstandingLeasesGauge() *metric.Gauge {
return m.storage.outstandingLeases
}
| pkg/sql/catalog/lease/testutils.go | 0 | https://github.com/cockroachdb/cockroach/commit/5015f06f107c92eab8fa180ea28c09c68e294e94 | [
0.00017784057126846164,
0.00016886493540368974,
0.00016124616377055645,
0.0001678884436842054,
0.000004428367901709862
] |
{
"id": 4,
"code_window": [
"\t_ = SETVARIABLESIZE\n",
")\n",
"\n",
"// COPYVAL is a template function that can be used to set a scalar to the value\n",
"// of another scalar in such a way that the destination won't be modified if the\n",
"// source is. You must use this on the result of UNSAFEGET if you wish to store\n",
"// that result past the lifetime of the batch you UNSAFEGET'd from.\n",
"func COPYVAL(dest, src interface{}) {\n",
"\tcolexecerror.InternalError(errors.AssertionFailedf(nonTemplatePanic))\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// source is.\n"
],
"file_path": "pkg/sql/colexec/execgen/placeholders.go",
"type": "replace",
"edit_start_line_idx": 29
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"io"
"strings"
"text/template"
)
const castTmpl = "pkg/sql/colexec/colexecbase/cast_tmpl.go"
func genCastOperators(inputFileContents string, wr io.Writer) error {
r := strings.NewReplacer(
"_LEFT_CANONICAL_TYPE_FAMILY", "{{.LeftCanonicalFamilyStr}}",
"_LEFT_TYPE_WIDTH", typeWidthReplacement,
"_RIGHT_CANONICAL_TYPE_FAMILY", "{{.RightCanonicalFamilyStr}}",
"_RIGHT_TYPE_WIDTH", typeWidthReplacement,
"_R_GO_TYPE", "{{.Right.GoType}}",
"_L_TYP", "{{.Left.VecMethod}}",
"_R_TYP", "{{.Right.VecMethod}}",
"_NAME", "{{.Left.VecMethod}}{{.Right.VecMethod}}",
)
s := r.Replace(inputFileContents)
setValues := makeFunctionRegex("_CAST_TUPLES", 2)
s = setValues.ReplaceAllString(s, `{{template "castTuples" buildDict "Global" . "HasNulls" $1 "HasSel" $2}}`)
castRe := makeFunctionRegex("_CAST", 4)
s = castRe.ReplaceAllString(s, makeTemplateFunctionCall("Right.Cast", 4))
s = strings.ReplaceAll(s, "_L_UNSAFEGET", "execgen.UNSAFEGET")
s = replaceManipulationFuncsAmbiguous(".Left", s)
s = strings.ReplaceAll(s, "_R_UNSAFEGET", "execgen.UNSAFEGET")
s = replaceManipulationFuncsAmbiguous(".Right", s)
tmpl, err := template.New("cast").Funcs(template.FuncMap{"buildDict": buildDict}).Parse(s)
if err != nil {
return err
}
return tmpl.Execute(wr, twoArgsResolvedOverloadsInfo.CastOverloads)
}
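// For example (illustrative): with the replacer above, a template fragment
// such as
//
//	func _NAME(v _L_TYP) _R_TYP
//
// becomes
//
//	func {{.Left.VecMethod}}{{.Right.VecMethod}}(v {{.Left.VecMethod}}) {{.Right.VecMethod}}
//
// before being parsed as a text/template.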
func init() {
registerGenerator(genCastOperators, "cast.eg.go", castTmpl)
}
| pkg/sql/colexec/execgen/cmd/execgen/cast_gen.go | 1 | https://github.com/cockroachdb/cockroach/commit/5015f06f107c92eab8fa180ea28c09c68e294e94 | [
0.0008720027399249375,
0.0003274352930020541,
0.00017000234220176935,
0.0001849750115070492,
0.0002541198336984962
] |
{
"id": 4,
"code_window": [
"\t_ = SETVARIABLESIZE\n",
")\n",
"\n",
"// COPYVAL is a template function that can be used to set a scalar to the value\n",
"// of another scalar in such a way that the destination won't be modified if the\n",
"// source is. You must use this on the result of UNSAFEGET if you wish to store\n",
"// that result past the lifetime of the batch you UNSAFEGET'd from.\n",
"func COPYVAL(dest, src interface{}) {\n",
"\tcolexecerror.InternalError(errors.AssertionFailedf(nonTemplatePanic))\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// source is.\n"
],
"file_path": "pkg/sql/colexec/execgen/placeholders.go",
"type": "replace",
"edit_start_line_idx": 29
} | {
"Localities": [
{
"Name": "1",
"NumNodes": 2,
"NumWorkers": 0,
"OutgoingLatencies": [
{
"Name": "2",
"Latency": "50ms"
},
{
"Name": "3",
"Latency": "50ms"
}
]
},
{
"Name": "2",
"NumNodes": 2,
"NumWorkers": 32,
"OutgoingLatencies": [
{
"Name": "1",
"Latency": "50ms"
},
{
"Name": "3",
"Latency": "50ms"
}
]
},
{
"Name": "3",
"NumNodes": 4,
"NumWorkers": 0,
"OutgoingLatencies": [
{
"Name": "1",
"Latency": "50ms"
},
{
"Name": "2",
"Latency": "50ms"
}
]
}
]
}
| pkg/cmd/allocsim/configs/different-nodes-per-locality-imbalanced-load.json | 0 | https://github.com/cockroachdb/cockroach/commit/5015f06f107c92eab8fa180ea28c09c68e294e94 | [
0.00017193904204759747,
0.0001703126763459295,
0.0001691829675110057,
0.00017007920541800559,
9.958694136003032e-7
] |
{
"id": 4,
"code_window": [
"\t_ = SETVARIABLESIZE\n",
")\n",
"\n",
"// COPYVAL is a template function that can be used to set a scalar to the value\n",
"// of another scalar in such a way that the destination won't be modified if the\n",
"// source is. You must use this on the result of UNSAFEGET if you wish to store\n",
"// that result past the lifetime of the batch you UNSAFEGET'd from.\n",
"func COPYVAL(dest, src interface{}) {\n",
"\tcolexecerror.InternalError(errors.AssertionFailedf(nonTemplatePanic))\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// source is.\n"
],
"file_path": "pkg/sql/colexec/execgen/placeholders.go",
"type": "replace",
"edit_start_line_idx": 29
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package geomfn
import (
"testing"
"github.com/cockroachdb/cockroach/pkg/geo"
)
var (
benchLargePolygons = []geo.Geometry{
geo.MustParseGeometry("MULTIPOLYGON (((96.863129102000073 -12.181735934999949, 96.869395379000082 -12.187920830999929, 96.845225457000026 -12.197686455999929, 96.830577019000032 -12.179945570999905, 96.823415561000047 -12.150974216999941, 96.82154381600003 -12.126560153999947, 96.828379754000082 -12.126560153999947, 96.828379754000082 -12.136325778999947, 96.829844597000033 -12.144463799999926, 96.83521569100003 -12.160577080999929, 96.83130944100003 -12.171482028999947, 96.836192254000082 -12.182061455999929, 96.847666863000029 -12.18718840899993, 96.863129102000073 -12.181735934999949)), ((96.902191602000073 -12.199965101999908, 96.892344597000033 -12.195896091999941, 96.889659050000034 -12.192803643999923, 96.888519727000073 -12.187432549999926, 96.90007571700005 -12.18678150799991, 96.909027540000068 -12.182061455999929, 96.914561394000032 -12.17351653399993, 96.916188998000052 -12.16179778399993, 96.921071811000047 -12.17351653399993, 96.920176629000082 -12.184991143999923, 96.913910352000073 -12.194512627999927, 96.902191602000073 -12.199965101999908)), ((105.704112175000034 -10.430840752999927, 105.714691602000073 -10.43718840899993, 105.712657097000033 -10.450941664999959, 105.711436394000032 -10.469903252999927, 105.70630944100003 -10.494235934999949, 105.70630944100003 -10.514255466999941, 105.697927280000044 -10.52898528399993, 105.699880405000044 -10.553399346999925, 105.69459069100003 -10.564873955999929, 105.678721550000034 -10.566013278999947, 105.67351321700005 -10.549086195999905, 105.66724694100003 -10.533298434999949, 105.664073113000029 -10.518487237999921, 105.649261915000068 -10.513116143999923, 105.62818444100003 -10.508965752999927, 105.604991082000026 -10.510023695999905, 105.584971550000034 -10.516371351999908, 105.581797722000033 -10.504815362999921, 105.592295769000032 -10.498467705999929, 105.599619988000029 -10.480564059999949, 105.594248894000032 -10.466729424999926, 105.594248894000032 -10.454034112999921, 105.607188347000033 -10.458428643999923, 105.627126498000052 -10.468926690999922, 105.654551629000025 -10.469903252999927, 105.66928144600007 -10.456231377999927, 105.68523196700005 -10.440362237999921, 105.704112175000034 -10.430840752999927)))"),
geo.MustParseGeometry("MULTIPOLYGON (((33.78093509200005 34.976345119000086, 33.760427280000044 34.979681708000044, 33.717295769000089 34.977769273000035, 33.701508009000065 34.972886460000041, 33.702934611000103 34.987943014000038, 33.711461222000082 34.985565899000093, 33.715440308000098 34.997296448000085, 33.699730672000101 35.002722473000048, 33.696630086000084 35.008975322000069, 33.705311727000037 35.015228170000043, 33.702211141000078 35.022256165000059, 33.685002890000078 35.029284160000074, 33.679434599000103 35.033898764000057, 33.675649455000041 35.037035624000069, 33.674099162000061 35.046440734000029, 33.678853394000043 35.055794169000066, 33.694459675000076 35.058171285000029, 33.705311727000037 35.066749573000052, 33.714716838000072 35.066749573000052, 33.719367716000136 35.062770487000066, 33.711461222000082 35.04096303300004, 33.707585489000053 35.029284160000074, 33.718489217000126 35.032384745000016, 33.739676554000141 35.04721588200006, 33.766134888000067 35.031609599000021, 33.775539999000102 35.040187887000044, 33.786495402000071 35.038534240999994, 33.794350220000126 35.040187887000044, 33.798277628000108 35.052641907000023, 33.824012492000065 35.066749573000052, 33.834864542000048 35.063597311000095, 33.842719360000046 35.056620993000095, 33.853571411000075 35.058171285000029, 33.866903930000035 35.066749573000052, 33.871554810000077 35.073054098000071, 33.876929159000042 35.076826477000097, 33.871554810000077 35.085456442000051, 33.871554810000077 35.100235901000062, 33.876205689000074 35.118994446000059, 33.889434855000047 35.118994446000059, 33.891811971000095 35.110467834000033, 33.898839966000082 35.108814189000086, 33.903594198000121 35.099512431000065, 33.905867961000126 35.096360168999993, 33.905867961000126 35.090882466000025, 33.913619425000036 35.090882466000025, 33.921474244000137 35.080702210000041, 33.914446249000036 35.073054098000071, 33.908245077000061 35.07072865800005, 33.906505332000108 35.06910490500006, 33.898116496000114 35.061271872000063, 33.880133098000044 35.073054098000071, 33.874655396000037 35.067524719000076, 33.86762740100005 35.06049672400006, 33.855121704000112 35.053417053000132, 33.841169067000124 35.051091612999997, 33.834864542000048 35.056620993000095, 33.827113079000128 35.061271872000063, 33.813780558000076 35.055794169000066, 33.804375447000041 35.049541321000063, 33.799001099000066 35.038534240999994, 33.822358847000146 35.030059306000069, 33.830213664000041 35.023031311000082, 33.829386841000144 35.00117218000004, 33.840342244000112 34.993369039000115, 33.859049113000083 34.991818745000003, 33.859049113000083 34.974662171000048, 33.850470825000059 34.973008525000083, 33.838068481000107 34.963706767000062, 33.84581994600012 34.959727682000064, 33.864423462000047 34.962983297000065, 33.891840639430939 34.958139066829645, 33.883799675000034 34.949123440000051, 33.874522332000083 34.941229559000078, 33.862315300000034 34.937892971000053, 33.84742272200009 34.942450262000079, 33.81967207100007 34.964748440000051, 33.804209832000083 34.972601630000042, 33.781830274000072 34.976223049000055, 33.784945110000081 34.976212464000056, 33.788045694000118 34.976987610000052, 33.7927999260001 34.977762757000065, 33.794350220000126 34.977762757000065, 33.791146281000067 34.982413636000032, 33.786495402000071 34.984687399000066, 33.78256799400009 34.984687399000066, 33.777917115000037 34.984687399000066, 33.773989705000076 34.988666484000035, 33.766134888000067 34.990268454000059, 33.761484009000071 34.990268454000059, 33.759210246000066 
34.988666484000035, 33.765411418000042 34.985565899000093, 33.769338826000137 34.983963929000069, 33.770889120000049 34.980088196000011, 33.775539999000102 34.980088196000011, 33.780190878000042 34.979313050000101, 33.78093509200005 34.976345119000086), (33.742792014000088 35.001232846999997, 33.746688555000105 35.002710845000067, 33.752063096000143 35.00432320700007, 33.752063096000143 35.014400470000041, 33.746151101000123 35.015206651000042, 33.741314015000114 35.013728653000058, 33.740239107000093 35.010100838000099, 33.738761108000062 35.00526375200009, 33.739701653000111 35.002576482000066, 33.742792014000088 35.001232846999997)))"),
geo.MustParseGeometry("MULTIPOLYGON(((44.7661352950001 37.1419200650001,44.7525443930001 37.1131362920001,44.7526994220001 37.10331777,44.7606575930001 37.0856444290001,44.766548706 37.0789523320001,44.7736283770001 37.0763426720001,44.7811214600001 37.0744823200001,44.7883561610001 37.0702448530001,44.7922318930001 37.063733622,44.7972961830001 37.0484890750001,44.8018953860001 37.0435281380001,44.8111971440001 37.0419261680001,44.83093754 37.0468354290001,44.8406010340001 37.047197165,44.8475773520001 37.0440449010001,44.8589461670001 37.0341230270001,44.880753622 37.0249246220001,44.8873682050001 37.0159329230001,44.8857145590001 37.0050808720001,44.874759156 36.9940221150001,44.869178101 36.9673570760001,44.8748625080001 36.949657898,44.884060913 36.933353984,44.8888151450001 36.910926412,44.8834407960001 36.886845195,44.869643189 36.868965149,44.8341414800001 36.834031881,44.8233927820001 36.809330547,44.8312476 36.791915589,44.851194702 36.7812185670001,44.8772396240001 36.7764643360001,44.9082454830001 36.7778595990001,44.9227148840001 36.775895895,44.9352205810001 36.7671109010001,44.9444189860001 36.758274231,44.9547542720001 36.752331442,44.966433146 36.749334209,44.9794556070001 36.74902415,44.9961987710001 36.741686096,45.0109782310001 36.7255630500001,45.0352661540001 36.6895962530001,45.0459114990001 36.668047181,45.0443095300001 36.6493403120001,45.0248791910001 36.613166809,45.014182169 36.579008687,45.0132519940001 36.5579247030001,45.0102547610001 36.5560126760001,44.9978524170001 36.552085266,44.9934082440001 36.5496564740001,44.991392863 36.5337401330001,45.0059139400001 36.5187022910001,45.0095246660001 36.5161959100001,45.0253442790001 36.5052147420001,45.038160034 36.4940009560001,45.0390902100001 36.4798416140001,45.0454980870001 36.4716250610001,45.0540763750001 36.4649588010001,45.0620862230001 36.455346985,45.0668921310001 36.442376201,45.0685457760001 36.432506002,45.0722664790001 36.4234109500001,45.0837386480001 36.4128172810001,45.1108170980001 36.4025336710001,45.1399626060001 36.4043940230001,45.1959798580001 36.4224290970001,45.2213013100001 36.4209304820001,45.238871298 36.402998759,45.2496199960001 36.3779356900001,45.2544775800001 36.3547846480001,45.2570097250001 36.3116865030001,45.2639343670001 36.2942715460001,45.2826929120001 36.274892884,45.2585083420001 36.2622321580001,45.2641927490001 36.2501915490001,45.2827962650001 36.238409322,45.297369018 36.226730449,45.3020198980001 36.2075584920001,45.299539429 36.1614114380001,45.3046037190001 36.1403791300001,45.335609579 36.1077196250001,45.3483219800001 36.08751414,45.3441878660001 36.067463685,45.3318888760001 36.05159902,45.3195898850001 36.0310318000001,45.3136987710001 36.010412903,45.3202100020001 35.9942898560001,45.3202100020001 35.994134827,45.3203133540001 35.994031474,45.32052006 35.9939797980001,45.338090047 35.979303691,45.3595874430001 35.976874899,45.3818083090001 35.982972718,45.4015487070001 35.9939797980001,45.401858765 35.994031474,45.4019621180001 35.994083151,45.4021688230001 35.9942381800001,45.419945516 35.9982689420001,45.4531217850001 36.0116531370001,45.4792700600001 36.0120148720001,45.5014236760001 36.0054347770001,45.5399898680001 35.9939797980001,45.5769385170001 35.966229553,45.5898576260001 35.9596149710001,45.617039429 35.9554291800001,45.6468567300001 35.9332083130001,45.6932104900001 35.879878235,45.7187386480001 35.8283051560001,45.7310376390001 35.8151276660001,45.7490210370001 35.8109935510001,45.7867448320001 35.8202436330001,45.7979069420001 
35.8183832810001,45.8141850180001 35.809339905,45.8347522380001 35.810528463,45.854079223 35.8183316040001,45.8783154700001 35.835229798,45.8884957280001 35.8348680630001,45.8987793380001 35.832180888,45.9091146240001 35.8313023890001,45.9412573650001 35.840345765,46.00492273 35.8379686490001,46.0249731860001 35.8432913210001,46.0443001710001 35.8522830200001,46.0604232180001 35.857243958,46.0769596760001 35.8569855750001,46.1077588300001 35.8474254360001,46.115045207 35.846443584,46.1198511150001 35.842877909,46.1248120520001 35.822465719,46.1291011960001 35.815541077,46.1351990150001 35.8100116990001,46.143363891 35.8045339960001,46.1635177000001 35.797506002,46.184963419 35.7981777960001,46.227441447 35.805722555,46.246406698 35.8114069630001,46.2635115960001 35.8211221320001,46.2813916420001 35.8283051560001,46.3029923910001 35.826548157,46.319632202 35.8168846640001,46.327073608 35.8035004690001,46.3250582280001 35.7878425090001,46.3133793540001 35.7715127570001,46.2975146900001 35.7598855590001,46.2671289470001 35.744124248,46.2534863690001 35.7277428190001,46.2378800860001 35.715547181,46.2170028080001 35.713583476,46.1777287190001 35.7153921510001,46.1276025800001 35.69368805,46.1070353600001 35.689812317,46.0370654710001 35.6915693160001,46.0164982510001 35.685729879,46.0037858480001 35.674257711,45.9958793540001 35.6583930460001,45.9923653560001 35.640874736,45.9923136800001 35.6243899540001,46.00202885 35.585942688,45.9992383220001 35.5721450810001,45.9686975500001 35.579741516,45.9638916420001 35.579741516,45.9593441160001 35.5787079880001,45.9680257570001 35.5585025030001,45.9792395430001 35.5394855750001,45.9831152750001 35.5352997840001,45.9845622150001 35.5310623170001,45.9832186280001 35.5269798790001,45.9792395430001 35.523000794,45.9713330480001 35.5180398560001,45.9660620530001 35.511476949,45.9635815840001 35.5034154260001,45.9639949950001 35.4941653440001,45.9770174560001 35.465329895,46.0409928790001 35.3815623980001,46.0962866620001 35.341151429,46.120161173 35.3185171510001,46.1273958740001 35.2890615840001,46.1252854410001 35.2849893410001,46.1200578200001 35.274902243,46.107655477 35.262758281,46.0987671310001 35.2500458790001,46.1014543050001 35.2342845660001,46.115406942 35.226326396,46.1541642660001 35.2266364550001,46.169253784 35.2162494920001,46.165481405 35.1899461870001,46.143828979 35.1596121220001,46.1303931070001 35.13139679,46.1510636800001 35.111604716,46.1431055100001 35.0995124310001,46.1323568120001 35.0947581990001,46.1199544680001 35.0927428180001,46.0945296630001 35.0858181760001,46.0766496170001 35.0896939090001,46.0665727140001 35.08881541,46.0397526450001 35.075534566,46.0250765380001 35.0642691040001,46.0092118730001 35.0609101360001,45.9792395430001 35.0715554820001,45.9663721110001 35.070728658,45.9560368240001 35.0742426560001,45.9349528400001 35.0854564420001,45.9201733810001 35.0896422330001,45.9131970620001 35.0873167930001,45.899399454 35.068609925,45.898365927 35.0639073690001,45.9011564540001 35.051608378,45.8990893960001 35.0469058230001,45.8930949300001 35.0443736780001,45.879038941 35.0432367960001,45.872476034 35.0414281210001,45.861882365 35.032798157,45.8570247800001 35.0212743120001,45.856818074 35.0080451460001,45.8597119550001 34.9944025680001,45.8597119550001 34.9942475380001,45.8689620360001 34.9683576460001,45.8663265380001 34.949547425,45.8578516030001 34.9313056440001,45.850203492 34.9068110150001,45.83501062 34.8901712040001,45.8080355230001 34.8978709920001,45.7962024270001 34.903063666,45.7782698980001 
34.9108934530001,45.7551188560001 34.909963277,45.748504272 34.890739645,45.7504679770001 34.865366517,45.7454036870001 34.84097524,45.7177051190001 34.825110576,45.689231405 34.822630107,45.6765706790001 34.818289287,45.6667521560001 34.806920472,45.6635998940001 34.7949315390001,45.6659253340001 34.774726054,45.662824748 34.7632538860001,45.6350228270001 34.7369505830001,45.6273747150001 34.7208275350001,45.6422575280001 34.7068748990001,45.6505257570001 34.702999166,45.6595174560001 34.6972114060001,45.6668555100001 34.6899767050001,45.6703695060001 34.6818118290001,45.673470093 34.6711664840001,45.6783793540001 34.665120341,45.6834953200001 34.6602110800001,45.6876294350001 34.6532347620001,45.6907816980001 34.6343211880001,45.6912467860001 34.6155109660001,45.694450724 34.5985610970001,45.7056128340001 34.5855903120001,45.7085583910001 34.579079081,45.708300008 34.56838206,45.7056128340001 34.557736715,45.7015303960001 34.5512771610001,45.6955876050001 34.550243632,45.6735734460001 34.5566515100001,45.6048437910001 34.5611473600001,45.5739412850001 34.567296855,45.5396281330001 34.582231344,45.5006641030001 34.5916881310001,45.4965299880001 34.564247945,45.5048498950001 34.523785299,45.5034546310001 34.4941747030001,45.4987003990001 34.48624237,45.4930159910001 34.4790076700001,45.486504761 34.4727289840001,45.4792700600001 34.4672512820001,45.4267668050001 34.457510275,45.4170516350001 34.444358623,45.4479541420001 34.3620897430001,45.4607698980001 34.340463155,45.4792700600001 34.329146017,45.4992171640001 34.342401022,45.5215413820001 34.3425043740001,45.5422119550001 34.3316006470001,45.5575598550001 34.3119636030001,45.5630375570001 34.289277649,45.5610221760001 34.264602153,45.5476896570001 34.216491395,45.5427287190001 34.2074738570001,45.5286727300001 34.1885602830001,45.5266573490001 34.179310202,45.531463257 34.167915548,45.5388013100001 34.1592339070001,45.5445373950001 34.150629781,45.5445373950001 34.1394935100001,45.5343571370001 34.1282022100001,45.493946167 34.1006845090001,45.4792700600001 34.0876362110001,45.4548787840001 34.069859518,45.4436133220001 34.0476644900001,45.4351900640001 34.022136332,45.419945516 33.9943602500001,45.419945516 33.994256898,45.4197388110001 33.994256898,45.4114705810001 33.9871513880001,45.4017554120001 33.981622009,45.3803613690001 33.973560486,45.4010319420001 33.9494534300001,45.4237178950001 33.9387822470001,45.4497111410001 33.9373869840001,45.4792700600001 33.941185201,45.4817505290001 33.940875143,45.4839209390001 33.9400483200001,45.48578129 33.938653056,45.5747681070001 33.803932597,45.5882039790001 33.7915044150001,45.6036035560001 33.780729879,45.6167293700001 33.7686892700001,45.6239123940001 33.752075297,45.6282015390001 33.7318439740001,45.63512618 33.714170634,45.6451514080001 33.697918396,45.6587939860001 33.6816919970001,45.6739868570001 33.6690312710001,45.7211157640001 33.6407642620001,45.7246296230001 33.6330364130001,45.7279887290001 33.625648905,45.7277303470001 33.59009552,45.734551636 33.583119202,45.7520182700001 33.58676239,45.7684513760001 33.595909119,45.7975452060001 33.6193185420001,45.8157869870001 33.625803935,45.8643628330001 33.626424053,45.8859635820001 33.630919902,45.8820361740001 33.6001982630001,45.8997095140001 33.5851604210001,45.9208968500001 33.5729906210001,45.9279248450001 33.5508731080001,45.9156258550001 33.5358094280001,45.8681868900001 33.5115731820001,45.852580608 33.49436493,45.8697371830001 33.482117615,45.899399454 33.4763040170001,45.9296818450001 33.4795079550001,45.9481820070001 
33.4942615770001,45.953142944 33.496587016,45.9581555590001 33.4973880010001,45.9632715250001 33.496561177,45.9644874470001 33.495991214,45.9682324620001 33.494235738,45.9713847250001 33.4927629600001,45.9744336350001 33.4907992550001,46.0080233150001 33.4556592810001,46.0193921310001 33.4383993530001,46.0274536540001 33.4195116170001,46.0305025640001 33.3992027800001,46.0310710050001 33.382511291,46.0362386480001 33.3679385380001,46.0697249760001 33.340860088,46.1095158290001 33.293705343,46.1413485110001 33.272104594,46.1553011480001 33.2602190150001,46.1641894940001 33.243346659,46.1643961990001 33.233063049,46.1583500570001 33.2138652550001,46.1576782640001 33.2056745410001,46.1621224360001 33.1963727830001,46.1736979570001 33.190223287,46.174318074 33.181179911,46.1671867260001 33.168338318,46.153440796 33.1542564910001,46.1264656990001 33.131777242,46.1053817140001 33.1183930470001,46.0891553140001 33.1156800340001,46.07241215 33.116765239,46.0503979900001 33.1151115930001,46.0302958570001 33.1056806440001,46.0293140050001 33.093510844,46.043266643 33.0834597780001,46.08719161 33.07922231,46.1060018310001 33.072504375,46.1204195560001 33.0616781620001,46.126155639 33.047983908,46.1194893800001 33.031008199,46.1046582440001 33.018063253,46.0880184330001 33.006720276,46.075719442 32.9944988000001,46.075719442 32.9943437700001,46.097010132 32.9543203740001,46.1555595290001 32.9483517460001,46.2734334720001 32.9594880170001,46.3796285400001 32.9317636110001,46.4793123780001 32.89179189,46.5073210050001 32.868020732,46.6006130590001 32.8225414760001,46.6044210210001 32.8206851200001,46.6503613690001 32.7893692020001,46.7158870850001 32.756012065,46.757331584 32.7161953740001,47.0583984790001 32.494477641,47.0586051840001 32.494477641,47.05865686 32.494400127,47.0905928960001 32.4745563760001,47.121030315 32.4610429890001,47.152552938 32.4551777140001,47.1877962650001 32.458381653,47.2054696050001 32.4640402220001,47.2512549240001 32.4854601040001,47.2653109130001 32.4846849570001,47.321948283 32.468226013,47.3433423260001 32.458691712,47.356106405 32.4461860150001,47.3673201910001 32.4307606,47.3841150310001 32.4125704960001,47.4108317470001 32.3952847300001,47.416412801 32.3879725140001,47.4179630940001 32.3764228310001,47.4143457440001 32.3698599250001,47.4087646890001 32.3643305460001,47.3999796960001 32.3457787080001,47.395948934 32.341980489,47.3957939050001 32.3370195520001,47.402460165 32.3235061650001,47.4070593670001 32.317770081,47.4368766680001 32.2936888630001,47.4415792240001 32.288185323,47.4453516030001 32.2826301070001,47.4630249430001 32.261830343,47.4694328200001 32.2561200970001,47.4791479900001 32.2520634980001,47.4845223390001 32.239351095,47.5040043540001 32.2267420460001,47.5092236730001 32.2137970990001,47.5076733800001 32.1998444630001,47.5023507080001 32.190129294,47.496407918 32.1813442990001,47.4924805100001 32.170492249,47.490413452 32.156539612,47.4913436280001 32.1486331180001,47.4981649170001 32.1441372680001,47.5138228760001 32.1403132120001,47.5266903080001 32.13338857,47.5347518310001 32.1231566370001,47.5436401770001 32.114578349,47.5587296960001 32.1129763800001,47.5780566820001 32.10615509,47.5802323560001 32.1034951960001,47.5950065520001 32.085432842,47.6177958580001 32.0418696090001,47.6331437580001 32.0269351200001,47.6680770260001 32.0124140430001,47.6779472250001 31.994689026,47.6779472250001 31.9945339970001,47.6826497800001 31.9769123340001,47.7184098720001 31.9223420210001,47.7252311610001 31.914332174,47.7433179120001 
31.9042552690001,47.750966024 31.898209127,47.7565470790001 31.8891657510001,47.7617147220001 31.871647441,47.7652803950001 31.8641026820001,47.7813517660001 31.848858134,47.8207808840001 31.8235883590001,47.8344751380001 31.8057083130001,47.8372656660001 31.7844693,47.8313228760001 31.7618350220001,47.6789290770001 31.4078514610001,47.676443174 31.236517019,47.6729346110001 30.9946983850001,48.0015450440001 30.994646708,48.0122420660001 30.989065653,48.0154460040001 30.9762498990001,48.012035359 30.494573873,48.0135856530001 30.463878072,48.1194189860001 30.4508039350001,48.1305810960001 30.4474966430001,48.1409163820001 30.4419155890001,48.157659546 30.426309306,48.1704753010001 30.4066722610001,48.179467 30.3848648070001,48.1876835530001 30.3408364870001,48.1920760500001 30.3312763470001,48.2000858970001 30.3239382930001,48.2111963300001 30.3196491500001,48.222978556 30.318098857,48.235070841 30.318512268,48.2716577560001 30.3238349410001,48.2842668050001 30.3233181770001,48.2963590900001 30.3197525020001,48.305712525 30.3128278610001,48.3260213620001 30.283475647,48.3581641030001 30.2517979950001,48.3970247810001 30.220998841,48.4035876880001 30.212523906,48.4087036540001 30.2023436490001,48.4106156820001 30.191543275,48.4080318600001 30.180897929,48.4007454840001 30.1722679650001,48.3911336670001 30.1647748820001,48.3832788490001 30.1564549770001,48.38110844 30.1453445440001,48.383072143 30.1384199010001,48.3955778410001 30.1152171840001,48.4155249430001 30.0956318160001,48.4212093510001 30.0853482060001,48.4239482020001 30.0836428840001,48.4421899820001 30.033930156,48.4442570400001 30.0209076950001,48.4531453860001 30.0014256790001,48.4577962650001 29.9948627720001,48.4643074960001 29.989126689,48.4781567790001 29.979618225,48.4930395910001 29.9717634080001,48.5239937750001 29.964115295,48.531016472 29.9613304710001,48.5307723320001 29.9607608090001,48.5307723320001 29.9562035180001,48.5549422540001 29.9564883480001,48.559255405 29.9466006530001,48.5468856130001 29.9347191430001,48.520518425 29.928900458,48.411875847 29.938421942,48.3354598320001 29.961411851,48.2995711600001 29.9845238300001,48.26465905 29.9939639340001,48.2092391290001 30.0244815120001,48.165782097 30.037543036,48.1191512380001 30.0449893250001,48.0763452480001 30.0456403670001,48.038259311 30.036851304,47.9690047540001 30.0040550800001,47.961924675 30.0303408870001,47.9585067070001 30.0606143250001,47.9516707690001 30.088324286,47.9348250660001 30.1070824240001,47.9471134770001 30.0745303410001,47.9484969410001 30.0623233090001,47.9460555350001 30.0493838560001,47.9417423840001 30.0372582050001,47.9409285820001 30.0265160180001,47.9484969410001 30.0176455750001,47.9480086600001 29.994045315,47.7314323330001 30.088552145,47.674174845 30.0982156370001,47.4157926840001 30.0982156370001,47.3582251380001 30.0921178180001,47.1970980230001 30.0342402140001,47.1446981200001 30.0033377070001,47.1104883220001 29.9609113570001,47.0254289140001 29.7721373500001,46.988842 29.7126577760001,46.983674357 29.6982917280001,46.9798503010001 29.668061015,46.9773698320001 29.6579841110001,46.9578361410001 29.6204411830001,46.8831120200001 29.512514954,46.8534497480001 29.4445604460001,46.8385669350001 29.4249750780001,46.7744881590001 29.3635317990001,46.7118563230001 29.27139272,46.5614681480001 29.1241665090001,46.53243575 29.0957445270001,46.4886141360001 29.0875796520001,46.4448958740001 29.079414775,46.4273223200001 29.0761431030001,46.4011776130001 29.0712757370001,46.3574593500001 29.063136699,46.2521427820001 
29.0714566040001,46.1759200440001 29.077476909,46.099697307 29.0835747280001,46.0236295980001 29.0895950320001,45.9473551840001 29.0956411740001,45.8449324950001 29.1037543740001,45.7424064530001 29.1118675750001,45.6398804120001 29.1200324500001,45.5374060470001 29.1281198120001,45.4349316810001 29.1362071740001,45.3324056400001 29.1443720500001,45.2299829510001 29.152459412,45.1274569090001 29.160624288,45.047823527 29.1669029750001,44.968293498 29.1732591760001,44.888711792 29.1795120240001,44.8089233810001 29.1858940640001,44.7174560950001 29.193102926,44.7108931880001 29.195273336,44.7046403410001 29.1974954220001,44.6982324620001 29.19971751,44.6918245850001 29.201836243,44.6148783780001 29.2560706590001,44.5197420650001 29.3233275350001,44.4242956950001 29.3905327360001,44.3289526770001 29.4577896130001,44.2336613360001 29.524994812,44.119146362 29.6058167520001,44.0046313880001 29.6866128540001,43.8900647380001 29.7673831180001,43.8240129630001 29.8139798590001,43.7754980880001 29.848205058,43.660983114 29.929026998,43.5463647870001 30.009797262,43.4317464600001 30.0905675260001,43.3172314860001 30.171337789,43.2290714930001 30.233504537,43.1409631750001 30.2956712850001,43.052906535 30.3577863570001,42.9647465410001 30.420004782,42.85901656 30.4945221960001,42.8589132080001 30.494573873,42.8588098550001 30.4946255490001,42.8587065020001 30.4946772260001,42.7834125790001 30.5510450320001,42.7639319260001 30.5656289670001,42.669157349 30.6366840620001,42.5744861250001 30.7076358030001,42.4797115480001 30.7786392220001,42.3951689050001 30.8415811160001,42.3107296140001 30.9045230100001,42.226290324 30.9674649050001,42.141644328 31.0304068000001,42.075395142 31.0798611450001,41.9862016200001 31.125284729,41.8985583900001 31.1699848430001,41.8107084550001 31.2146849580001,41.7230652260001 31.2593850710001,41.6354219970001 31.3040335080001,41.5475720620001 31.3487336230001,41.4598254800001 31.3934337360001,41.3721822510001 31.43813385,41.284539022 31.4828339640001,41.1966890870001 31.527585755,41.1090458580001 31.5722858680001,41.021299275 31.6169343060001,40.9335526940001 31.6616344200001,40.8459094650001 31.7063345340001,40.7581628820001 31.7510863240001,40.6703129470001 31.7956830860001,40.5826697190001 31.840434876,40.4798336180001 31.8928347780001,40.4241264240001 31.9205333450001,40.3702795820001 31.9384650680001,40.0293184820001 31.9943789670001,40.029111776 31.9944306440001,40.0290084230001 31.9944823200001,40.0288017170001 31.9944823200001,39.948599894 32.0061095170001,39.7509892170001 32.0348932910001,39.5532751870001 32.06372874,39.3555611570001 32.092564189,39.1577437740001 32.1213479620001,39.154953247 32.120572815,39.152162719 32.119745992,39.1492688400001 32.118919169,39.1463749600001 32.1181440230001,39.146168253 32.125843811,39.266470988 32.212866923,39.2919991450001 32.244518738,39.2711218670001 32.3119564820001,39.2563424070001 32.3426781210001,39.2357751870001 32.3528583780001,39.0463293870001 32.308494161,39.0362008060001 32.313351745,39.0287593990001 32.3283379110001,38.9799768480001 32.4721017460001,38.97863326 32.473729554,38.9782198490001 32.4749697880001,38.97863326 32.475693258,38.9799768480001 32.476054993,39.057181438 32.496596374,38.9900020750001 32.705575867,38.9427698160001 32.8523369340001,38.8971912030001 32.9943437700001,38.8625679930001 33.1007197060001,38.8210201420001 33.229032288,38.774511352 33.3716850790001,38.8850989180001 33.4271080530001,38.995686483 33.4824793500001,39.1062740480001 33.5379281610001,39.216861613 33.593325297,39.3274491780001 
33.6487224320001,39.4380367430001 33.7040937300001,39.5486243080001 33.7594908650001,39.6592118740001 33.8148104860001,39.7696960850001 33.8702334600001,39.8803870030001 33.9256047570001,39.9908712160001 33.980976054,40.1014587810001 34.0363731890001,40.173110825 34.072282917,40.2120463460001 34.091796163,40.3226339110001 34.14716746,40.4332214760001 34.2025387570001,40.5438090410001 34.2579875700001,40.690466756 34.3314972940001,40.936033163 34.3860676070001,40.9652820230001 34.4018547570001,40.9880196530001 34.428519796,41.023986451 34.4941747030001,41.023986451 34.4943297330001,41.1956555580001 34.7684732060001,41.2042338460001 34.793122864,41.2065076090001 34.8193228150001,41.198032674 34.994040833,41.1923258570001 35.1589044370001,41.1915214440001 35.1821430460001,41.201339966 35.2430178840001,41.2430945230001 35.366524557,41.2518795170001 35.464089661,41.2610779210001 35.4941653440001,41.2611812740001 35.4941653440001,41.2612846270001 35.4941653440001,41.2612846270001 35.494320374,41.3084578340001 35.5522476250001,41.3422099210001 35.593694153,41.3580229090001 35.6239248660001,41.3635006100001 35.655240784,41.3592631430001 35.792751771,41.354508911 35.825566304,41.3436568610001 35.85765737,41.2663489180001 35.9942381800001,41.2663489180001 35.9943415320001,41.2662455650001 35.9942898560001,41.2406140540001 36.043020732,41.2366866450001 36.060332337,41.2365832930001 36.0770238240001,41.2688293870001 36.32796458,41.2769942620001 36.3547846480001,41.3652576090001 36.493897603,41.3652576090001 36.4940009560001,41.3653609620001 36.4940526330001,41.3653609620001 36.4941559860001,41.3854114170001 36.516376851,41.4148669840001 36.5273839320001,41.4797725830001 36.5361172490001,41.7899345300001 36.589292297,41.8173230390001 36.5997309370001,41.8437813730001 36.6178693640001,41.9785535070001 36.7336245730001,42.1784379480001 36.90531952,42.2815841070001 36.9939704390001,42.281894165 36.9940221150001,42.281894165 36.994125468,42.3458695880001 37.0429080200001,42.3768057240001 37.0620006190001,42.377185506 37.0622350060001,42.3768754480001 37.0767560830001,42.3711910400001 37.087944031,42.3636462810001 37.0981501270001,42.3572384030001 37.1099840290001,42.4018868410001 37.114143982,42.4591443280001 37.1293110150001,42.5452372640001 37.140886536,42.5612569580001 37.1466226200001,42.564667602 37.1520486450001,42.5769665940001 37.179230449,42.7023336190001 37.325345561,42.7061576740001 37.3332262170001,42.7073979090001 37.340150859,42.7094649650001 37.3471788540001,42.7156661380001 37.3552920530001,42.7221773680001 37.358909404,42.7715800370001 37.3749032600001,42.7804683840001 37.3754975380001,42.7924573160001 37.3743348190001,42.801138956 37.3690896610001,42.8054797770001 37.35185557,42.814058065 37.3468171190001,42.8967403570001 37.3249063110001,42.9370479730001 37.3201520800001,42.9796293540001 37.3318309540001,43.005157512 37.3472563680001,43.0435014240001 37.360252991,43.0837056890001 37.368831279,43.114814901 37.3711308800001,43.1319714760001 37.367255147,43.2633846430001 37.3106952930001,43.2703092850001 37.3086799110001,43.2786291910001 37.307749736,43.2875175380001 37.309144999,43.2968709720001 37.316741435,43.3056559650001 37.3199712120001,43.3241561280001 37.3222191370001,43.3361450610001 37.3202295940001,43.3625052150001 37.3039081520001,43.3760392660001 37.2955282600001,43.4167602950001 37.2791726680001,43.463269084 37.248683574,43.4795988370001 37.2433609010001,43.4924145910001 37.244756165,43.5171159260001 37.2523009240001,43.52951827 37.2539028930001,43.542437378 
37.2523009240001,43.5506022550001 37.24873525,43.5688957120001 37.2377023320001,43.5942171630001 37.2294599410001,43.618298381 37.226979472,43.7208244230001 37.2326122030001,43.7469210210001 37.230648499,43.770640503 37.2258425900001,43.7805623780001 37.220364889,43.8020597740001 37.203570048,43.8093978270001 37.1996943150001,43.8220068770001 37.202381491,43.8399902760001 37.217109273,43.8936304120001 37.224912414,43.9241711830001 37.2530760700001,43.9537817790001 37.287466736,43.9903686930001 37.312503968,44.0355855710001 37.3182400520001,44.0690247360001 37.3137265880001,44.0880371500001 37.3111603800001,44.1844653730001 37.2791726680001,44.206892945 37.267519633,44.2231710210001 37.254057923,44.2350049230001 37.2367721560001,44.243531535 37.21382782,44.2485441480001 37.1919170130001,44.2496293540001 37.1794371550001,44.2483374430001 37.1696703090001,44.2401208900001 37.1579655970001,44.2301990160001 37.1543224080001,44.2187785240001 37.1526170860001,44.2059627690001 37.1466226200001,44.1892196040001 37.129207662,44.180641317 37.108924662,44.1800212000001 37.087608134,44.1874626060001 37.0669375610001,44.2277702230001 36.994125468,44.234281454 36.9836609910001,44.2432731530001 36.9777698780001,44.284821004 36.9691657520001,44.2972233480001 36.9699408980001,44.3066284590001 36.977227275,44.3158785400001 36.9939704390001,44.3159818930001 36.9939704390001,44.3161369220001 36.9940221150001,44.3161369220001 36.994125468,44.3316398520001 37.015467835,44.3351538490001 37.031151632,44.3431120200001 37.0424429330001,44.4280680740001 37.064767151,44.4544230550001 37.0763426720001,44.4792277430001 37.092000631,44.5034123130001 37.116624451,44.5393791100001 37.1436770630001,44.5782397870001 37.166414694,44.6102791750001 37.1783519490001,44.6289343670001 37.1790237430001,44.7337858480001 37.1672415170001,44.7538363030001 37.1591541550001,44.7661352950001 37.1419200650001)))"),
geo.MustParseGeometry("MULTIPOLYGON(((-71.7574356759999 19.7101097680001,-71.7382706369999 19.7061221370001,-71.7203263009999 19.697455145,-71.7241104809999 19.7171898460001,-71.7418513659999 19.755804755,-71.7407934239999 19.7656924500001,-71.7407934239999 19.773179429,-71.7676488919999 19.7744815120001,-71.771066861 19.7851423200001,-71.7602432929999 19.8009300800001,-71.7444962229999 19.8172061220001,-71.7399796209999 19.82025788,-71.7348526679999 19.821275132,-71.7237035799999 19.820990302,-71.7169083319999 19.8236351580001,-71.7057999339999 19.8364932310001,-71.6998184889999 19.841498114,-71.6878962879999 19.8460147160001,-71.6790258449999 19.848089911,-71.6702774729999 19.851996161,-71.6588028639999 19.8619652360001,-71.6656388009999 19.8744977890001,-71.6667374339999 19.885687567,-71.6614477199999 19.8936221370001,-71.6485896479999 19.896714585,-71.6408585279999 19.8955752620001,-71.6288956369999 19.8904483090001,-71.6212458979999 19.8893089860001,-71.6179093089999 19.8914248720001,-71.6109513009999 19.900824286,-71.6073298819999 19.9028994810001,-71.5046280589999 19.91034577,-71.4785050119999 19.9068871110001,-71.452707486 19.8980980490001,-71.3572485019999 19.851263739,-71.3292130199999 19.845892645,-71.3026016919999 19.85569896,-71.2824600899999 19.8417829450001,-71.2680557929999 19.8355166690001,-71.2237442699999 19.8340518250001,-71.2120662099999 19.837103583,-71.2093399729999 19.8447940120001,-71.2088110019999 19.8549665390001,-71.2035212879999 19.8653832050001,-71.1917618479999 19.8696963560001,-71.1833389959999 19.8604190120001,-71.1722305979999 19.8340518250001,-71.1644180979999 19.8386091170001,-71.158518033 19.843491929,-71.1544083319999 19.8491071640001,-71.1517227859999 19.85569896,-71.1602270169999 19.8582217470001,-71.1620173819999 19.8594424500001,-71.1621801419999 19.8620466170001,-71.1653946609999 19.8688011740001,-71.1497289699999 19.863023179,-71.1365860669999 19.863714911,-71.1231990229999 19.866888739,-71.1070450509999 19.8688011740001,-71.0967911449999 19.876206773,-71.0782771479999 19.909125067,-71.0660701159999 19.9165713560001,-71.0508520169999 19.9202334660001,-71.0184220039999 19.935492255,-71.0008845689999 19.93768952,-70.9847305979999 19.9337425800001,-70.9693497389999 19.9251976580001,-70.9610489569999 19.9126651060001,-70.9661759109999 19.896714585,-70.9468888009999 19.8893089860001,-70.9394425119999 19.8893089860001,-70.9444473949999 19.904730536,-70.9468888009999 19.91034577,-70.8985082669999 19.9028994810001,-70.8842667309999 19.9042829450001,-70.8547257149999 19.910834052,-70.8432511059999 19.91034577,-70.8309626939999 19.90253327,-70.7869766919999 19.8528506530001,-70.7833145819999 19.8433291690001,-70.788644986 19.8340518250001,-70.7509659499999 19.8338076840001,-70.7365616529999 19.827093817,-70.6984757149999 19.79682038,-70.6622208319999 19.7793643250001,-70.6236873039999 19.766546942,-70.5790909499999 19.760199286,-70.5187068349999 19.7597516950001,-70.5136612619999 19.762640692,-70.5085343089999 19.7693545590001,-70.4964493479999 19.7772484400001,-70.4827774729999 19.783880927,-70.4727270169999 19.786851304,-70.4337052069999 19.7752139340001,-70.3727107409999 19.7141787780001,-70.3361710279999 19.6776390650001,-70.3195694649999 19.666449286,-70.3013402989999 19.6576602230001,-70.280832486 19.6518008480001,-70.2352188789999 19.6472028670001,-70.1985570949999 19.6346703150001,-70.1314184239999 19.6221377620001,-70.1164444649999 19.6223819030001,-70.1064346999999 19.625921942,-70.0975235669999 19.6332054710001,-70.0610245429999 
19.670111395,-70.0421036449999 19.6784528670001,-69.9866837229999 19.6776390650001,-69.9595434239999 19.680121161,-69.9472550119999 19.678656317,-69.9320369129999 19.670152085,-69.8979386059999 19.635972398,-69.8871964179999 19.6127383480001,-69.8877660799999 19.5873477230001,-69.9041641919999 19.5323753930001,-69.8841446609999 19.524400132,-69.876535611 19.5066592470001,-69.8768204419999 19.4610049500001,-69.8743790359999 19.441148179,-69.867990689 19.4252790390001,-69.8013809889999 19.3308779970001,-69.7714737619999 19.3032494160001,-69.7676895819999 19.299750067,-69.7334692049999 19.2865664730001,-69.713205533 19.289740302,-69.6743871739999 19.3038597680001,-69.6543676419999 19.3070335960001,-69.6093643869999 19.30878327,-69.589426236 19.3128115910001,-69.5689591139999 19.3207054710001,-69.5394180979999 19.3394229190001,-69.5279841789999 19.3417829450001,-69.5176488919999 19.3396670590001,-69.4964493479999 19.329779364,-69.4833064439999 19.3275820980001,-69.4590958319999 19.3333194030001,-69.4485977859999 19.3343773460001,-69.4427791009999 19.334173895,-69.4373673169999 19.332953192,-69.4334203769999 19.3299014340001,-69.4317927729999 19.3241234400001,-69.4286189439999 19.3192406270001,-69.4217016269999 19.3211123720001,-69.4107152989999 19.3275820980001,-69.3526912099999 19.305121161,-69.3356013659999 19.293402411,-69.3242895169999 19.3150902360001,-69.3058975899999 19.3311221370001,-69.2604874339999 19.3548851580001,-69.231271939 19.3633487000001,-69.2243546209999 19.35179271,-69.2320857409999 19.3279483090001,-69.2468155589999 19.2996279970001,-69.226877408 19.292669989,-69.2043350899999 19.2927920590001,-69.182443814 19.2981631530001,-69.1642960279999 19.3070335960001,-69.1583552729999 19.28510163,-69.1713761059999 19.2678897160001,-69.1893204419999 19.2522647160001,-69.1983943349999 19.235052802,-69.202707486 19.2169457050001,-69.2143448559999 19.2006289730001,-69.2318009109999 19.188666083,-69.2536514959999 19.18353913,-69.3321427069999 19.197211005,-69.4181208979999 19.1909854190001,-69.4383845689999 19.196478583,-69.4754532539999 19.2140160180001,-69.6029353509999 19.228989976,-69.6172989569999 19.2252464860001,-69.6227921209999 19.211859442,-69.6260473299999 19.16429271,-69.6335343089999 19.1236839860001,-69.630441861 19.1084658870001,-69.6208796869999 19.1019554710001,-69.6084692049999 19.093451239,-69.5532934239999 19.1014671900001,-69.5342504549999 19.0947940120001,-69.5104874339999 19.102036851,-69.4634496739999 19.0847028670001,-69.4386287099999 19.0873477230001,-69.4447322259999 19.0923526060001,-69.4527481759999 19.096909898,-69.4623103509999 19.100287177,-69.4733780589999 19.1016299500001,-69.4733780589999 19.1084658870001,-69.4199926419999 19.1111514340001,-69.4044490229999 19.1084658870001,-69.3867081369999 19.0960960960001,-69.3729548819999 19.065375067,-69.3560684889999 19.053208726,-69.3389786449999 19.0488955750001,-69.3045955069999 19.044867255,-69.211740689 19.0209821640001,-69.1732478509999 19.0111351580001,-69.1574600899999 19.012193101,-69.1386612619999 19.0222842470001,-69.1448868479999 19.0296084660001,-69.1573380199999 19.0366885440001,-69.1574600899999 19.0463727890001,-69.1395157539999 19.051214911,-69.1238907539999 19.037787177,-69.1028539699999 19.0053571640001,-69.0892634759999 18.9978701840001,-69.0741267569999 18.9935570330001,-69.0611873039999 18.9881859400001,-69.0544327459999 18.977484442,-69.0424698559999 18.986761786,-69.0180557929999 18.9974632830001,-69.0065811839999 19.0053571640001,-69.0142309239999 19.013413804,-69.010568814 
19.015366929,-69.0015763009999 19.0156110700001,-68.992990689 19.0184593770001,-68.9825333319999 19.0287946640001,-68.9793595039999 19.0328636740001,-68.975412564 19.033148505,-68.9395645819999 19.0313174500001,-68.9186091789999 19.02728913,-68.8998103509999 19.0209007830001,-68.883127408 19.012193101,-68.8812149729999 19.0094261740001,-68.881174283 19.005804755,-68.8803604809999 19.0020205750001,-68.8762914699999 18.9986026060001,-68.8558243479999 18.9917666690001,-68.8539932929999 18.9920921900001,-68.8182266919999 18.9849307310001,-68.7958878249999 18.984116929,-68.7868546209999 18.9810244810001,-68.7851456369999 18.9798851580001,-68.7769669259999 18.974351304,-68.7678116529999 18.9694684920001,-68.7499893869999 18.9680850280001,-68.7397354809999 18.9644229190001,-68.6235245429999 18.8618838560001,-68.5807999339999 18.8125674500001,-68.5204971999999 18.7688662780001,-68.4772843089999 18.7402204450001,-68.4649958979999 18.7350121110001,-68.458566861 18.7310244810001,-68.4134008449999 18.6896019550001,-68.3561905589999 18.6565615910001,-68.3286026679999 18.6165225280001,-68.334055142 18.5772972680001,-68.428049283 18.4412295590001,-68.4346817699999 18.435777085,-68.4412735669999 18.4290225280001,-68.4443253249999 18.4195010440001,-68.4424942699999 18.3847516950001,-68.4443253249999 18.3748233090001,-68.4556778639999 18.357855536,-68.4721573559999 18.349839585,-68.4928279289999 18.3474795590001,-68.5165909499999 18.3474795590001,-68.5349014959999 18.3533389340001,-68.574533658 18.3781598980001,-68.5889379549999 18.3822695980001,-68.6054581369999 18.3717308610001,-68.6028539699999 18.35565827,-68.5975235669999 18.336615302,-68.6060277989999 18.317084052,-68.6183162099999 18.301459052,-68.6260880199999 18.2833519550001,-68.6415095689999 18.2251651060001,-68.6470841139999 18.2148298200001,-68.6584366529999 18.2109235700001,-68.742990689 18.2045352230001,-68.7602432929999 18.2103539080001,-68.762521939 18.2469750020001,-68.7868546209999 18.295396226,-68.8201391269999 18.342474677,-68.8495987619999 18.3748233090001,-68.8809708319999 18.395493882,-68.9225154289999 18.412298895,-68.9643448559999 18.4179548200001,-69.0125626289999 18.399115302,-69.0854386059999 18.3959007830001,-69.1043595039999 18.399115302,-69.1481827459999 18.4132347680001,-69.1680395169999 18.4163272160001,-69.1852921209999 18.4216983090001,-69.2142227859999 18.4451358090001,-69.2363175119999 18.4505069030001,-69.2556046209999 18.4466820330001,-69.2752172519999 18.4403343770001,-69.2950333319999 18.4390322940001,-69.3150935539999 18.4505069030001,-69.3360082669999 18.432074286,-69.4659317699999 18.422552802,-69.5047501289999 18.4089216170001,-69.5252579419999 18.408433335,-69.5799861319999 18.4444847680001,-69.6021215489999 18.4557152360001,-69.6168106759999 18.4573428410001,-69.6231583319999 18.4479841170001,-69.622629361 18.4367536480001,-69.6208389959999 18.425482489,-69.6236059239999 18.4163272160001,-69.6325577459999 18.4135195980001,-69.6465958319999 18.4135602890001,-69.6594945949999 18.4159203150001,-69.6651505199999 18.4195010440001,-69.681955533 18.4467634140001,-69.6899307929999 18.454087632,-69.6973770819999 18.4563662780001,-69.8514705069999 18.4724795590001,-69.8793839179999 18.4712181660001,-69.8836563789999 18.4710147160001,-69.8998103509999 18.4664574240001,-69.931223111 18.4519717470001,-69.9571020169999 18.435288804,-69.9669083319999 18.4308128930001,-69.9935196609999 18.422552802,-70.0040177069999 18.4174665390001,-70.0165909499999 18.4113630230001,-70.0389298169999 18.3924828150001,-70.0557755199999 
18.3672549500001,-70.0624080069999 18.3372256530001,-70.0676977199999 18.329087632,-70.0906876289999 18.3175316430001,-70.0959366529999 18.310248114,-70.1000056629999 18.2992210960001,-70.1337784499999 18.269273179,-70.1580704419999 18.242621161,-70.1717830069999 18.231431382,-70.1778865229999 18.2341983090001,-70.2021378249999 18.2335472680001,-70.2207738919999 18.2305362000001,-70.2331436839999 18.2239444030001,-70.250803189 18.23432038,-70.2696020169999 18.2381045590001,-70.3975723949999 18.2376162780001,-70.4190567699999 18.231146552,-70.4601944649999 18.208929755,-70.4835505849999 18.204087632,-70.5528458319999 18.2042503930001,-70.5689184239999 18.2103539080001,-70.5703018869999 18.21893952,-70.5630590489999 18.2256533870001,-70.5537003249999 18.2315127620001,-70.5490616529999 18.2376162780001,-70.5519099599999 18.2484398460001,-70.5587052069999 18.254380601,-70.5657445949999 18.2582054710001,-70.5733129549999 18.268011786,-70.5924373039999 18.2798526060001,-70.5967911449999 18.2891299500001,-70.5943497389999 18.299058335,-70.5884496739999 18.3036156270001,-70.5816544259999 18.307074286,-70.5764054029999 18.3139509140001,-70.5717667309999 18.3340518250001,-70.5711563789999 18.355169989,-70.5738826159999 18.3736839860001,-70.5794571609999 18.3856468770001,-70.6014298169999 18.411851304,-70.6146541009999 18.420843817,-70.6378067699999 18.429388739,-70.6575414699999 18.43378327,-70.6824845039999 18.4356143250001,-70.7038468089999 18.430121161,-70.7128800119999 18.4126651060001,-70.7094620429999 18.3917503930001,-70.7119848299999 18.3833682310001,-70.7416886059999 18.35541413,-70.7498673169999 18.3504092470001,-70.7606501939999 18.3474795590001,-70.7723282539999 18.34788646,-70.7931208979999 18.3540713560001,-70.802316861 18.3549258480001,-70.8100479809999 18.352036851,-70.8239639959999 18.3426781270001,-70.8333227199999 18.3406436220001,-70.846018033 18.3421898460001,-70.8504532539999 18.339585679,-70.8603409499999 18.3307152360001,-70.8631078769999 18.3269717470001,-70.8693741529999 18.3146019550001,-70.8705948559999 18.310248114,-70.8732804029999 18.3063011740001,-70.8867081369999 18.3020694030001,-70.8916723299999 18.2997093770001,-70.918283658 18.2695987000001,-70.9324845039999 18.2615420590001,-70.9562068349999 18.258734442,-70.9658096999999 18.262152411,-70.9720352859999 18.2702497420001,-70.9772843089999 18.280259507,-70.9837947259999 18.2891299500001,-70.9942520819999 18.296616929,-71.0048721999999 18.30101146,-71.0482478509999 18.3100039730001,-71.0683487619999 18.3103701840001,-71.0839737619999 18.304388739,-71.0902400379999 18.2891299500001,-71.0923559239999 18.2807070980001,-71.1012263659999 18.266099351,-71.1033422519999 18.2549502620001,-71.1014298169999 18.24213288,-71.0965470039999 18.2350934920001,-71.0899145169999 18.228705145,-71.082834439 18.217718817,-71.0665177069999 18.1633161480001,-71.0612686839999 18.153998114,-71.0611059239999 18.1420759140001,-71.0805557929999 18.1139183610001,-71.0971573559999 18.0737165390001,-71.1918025379999 17.9417992210001,-71.2001033189999 17.9191755230001,-71.2042537099999 17.911118882,-71.2137345039999 17.9048526060001,-71.2336319649999 17.894964911,-71.2459203769999 17.8850772160001,-71.254709439 17.8753115910001,-71.2615453769999 17.86322663,-71.2678116529999 17.8465843770001,-71.2752172519999 17.8490664730001,-71.2780655589999 17.850775458,-71.2820938789999 17.853989976,-71.2835994129999 17.8467471370001,-71.2851456369999 17.844183661,-71.2889298169999 17.841009833,-71.2820938789999 17.841009833,-71.2861221999999 
17.8325869810001,-71.291330533 17.8261579450001,-71.2977188789999 17.821966864,-71.3056534499999 17.82050202,-71.3093155589999 17.8163923200001,-71.3158259759999 17.7964541690001,-71.3196508449999 17.7892113300001,-71.3318578769999 17.772406317,-71.365549283 17.678412177,-71.3758845689999 17.6601423200001,-71.4172257149999 17.6048037780001,-71.4272354809999 17.6092796900001,-71.435861783 17.6170921900001,-71.436350064 17.62128327,-71.446115689 17.629339911,-71.5148819649999 17.7377790390001,-71.5316055979999 17.755072333,-71.565256314 17.767523505,-71.6451309889999 17.7572695980001,-71.6793106759999 17.7652855490001,-71.6696671209999 17.772406317,-71.6377660799999 17.8040225280001,-71.6309301419999 17.8167992210001,-71.632394986 17.838324286,-71.6365860669999 17.853461005,-71.6520076159999 17.8819847680001,-71.6628311839999 17.8976911480001,-71.6649470689999 17.9034691430001,-71.6656388009999 17.9154727230001,-71.6588028639999 17.95331452,-71.6662491529999 17.9707705750001,-71.6838272779999 17.990179755,-71.7203263009999 18.0191104190001,-71.7762345039999 18.0392520200001,-71.7642712 18.0694924930001,-71.7606538499999 18.0868040970001,-71.7628759359999 18.1158462530001,-71.7622041419999 18.1324860640001,-71.7645812579999 18.143674011,-71.775329956 18.1723285930001,-71.7768285729999 18.1817853800001,-71.7740380459999 18.201474101,-71.7661315519999 18.220258484,-71.721708773 18.2929962370001,-71.7211117359998 18.2939738230001,-71.7093391519999 18.313250224,-71.7115095629999 18.3161441050001,-71.7207079679999 18.323973083,-71.7349706629999 18.3329389450001,-71.7887141529999 18.352214254,-71.8264379479999 18.376166281,-71.8345511479999 18.385080465,-71.8497440189999 18.4064486700001,-71.8581672779999 18.4135541790001,-71.8971829839999 18.4228817750001,-71.9121691489999 18.430736593,-71.9181636159999 18.4492367550001,-71.9143912359999 18.460217997,-71.9068981529999 18.464300436,-71.9015754799999 18.4697781380001,-71.90405595 18.4847901410001,-71.9105155029999 18.494272766,-72.0009492599999 18.5824585980001,-72.0098376059999 18.598814189,-71.9926293539999 18.6111390180001,-71.9554739999999 18.6186062620001,-71.880284791 18.605402934,-71.841579142 18.6179861450001,-71.8285566809999 18.631292827,-71.8079894609999 18.6646499640001,-71.7943468839999 18.679171041,-71.7917791329999 18.6806816980001,-71.7848976379999 18.6847302130001,-71.7580700279999 18.700513407,-71.7440657149999 18.7114429730001,-71.7320767829999 18.7300723270001,-71.7252554929999 18.7469705200001,-71.7203462329999 18.7654448450001,-71.7188476159999 18.7842292280001,-71.7269091399999 18.8235291550001,-71.7279426689999 18.8640693160001,-71.7334720469999 18.882569479,-71.7409134529999 18.8916128540001,-71.7638061119999 18.9115082810001,-71.7729528399999 18.9217143760001,-71.7871121829999 18.950033061,-71.796258911 18.9570610560001,-71.8196683349999 18.9578103640001,-71.8445246989999 18.949748841,-71.862456422 18.947087504,-71.8654019779999 18.9644249480001,-71.848142049 18.975483704,-71.7968273519999 18.9886095180001,-71.7837015379999 18.996309306,-71.7403450119999 19.0418879190001,-71.7104243569999 19.0816529340001,-71.6952831619999 19.094546204,-71.6613834229999 19.1175938930001,-71.6487743739999 19.1352930710001,-71.6436584069999 19.152475484,-71.6391108809999 19.212110088,-71.6517327169999 19.217923884,-71.7165221769999 19.247766825,-71.7495170769999 19.2797077230001,-71.7537292069999 19.283785299,-71.771557577 19.3075564570001,-71.7765185149999 19.3275035610001,-71.7699556079999 19.333808085,-71.742980509 
19.3483808390001,-71.7334720469999 19.3554605110001,-71.720811321 19.3859496060001,-71.715281943 19.391659851,-71.7038614509999 19.4146300250001,-71.703189657 19.4592784630001,-71.7151269129999 19.537464905,-71.7430838629999 19.6000709030001,-71.747683065 19.622782695,-71.7460414089999 19.649017002,-71.7450475669999 19.6648989870001,-71.7486132409999 19.6825981650001,-71.7574356759999 19.7101097680001)),((-71.5244848299999 17.5455589860001,-71.5411677729999 17.55463288,-71.5413712229999 17.5761579450001,-71.5322159499999 17.601304429,-71.521066861 17.62128327,-71.5069473949999 17.6136742210001,-71.4925837879999 17.6036644550001,-71.467111783 17.580267645,-71.4825333319999 17.5756289730001,-71.507394986 17.551255601,-71.5244848299999 17.5455589860001)),((-68.6640518869999 18.1693382830001,-68.6123754549999 18.1708031270001,-68.5987849599999 18.166205145,-68.588693814 18.1533877620001,-68.5775040359999 18.133734442,-68.5701798169999 18.115708726,-68.571848111 18.107855536,-68.5870255199999 18.111070054,-68.6192927729999 18.1251488300001,-68.6360977859999 18.1283633480001,-68.6539200509999 18.1262881530001,-68.6848852199999 18.116888739,-68.7018936839999 18.1146914730001,-68.7370499339999 18.1236026060001,-68.7620743479999 18.1451683610001,-68.7781876289999 18.1720238300001,-68.7869360019999 18.1966820330001,-68.7720841139999 18.200873114,-68.7545466789999 18.1976585960001,-68.7371720039999 18.1895205750001,-68.7223201159999 18.1792666690001,-68.7107641269999 18.1753604190001,-68.6640518869999 18.1693382830001)))"),
}
benchLargePoints = []geo.Geometry{
geo.MustParseGeometry("POINT (19.383354043259374 -29.13291298545164)"),
geo.MustParseGeometry("POINT (15.547774209259558 45.487209288609961)"),
geo.MustParseGeometry("POINT(44.6572635485688 33.7476416209197)"),
geo.MustParseGeometry("POINT(-71.7069954585449 18.8759970016117)"),
}
benchSmallPolygons = []geo.Geometry{
geo.MustParseGeometry("POLYGON EMPTY"),
geo.MustParseGeometry("POLYGON((0 0, 1 0, 1 1, 0 1, 0 0))"),
geo.MustParseGeometry("POLYGON((1 0, 0 1, 2 2, 1 0), (0.75 0.75, 1.25 0.75, 1.25 1.25, 0.75 1.25, 0.75 0.75))"),
geo.MustParseGeometry("MULTIPOLYGON(((0 0, 1 0, 1 1, 0 1, 0 0)))"),
}
benchSmallPoints = []geo.Geometry{
geo.MustParseGeometry("POINT EMPTY"),
geo.MustParseGeometry("POINT(0 0)"),
geo.MustParseGeometry("POINT(0.5 0.5)"),
geo.MustParseGeometry("POINT(-1 -2)"),
geo.MustParseGeometry("MULTIPOINT((0 0), (-1 -1), (0.5 0.25))"),
}
)
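// benchRes is a package-level sink; assigning the final result to it keeps
// the compiler from optimizing the benchmarked Intersects calls away.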
var benchRes bool
func BenchmarkIntersectsLargePolygonsAndPoints(b *testing.B) {
benchmarkIntersects(benchLargePoints, benchLargePolygons, b)
}
func BenchmarkIntersectsSmallPolygonsAndPoints(b *testing.B) {
benchmarkIntersects(benchSmallPolygons, benchSmallPoints, b)
}
func benchmarkIntersects(geomListA []geo.Geometry, geomListB []geo.Geometry, b *testing.B) {
var res bool
var err error
b.ResetTimer()
for n := 0; n < b.N; n++ {
for _, geomA := range geomListA {
for _, geomB := range geomListB {
res, err = Intersects(geomA, geomB)
if err != nil {
panic(err)
}
}
}
}
benchRes = res
}
| pkg/geo/geomfn/binary_predicates_bench_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/5015f06f107c92eab8fa180ea28c09c68e294e94 | [
0.0005068868631497025,
0.00021891451615374535,
0.00016364747716579586,
0.00017223271424882114,
0.00011004594125552103
] |
{
"id": 4,
"code_window": [
"\t_ = SETVARIABLESIZE\n",
")\n",
"\n",
"// COPYVAL is a template function that can be used to set a scalar to the value\n",
"// of another scalar in such a way that the destination won't be modified if the\n",
"// source is. You must use this on the result of UNSAFEGET if you wish to store\n",
"// that result past the lifetime of the batch you UNSAFEGET'd from.\n",
"func COPYVAL(dest, src interface{}) {\n",
"\tcolexecerror.InternalError(errors.AssertionFailedf(nonTemplatePanic))\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// source is.\n"
],
"file_path": "pkg/sql/colexec/execgen/placeholders.go",
"type": "replace",
"edit_start_line_idx": 29
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package scplan
import (
"reflect"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scgraph"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
"github.com/cockroachdb/cockroach/pkg/util/iterutil"
"github.com/cockroachdb/errors"
)
// A Phase represents the context in which an op is executed within a schema
// change. Different phases require different dependencies for the execution of
// the ops to be plumbed in.
//
// Today, we support the phases corresponding to async schema changes initiated
// and partially executed in the user transaction. This will change as we
// transition to transactional schema changes.
type Phase int
const (
// StatementPhase refers to execution of ops occurring during statement
// execution during the user transaction.
StatementPhase Phase = iota
// PreCommitPhase refers to execution of ops occurring during the user
// transaction immediately before commit.
PreCommitPhase
// PostCommitPhase refers to execution of ops occurring after the user
// transaction has committed (i.e., in the async schema change job).
PostCommitPhase
)
// Params holds the arguments for planning.
type Params struct {
// ExecutionPhase indicates the phase that the plan should be constructed for.
ExecutionPhase Phase
// CreatedDescriptorIDs contains IDs for new descriptors created by the same
// schema changer (i.e., earlier in the same transaction). New descriptors
// can have most of their schema changes fully executed in the same
// transaction.
//
// This doesn't do anything right now.
CreatedDescriptorIDs catalog.DescriptorIDSet
}
// A Plan is a schema change plan, primarily containing ops to be executed that
// are partitioned into stages.
type Plan struct {
Params Params
Initial scpb.State
Graph *scgraph.Graph
Stages []Stage
}
// A Stage is a sequence of ops to be executed "together" as part of a schema
// change.
//
// Stages also contain the state before and after the execution of the ops in
// the stage, reflecting the fact that any set of ops can be thought of as a
// transition from one state to another.
type Stage struct {
Before, After scpb.State
Ops scop.Ops
Revertible bool
}
// MakePlan generates a Plan for a particular phase of a schema change, given
// the initial state for a set of targets.
func MakePlan(initial scpb.State, params Params) (_ Plan, err error) {
defer func() {
if r := recover(); r != nil {
rAsErr, ok := r.(error)
if !ok {
rAsErr = errors.Errorf("panic during MakePlan: %v", r)
}
err = errors.CombineErrors(err, rAsErr)
}
}()
g, err := scgraph.New(initial)
if err != nil {
return Plan{}, err
}
// TODO(ajwerner): Generate the stages for all of the phases as it will make
// debugging easier.
for _, ts := range initial {
p[reflect.TypeOf(ts.Element())].ops(g, ts.Target, ts.Status, params)
}
if err := g.ForEachNode(func(n *scpb.Node) error {
d, ok := p[reflect.TypeOf(n.Element())]
if !ok {
return errors.Errorf("not implemented for %T", n.Target)
}
d.deps(g, n.Target, n.Status)
return nil
}); err != nil {
return Plan{}, err
}
stages := buildStages(initial, g, params)
return Plan{
Params: params,
Initial: initial,
Graph: g,
Stages: stages,
}, nil
}
func buildStages(init scpb.State, g *scgraph.Graph, params Params) []Stage {
// TODO(ajwerner): deal with the case where the target status was
// fulfilled by something that preceded the initial state.
cur := init
fulfilled := map[*scpb.Node]struct{}{}
filterUnsatisfiedEdgesStep := func(edges []*scgraph.OpEdge) ([]*scgraph.OpEdge, bool) {
candidates := make(map[*scpb.Node]struct{})
for _, e := range edges {
candidates[e.To()] = struct{}{}
}
// Check to see if the current set of edges will have their dependencies met
// if they are all run. Any which will not must be pruned. This greedy
// algorithm works, but a justification is in order.
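		// Sketch of one justification: pruning an edge removes its target
		// from the candidate set, which can only cause more edges to fail on
		// the next pass, never fewer, and a dependency once fulfilled stays
		// fulfilled. Iterating this step to a fixed point is therefore sound,
		// and it terminates because each pass strictly shrinks the edge set.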
failed := map[*scgraph.OpEdge]struct{}{}
for _, e := range edges {
_ = g.ForEachDepEdgeFrom(e.To(), func(de *scgraph.DepEdge) error {
_, isFulfilled := fulfilled[de.To()]
_, isCandidate := candidates[de.To()]
if isFulfilled || isCandidate {
return nil
}
failed[e] = struct{}{}
return iterutil.StopIteration()
})
}
if len(failed) == 0 {
return edges, true
}
truncated := edges[:0]
for _, e := range edges {
if _, found := failed[e]; !found {
truncated = append(truncated, e)
}
}
return truncated, false
}
filterUnsatisfiedEdges := func(edges []*scgraph.OpEdge) ([]*scgraph.OpEdge, bool) {
for len(edges) > 0 {
if filtered, done := filterUnsatisfiedEdgesStep(edges); !done {
edges = filtered
} else {
return filtered, true
}
}
return edges, false
}
buildStageType := func(edges []*scgraph.OpEdge) (Stage, bool) {
edges, ok := filterUnsatisfiedEdges(edges)
if !ok {
return Stage{}, false
}
next := append(cur[:0:0], cur...)
isStageRevertible := true
var ops []scop.Op
for revertible := 1; revertible >= 0; revertible-- {
isStageRevertible = revertible == 1
for i, ts := range cur {
for _, e := range edges {
if e.From() == ts && isStageRevertible == e.Revertible() {
next[i] = e.To()
ops = append(ops, e.Op()...)
break
}
}
}
			// If any ops were added at this pass (revertible edges are tried
			// first, then non-revertible), this stage is done.
if len(ops) != 0 {
break
}
}
return Stage{
Before: cur,
After: next,
Ops: scop.MakeOps(ops...),
}, true
}
var stages []Stage
for {
// Note that the current nodes are fulfilled for the sake of dependency
// checking.
for _, ts := range cur {
fulfilled[ts] = struct{}{}
}
// Extract the set of op edges for the current stage.
var opEdges []*scgraph.OpEdge
for _, t := range cur {
// TODO(ajwerner): improve the efficiency of this lookup.
// Look for an opEdge from this node. Then, for the other side
// of the opEdge, look for dependencies.
if oe, ok := g.GetOpEdgeFrom(t); ok {
opEdges = append(opEdges, oe)
}
}
		// Group the op edges on a per-type basis.
opTypes := make(map[scop.Type][]*scgraph.OpEdge)
for _, oe := range opEdges {
for _, op := range oe.Op() {
opTypes[op.Type()] = append(opTypes[op.Type()], oe)
}
}
// Greedily attempt to find a stage which can be executed. This is sane
// because once a dependency is met, it never becomes unmet.
var didSomething bool
var s Stage
for _, typ := range []scop.Type{
scop.MutationType,
scop.BackfillType,
scop.ValidationType,
} {
if s, didSomething = buildStageType(opTypes[typ]); didSomething {
break
}
}
if !didSomething {
break
}
// Sort ops based on graph dependencies.
sortOps(g, s.Ops.Slice())
stages = append(stages, s)
cur = s.After
}
return stages
}
// doesPathExistToNode reports whether some path of dep edges exists from
// start to target, using a breadth-first search.
func doesPathExistToNode(graph *scgraph.Graph, start *scpb.Node, target *scpb.Node) bool {
nodesToVisit := []*scpb.Node{start}
visitedNodes := map[*scpb.Node]struct{}{}
for len(nodesToVisit) > 0 {
curr := nodesToVisit[0]
if curr == target {
return true
}
nodesToVisit = nodesToVisit[1:]
if _, ok := visitedNodes[curr]; !ok {
visitedNodes[curr] = struct{}{}
edges, ok := graph.GetDepEdgesFrom(curr)
			if !ok {
				// No dep edges out of this node; keep exploring the rest of
				// the frontier rather than aborting the whole search.
				continue
			}
// Append all of the nodes to visit
for _, currEdge := range edges {
nodesToVisit = append(nodesToVisit, currEdge.To())
}
}
}
return false
}
// sortOps sorts the operations into order based on
// graph dependencies
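// Note: compareOps yields only a partial order (graph reachability), which is
// presumably why an explicit pairwise insertion pass is used here instead of
// a comparison sort such as sort.Slice.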
func sortOps(graph *scgraph.Graph, ops []scop.Op) {
for i := 1; i < len(ops); i++ {
for j := i; j > 0; j-- {
if compareOps(graph, ops[j], ops[j-1]) {
tmp := ops[j]
ops[j] = ops[j-1]
ops[j-1] = tmp
}
}
}
// Sanity: Graph order is sane across all of
// the ops.
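	// This is an O(n^2) pairwise check: panic if a later op compares strictly
	// less than an earlier one, or if two ops each compare less than the
	// other (the relation would not be antisymmetric).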
for i := 0; i < len(ops); i++ {
for j := i + 1; j < len(ops); j++ {
if !compareOps(graph, ops[i], ops[j]) && // Greater, but not equal (if equal opposite comparison would match).
compareOps(graph, ops[j], ops[i]) {
panic(errors.AssertionFailedf("Operators are not completely sorted %d %d", i, j))
} else if compareOps(graph, ops[j], ops[i]) {
panic(errors.AssertionFailedf("Operators are not completely sorted %d %d", i, j))
}
}
}
}
// compareOps compares two operations and orders them based on the
// dependencies between their nodes in the graph.
func compareOps(graph *scgraph.Graph, firstOp scop.Op, secondOp scop.Op) (less bool) {
	// Compare the two ops' nodes by their reachability in the graph.
firstNode := graph.GetNodeFromOp(firstOp)
secondNode := graph.GetNodeFromOp(secondOp)
if firstNode == secondNode {
return false // Equal
}
firstExists := doesPathExistToNode(graph, firstNode, secondNode)
secondExists := doesPathExistToNode(graph, secondNode, firstNode)
if firstExists && secondExists {
if firstNode.Target.Direction == scpb.Target_DROP {
return true
} else if secondNode.Target.Direction == scpb.Target_DROP {
return false
} else {
panic(errors.AssertionFailedf("A potential cycle exists in plan the graph, without any"+
"nodes transitioning in opposite directions\n %s\n%s\n",
firstNode,
secondNode))
}
}
// Path exists from first to second, so we depend on second.
return firstExists
}
| pkg/sql/schemachanger/scplan/plan.go | 0 | https://github.com/cockroachdb/cockroach/commit/5015f06f107c92eab8fa180ea28c09c68e294e94 | [
0.002573809353634715,
0.0002671207475941628,
0.00015938688011374325,
0.00016978835628833622,
0.00041227179463021457
] |
{
"id": 0,
"code_window": [
"\tgopkg.in/square/go-jose.v2 v2.6.0\n",
"\tgopkg.in/yaml.v2 v2.4.0\n",
"\tgopkg.in/yaml.v3 v3.0.1\n",
"\thelm.sh/helm/v3 v3.9.0\n",
"\tistio.io/api v0.0.0-20220811073502-ef38878bf5f0\n",
"\tistio.io/client-go v1.12.0-alpha.5.0.20220808181119-e13c0270f465\n",
"\tistio.io/pkg v0.0.0-20220804202146-4787664020d6\n",
"\tk8s.io/api v0.24.2\n",
"\tk8s.io/apiextensions-apiserver v0.24.2\n",
"\tk8s.io/apimachinery v0.24.2\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tistio.io/api v0.0.0-20220812165511-84e2baba34eb\n",
"\tistio.io/client-go v1.12.0-alpha.5.0.20220812165912-faa59fdcfe39\n"
],
"file_path": "go.mod",
"type": "replace",
"edit_start_line_idx": 86
} | apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
meshConfig:
accessLogFile: /dev/stdout
extensionProviders:
- name: otel
envoyOtelAls:
service: otel-collector.istio-system.svc.cluster.local
port: 4317
- name: prometheus
prometheus:
- name: stackdriver
stackdriver:
- name: envoy
envoyFileAccessLog:
path: /dev/stdout
- name: envoyExtAuthzHttp
envoyExtAuthzHttp:
- name: envoyExtAuthzGrpc
envoyExtAuthzGrpc:
- name: zipkin
zipkin:
- name: lightstep
lightstep:
- name: datadog
datadog:
- name: opencensus
opencensus:
- name: skywalking
skywalking:
- name: envoyHttpAls
envoyHttpAls:
- name: envoyTcpAls
envoyTcpAls:
components:
egressGateways:
- name: istio-egressgateway
enabled: true
k8s:
resources:
requests:
cpu: 10m
memory: 40Mi
ingressGateways:
- name: istio-ingressgateway
enabled: true
k8s:
resources:
requests:
cpu: 10m
memory: 40Mi
service:
ports:
          ## You can add custom gateway ports in user values overrides, but the list must still include these ports, since Helm replaces arrays rather than merging them.
# Note that AWS ELB will by default perform health checks on the first port
# on this list. Setting this to the health check port will ensure that health
# checks always work. https://github.com/istio/istio/issues/12503
- port: 15021
targetPort: 15021
name: status-port
- port: 80
targetPort: 8080
name: http2
- port: 443
targetPort: 8443
name: https
- port: 31400
targetPort: 31400
name: tcp
# This is the port where sni routing happens
- port: 15443
targetPort: 15443
name: tls
pilot:
k8s:
env:
- name: PILOT_TRACE_SAMPLING
value: "100"
resources:
requests:
cpu: 10m
memory: 100Mi
values:
global:
proxy:
resources:
requests:
cpu: 10m
memory: 40Mi
pilot:
autoscaleEnabled: false
gateways:
istio-egressgateway:
autoscaleEnabled: false
istio-ingressgateway:
autoscaleEnabled: false
| operator/pkg/util/testdata/overlay-iop.yaml | 1 | https://github.com/istio/istio/commit/1de67f0f70829c754359fa2e33ac264461a2b980 | [
0.0005575122777372599,
0.00021808069141115993,
0.00016507819236721843,
0.000167600141139701,
0.00011436878412496299
] |
{
"id": 0,
"code_window": [
"\tgopkg.in/square/go-jose.v2 v2.6.0\n",
"\tgopkg.in/yaml.v2 v2.4.0\n",
"\tgopkg.in/yaml.v3 v3.0.1\n",
"\thelm.sh/helm/v3 v3.9.0\n",
"\tistio.io/api v0.0.0-20220811073502-ef38878bf5f0\n",
"\tistio.io/client-go v1.12.0-alpha.5.0.20220808181119-e13c0270f465\n",
"\tistio.io/pkg v0.0.0-20220804202146-4787664020d6\n",
"\tk8s.io/api v0.24.2\n",
"\tk8s.io/apiextensions-apiserver v0.24.2\n",
"\tk8s.io/apimachinery v0.24.2\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tistio.io/api v0.0.0-20220812165511-84e2baba34eb\n",
"\tistio.io/client-go v1.12.0-alpha.5.0.20220812165912-faa59fdcfe39\n"
],
"file_path": "go.mod",
"type": "replace",
"edit_start_line_idx": 86
} | apiVersion: apps/v1
kind: Deployment
metadata:
namespace: {{.Release.Namespace}}
name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }}
spec:
replicas: 1
revisionHistoryLimit: {{ .Values.deploymentHistory }}
selector:
matchLabels:
name: istio-operator
template:
metadata:
labels:
name: istio-operator
{{- range $key, $val := .Values.podLabels }}
{{ $key }}: "{{ $val }}"
{{- end }}
{{- if .Values.podAnnotations }}
annotations:
{{ toYaml .Values.podAnnotations | indent 8 }}
{{- end }}
spec:
serviceAccountName: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }}
containers:
- name: istio-operator
image: {{.Values.hub}}/operator:{{.Values.tag}}
command:
- operator
- server
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsGroup: 1337
runAsUser: 1337
runAsNonRoot: true
imagePullPolicy: IfNotPresent
resources:
{{ toYaml .Values.operator.resources | trim | indent 12 }}
env:
- name: WATCH_NAMESPACE
value: {{.Values.watchedNamespaces | quote}}
- name: LEADER_ELECTION_NAMESPACE
value: {{.Release.Namespace | quote}}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: OPERATOR_NAME
value: {{.Release.Namespace | quote}}
- name: WAIT_FOR_RESOURCES_TIMEOUT
value: {{.Values.waitForResourcesTimeout | quote}}
- name: REVISION
value: {{.Values.revision | quote}}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
---
| manifests/charts/istio-operator/templates/deployment.yaml | 0 | https://github.com/istio/istio/commit/1de67f0f70829c754359fa2e33ac264461a2b980 | [
0.00017180433496832848,
0.0001672210346441716,
0.00016361346933990717,
0.00016637171211186796,
0.000002588635879874346
] |
{
"id": 0,
"code_window": [
"\tgopkg.in/square/go-jose.v2 v2.6.0\n",
"\tgopkg.in/yaml.v2 v2.4.0\n",
"\tgopkg.in/yaml.v3 v3.0.1\n",
"\thelm.sh/helm/v3 v3.9.0\n",
"\tistio.io/api v0.0.0-20220811073502-ef38878bf5f0\n",
"\tistio.io/client-go v1.12.0-alpha.5.0.20220808181119-e13c0270f465\n",
"\tistio.io/pkg v0.0.0-20220804202146-4787664020d6\n",
"\tk8s.io/api v0.24.2\n",
"\tk8s.io/apiextensions-apiserver v0.24.2\n",
"\tk8s.io/apimachinery v0.24.2\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tistio.io/api v0.0.0-20220812165511-84e2baba34eb\n",
"\tistio.io/client-go v1.12.0-alpha.5.0.20220812165912-faa59fdcfe39\n"
],
"file_path": "go.mod",
"type": "replace",
"edit_start_line_idx": 86
} | apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
name: hello
spec:
replicas: 7
selector:
matchLabels:
app: hello
tier: backend
track: stable
strategy: {}
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: hello
kubectl.kubernetes.io/default-logs-container: hello
prometheus.io/path: /stats/prometheus
prometheus.io/port: "15020"
prometheus.io/scrape: "true"
sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-socket","credential-socket","workload-certs","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}'
creationTimestamp: null
labels:
app: hello
security.istio.io/tlsMode: istio
service.istio.io/canonical-name: hello
service.istio.io/canonical-revision: latest
tier: backend
track: stable
spec:
containers:
- image: fake.docker.io/google-samples/hello-go-gke:1.0
livenessProbe:
httpGet:
path: /app-health/hello/livez
port: 15020
name: hello
ports:
- containerPort: 80
name: http
readinessProbe:
httpGet:
path: /app-health/hello/readyz
port: 15020
resources: {}
- image: fake.docker.io/google-samples/hello-go-gke:1.0
livenessProbe:
httpGet:
path: /app-health/world/livez
port: 15020
name: world
ports:
- containerPort: 90
name: http
readinessProbe:
exec:
command:
- cat
- /tmp/healthy
resources: {}
- args:
- proxy
- sidecar
- --domain
- $(POD_NAMESPACE).svc.cluster.local
- --proxyLogLevel=warning
- --proxyComponentLogLevel=misc:error
- --log_output_level=default:info
- --concurrency
- "2"
env:
- name: JWT_POLICY
value: third-party-jwt
- name: PILOT_CERT_PROVIDER
value: istiod
- name: CA_ADDR
value: istiod.istio-system.svc:15012
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: INSTANCE_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
- name: HOST_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: PROXY_CONFIG
value: |
{}
- name: ISTIO_META_POD_PORTS
value: |-
[
{"name":"http","containerPort":80}
,{"name":"http","containerPort":90}
]
- name: ISTIO_META_APP_CONTAINERS
value: hello,world
- name: ISTIO_META_CLUSTER_ID
value: Kubernetes
- name: ISTIO_META_INTERCEPTION_MODE
value: REDIRECT
- name: ISTIO_META_WORKLOAD_NAME
value: hello
- name: ISTIO_META_OWNER
value: kubernetes://apis/apps/v1/namespaces/default/deployments/hello
- name: ISTIO_META_MESH_ID
value: cluster.local
- name: TRUST_DOMAIN
value: cluster.local
- name: ISTIO_KUBE_APP_PROBERS
value: '{"/app-health/hello/livez":{"httpGet":{"port":80}},"/app-health/hello/readyz":{"httpGet":{"port":3333}},"/app-health/world/livez":{"httpGet":{"port":90}}}'
image: gcr.io/istio-testing/proxyv2:latest
name: istio-proxy
ports:
- containerPort: 15090
name: http-envoy-prom
protocol: TCP
readinessProbe:
failureThreshold: 30
httpGet:
path: /healthz/ready
port: 15021
initialDelaySeconds: 1
periodSeconds: 2
timeoutSeconds: 3
resources:
limits:
cpu: "2"
memory: 1Gi
requests:
cpu: 100m
memory: 128Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsGroup: 1337
runAsNonRoot: true
runAsUser: 1337
volumeMounts:
- mountPath: /var/run/secrets/workload-spiffe-uds
name: workload-socket
- mountPath: /var/run/secrets/credential-uds
name: credential-socket
- mountPath: /var/run/secrets/workload-spiffe-credentials
name: workload-certs
- mountPath: /var/run/secrets/istio
name: istiod-ca-cert
- mountPath: /var/lib/istio/data
name: istio-data
- mountPath: /etc/istio/proxy
name: istio-envoy
- mountPath: /var/run/secrets/tokens
name: istio-token
- mountPath: /etc/istio/pod
name: istio-podinfo
initContainers:
- args:
- istio-iptables
- -p
- "15001"
- -z
- "15006"
- -u
- "1337"
- -m
- REDIRECT
- -i
- '*'
- -x
- ""
- -b
- '*'
- -d
- 15090,15021,15020
- --log_output_level=default:info
image: gcr.io/istio-testing/proxyv2:latest
name: istio-init
resources:
limits:
cpu: "2"
memory: 1Gi
requests:
cpu: 100m
memory: 128Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_ADMIN
- NET_RAW
drop:
- ALL
privileged: false
readOnlyRootFilesystem: false
runAsGroup: 0
runAsNonRoot: false
runAsUser: 0
securityContext:
fsGroup: 1337
volumes:
- name: workload-socket
- name: credential-socket
- name: workload-certs
- emptyDir:
medium: Memory
name: istio-envoy
- emptyDir: {}
name: istio-data
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.labels
path: labels
- fieldRef:
fieldPath: metadata.annotations
path: annotations
name: istio-podinfo
- name: istio-token
projected:
sources:
- serviceAccountToken:
audience: istio-ca
expirationSeconds: 43200
path: istio-token
- configMap:
name: istio-ca-root-cert
name: istiod-ca-cert
status: {}
---
| pkg/kube/inject/testdata/inject/ready_live.yaml.injected | 0 | https://github.com/istio/istio/commit/1de67f0f70829c754359fa2e33ac264461a2b980 | [
0.00019221064576413482,
0.00016889421385712922,
0.0001645791926421225,
0.00016677756502758712,
0.000006652129286521813
] |
{
"id": 0,
"code_window": [
"\tgopkg.in/square/go-jose.v2 v2.6.0\n",
"\tgopkg.in/yaml.v2 v2.4.0\n",
"\tgopkg.in/yaml.v3 v3.0.1\n",
"\thelm.sh/helm/v3 v3.9.0\n",
"\tistio.io/api v0.0.0-20220811073502-ef38878bf5f0\n",
"\tistio.io/client-go v1.12.0-alpha.5.0.20220808181119-e13c0270f465\n",
"\tistio.io/pkg v0.0.0-20220804202146-4787664020d6\n",
"\tk8s.io/api v0.24.2\n",
"\tk8s.io/apiextensions-apiserver v0.24.2\n",
"\tk8s.io/apimachinery v0.24.2\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tistio.io/api v0.0.0-20220812165511-84e2baba34eb\n",
"\tistio.io/client-go v1.12.0-alpha.5.0.20220812165912-faa59fdcfe39\n"
],
"file_path": "go.mod",
"type": "replace",
"edit_start_line_idx": 86
} | /*!
* Bootstrap v3.3.5 (http://getbootstrap.com)
* Copyright 2011-2015 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
*/.btn-danger,.btn-default,.btn-info,.btn-primary,.btn-success,.btn-warning{text-shadow:0 -1px 0 rgba(0,0,0,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075)}.btn-danger.active,.btn-danger:active,.btn-default.active,.btn-default:active,.btn-info.active,.btn-info:active,.btn-primary.active,.btn-primary:active,.btn-success.active,.btn-success:active,.btn-warning.active,.btn-warning:active{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-danger.disabled,.btn-danger[disabled],.btn-default.disabled,.btn-default[disabled],.btn-info.disabled,.btn-info[disabled],.btn-primary.disabled,.btn-primary[disabled],.btn-success.disabled,.btn-success[disabled],.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-danger,fieldset[disabled] .btn-default,fieldset[disabled] .btn-info,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-success,fieldset[disabled] .btn-warning{-webkit-box-shadow:none;box-shadow:none}.btn-danger .badge,.btn-default .badge,.btn-info .badge,.btn-primary .badge,.btn-success .badge,.btn-warning .badge{text-shadow:none}.btn.active,.btn:active{background-image:none}.btn-default{text-shadow:0 1px 0 #fff;background-image:-webkit-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-o-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#e0e0e0));background-image:linear-gradient(to bottom,#fff 0,#e0e0e0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#dbdbdb;border-color:#ccc}.btn-default:focus,.btn-default:hover{background-color:#e0e0e0;background-position:0 -15px}.btn-default.active,.btn-default:active{background-color:#e0e0e0;border-color:#dbdbdb}.btn-default.disabled,.btn-default.disabled.active,.btn-default.disabled.focus,.btn-default.disabled:active,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled],.btn-default[disabled].active,.btn-default[disabled].focus,.btn-default[disabled]:active,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default,fieldset[disabled] .btn-default.active,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:active,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#e0e0e0;background-image:none}.btn-primary{background-image:-webkit-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-o-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#265a88));background-image:linear-gradient(to bottom,#337ab7 0,#265a88 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#245580}.btn-primary:focus,.btn-primary:hover{background-color:#265a88;background-position:0 
-15px}.btn-primary.active,.btn-primary:active{background-color:#265a88;border-color:#245580}.btn-primary.disabled,.btn-primary.disabled.active,.btn-primary.disabled.focus,.btn-primary.disabled:active,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled],.btn-primary[disabled].active,.btn-primary[disabled].focus,.btn-primary[disabled]:active,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-primary.active,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:active,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#265a88;background-image:none}.btn-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#419641));background-image:linear-gradient(to bottom,#5cb85c 0,#419641 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#3e8f3e}.btn-success:focus,.btn-success:hover{background-color:#419641;background-position:0 -15px}.btn-success.active,.btn-success:active{background-color:#419641;border-color:#3e8f3e}.btn-success.disabled,.btn-success.disabled.active,.btn-success.disabled.focus,.btn-success.disabled:active,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled],.btn-success[disabled].active,.btn-success[disabled].focus,.btn-success[disabled]:active,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success,fieldset[disabled] .btn-success.active,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:active,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#419641;background-image:none}.btn-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#2aabd2));background-image:linear-gradient(to bottom,#5bc0de 0,#2aabd2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#28a4c9}.btn-info:focus,.btn-info:hover{background-color:#2aabd2;background-position:0 -15px}.btn-info.active,.btn-info:active{background-color:#2aabd2;border-color:#28a4c9}.btn-info.disabled,.btn-info.disabled.active,.btn-info.disabled.focus,.btn-info.disabled:active,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled],.btn-info[disabled].active,.btn-info[disabled].focus,.btn-info[disabled]:active,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info,fieldset[disabled] .btn-info.active,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:active,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#2aabd2;background-image:none}.btn-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#eb9316));background-image:linear-gradient(to bottom,#f0ad4e 0,#eb9316 
100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#e38d13}.btn-warning:focus,.btn-warning:hover{background-color:#eb9316;background-position:0 -15px}.btn-warning.active,.btn-warning:active{background-color:#eb9316;border-color:#e38d13}.btn-warning.disabled,.btn-warning.disabled.active,.btn-warning.disabled.focus,.btn-warning.disabled:active,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled],.btn-warning[disabled].active,.btn-warning[disabled].focus,.btn-warning[disabled]:active,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning,fieldset[disabled] .btn-warning.active,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:active,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#eb9316;background-image:none}.btn-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c12e2a));background-image:linear-gradient(to bottom,#d9534f 0,#c12e2a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#b92c28}.btn-danger:focus,.btn-danger:hover{background-color:#c12e2a;background-position:0 -15px}.btn-danger.active,.btn-danger:active{background-color:#c12e2a;border-color:#b92c28}.btn-danger.disabled,.btn-danger.disabled.active,.btn-danger.disabled.focus,.btn-danger.disabled:active,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled],.btn-danger[disabled].active,.btn-danger[disabled].focus,.btn-danger[disabled]:active,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger,fieldset[disabled] .btn-danger.active,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:active,fieldset[disabled] .btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#c12e2a;background-image:none}.img-thumbnail,.thumbnail{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{background-color:#e8e8e8;background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{background-color:#2e6da4;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}.navbar-default{background-image:-webkit-linear-gradient(top,#fff 0,#f8f8f8 
100%);background-image:-o-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#f8f8f8));background-image:linear-gradient(to bottom,#fff 0,#f8f8f8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-radius:4px;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075)}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-o-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dbdbdb),to(#e2e2e2));background-image:linear-gradient(to bottom,#dbdbdb 0,#e2e2e2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.075);box-shadow:inset 0 3px 9px rgba(0,0,0,.075)}.navbar-brand,.navbar-nav>li>a{text-shadow:0 1px 0 rgba(255,255,255,.25)}.navbar-inverse{background-image:-webkit-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-o-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#3c3c3c),to(#222));background-image:linear-gradient(to bottom,#3c3c3c 0,#222 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-radius:4px}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-o-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#080808),to(#0f0f0f));background-image:linear-gradient(to bottom,#080808 0,#0f0f0f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.25);box-shadow:inset 0 3px 9px rgba(0,0,0,.25)}.navbar-inverse .navbar-brand,.navbar-inverse .navbar-nav>li>a{text-shadow:0 -1px 0 rgba(0,0,0,.25)}.navbar-fixed-bottom,.navbar-fixed-top,.navbar-static-top{border-radius:0}@media (max-width:767px){.navbar .navbar-nav .open .dropdown-menu>.active>a,.navbar .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}}.alert{text-shadow:0 1px 0 rgba(255,255,255,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05);box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05)}.alert-success{background-image:-webkit-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#c8e5bc 
100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dff0d8),to(#c8e5bc));background-image:linear-gradient(to bottom,#dff0d8 0,#c8e5bc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);background-repeat:repeat-x;border-color:#b2dba1}.alert-info{background-image:-webkit-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#b9def0));background-image:linear-gradient(to bottom,#d9edf7 0,#b9def0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);background-repeat:repeat-x;border-color:#9acfea}.alert-warning{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#f8efc0));background-image:linear-gradient(to bottom,#fcf8e3 0,#f8efc0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);background-repeat:repeat-x;border-color:#f5e79e}.alert-danger{background-image:-webkit-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-o-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#e7c3c3));background-image:linear-gradient(to bottom,#f2dede 0,#e7c3c3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);background-repeat:repeat-x;border-color:#dca7a7}.progress{background-image:-webkit-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#ebebeb),to(#f5f5f5));background-image:linear-gradient(to bottom,#ebebeb 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x}.progress-bar{background-image:-webkit-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-o-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#286090));background-image:linear-gradient(to bottom,#337ab7 0,#286090 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0);background-repeat:repeat-x}.progress-bar-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#449d44));background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);background-repeat:repeat-x}.progress-bar-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#31b0d5));background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', 
GradientType=0);background-repeat:repeat-x}.progress-bar-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#ec971f));background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);background-repeat:repeat-x}.progress-bar-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c9302c));background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);background-repeat:repeat-x}.progress-bar-striped{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.list-group{border-radius:4px;-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{text-shadow:0 -1px 0 #286090;background-image:-webkit-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2b669a));background-image:linear-gradient(to bottom,#337ab7 0,#2b669a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);background-repeat:repeat-x;border-color:#2b669a}.list-group-item.active .badge,.list-group-item.active:focus .badge,.list-group-item.active:hover .badge{text-shadow:none}.panel{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.05);box-shadow:0 1px 2px rgba(0,0,0,.05)}.panel-default>.panel-heading{background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x}.panel-primary>.panel-heading{background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}.panel-success>.panel-heading{background-image:-webkit-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-webkit-gradient(linear,left 
top,left bottom,from(#dff0d8),to(#d0e9c6));background-image:linear-gradient(to bottom,#dff0d8 0,#d0e9c6 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);background-repeat:repeat-x}.panel-info>.panel-heading{background-image:-webkit-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#c4e3f3));background-image:linear-gradient(to bottom,#d9edf7 0,#c4e3f3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);background-repeat:repeat-x}.panel-warning>.panel-heading{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#faf2cc));background-image:linear-gradient(to bottom,#fcf8e3 0,#faf2cc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);background-repeat:repeat-x}.panel-danger>.panel-heading{background-image:-webkit-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-o-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#ebcccc));background-image:linear-gradient(to bottom,#f2dede 0,#ebcccc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);background-repeat:repeat-x}.well{background-image:-webkit-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#e8e8e8),to(#f5f5f5));background-image:linear-gradient(to bottom,#e8e8e8 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x;border-color:#dcdcdc;-webkit-box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1)} | samples/bookinfo/src/productpage/static/bootstrap/css/bootstrap-theme.min.css | 0 | https://github.com/istio/istio/commit/1de67f0f70829c754359fa2e33ac264461a2b980 | [
0.00016418496670667082,
0.00016418496670667082,
0.00016418496670667082,
0.00016418496670667082,
0
] |
{
"id": 3,
"code_window": [
"\t\t\"datadog\": {},\n",
"\t\t\"opencensus\": {},\n",
"\t\t\"skywalking\": {},\n",
"\t\t\"envoy_http_als\": {},\n",
"\t\t\"envoy_tcp_als\": {},\n",
"\t}\n",
"\n",
"\tunexpectedProviders := make([]string, 0)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\"opentelemetry\": {},\n"
],
"file_path": "operator/pkg/util/merge_iop_test.go",
"type": "add",
"edit_start_line_idx": 78
} | module istio.io/istio
go 1.18
// https://github.com/containerd/containerd/issues/5781
exclude k8s.io/kubernetes v1.13.0
// Client-go does not handle different versions of mergo due to some breaking changes - use the matching version
replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.5
require (
cloud.google.com/go/compute v1.7.0
cloud.google.com/go/security v1.4.0
contrib.go.opencensus.io/exporter/prometheus v0.4.1
github.com/AdaLogics/go-fuzz-headers v0.0.0-20220511133513-5ae53b7a8154
github.com/Masterminds/sprig/v3 v3.2.2
github.com/cenkalti/backoff/v4 v4.1.3
github.com/census-instrumentation/opencensus-proto v0.3.0
github.com/cheggaaa/pb/v3 v3.0.8
github.com/cncf/xds/go v0.0.0-20220520190051-1e77728a1eaa
github.com/containernetworking/cni v1.1.1
github.com/containernetworking/plugins v1.1.1
github.com/coreos/go-oidc/v3 v3.2.0
github.com/davecgh/go-spew v1.1.1
github.com/docker/cli v20.10.17+incompatible
github.com/envoyproxy/go-control-plane v0.10.3-0.20220719090109-b024c36d9935
github.com/evanphx/json-patch/v5 v5.6.0
github.com/fatih/color v1.13.0
github.com/felixge/fgprof v0.9.2
github.com/florianl/go-nflog/v2 v2.0.1
github.com/fsnotify/fsnotify v1.5.4
github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.5.2
github.com/google/cel-go v0.11.4
github.com/google/go-cmp v0.5.8
github.com/google/go-containerregistry v0.10.0
github.com/google/gofuzz v1.2.0
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
github.com/google/uuid v1.3.0
github.com/gorilla/mux v1.8.0
github.com/gorilla/websocket v1.5.0
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/go-version v1.5.0
github.com/hashicorp/golang-lru v0.5.4
github.com/kr/pretty v0.3.0
github.com/kylelemons/godebug v1.1.0
github.com/lestrrat-go/jwx v1.2.25
github.com/lucas-clemente/quic-go v0.28.0
github.com/mattn/go-isatty v0.0.14
github.com/miekg/dns v1.1.50
github.com/mitchellh/copystructure v1.2.0
github.com/mitchellh/go-homedir v1.1.0
github.com/moby/buildkit v0.10.3
github.com/onsi/gomega v1.19.0
github.com/openshift/api v0.0.0-20200713203337-b2494ecb17dd
github.com/pmezard/go-difflib v1.0.0
github.com/prometheus/client_golang v1.12.2
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.35.0
github.com/prometheus/prometheus v0.36.2
github.com/ryanuber/go-glob v1.0.0
github.com/spf13/cobra v1.5.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.12.0
github.com/vishvananda/netlink v1.2.1-beta.2
github.com/yl2chen/cidranger v1.0.2
go.opencensus.io v0.23.0
go.opentelemetry.io/proto/otlp v0.18.0
go.uber.org/atomic v1.9.0
go.uber.org/multierr v1.8.0
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e
golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b
golang.org/x/time v0.0.0-20220609170525-579cf78fd858
gomodules.xyz/jsonpatch/v3 v3.0.1
google.golang.org/api v0.85.0
google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03
google.golang.org/grpc v1.47.0
google.golang.org/protobuf v1.28.0
gopkg.in/square/go-jose.v2 v2.6.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
helm.sh/helm/v3 v3.9.0
istio.io/api v0.0.0-20220811073502-ef38878bf5f0
istio.io/client-go v1.12.0-alpha.5.0.20220808181119-e13c0270f465
istio.io/pkg v0.0.0-20220804202146-4787664020d6
k8s.io/api v0.24.2
k8s.io/apiextensions-apiserver v0.24.2
k8s.io/apimachinery v0.24.2
k8s.io/apiserver v0.24.2
k8s.io/cli-runtime v0.24.2
k8s.io/client-go v0.24.2
k8s.io/klog/v2 v2.70.0
k8s.io/kube-openapi v0.0.0-20220413171646-5e7f5fdc6da6
k8s.io/kubectl v0.24.2
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9
sigs.k8s.io/controller-runtime v0.12.2
sigs.k8s.io/gateway-api v0.5.0
sigs.k8s.io/mcs-api v0.1.0
sigs.k8s.io/yaml v1.3.0
)
require (
cloud.google.com/go v0.102.0 // indirect
cloud.google.com/go/logging v1.4.2 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.27 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/BurntSushi/toml v1.0.0 // indirect
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.1.1 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/VividCortex/ewma v1.1.1 // indirect
github.com/alecholmes/xfccparser v0.1.0
github.com/alecthomas/participle v0.4.1 // indirect
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed // indirect
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
github.com/cheekybits/genny v1.0.0 // indirect
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.11.4 // indirect
github.com/containerd/typeurl v1.0.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/docker v20.10.16+incompatible // indirect
github.com/docker/docker-credential-helpers v0.6.4 // indirect
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
github.com/envoyproxy/protoc-gen-validate v0.6.7 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
github.com/fatih/camelcase v1.0.0 // indirect
github.com/fvbommel/sortorder v1.0.1 // indirect
github.com/go-errors/errors v1.0.1 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.6 // indirect
github.com/go-openapi/swag v0.21.1 // indirect
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/goccy/go-json v0.9.7 // indirect
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/btree v1.0.1 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/pprof v0.0.0-20220520215854-d04f2422c8a1 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect
github.com/googleapis/gax-go/v2 v2.4.0 // indirect
github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/huandu/xstrings v1.3.2 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jonboulle/clockwork v0.2.2 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/josharian/native v0.0.0-20200817173448-b6b71def0850 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.15.4 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lestrrat-go/backoff/v2 v2.0.8 // indirect
github.com/lestrrat-go/blackmagic v1.0.0 // indirect
github.com/lestrrat-go/httpcc v1.0.1 // indirect
github.com/lestrrat-go/iter v1.0.1 // indirect
github.com/lestrrat-go/option v1.0.0 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/magiconair/properties v1.8.6 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/marten-seemann/qpack v0.2.1 // indirect
github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect
github.com/marten-seemann/qtls-go1-17 v0.1.2 // indirect
github.com/marten-seemann/qtls-go1-18 v0.1.2 // indirect
github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mdlayher/netlink v1.4.1 // indirect
github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00 // indirect
github.com/mitchellh/go-wordwrap v1.0.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/natefinch/lumberjack v2.0.0+incompatible // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/pelletier/go-toml/v2 v2.0.1 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/prometheus/prom2json v1.3.0 // indirect
github.com/prometheus/statsd_exporter v0.21.0 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/rogpeppe/go-internal v1.8.0 // indirect
github.com/russross/blackfriday v1.6.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shopspring/decimal v1.2.0 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/afero v1.8.2 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect
github.com/stretchr/testify v1.7.1 // indirect
github.com/subosito/gotenv v1.3.0 // indirect
github.com/vbatts/tar-split v0.11.2 // indirect
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xlab/treeprint v1.1.0 // indirect
go.starlark.net v0.0.0-20211013185944-b0039bd2cfe3 // indirect
go.uber.org/zap v1.21.0 // indirect
golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f // indirect
golang.org/x/exp v0.0.0-20220407100705-7b9b53b0aca4
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/tools v0.1.10 // indirect
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
gomodules.xyz/orderedmap v0.1.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.66.4 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
k8s.io/component-base v0.24.2 // indirect
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
sigs.k8s.io/kustomize/api v0.11.4 // indirect
sigs.k8s.io/kustomize/kyaml v0.13.6 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
)
| go.mod | 1 | https://github.com/istio/istio/commit/1de67f0f70829c754359fa2e33ac264461a2b980 | [
0.00021619891049340367,
0.00017266362556256354,
0.0001624217111384496,
0.00017051682516466826,
0.000011804128007497638
] |
{
"id": 3,
"code_window": [
"\t\t\"datadog\": {},\n",
"\t\t\"opencensus\": {},\n",
"\t\t\"skywalking\": {},\n",
"\t\t\"envoy_http_als\": {},\n",
"\t\t\"envoy_tcp_als\": {},\n",
"\t}\n",
"\n",
"\tunexpectedProviders := make([]string, 0)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\"opentelemetry\": {},\n"
],
"file_path": "operator/pkg/util/merge_iop_test.go",
"type": "add",
"edit_start_line_idx": 78
} | apiVersion: release-notes/v2
kind: feature
area: istioctl
issue:
- 31695
docs:
- '[usage] https://istio.io/latest/docs/reference/commands/istioctl/#istioctl-proxy-config'
releaseNotes:
- |
**Added** `istioctl proxy-config -o yaml` to display in yaml along with the current json and short format.
| releasenotes/notes/31695-releasenote.yaml | 0 | https://github.com/istio/istio/commit/1de67f0f70829c754359fa2e33ac264461a2b980 | [
0.00017350954294670373,
0.00017303766799159348,
0.00017256579303648323,
0.00017303766799159348,
4.718749551102519e-7
] |
{
"id": 3,
"code_window": [
"\t\t\"datadog\": {},\n",
"\t\t\"opencensus\": {},\n",
"\t\t\"skywalking\": {},\n",
"\t\t\"envoy_http_als\": {},\n",
"\t\t\"envoy_tcp_als\": {},\n",
"\t}\n",
"\n",
"\tunexpectedProviders := make([]string, 0)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\"opentelemetry\": {},\n"
],
"file_path": "operator/pkg/util/merge_iop_test.go",
"type": "add",
"edit_start_line_idx": 78
} | # Skaffold - https://skaffold.dev/
# ------------------------------------------------ #
# This is for illustration purposes only
# ------------------------------------------------ #
# Installation options & modules
# ------------------------------------------------ #
# istiod - `skaffold run -m istiod`
# ingress - `skaffold run -m ingress`
# Addons:
# - kiali - `skaffold run -m kiali`
# - prometheus - `skaffold run -m prometheus`
# Demos:
# - bookinfo - `skaffold run -m bookinfo`
# ------------------------------------------------ #
# Development mode - skaffold dev #
# ------------------------------------------------- #
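# Example (illustrative, composing the modules listed above): iterate on istiod
# with hot reload, then layer the bookinfo demo on top:
#   skaffold dev -m istiod
#   skaffold run -m bookinfo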
apiVersion: skaffold/v2beta22
kind: Config
metadata:
name: istio-base
profiles:
- name: dev
activation:
- command: dev
deploy:
helm:
releases:
- name: istio-base
chartPath: ../../../manifests/charts/base
namespace: istio-system
createNamespace: true
- name: run
activation:
- command: run
deploy:
helm:
releases:
- name: istio-base
remoteChart: base
repo: https://istio-release.storage.googleapis.com/charts
namespace: istio-system
createNamespace: true
---
apiVersion: skaffold/v2beta22
kind: Config
metadata:
name: istiod
requires:
- configs: [istio-base]
profiles:
- name: dev
activation:
- command: dev
deploy:
helm:
releases:
- name: istiod
chartPath: ../../../manifests/charts/istio-control/istio-discovery
namespace: istio-system
- name: run
activation:
- command: run
deploy:
helm:
releases:
- name: istiod
remoteChart: istiod
repo: https://istio-release.storage.googleapis.com/charts
namespace: istio-system
---
apiVersion: skaffold/v2beta22
kind: Config
metadata:
name: ingress
requires:
- configs: [istiod]
profiles:
- name: dev
activation:
- command: dev
deploy:
helm:
releases:
- name: istio-ingressgateway
chartPath: ../../../manifests/charts/gateway
namespace: istio-system
- name: run
activation:
- command: run
deploy:
helm:
releases:
- name: istio-ingressgateway
remoteChart: gateway
repo: https://istio-release.storage.googleapis.com/charts
namespace: istio-system
---
# https://istio.io/latest/docs/ops/integrations/prometheus/
apiVersion: skaffold/v2beta22
kind: Config
metadata:
name: prometheus
requires:
- configs: [istiod]
deploy:
kubectl:
manifests: ["../../../samples/addons/prometheus.yaml"]
---
apiVersion: skaffold/v2beta22
kind: Config
metadata:
name: kiali
requires:
- configs: [prometheus]
deploy:
helm:
releases:
- name: kiali-server
remoteChart: kiali-server
repo: https://kiali.org/helm-charts
namespace: istio-system
version: v1.44.0
valuesFiles: [../../../manifests/addons/values-kiali.yaml]
---
# Config for https://istio.io/latest/docs/examples/bookinfo/
apiVersion: skaffold/v2beta22
kind: Config
metadata:
name: bookinfo
requires:
- configs: [ingress]
deploy:
kubectl:
hooks:
before:
- host:
command: ["sh", "-c", "kubectl label namespace default istio-injection=enabled --overwrite"]
os: [darwin, linux]
- host:
command: ["cmd.exe", "/C", "kubectl label namespace default istio-injection=enabled --overwrite"]
os: [windows]
manifests:
- "../../../samples/bookinfo/platform/kube/bookinfo.yaml"
- "../../../samples/bookinfo/networking/bookinfo-gateway.yaml"
- "../../../samples/bookinfo/networking/destination-rule-all.yaml"
| samples/cicd/skaffold/skaffold.yaml | 0 | https://github.com/istio/istio/commit/1de67f0f70829c754359fa2e33ac264461a2b980 | [
0.00017789180856198072,
0.00017598847625777125,
0.00017291351105086505,
0.00017674850823823363,
0.000001608515503903618
] |
{
"id": 3,
"code_window": [
"\t\t\"datadog\": {},\n",
"\t\t\"opencensus\": {},\n",
"\t\t\"skywalking\": {},\n",
"\t\t\"envoy_http_als\": {},\n",
"\t\t\"envoy_tcp_als\": {},\n",
"\t}\n",
"\n",
"\tunexpectedProviders := make([]string, 0)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\"opentelemetry\": {},\n"
],
"file_path": "operator/pkg/util/merge_iop_test.go",
"type": "add",
"edit_start_line_idx": 78
} | // Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package virtualservice
import (
"fmt"
"regexp"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/collection"
"istio.io/istio/pkg/config/schema/collections"
)
// RegexAnalyzer checks all regexes in a virtual service
type RegexAnalyzer struct{}
var _ analysis.Analyzer = &RegexAnalyzer{}
// Metadata implements Analyzer
func (a *RegexAnalyzer) Metadata() analysis.Metadata {
return analysis.Metadata{
Name: "virtualservice.RegexAnalyzer",
Description: "Checks regex syntax",
Inputs: collection.Names{
collections.IstioNetworkingV1Alpha3Virtualservices.Name(),
},
}
}
// Analyze implements Analyzer
func (a *RegexAnalyzer) Analyze(ctx analysis.Context) {
ctx.ForEach(collections.IstioNetworkingV1Alpha3Virtualservices.Name(), func(r *resource.Instance) bool {
a.analyzeVirtualService(r, ctx)
return true
})
}
func (a *RegexAnalyzer) analyzeVirtualService(r *resource.Instance, ctx analysis.Context) {
vs := r.Message.(*v1alpha3.VirtualService)
for i, route := range vs.GetHttp() {
for j, m := range route.GetMatch() {
analyzeStringMatch(r, m.GetUri(), ctx, "uri",
fmt.Sprintf(util.URISchemeMethodAuthorityRegexMatch, i, j, "uri"))
analyzeStringMatch(r, m.GetScheme(), ctx, "scheme",
fmt.Sprintf(util.URISchemeMethodAuthorityRegexMatch, i, j, "scheme"))
analyzeStringMatch(r, m.GetMethod(), ctx, "method",
fmt.Sprintf(util.URISchemeMethodAuthorityRegexMatch, i, j, "method"))
analyzeStringMatch(r, m.GetAuthority(), ctx, "authority",
fmt.Sprintf(util.URISchemeMethodAuthorityRegexMatch, i, j, "authority"))
for key, h := range m.GetHeaders() {
analyzeStringMatch(r, h, ctx, "headers",
fmt.Sprintf(util.HeaderAndQueryParamsRegexMatch, i, j, "headers", key))
}
for key, qp := range m.GetQueryParams() {
analyzeStringMatch(r, qp, ctx, "queryParams",
fmt.Sprintf(util.HeaderAndQueryParamsRegexMatch, i, j, "queryParams", key))
}
// We don't validate withoutHeaders, because they are undocumented
}
for j, origin := range route.GetCorsPolicy().GetAllowOrigins() {
analyzeStringMatch(r, origin, ctx, "corsPolicy.allowOrigins",
fmt.Sprintf(util.AllowOriginsRegexMatch, i, j))
}
}
}
func analyzeStringMatch(r *resource.Instance, sm *v1alpha3.StringMatch, ctx analysis.Context, where string, key string) {
re := sm.GetRegex()
if re == "" {
return
}
_, err := regexp.Compile(re)
if err == nil {
return
}
m := msg.NewInvalidRegexp(r, where, re, err.Error())
// Get line number for different match field
if line, ok := util.ErrorLine(r, key); ok {
m.Line = line
}
ctx.Report(collections.IstioNetworkingV1Alpha3Virtualservices.Name(), m)
}
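// Illustration (an assumption, not part of the upstream analyzer): a
// VirtualService match that would be flagged here, since "ab[cd" has an
// unterminated character class and regexp.Compile returns an error:
//
//	http:
//	- match:
//	  - uri:
//	      regex: "ab[cd" # invalid regex -> reported via msg.NewInvalidRegexp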
| pkg/config/analysis/analyzers/virtualservice/regexes.go | 0 | https://github.com/istio/istio/commit/1de67f0f70829c754359fa2e33ac264461a2b980 | [
0.0002511664351914078,
0.00018045403703581542,
0.00016837815928738564,
0.00017479865346103907,
0.000022551843358087353
] |
{
"id": 4,
"code_window": [
" envoyHttpAls:\n",
" - name: envoyTcpAls\n",
" envoyTcpAls:\n",
" components:\n",
" egressGateways:\n",
" - name: istio-egressgateway\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" - name: opentelemetry\n",
" opentelemetry:\n"
],
"file_path": "operator/pkg/util/testdata/overlay-iop.yaml",
"type": "add",
"edit_start_line_idx": 35
} | // Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"os"
"path/filepath"
"testing"
"sigs.k8s.io/yaml"
meshconfig "istio.io/api/mesh/v1alpha1"
v1alpha12 "istio.io/api/operator/v1alpha1"
"istio.io/istio/operator/pkg/apis/istio/v1alpha1"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/test/env"
"istio.io/istio/pkg/util/protomarshal"
)
func TestOverlayIOP(t *testing.T) {
cases := []struct {
path string
}{
{
filepath.Join(env.IstioSrc, "manifests/profiles/default.yaml"),
},
{
filepath.Join(env.IstioSrc, "manifests/profiles/demo.yaml"),
},
{
filepath.Join("testdata", "overlay-iop.yaml"),
},
}
for _, tc := range cases {
t.Run(tc.path, func(t *testing.T) {
b, err := os.ReadFile(tc.path)
if err != nil {
t.Fatal(err)
}
// Overlaying the tree over itself exercises all paths for merging.
if _, err := OverlayIOP(string(b), string(b)); err != nil {
t.Fatal(err)
}
})
}
}
// TestOverlayIOPExhaustiveness is an exhaustiveness check of `OverlayIOP`.
// Whenever someone adds a new `Provider` to the API, `wellknownProviders` should be
// updated and a matching entry added to `meshConfigExtensionProvider`.
func TestOverlayIOPExhaustiveness(t *testing.T) {
wellknownProviders := map[string]struct{}{
"prometheus": {},
"envoy_file_access_log": {},
"stackdriver": {},
"envoy_otel_als": {},
"envoy_ext_authz_http": {},
"envoy_ext_authz_grpc": {},
"zipkin": {},
"lightstep": {},
"datadog": {},
"opencensus": {},
"skywalking": {},
"envoy_http_als": {},
"envoy_tcp_als": {},
}
unexpectedProviders := make([]string, 0)
msg := &meshconfig.MeshConfig_ExtensionProvider{}
pb := msg.ProtoReflect()
md := pb.Descriptor()
of := md.Oneofs().Get(0)
for i := 0; i < of.Fields().Len(); i++ {
o := of.Fields().Get(i)
n := string(o.Name())
if _, ok := wellknownProviders[n]; ok {
delete(wellknownProviders, n)
} else {
unexpectedProviders = append(unexpectedProviders, n)
}
}
if len(wellknownProviders) != 0 || len(unexpectedProviders) != 0 {
t.Errorf("unexpected provider not implemented in OverlayIOP, wellknownProviders: %v unexpectedProviders: %v", wellknownProviders, unexpectedProviders)
t.Fail()
}
}
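// For illustration: when a new provider such as "opentelemetry" is added to
// the MeshConfig API, the fix for the exhaustiveness check above is a
// one-line map entry,
//
//	"opentelemetry": {},
//
// plus a matching provider block in testdata/overlay-iop.yaml so the overlay
// test exercises it as well.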
func TestOverlayIOPDefaultMeshConfig(t *testing.T) {
// Transform default mesh config into map[string]interface{} for inclusion in IstioOperator.
m := mesh.DefaultMeshConfig()
my, err := protomarshal.ToJSONMap(m)
if err != nil {
t.Fatal(err)
}
iop := &v1alpha1.IstioOperator{
Spec: &v1alpha12.IstioOperatorSpec{
MeshConfig: MustStruct(my),
},
}
iy, err := yaml.Marshal(iop)
if err != nil {
t.Fatal(err)
}
// Overlaying the tree over itself exercises all paths for merging.
if _, err := OverlayIOP(string(iy), string(iy)); err != nil {
t.Fatal(err)
}
}
func TestOverlayIOPIngressGatewayLabel(t *testing.T) {
l1, err := os.ReadFile("testdata/yaml/input/yaml_layer1.yaml")
if err != nil {
t.Fatal(err)
}
l2, err := os.ReadFile("testdata/yaml/input/yaml_layer2.yaml")
if err != nil {
t.Fatal(err)
}
if _, err := OverlayIOP(string(l1), string(l2)); err != nil {
t.Fatal(err)
}
}
| operator/pkg/util/merge_iop_test.go | 1 | https://github.com/istio/istio/commit/1de67f0f70829c754359fa2e33ac264461a2b980 | [
0.0010293625527992845,
0.0002622053725644946,
0.00016270988271571696,
0.0001778434670995921,
0.0002246734220534563
] |
{
"id": 4,
"code_window": [
" envoyHttpAls:\n",
" - name: envoyTcpAls\n",
" envoyTcpAls:\n",
" components:\n",
" egressGateways:\n",
" - name: istio-egressgateway\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" - name: opentelemetry\n",
" opentelemetry:\n"
],
"file_path": "operator/pkg/util/testdata/overlay-iop.yaml",
"type": "add",
"edit_start_line_idx": 35
} | -----BEGIN CERTIFICATE-----
MIID7TCCAtWgAwIBAgIJAOIRDhOcxsx6MA0GCSqGSIb3DQEBCwUAMIGLMQswCQYD
VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJU3Vubnl2YWxl
MQ4wDAYDVQQKDAVJc3RpbzENMAsGA1UECwwEVGVzdDEQMA4GA1UEAwwHUm9vdCBD
QTEiMCAGCSqGSIb3DQEJARYTdGVzdHJvb3RjYUBpc3Rpby5pbzAgFw0xODAxMjQx
OTE1NTFaGA8yMTE3MTIzMTE5MTU1MVowgYsxCzAJBgNVBAYTAlVTMRMwEQYDVQQI
DApDYWxpZm9ybmlhMRIwEAYDVQQHDAlTdW5ueXZhbGUxDjAMBgNVBAoMBUlzdGlv
MQ0wCwYDVQQLDARUZXN0MRAwDgYDVQQDDAdSb290IENBMSIwIAYJKoZIhvcNAQkB
FhN0ZXN0cm9vdGNhQGlzdGlvLmlvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEA38uEfAatzQYqbaLou1nxJ348VyNzumYMmDDt5pbLYRrCo2pS3ki1ZVDN
8yxIENJFkpKw9UctTGdbNGuGCiSDP7uqF6BiVn+XKAU/3pnPFBbTd0S33NqbDEQu
IYraHSl/tSk5rARbC1DrQRdZ6nYD2KrapC4g0XbjY6Pu5l4y7KnFwSunnp9uqpZw
uERv/BgumJ5QlSeSeCmhnDhLxooG8w5tC2yVr1yDpsOHGimP/mc8Cds4V0zfIhQv
YzfIHphhE9DKjmnjBYLOdj4aycv44jHnOGc+wvA1Jqsl60t3wgms+zJTiWwABLdw
zgMAa7yxLyoV0+PiVQud6k+8ZoIFcwIDAQABo1AwTjAdBgNVHQ4EFgQUOUYGtUyh
euxO4lGe4Op1y8NVoagwHwYDVR0jBBgwFoAUOUYGtUyheuxO4lGe4Op1y8NVoagw
DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEANXLyfAs7J9rmBamGJvPZ
ltx390WxzzLFQsBRAaH6rgeipBq3dR9qEjAwb6BTF+ROmtQzX+fjstCRrJxCto9W
tC8KvXTdRfIjfCCZjhtIOBKqRxE4KJV/RBfv9xD5lyjtCPCQl3Ia6MSf42N+abAK
WCdU6KCojA8WB9YhSCzza3aQbPTzd26OC/JblJpVgtus5f8ILzCsz+pbMimgTkhy
AuhYRppJaQ24APijsEC9+GIaVKPg5IwWroiPoj+QXNpshuvqVQQXvGaRiq4zoSnx
xAJz+w8tjrDWcf826VN14IL+/Cmqlg/rIfB5CHdwVIfWwpuGB66q/UiPegZMNs8a
3g==
-----END CERTIFICATE-----
| samples/certs/root-cert.pem | 0 | https://github.com/istio/istio/commit/1de67f0f70829c754359fa2e33ac264461a2b980 | [
0.010433722287416458,
0.006154973059892654,
0.0002064037835225463,
0.007824794389307499,
0.004339027218520641
] |
{
"id": 4,
"code_window": [
" envoyHttpAls:\n",
" - name: envoyTcpAls\n",
" envoyTcpAls:\n",
" components:\n",
" egressGateways:\n",
" - name: istio-egressgateway\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" - name: opentelemetry\n",
" opentelemetry:\n"
],
"file_path": "operator/pkg/util/testdata/overlay-iop.yaml",
"type": "add",
"edit_start_line_idx": 35
} | // Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"encoding/json"
"sort"
"strings"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/util/sets"
)
const (
wildcardNamespace = "*"
currentNamespace = "."
wildcardService = host.Name("*")
)
var (
sidecarScopeKnownConfigTypes = map[kind.Kind]struct{}{
kind.ServiceEntry: {},
kind.VirtualService: {},
kind.DestinationRule: {},
kind.Sidecar: {},
}
// clusterScopedConfigTypes includes config types that, when defined in the root namespace,
// are applied to all namespaces within the cluster.
clusterScopedConfigTypes = map[kind.Kind]struct{}{
kind.EnvoyFilter: {},
kind.AuthorizationPolicy: {},
kind.RequestAuthentication: {},
}
)
// SidecarScope is a wrapper over the Sidecar resource with some
// preprocessed data to determine the list of services, virtualServices,
// and destinationRules that are accessible to a given
// sidecar. Precomputing the list of services, virtual services, dest rules
// for a sidecar improves performance as we no longer need to compute this
// list for every sidecar. We simply have to match a sidecar to a
// SidecarScope. Note that this is not the same as public/private scoped
// services. The list of services seen by every sidecar scope (namespace
// wide or per workload) depends on the imports, the listeners, and other
// settings.
//
// Every proxy workload of SidecarProxy type will always map to a
// SidecarScope object. If the proxy's namespace does not have a user
// specified Sidecar CRD, we will construct one that has a catch all egress
// listener that imports every public service/virtualService in the mesh.
type SidecarScope struct {
Name string
// This is the namespace where the sidecar takes effect,
// maybe different from the ns where sidecar resides if sidecar is in root ns.
Namespace string
// The crd itself. Can be nil if we are constructing the default
// sidecar scope
Sidecar *networking.Sidecar
// Version this sidecar was computed for
Version string
// Set of egress listeners, and their associated services. A sidecar
// scope should have either ingress/egress listeners or both. For
// every proxy workload that maps to a sidecar API object (or the
// default object), we will go through every egress listener in the
// object and process the Envoy listener or RDS based on the imported
// services/virtual services in that listener.
EgressListeners []*IstioEgressListenerWrapper
// Union of services imported across all egress listeners for use by CDS code.
services []*Service
servicesByHostname map[host.Name]*Service
// Destination rules imported across all egress listeners. This
// contains the computed set based on public/private destination rules
// as well as the inherited ones, in addition to the wildcard matches
// such as *.com applying to foo.bar.com. Each hostname in this map
// corresponds to a service in the services array above. When computing
// CDS, we simply have to find the matching service and return the
// destination rule.
destinationRules map[host.Name][]*ConsolidatedDestRule
// OutboundTrafficPolicy defines the outbound traffic policy for this sidecar.
// If OutboundTrafficPolicy is ALLOW_ANY traffic to unknown destinations will
// be forwarded.
OutboundTrafficPolicy *networking.OutboundTrafficPolicy
// Set of known configs this sidecar depends on.
// This field will be used to determine the config/resource scope,
// i.e. which config changes will affect the proxies within this scope.
configDependencies map[ConfigHash]struct{}
// The namespace to treat as the administrative root namespace for
// Istio configuration.
//
// Changes to Sidecar resources in this namespace will trigger a push.
RootNamespace string
}
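// For illustration only (an assumption, not upstream documentation): a minimal
// Sidecar resource that yields a scope equivalent to the default catch-all
// egress listener described above; the namespace "foo" is hypothetical.
//
//	apiVersion: networking.istio.io/v1alpha3
//	kind: Sidecar
//	metadata:
//	  name: default
//	  namespace: foo
//	spec:
//	  egress:
//	  - hosts:
//	    - "*/*" # import every service visible to namespace foo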
// MarshalJSON implements json.Marshaler
func (sc *SidecarScope) MarshalJSON() ([]byte, error) {
// JSON marshaling cannot expose unexported fields, so copy the ones we want here
return json.MarshalIndent(map[string]any{
"version": sc.Version,
"rootNamespace": sc.RootNamespace,
"name": sc.Name,
"namespace": sc.Namespace,
"outboundTrafficPolicy": sc.OutboundTrafficPolicy,
"services": sc.services,
"sidecar": sc.Sidecar,
"destinationRules": sc.destinationRules,
}, "", " ")
}
// IstioEgressListenerWrapper is a wrapper for the
// networking.IstioEgressListener object. The wrapper provides performance
// optimizations as it allows us to precompute and store the list of
// services/virtualServices that apply to this listener.
type IstioEgressListenerWrapper struct {
// The actual IstioEgressListener api object from the Config. It can be
// nil if this is for the default sidecar scope.
IstioListener *networking.IstioEgressListener
// List of services imported by this egress listener above.
// This will be used by LDS and RDS code when
// building the set of virtual hosts or the tcp filterchain matches for
// a given listener port. Two listeners, on user specified ports or
// unix domain sockets could have completely different sets of
// services. So a global list of services per sidecar scope will be
// incorrect. Hence the per listener set of services.
services []*Service
// List of virtual services imported by this egress listener above.
// As with per listener services, this
// will be used by RDS code to compute the virtual host configs for
// http listeners, as well as by TCP/TLS filter code to compute the
// service routing configs and the filter chain matches. We need a
// virtualService set per listener and not one per sidecarScope because
// each listener imports an independent set of virtual services.
// Listener 1 could import a public virtual service for serviceA from
// namespace A that has some path rewrite, while listener2 could import
// a private virtual service for serviceA from the local namespace,
// with a different path rewrite or no path rewrites.
virtualServices []config.Config
listenerHosts map[string][]host.Name
}
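// Sketch (assumption, for illustration): two egress listeners whose imports
// differ, which is why services and virtualServices are stored per listener
// rather than once per SidecarScope:
//
//	egress:
//	- port:
//	    number: 9080
//	    protocol: HTTP
//	    name: http
//	  hosts: ["ns-a/*"]                              # listener 1: all of ns-a
//	- hosts: ["ns-b/reviews.ns-b.svc.cluster.local"] # listener 2: one service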
const defaultSidecar = "default-sidecar"
// DefaultSidecarScopeForNamespace returns a sidecar scope object with a default catch-all egress listener
// that matches the default Istio behavior: a sidecar has listeners for all services in the mesh.
// We use this scope when the user has not set any sidecar Config for a given config namespace.
func DefaultSidecarScopeForNamespace(ps *PushContext, configNamespace string) *SidecarScope {
defaultEgressListener := &IstioEgressListenerWrapper{
IstioListener: &networking.IstioEgressListener{
Hosts: []string{"*/*"},
},
}
defaultEgressListener.services = ps.servicesExportedToNamespace(configNamespace)
defaultEgressListener.virtualServices = ps.VirtualServicesForGateway(configNamespace, constants.IstioMeshGateway)
out := &SidecarScope{
Name: defaultSidecar,
Namespace: configNamespace,
EgressListeners: []*IstioEgressListenerWrapper{defaultEgressListener},
services: defaultEgressListener.services,
destinationRules: make(map[host.Name][]*ConsolidatedDestRule),
servicesByHostname: make(map[host.Name]*Service, len(defaultEgressListener.services)),
configDependencies: make(map[ConfigHash]struct{}),
RootNamespace: ps.Mesh.RootNamespace,
Version: ps.PushVersion,
}
// Now that we have all the services that sidecars using this scope (in
// this config namespace) will see, identify all the destinationRules
// that these services need
for _, s := range out.services {
// In some scenarios, there may be multiple Services defined for the same hostname due to ServiceEntry allowing
// arbitrary hostnames. In these cases, we want to pick the first Service, which is the oldest. This ensures
// newly created Services cannot take ownership unexpectedly.
// However, if the Service is from Kubernetes, it should take precedence over ones that are not. This prevents
// someone from "domain squatting" on the hostname before a Kubernetes Service is created.
// This relies on the assumption that the service list is ordered, with the oldest Service first.
if existing, f := out.servicesByHostname[s.Hostname]; f &&
!(existing.Attributes.ServiceRegistry != provider.Kubernetes && s.Attributes.ServiceRegistry == provider.Kubernetes) {
continue
}
out.servicesByHostname[s.Hostname] = s
if dr := ps.destinationRule(configNamespace, s); dr != nil {
out.destinationRules[s.Hostname] = dr
}
out.AddConfigDependencies(ConfigKey{
Kind: kind.ServiceEntry,
Name: string(s.Hostname),
Namespace: s.Attributes.Namespace,
}.HashCode())
}
for _, drList := range out.destinationRules {
for _, dr := range drList {
for _, namespacedName := range dr.from {
out.AddConfigDependencies(ConfigKey{
Kind: kind.DestinationRule,
Name: namespacedName.Name,
Namespace: namespacedName.Namespace,
}.HashCode())
}
}
}
for _, el := range out.EgressListeners {
// add dependencies on delegate virtual services
delegates := ps.DelegateVirtualServices(el.virtualServices)
for _, delegate := range delegates {
out.AddConfigDependencies(delegate)
}
for _, vs := range el.virtualServices {
out.AddConfigDependencies(ConfigKey{
Kind: kind.VirtualService,
Name: vs.Name,
Namespace: vs.Namespace,
}.HashCode())
}
}
if ps.Mesh.OutboundTrafficPolicy != nil {
out.OutboundTrafficPolicy = &networking.OutboundTrafficPolicy{
Mode: networking.OutboundTrafficPolicy_Mode(ps.Mesh.OutboundTrafficPolicy.Mode),
}
}
return out
}
// ConvertToSidecarScope converts from Sidecar config to SidecarScope object
func ConvertToSidecarScope(ps *PushContext, sidecarConfig *config.Config, configNamespace string) *SidecarScope {
if sidecarConfig == nil {
return DefaultSidecarScopeForNamespace(ps, configNamespace)
}
sidecar := sidecarConfig.Spec.(*networking.Sidecar)
out := &SidecarScope{
Name: sidecarConfig.Name,
Namespace: configNamespace,
Sidecar: sidecar,
configDependencies: make(map[ConfigHash]struct{}),
RootNamespace: ps.Mesh.RootNamespace,
Version: ps.PushVersion,
}
out.AddConfigDependencies(ConfigKey{
Kind: kind.Sidecar,
Name: sidecarConfig.Name,
Namespace: sidecarConfig.Namespace,
}.HashCode())
egressConfigs := sidecar.Egress
// If egress is not set, set up a default listener
if len(egressConfigs) == 0 {
egressConfigs = append(egressConfigs, &networking.IstioEgressListener{Hosts: []string{"*/*"}})
}
out.EgressListeners = make([]*IstioEgressListenerWrapper, 0, len(egressConfigs))
for _, e := range egressConfigs {
out.EgressListeners = append(out.EgressListeners,
convertIstioListenerToWrapper(ps, configNamespace, e))
}
// Now collect all the imported services across all egress listeners in
// this sidecar crd. This is needed to generate CDS output
out.services = make([]*Service, 0)
type serviceIndex struct {
svc *Service
index int // index records the position of the svc in the slice
}
servicesAdded := make(map[host.Name]serviceIndex)
addService := func(s *Service) {
if s == nil {
return
}
if foundSvc, found := servicesAdded[s.Hostname]; !found {
out.AddConfigDependencies(ConfigKey{
Kind: kind.ServiceEntry,
Name: string(s.Hostname),
Namespace: s.Attributes.Namespace,
}.HashCode())
out.services = append(out.services, s)
servicesAdded[s.Hostname] = serviceIndex{s, len(out.services) - 1}
} else if foundSvc.svc.Attributes.Namespace == s.Attributes.Namespace && len(s.Ports) > 0 {
// merge the ports into the existing service when each listener generates a partial service;
// we only merge if the found service is in the same namespace as the one we're trying to add
copied := foundSvc.svc.DeepCopy()
for _, p := range s.Ports {
found := false
for _, osp := range copied.Ports {
if p.Port == osp.Port {
found = true
break
}
}
if !found {
copied.Ports = append(copied.Ports, p)
}
}
// replace service in slice
out.services[foundSvc.index] = copied
// Update index as well, so that future reads will merge into the new service
foundSvc.svc = copied
servicesAdded[foundSvc.svc.Hostname] = foundSvc
}
}
for _, listener := range out.EgressListeners {
// First add the explicitly requested services, which take priority
for _, s := range listener.services {
addService(s)
}
// add dependencies on delegate virtual services
delegates := ps.DelegateVirtualServices(listener.virtualServices)
for _, delegate := range delegates {
out.AddConfigDependencies(delegate)
}
matchPort := needsPortMatch(listener)
// Infer more possible destinations from virtual services
// Services chosen here will not override services explicitly requested in listener.services.
// That way, if there is ambiguity around what hostname to pick, a user can specify the one they
// want in the hosts field, and the potentially random choice below won't matter
for _, vs := range listener.virtualServices {
v := vs.Spec.(*networking.VirtualService)
out.AddConfigDependencies(ConfigKey{
Kind: kind.VirtualService,
Name: vs.Name,
Namespace: vs.Namespace,
}.HashCode())
for h, ports := range virtualServiceDestinations(v) {
// Default to this hostname in our config namespace
if s, ok := ps.ServiceIndex.HostnameAndNamespace[host.Name(h)][configNamespace]; ok {
					// This won't overwrite hostnames that have already been found, e.g. because they were requested in hosts
var vss *Service
if matchPort {
vss = serviceMatchingListenerPort(s, listener)
} else {
vss = serviceMatchingVirtualServicePorts(s, ports)
}
if vss != nil {
addService(vss)
}
} else {
// We couldn't find the hostname in our config namespace
// We have to pick one arbitrarily for now, so we'll pick the first namespace alphabetically
// TODO: could we choose services more intelligently based on their ports?
byNamespace := ps.ServiceIndex.HostnameAndNamespace[host.Name(h)]
if len(byNamespace) == 0 {
// This hostname isn't found anywhere
log.Debugf("Could not find service hostname %s parsed from %s", h, vs.Key())
continue
}
ns := make([]string, 0, len(byNamespace))
for k := range byNamespace {
if ps.IsServiceVisible(byNamespace[k], configNamespace) {
ns = append(ns, k)
}
}
if len(ns) > 0 {
sort.Strings(ns)
// Pick first namespace alphabetically
						// This won't overwrite hostnames that have already been found, e.g. because they were requested in hosts
var vss *Service
if matchPort {
vss = serviceMatchingListenerPort(byNamespace[ns[0]], listener)
} else {
vss = serviceMatchingVirtualServicePorts(byNamespace[ns[0]], ports)
}
if vss != nil {
addService(vss)
}
}
}
}
}
}
// Now that we have all the services that sidecars using this scope (in
// this config namespace) will see, identify all the destinationRules
// that these services need
out.servicesByHostname = make(map[host.Name]*Service, len(out.services))
out.destinationRules = make(map[host.Name][]*ConsolidatedDestRule)
for _, s := range out.services {
out.servicesByHostname[s.Hostname] = s
drList := ps.destinationRule(configNamespace, s)
if drList != nil {
out.destinationRules[s.Hostname] = drList
for _, dr := range drList {
for _, key := range dr.from {
out.AddConfigDependencies(ConfigKey{
Kind: kind.DestinationRule,
Name: key.Name,
Namespace: key.Namespace,
}.HashCode())
}
}
}
}
if sidecar.OutboundTrafficPolicy == nil {
if ps.Mesh.OutboundTrafficPolicy != nil {
out.OutboundTrafficPolicy = &networking.OutboundTrafficPolicy{
Mode: networking.OutboundTrafficPolicy_Mode(ps.Mesh.OutboundTrafficPolicy.Mode),
}
}
} else {
out.OutboundTrafficPolicy = sidecar.OutboundTrafficPolicy
}
return out
}
func convertIstioListenerToWrapper(ps *PushContext, configNamespace string,
istioListener *networking.IstioEgressListener,
) *IstioEgressListenerWrapper {
out := &IstioEgressListenerWrapper{
IstioListener: istioListener,
}
out.listenerHosts = make(map[string][]host.Name)
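	// Each host entry is of the form "ns/dnsName"; the currentNamespace shorthand
	// in the namespace part is rewritten to this scope's own config namespace.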
for _, h := range istioListener.Hosts {
parts := strings.SplitN(h, "/", 2)
if len(parts) < 2 {
log.Errorf("Illegal host in sidecar resource: %s, host must be of form namespace/dnsName", h)
continue
}
if parts[0] == currentNamespace {
parts[0] = configNamespace
}
if _, exists := out.listenerHosts[parts[0]]; !exists {
out.listenerHosts[parts[0]] = make([]host.Name, 0)
}
out.listenerHosts[parts[0]] = append(out.listenerHosts[parts[0]], host.Name(parts[1]))
}
vses := ps.VirtualServicesForGateway(configNamespace, constants.IstioMeshGateway)
out.virtualServices = SelectVirtualServices(vses, out.listenerHosts)
svces := ps.servicesExportedToNamespace(configNamespace)
out.services = out.selectServices(svces, configNamespace, out.listenerHosts)
return out
}
// GetEgressListenerForRDS returns the egress listener corresponding to
// the listener port or the bind address or the catch all listener
func (sc *SidecarScope) GetEgressListenerForRDS(port int, bind string) *IstioEgressListenerWrapper {
if sc == nil {
return nil
}
for _, e := range sc.EgressListeners {
		// We hit a catch-all listener. This is the last listener in the list of
		// listeners, so return it as is.
if e.IstioListener == nil || e.IstioListener.Port == nil {
return e
}
// Check if the ports match
// for unix domain sockets (i.e. port == 0), check if the bind is equal to the routeName
if int(e.IstioListener.Port.Number) == port {
if port == 0 { // unix domain socket
if e.IstioListener.Bind == bind {
return e
}
				// no match; continue searching
continue
}
// this is a non-zero port match
return e
}
}
	// This should never be reached unless the user explicitly sets an empty array of
	// egress listeners, which we forbid.
return nil
}
// HasIngressListener returns whether the sidecar scope has an ingress listener set.
func (sc *SidecarScope) HasIngressListener() bool {
if sc == nil {
return false
}
if sc.Sidecar == nil || len(sc.Sidecar.Ingress) == 0 {
return false
}
return true
}
// Services returns the list of services imported by this egress listener
func (ilw *IstioEgressListenerWrapper) Services() []*Service {
return ilw.services
}
// VirtualServices returns the list of virtual services imported by this
// egress listener
func (ilw *IstioEgressListenerWrapper) VirtualServices() []config.Config {
return ilw.virtualServices
}
// DependsOnConfig determines if the proxy depends on the given config.
// It returns true if the proxy depends on the given config, or if this kind of config is not scoped here (so the dependency is unknown).
func (sc *SidecarScope) DependsOnConfig(config ConfigKey) bool {
if sc == nil {
return true
}
// This kind of config will trigger a change if made in the root namespace or the same namespace
if _, f := clusterScopedConfigTypes[config.Kind]; f {
return config.Namespace == sc.RootNamespace || config.Namespace == sc.Namespace
}
// This kind of config is unknown to sidecarScope.
if _, f := sidecarScopeKnownConfigTypes[config.Kind]; !f {
return true
}
_, exists := sc.configDependencies[config.HashCode()]
return exists
}
// AddConfigDependencies adds extra config dependencies to this scope. This must be
// done before the SidecarScope is used, to avoid concurrent read/write.
func (sc *SidecarScope) AddConfigDependencies(dependencies ...ConfigHash) {
if sc == nil {
return
}
if sc.configDependencies == nil {
sc.configDependencies = make(map[ConfigHash]struct{})
}
for _, config := range dependencies {
sc.configDependencies[config] = struct{}{}
}
}
// DestinationRule returns a destination rule for the given service hostname.
func (sc *SidecarScope) DestinationRule(direction TrafficDirection, proxy *Proxy, svc host.Name) *ConsolidatedDestRule {
destinationRules := sc.destinationRules[svc]
var catchAllDr *ConsolidatedDestRule
for _, destRule := range destinationRules {
destinationRule := destRule.rule.Spec.(*networking.DestinationRule)
if destinationRule.GetWorkloadSelector() == nil {
catchAllDr = destRule
}
		// filter DestinationRule based on workloadSelector for outbound configs.
		// WorkloadSelector configuration is honored only for outbound configuration, because
		// for inbound configuration, the settings on the sidecar are more explicit and the preferred way forward.
if sc.Namespace == destRule.rule.Namespace &&
destinationRule.GetWorkloadSelector() != nil && direction == TrafficDirectionOutbound {
workloadSelector := labels.Instance(destinationRule.GetWorkloadSelector().GetMatchLabels())
// return destination rule if workload selector matches
if workloadSelector.SubsetOf(proxy.Labels) {
return destRule
}
}
}
	// If there is no workload-specific destinationRule, return the wildcarded DR if present.
if catchAllDr != nil {
return catchAllDr
}
return nil
}
// Services returns the list of services that are visible to a sidecar.
func (sc *SidecarScope) Services() []*Service {
return sc.services
}
// selectServices returns the services filtered through the hosts field in the egress
// portion of the Sidecar config. Note that a returned service may be trimmed to a
// subset of its ports.
func (ilw *IstioEgressListenerWrapper) selectServices(services []*Service, configNamespace string, hosts map[string][]host.Name) []*Service {
importedServices := make([]*Service, 0)
wildcardHosts, wnsFound := hosts[wildcardNamespace]
for _, s := range services {
		svcNamespace := s.Attributes.Namespace
		// Check if there is an explicit import of form ns/* or ns/host
		if importedHosts, nsFound := hosts[svcNamespace]; nsFound {
if svc := matchingService(importedHosts, s, ilw); svc != nil {
importedServices = append(importedServices, svc)
continue
}
}
if wnsFound { // Check if there is an import of form */host or */*
if svc := matchingService(wildcardHosts, s, ilw); svc != nil {
importedServices = append(importedServices, svc)
}
}
}
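	// Deduplicate by hostname: when the same hostname was imported from multiple
	// namespaces, select a single winning namespace below.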
validServices := make(map[host.Name]string)
for _, svc := range importedServices {
_, f := validServices[svc.Hostname]
// Select a single namespace for a given hostname.
// If the same hostname is imported from multiple namespaces, pick the one in the configNamespace
// If neither are in configNamespace, an arbitrary one will be chosen
if !f || svc.Attributes.Namespace == configNamespace {
validServices[svc.Hostname] = svc.Attributes.Namespace
}
}
filteredServices := make([]*Service, 0)
	// Filter down to just the services whose winning namespace was selected above
for _, svc := range importedServices {
if validServices[svc.Hostname] == svc.Attributes.Namespace {
filteredServices = append(filteredServices, svc)
}
}
return filteredServices
}
// matchingService returns the original service, or a trimmed service that has a subset of the original service's ports.
func matchingService(importedHosts []host.Name, service *Service, ilw *IstioEgressListenerWrapper) *Service {
matchPort := needsPortMatch(ilw)
for _, importedHost := range importedHosts {
// Check if the hostnames match per usual hostname matching rules
if service.Hostname.SubsetOf(importedHost) {
if matchPort {
return serviceMatchingListenerPort(service, ilw)
}
return service
}
}
return nil
}
// serviceMatchingListenerPort constructs a copy of the service restricted to the listener port.
func serviceMatchingListenerPort(service *Service, ilw *IstioEgressListenerWrapper) *Service {
for _, port := range service.Ports {
if port.Port == int(ilw.IstioListener.Port.GetNumber()) {
sc := service.DeepCopy()
sc.Ports = []*Port{port}
return sc
}
}
return nil
}
func serviceMatchingVirtualServicePorts(service *Service, vsDestPorts sets.IntSet) *Service {
// A value of 0 in vsDestPorts is used as a sentinel to indicate a dependency
// on every port of the service.
if len(vsDestPorts) == 0 || vsDestPorts.Contains(0) {
return service
}
foundPorts := make([]*Port, 0)
for _, port := range service.Ports {
if vsDestPorts.Contains(port.Port) {
foundPorts = append(foundPorts, port)
}
}
if len(foundPorts) == len(service.Ports) {
return service
}
if len(foundPorts) > 0 {
sc := service.DeepCopy()
sc.Ports = foundPorts
return sc
}
// If the service has more than one port, and the Virtual Service only
// specifies destination ports not found in the service, we'll simply
// not add the service to the sidecar as an optimization, because
// traffic will not route properly anyway. This matches the above
// behavior in serviceMatchingListenerPort for ports specified on the
// sidecar egress listener.
log.Warnf("Failed to find any VirtualService destination ports %v exposed by Service %s", vsDestPorts, service.Hostname)
return nil
}
func needsPortMatch(ilw *IstioEgressListenerWrapper) bool {
	// If a listener is defined with a port, we should match services by port, except in the following case:
	//  - If the port's protocol is proxy protocol (HTTP_PROXY), in which case the egress listener is used as a generic egress HTTP proxy.
return ilw.IstioListener != nil && ilw.IstioListener.Port.GetNumber() != 0 &&
protocol.Parse(ilw.IstioListener.Port.Protocol) != protocol.HTTP_PROXY
}
| pilot/pkg/model/sidecar.go | 0 | https://github.com/istio/istio/commit/1de67f0f70829c754359fa2e33ac264461a2b980 | [
0.02169170044362545,
0.0013274478260427713,
0.00016002678603399545,
0.00017319715698249638,
0.0035816384479403496
] |
{
"id": 4,
"code_window": [
" envoyHttpAls:\n",
" - name: envoyTcpAls\n",
" envoyTcpAls:\n",
" components:\n",
" egressGateways:\n",
" - name: istio-egressgateway\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" - name: opentelemetry\n",
" opentelemetry:\n"
],
"file_path": "operator/pkg/util/testdata/overlay-iop.yaml",
"type": "add",
"edit_start_line_idx": 35
} | * mangle
-A PREROUTING -m conntrack --ctstate INVALID -j DROP
COMMIT
* nat
-N ISTIO_INBOUND
-N ISTIO_REDIRECT
-N ISTIO_IN_REDIRECT
-N ISTIO_OUTPUT
-A ISTIO_INBOUND -p tcp --dport 15008 -j RETURN
-A ISTIO_REDIRECT -p tcp -j REDIRECT --to-ports 15001
-A ISTIO_IN_REDIRECT -p tcp -j REDIRECT --to-ports 15006
-A PREROUTING -p tcp -j ISTIO_INBOUND
-A ISTIO_INBOUND -p tcp --dport 15020 -j RETURN
-A ISTIO_INBOUND -p tcp --dport 15021 -j RETURN
-A ISTIO_INBOUND -p tcp --dport 15090 -j RETURN
-A ISTIO_INBOUND -p tcp -j ISTIO_IN_REDIRECT
-A OUTPUT -p tcp -j ISTIO_OUTPUT
-A ISTIO_OUTPUT -p tcp --dport 15020 -j RETURN
-A ISTIO_OUTPUT -o lo -s 127.0.0.6/32 -j RETURN
-A ISTIO_OUTPUT -o lo ! -d 127.0.0.1/32 -m owner --uid-owner 1337 -j ISTIO_IN_REDIRECT
-A ISTIO_OUTPUT -o lo -m owner ! --uid-owner 1337 -j RETURN
-A ISTIO_OUTPUT -m owner --uid-owner 1337 -j RETURN
-A ISTIO_OUTPUT -o lo ! -d 127.0.0.1/32 -m owner --gid-owner 1337 -j ISTIO_IN_REDIRECT
-A ISTIO_OUTPUT -o lo -m owner ! --gid-owner 1337 -j RETURN
-A ISTIO_OUTPUT -m owner --gid-owner 1337 -j RETURN
-A ISTIO_OUTPUT -d 127.0.0.1/32 -j RETURN
-A ISTIO_OUTPUT -j ISTIO_REDIRECT
COMMIT
| cni/pkg/plugin/testdata/invalid-drop.txt.golden | 0 | https://github.com/istio/istio/commit/1de67f0f70829c754359fa2e33ac264461a2b980 | [
0.00016558897914364934,
0.0001642599090700969,
0.00016354749095626175,
0.00016364325711037964,
9.40607321808784e-7
] |
{
"id": 5,
"code_window": [
"\t\t\"skywalking\": {},\n",
"\t\t\"envoy_http_als\": {},\n",
"\t\t\"envoy_tcp_als\": {},\n",
"\t\t\"envoy_otel_als\": {},\n",
"\t}\n",
"\n",
"\tunexpectedProviders := make([]string, 0)\n",
"\tmsg := &meshconfig.MeshConfig_ExtensionProvider{}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\"opentelemetry\": {},\n"
],
"file_path": "pilot/pkg/model/push_context_test.go",
"type": "add",
"edit_start_line_idx": 1000
} | apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
meshConfig:
accessLogFile: /dev/stdout
extensionProviders:
- name: otel
envoyOtelAls:
service: otel-collector.istio-system.svc.cluster.local
port: 4317
- name: prometheus
prometheus:
- name: stackdriver
stackdriver:
- name: envoy
envoyFileAccessLog:
path: /dev/stdout
- name: envoyExtAuthzHttp
envoyExtAuthzHttp:
- name: envoyExtAuthzGrpc
envoyExtAuthzGrpc:
- name: zipkin
zipkin:
- name: lightstep
lightstep:
- name: datadog
datadog:
- name: opencensus
opencensus:
- name: skywalking
skywalking:
- name: envoyHttpAls
envoyHttpAls:
- name: envoyTcpAls
envoyTcpAls:
components:
egressGateways:
- name: istio-egressgateway
enabled: true
k8s:
resources:
requests:
cpu: 10m
memory: 40Mi
ingressGateways:
- name: istio-ingressgateway
enabled: true
k8s:
resources:
requests:
cpu: 10m
memory: 40Mi
service:
ports:
            ## You can add custom gateway ports in user values overrides, but the list must still include these default ports, since Helm replaces the whole list.
# Note that AWS ELB will by default perform health checks on the first port
# on this list. Setting this to the health check port will ensure that health
# checks always work. https://github.com/istio/istio/issues/12503
- port: 15021
targetPort: 15021
name: status-port
- port: 80
targetPort: 8080
name: http2
- port: 443
targetPort: 8443
name: https
- port: 31400
targetPort: 31400
name: tcp
# This is the port where sni routing happens
- port: 15443
targetPort: 15443
name: tls
pilot:
k8s:
env:
- name: PILOT_TRACE_SAMPLING
value: "100"
resources:
requests:
cpu: 10m
memory: 100Mi
values:
global:
proxy:
resources:
requests:
cpu: 10m
memory: 40Mi
pilot:
autoscaleEnabled: false
gateways:
istio-egressgateway:
autoscaleEnabled: false
istio-ingressgateway:
autoscaleEnabled: false
| operator/pkg/util/testdata/overlay-iop.yaml | 1 | https://github.com/istio/istio/commit/1de67f0f70829c754359fa2e33ac264461a2b980 | [
0.001120518776588142,
0.00032385351369157434,
0.00016505505482200533,
0.000174889515619725,
0.00029291200917214155
] |
{
"id": 5,
"code_window": [
"\t\t\"skywalking\": {},\n",
"\t\t\"envoy_http_als\": {},\n",
"\t\t\"envoy_tcp_als\": {},\n",
"\t\t\"envoy_otel_als\": {},\n",
"\t}\n",
"\n",
"\tunexpectedProviders := make([]string, 0)\n",
"\tmsg := &meshconfig.MeshConfig_ExtensionProvider{}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\"opentelemetry\": {},\n"
],
"file_path": "pilot/pkg/model/push_context_test.go",
"type": "add",
"edit_start_line_idx": 1000
} | // Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
mesh "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
tb "istio.io/istio/pilot/pkg/trustbundle"
"istio.io/istio/pilot/pkg/util/protoconv"
)
// PcdsGenerator generates proxy configuration for proxies to consume
type PcdsGenerator struct {
Server *DiscoveryServer
TrustBundle *tb.TrustBundle
}
var _ model.XdsResourceGenerator = &PcdsGenerator{}
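// pcdsNeedsPush reports whether proxy configuration needs to be pushed for the
// given request: only when multi-root mesh is enabled, and only for full pushes
// with no specific configs updated (or a nil request).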
func pcdsNeedsPush(req *model.PushRequest) bool {
if !features.MultiRootMesh {
return false
}
if req == nil {
return true
}
if !req.Full {
return false
}
if len(req.ConfigsUpdated) == 0 {
// This needs to be better optimized
return true
}
return false
}
// Generate returns a ProxyConfig protobuf containing the TrustBundle for the given proxy.
func (e *PcdsGenerator) Generate(proxy *model.Proxy, w *model.WatchedResource, req *model.PushRequest) (model.Resources, model.XdsLogDetails, error) {
if !pcdsNeedsPush(req) {
return nil, model.DefaultXdsLogDetails, nil
}
if e.TrustBundle == nil {
return nil, model.DefaultXdsLogDetails, nil
}
// TODO: For now, only TrustBundle updates are pushed. Eventually, this should push entire Proxy Configuration
pc := &mesh.ProxyConfig{
CaCertificatesPem: e.TrustBundle.GetTrustBundle(),
}
return model.Resources{&discovery.Resource{Resource: protoconv.MessageToAny(pc)}}, model.DefaultXdsLogDetails, nil
}
| pilot/pkg/xds/pcds.go | 0 | https://github.com/istio/istio/commit/1de67f0f70829c754359fa2e33ac264461a2b980 | [
0.00019960995996370912,
0.00017552926146890968,
0.00016598573711235076,
0.00017164877499453723,
0.00001060919021256268
] |
{
"id": 5,
"code_window": [
"\t\t\"skywalking\": {},\n",
"\t\t\"envoy_http_als\": {},\n",
"\t\t\"envoy_tcp_als\": {},\n",
"\t\t\"envoy_otel_als\": {},\n",
"\t}\n",
"\n",
"\tunexpectedProviders := make([]string, 0)\n",
"\tmsg := &meshconfig.MeshConfig_ExtensionProvider{}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\"opentelemetry\": {},\n"
],
"file_path": "pilot/pkg/model/push_context_test.go",
"type": "add",
"edit_start_line_idx": 1000
} | // Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package istio
import (
"flag"
)
// init registers the command-line flags that we expose for "go test".
func init() {
flag.StringVar(&settingsFromCommandline.SystemNamespace, "istio.test.kube.systemNamespace", settingsFromCommandline.SystemNamespace,
"Deprecated, specifies the namespace where the Istio components (<=1.1) reside in a typical deployment.")
flag.StringVar(&settingsFromCommandline.TelemetryNamespace, "istio.test.kube.telemetryNamespace", settingsFromCommandline.TelemetryNamespace,
"Specifies the namespace in which kiali, tracing providers, graphana, prometheus are deployed.")
flag.BoolVar(&settingsFromCommandline.DeployIstio, "istio.test.kube.deploy", settingsFromCommandline.DeployIstio,
"Deploy Istio into the target Kubernetes environment.")
flag.StringVar(&settingsFromCommandline.PrimaryClusterIOPFile, "istio.test.kube.helm.iopFile", settingsFromCommandline.PrimaryClusterIOPFile,
"IstioOperator spec file. This can be an absolute path or relative to repository root.")
flag.StringVar(&helmValues, "istio.test.kube.helm.values", helmValues,
"Manual overrides for Helm values file. Only valid when deploying Istio.")
flag.BoolVar(&settingsFromCommandline.DeployEastWestGW, "istio.test.kube.deployEastWestGW", settingsFromCommandline.DeployEastWestGW,
"Deploy Istio east west gateway into the target Kubernetes environment.")
flag.BoolVar(&settingsFromCommandline.DumpKubernetesManifests, "istio.test.istio.dumpManifests", settingsFromCommandline.DumpKubernetesManifests,
"Dump generated Istio install manifests in the artifacts directory.")
flag.BoolVar(&settingsFromCommandline.IstiodlessRemotes, "istio.test.istio.istiodlessRemotes", settingsFromCommandline.IstiodlessRemotes,
"Remote clusters run without istiod, using webhooks/ca from the primary cluster.")
flag.StringVar(&operatorOptions, "istio.test.istio.operatorOptions", operatorOptions,
`Comma separated operator configuration in addition to the default operator configuration.
e.g. components.cni.enabled=true,components.cni.namespace=kube-system`)
flag.BoolVar(&settingsFromCommandline.EnableCNI, "istio.test.istio.enableCNI", settingsFromCommandline.EnableCNI,
"Deploy Istio with CNI enabled.")
}
| pkg/test/framework/components/istio/flags.go | 0 | https://github.com/istio/istio/commit/1de67f0f70829c754359fa2e33ac264461a2b980 | [
0.0001796379074221477,
0.00017273242701776326,
0.00016221152327489108,
0.0001740392908686772,
0.000006236530225578463
] |
{
"id": 5,
"code_window": [
"\t\t\"skywalking\": {},\n",
"\t\t\"envoy_http_als\": {},\n",
"\t\t\"envoy_tcp_als\": {},\n",
"\t\t\"envoy_otel_als\": {},\n",
"\t}\n",
"\n",
"\tunexpectedProviders := make([]string, 0)\n",
"\tmsg := &meshconfig.MeshConfig_ExtensionProvider{}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\"opentelemetry\": {},\n"
],
"file_path": "pilot/pkg/model/push_context_test.go",
"type": "add",
"edit_start_line_idx": 1000
} | Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| licenses/github.com/liggitt/tabwriter/LICENSE | 0 | https://github.com/istio/istio/commit/1de67f0f70829c754359fa2e33ac264461a2b980 | [
0.00017192379164043814,
0.000167952457559295,
0.0001639137335587293,
0.0001680198620306328,
0.000003270439947300474
] |
{
"id": 0,
"code_window": [
" deps = [\n",
" \"//resourcemanager/scheduler\",\n",
" \"//resourcemanager/util\",\n",
" \"//util\",\n",
" \"//util/cpu\",\n",
" \"@com_github_pingcap_log//:log\",\n",
" \"@org_uber_go_zap//:zap\",\n",
" ],\n",
")"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"@com_github_google_uuid//:uuid\",\n"
],
"file_path": "resourcemanager/BUILD.bazel",
"type": "add",
"edit_start_line_idx": 15
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !codes
package testkit
import (
"flag"
"testing"
"time"
"github.com/pingcap/tidb/ddl/schematracker"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/store/driver"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/util/gctuner"
"github.com/stretchr/testify/require"
"go.opencensus.io/stats/view"
)
// WithTiKV flag is only used for debugging locally with a real tikv cluster.
var WithTiKV = flag.String("with-tikv", "", "address of tikv cluster, if set, running test with real tikv cluster")
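// Example (the PD address below is illustrative): go test ./... -with-tikv=127.0.0.1:2379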
// CreateMockStore returns a new mock kv.Storage.
func CreateMockStore(t testing.TB, opts ...mockstore.MockTiKVStoreOption) kv.Storage {
if *WithTiKV != "" {
var d driver.TiKVDriver
var err error
store, err := d.Open("tikv://" + *WithTiKV)
require.NoError(t, err)
var dom *domain.Domain
dom, err = session.BootstrapSession(store)
t.Cleanup(func() {
dom.Close()
err := store.Close()
require.NoError(t, err)
view.Stop()
})
require.NoError(t, err)
return store
}
t.Cleanup(func() {
view.Stop()
})
gctuner.GlobalMemoryLimitTuner.Stop()
store, _ := CreateMockStoreAndDomain(t, opts...)
return store
}
// CreateMockStoreAndDomain returns a new mock kv.Storage and *domain.Domain.
func CreateMockStoreAndDomain(t testing.TB, opts ...mockstore.MockTiKVStoreOption) (kv.Storage, *domain.Domain) {
store, err := mockstore.NewMockStore(opts...)
require.NoError(t, err)
dom := bootstrap(t, store, 500*time.Millisecond)
sm := MockSessionManager{}
dom.InfoSyncer().SetSessionManager(&sm)
t.Cleanup(func() {
view.Stop()
gctuner.GlobalMemoryLimitTuner.Stop()
})
return schematracker.UnwrapStorage(store), dom
}
func bootstrap(t testing.TB, store kv.Storage, lease time.Duration) *domain.Domain {
session.SetSchemaLease(lease)
session.DisableStats4Test()
domain.DisablePlanReplayerBackgroundJob4Test()
domain.DisableDumpHistoricalStats4Test()
dom, err := session.BootstrapSession(store)
require.NoError(t, err)
dom.SetStatsUpdating(true)
t.Cleanup(func() {
dom.Close()
err := store.Close()
require.NoError(t, err)
view.Stop()
})
return dom
}
// CreateMockStoreWithSchemaLease returns a new mock kv.Storage.
func CreateMockStoreWithSchemaLease(t testing.TB, lease time.Duration, opts ...mockstore.MockTiKVStoreOption) kv.Storage {
store, _ := CreateMockStoreAndDomainWithSchemaLease(t, lease, opts...)
return schematracker.UnwrapStorage(store)
}
// CreateMockStoreAndDomainWithSchemaLease returns a new mock kv.Storage and *domain.Domain.
func CreateMockStoreAndDomainWithSchemaLease(t testing.TB, lease time.Duration, opts ...mockstore.MockTiKVStoreOption) (kv.Storage, *domain.Domain) {
store, err := mockstore.NewMockStore(opts...)
require.NoError(t, err)
dom := bootstrap(t, store, lease)
sm := MockSessionManager{}
dom.InfoSyncer().SetSessionManager(&sm)
return schematracker.UnwrapStorage(store), dom
}
| testkit/mockstore.go | 1 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.00018737476784735918,
0.0001727842609398067,
0.00016617641085758805,
0.0001724277390167117,
0.000006259414021769771
] |
{
"id": 0,
"code_window": [
" deps = [\n",
" \"//resourcemanager/scheduler\",\n",
" \"//resourcemanager/util\",\n",
" \"//util\",\n",
" \"//util/cpu\",\n",
" \"@com_github_pingcap_log//:log\",\n",
" \"@org_uber_go_zap//:zap\",\n",
" ],\n",
")"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"@com_github_google_uuid//:uuid\",\n"
],
"file_path": "resourcemanager/BUILD.bazel",
"type": "add",
"edit_start_line_idx": 15
} | // Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package isolation
import (
"context"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/terror"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/sessiontxn"
"github.com/pingcap/tidb/util/logutil"
tikverr "github.com/tikv/client-go/v2/error"
"go.uber.org/zap"
)
// PessimisticRRTxnContextProvider provides txn context for isolation level repeatable-read
type PessimisticRRTxnContextProvider struct {
basePessimisticTxnContextProvider
// Used for ForUpdateRead statement
forUpdateTS uint64
latestForUpdateTS uint64
	// It may decide whether to update forUpdateTs when calling the provider's getForUpdateTs.
	// See more details in the comments of AdviseOptimizeWithPlan.
optimizeForNotFetchingLatestTS bool
}
// NewPessimisticRRTxnContextProvider returns a new PessimisticRRTxnContextProvider
func NewPessimisticRRTxnContextProvider(sctx sessionctx.Context, causalConsistencyOnly bool) *PessimisticRRTxnContextProvider {
provider := &PessimisticRRTxnContextProvider{
basePessimisticTxnContextProvider: basePessimisticTxnContextProvider{
baseTxnContextProvider: baseTxnContextProvider{
sctx: sctx,
causalConsistencyOnly: causalConsistencyOnly,
onInitializeTxnCtx: func(txnCtx *variable.TransactionContext) {
txnCtx.IsPessimistic = true
txnCtx.Isolation = ast.RepeatableRead
},
onTxnActiveFunc: func(txn kv.Transaction, _ sessiontxn.EnterNewTxnType) {
txn.SetOption(kv.Pessimistic, true)
},
},
},
}
provider.getStmtReadTSFunc = provider.getTxnStartTS
provider.getStmtForUpdateTSFunc = provider.getForUpdateTs
return provider
}
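// getForUpdateTs returns the for-update timestamp for the current statement. It
// reuses the cached value when present, uses the transaction's existing
// forUpdateTS when the not-fetching-latest-TS optimization applies, and
// otherwise waits for a fresh TSO.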
func (p *PessimisticRRTxnContextProvider) getForUpdateTs() (ts uint64, err error) {
if p.forUpdateTS != 0 {
return p.forUpdateTS, nil
}
var txn kv.Transaction
if txn, err = p.ActivateTxn(); err != nil {
return 0, err
}
if p.optimizeForNotFetchingLatestTS {
p.forUpdateTS = p.sctx.GetSessionVars().TxnCtx.GetForUpdateTS()
return p.forUpdateTS, nil
}
txnCtx := p.sctx.GetSessionVars().TxnCtx
futureTS := newOracleFuture(p.ctx, p.sctx, txnCtx.TxnScope)
start := time.Now()
if ts, err = futureTS.Wait(); err != nil {
return 0, err
}
p.sctx.GetSessionVars().DurationWaitTS += time.Since(start)
txnCtx.SetForUpdateTS(ts)
txn.SetOption(kv.SnapshotTS, ts)
p.forUpdateTS = ts
return
}
// updateForUpdateTS acquires the latest TSO and update the TransactionContext and kv.Transaction with it.
func (p *PessimisticRRTxnContextProvider) updateForUpdateTS() (err error) {
sctx := p.sctx
var txn kv.Transaction
if txn, err = sctx.Txn(false); err != nil {
return err
}
if !txn.Valid() {
return errors.Trace(kv.ErrInvalidTxn)
}
failpoint.Inject("RequestTsoFromPD", func() {
sessiontxn.TsoRequestCountInc(sctx)
})
	// Because the ForUpdateTS is only used for the snapshot when reading data in DML,
	// we can avoid allocating a global TSO here and speed things up by using the local TSO.
version, err := sctx.GetStore().CurrentVersion(sctx.GetSessionVars().TxnCtx.TxnScope)
if err != nil {
return err
}
sctx.GetSessionVars().TxnCtx.SetForUpdateTS(version.Ver)
p.latestForUpdateTS = version.Ver
txn.SetOption(kv.SnapshotTS, version.Ver)
return nil
}
// OnStmtStart is the hook that should be called when a new statement started
func (p *PessimisticRRTxnContextProvider) OnStmtStart(ctx context.Context, node ast.StmtNode) error {
if err := p.basePessimisticTxnContextProvider.OnStmtStart(ctx, node); err != nil {
return err
}
p.forUpdateTS = 0
p.optimizeForNotFetchingLatestTS = false
return nil
}
// OnStmtRetry is the hook that should be called when a statement is retried internally.
func (p *PessimisticRRTxnContextProvider) OnStmtRetry(ctx context.Context) (err error) {
if err = p.basePessimisticTxnContextProvider.OnStmtRetry(ctx); err != nil {
return err
}
// If TxnCtx.forUpdateTS is updated in OnStmtErrorForNextAction, we assign the value to the provider
if p.latestForUpdateTS > p.forUpdateTS {
p.forUpdateTS = p.latestForUpdateTS
} else {
p.forUpdateTS = 0
}
p.optimizeForNotFetchingLatestTS = false
return nil
}
// OnStmtErrorForNextAction is the hook that should be called when a new statement get an error
func (p *PessimisticRRTxnContextProvider) OnStmtErrorForNextAction(point sessiontxn.StmtErrorHandlePoint, err error) (sessiontxn.StmtErrorAction, error) {
switch point {
case sessiontxn.StmtErrAfterPessimisticLock:
return p.handleAfterPessimisticLockError(err)
default:
return sessiontxn.NoIdea()
}
}
// AdviseOptimizeWithPlan optimizes for update point get related execution.
// Use case: In for update point get related operations, we do not fetch ts from PD but use the last ts we fetched.
//
// We expect that the data that the point get acquires has not been changed.
//
// Benefit: Save the cost of acquiring ts from PD.
// Drawbacks: If the data has been changed since the ts we used, we need to retry.
// One exception is the insert operation: when it has no select plan, we do not fetch the latest ts immediately.
// We only update the ts if a write conflict is incurred.
func (p *PessimisticRRTxnContextProvider) AdviseOptimizeWithPlan(val interface{}) (err error) {
if p.isTidbSnapshotEnabled() || p.isBeginStmtWithStaleRead() {
return nil
}
plan, ok := val.(plannercore.Plan)
if !ok {
return nil
}
if execute, ok := plan.(*plannercore.Execute); ok {
plan = execute.Plan
}
p.optimizeForNotFetchingLatestTS = notNeedGetLatestTSFromPD(plan, false)
return nil
}
// notNeedGetLatestTSFromPD searches for the optimization condition recursively.
// Note: For point get and batch point get (name it plan), if one of the ancestor nodes is update/delete/physicalLock,
// we should check whether plan.Lock is true or false. See the comments in the PointGetPlan case below.
// inLockOrWriteStmt = true means one of the ancestor nodes is update/delete/physicalLock.
func notNeedGetLatestTSFromPD(plan plannercore.Plan, inLockOrWriteStmt bool) bool {
switch v := plan.(type) {
case *plannercore.PointGetPlan:
		// We do not optimize the point get / batch point get if plan.lock = false and inLockOrWriteStmt = true.
		// Theoretically, plan.lock should be true if the flag is true. But due to the bug described in Issue35524,
		// plan.lock can be false when inLockOrWriteStmt is true. In this case, optimization here can lead to different results,
// which cannot be accepted as AdviseOptimizeWithPlan cannot change results.
return !inLockOrWriteStmt || v.Lock
case *plannercore.BatchPointGetPlan:
return !inLockOrWriteStmt || v.Lock
case plannercore.PhysicalPlan:
if len(v.Children()) == 0 {
return false
}
_, isPhysicalLock := v.(*plannercore.PhysicalLock)
for _, p := range v.Children() {
if !notNeedGetLatestTSFromPD(p, isPhysicalLock || inLockOrWriteStmt) {
return false
}
}
return true
case *plannercore.Update:
return notNeedGetLatestTSFromPD(v.SelectPlan, true)
case *plannercore.Delete:
return notNeedGetLatestTSFromPD(v.SelectPlan, true)
case *plannercore.Insert:
return v.SelectPlan == nil
}
return false
}
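// handleAfterPessimisticLockError decides whether a statement that failed to
// acquire a pessimistic lock can be retried, refreshing forUpdateTS when needed.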
func (p *PessimisticRRTxnContextProvider) handleAfterPessimisticLockError(lockErr error) (sessiontxn.StmtErrorAction, error) {
sessVars := p.sctx.GetSessionVars()
txnCtx := sessVars.TxnCtx
if deadlock, ok := errors.Cause(lockErr).(*tikverr.ErrDeadlock); ok {
if !deadlock.IsRetryable {
return sessiontxn.ErrorAction(lockErr)
}
logutil.Logger(p.ctx).Info("single statement deadlock, retry statement",
zap.Uint64("txn", txnCtx.StartTS),
zap.Uint64("lockTS", deadlock.LockTs),
zap.Stringer("lockKey", kv.Key(deadlock.LockKey)),
zap.Uint64("deadlockKeyHash", deadlock.DeadlockKeyHash))
} else if terror.ErrorEqual(kv.ErrWriteConflict, lockErr) {
// Always update forUpdateTS by getting a new timestamp from PD.
// If we use the conflict commitTS as the new forUpdateTS and async commit
// is used, the commitTS of this transaction may exceed the max timestamp
// that PD allocates. Then, the change may be invisible to a new transaction,
// which means linearizability is broken.
errStr := lockErr.Error()
forUpdateTS := txnCtx.GetForUpdateTS()
logutil.Logger(p.ctx).Debug("pessimistic write conflict, retry statement",
zap.Uint64("txn", txnCtx.StartTS),
zap.Uint64("forUpdateTS", forUpdateTS),
zap.String("err", errStr))
} else {
		// This branch: if err is not nil, always update forUpdateTS to avoid the problem described below.
		// For nowait, when an ErrLock happens, ErrLockAcquireFailAndNoWaitSet will be returned, and in the same txn
		// the select-for-update ts must be updated, otherwise there may be a rollback problem:
		//   begin
		//   select for update key1 (here it encounters ErrLocked or other errors (or a max_execution_time-like timeout);
		//   the key1 lock has not been acquired and an async rollback of key1 is issued)
		//   select for update key1 again (this time the lock is acquired successfully (maybe the lock was released by others))
		//   the async rollback operation rolls back the lock just acquired
if err := p.updateForUpdateTS(); err != nil {
logutil.Logger(p.ctx).Warn("UpdateForUpdateTS failed", zap.Error(err))
}
return sessiontxn.ErrorAction(lockErr)
}
if err := p.updateForUpdateTS(); err != nil {
return sessiontxn.ErrorAction(lockErr)
}
return sessiontxn.RetryReady()
}
| sessiontxn/isolation/repeatable_read.go | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.0006334278150461614,
0.00019388709915801883,
0.0001643044815864414,
0.00017084948194678873,
0.00008898522355593741
] |
{
"id": 0,
"code_window": [
" deps = [\n",
" \"//resourcemanager/scheduler\",\n",
" \"//resourcemanager/util\",\n",
" \"//util\",\n",
" \"//util/cpu\",\n",
" \"@com_github_pingcap_log//:log\",\n",
" \"@org_uber_go_zap//:zap\",\n",
" ],\n",
")"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"@com_github_google_uuid//:uuid\",\n"
],
"file_path": "resourcemanager/BUILD.bazel",
"type": "add",
"edit_start_line_idx": 15
} | [
{
"name": "TestPredicatePushDown",
"cases": [
"select a, b from (select a, b from t as t1 order by a) as t2 where t2.b > 10",
"select a, b from (select a, b from t as t1 order by a) as t2 where t2.a > 10",
"select a, b from (select a, b, a+b as a_b from t as t1) as t2 where a_b > 10 and b = 1",
"select b, @i:=@i+1 as ii from (select b, @i:=0 from t as t1) as t2 where @i < 10",
"select b, @i:=@i+1 as ii from (select a, b, @i:=0 from t as t1) as t2 where @i < 10 and a > 10",
"select a, max(b) from t group by a having a > 1",
"select a, avg(b) from t group by a having a > 1 and max(b) > 10",
"select a, approx_count_distinct(b) from t group by a having a > 1 and max(b) > 10",
"select t1.a, t1.b, t2.b from t t1, t t2 where t1.a = t2.a and t2.b = t1.b and t1.a > 10 and t2.b > 10 and t1.a > t2.b",
"select t1.a, t1.b from t t1, t t2 where t1.a = t2.a and t1.a = 10 and t2.a = 5",
"select a, f from t where f > 1",
"select a, f from (select a, f, g from t where f = 1) t1 where g > 1",
"select a, f from t where g > 1 and f > 1",
"select t1.a, t1.b from t t1, t t2 where t1.a = t2.a and t1.a = 10 and t2.a = 5",
"select a, b from ((select a, b from t) union all(select c as a, d as b from t)) as t1 where a > 1",
"select a, b from (select a, b, min(a) over(partition by b) as min_a from t)as tt where a < 10 and b > 10 and b = min_a",
"select b, c from (select b, c from t where b > 1 and c > 1) as t1 where b > 2 and c > 2",
"select * from t t1 join t t2 on t1.a = t2.a and t1.a > 2"
]
},
{
"name": "TestAggPushDownGather",
"cases": [
"select b, sum(a) from t group by b",
"select b, sum(a) from t group by c, b",
"select b, sum(a) from t group by sin(b)+sin(c), b"
]
},
{
"name": "TestTopNRules",
"cases": [
"select b from t order by a limit 2",
"select b from t limit 2",
"select a+b from t order by a limit 1 offset 2",
"select c from t order by t.a limit 1",
"select c from t order by t.a + t.b limit 1",
"select a, b, c from t t1 where t1.a in (select t2.a as a from t t2 where t2.b > t1.b order by t1.b limit 1)",
"select a, b, c from t t1 where t1.a in (select a from (select t2.a as a, t1.b as b from t t2 where t2.b > t1.b) x order by b limit 1)",
"select a, b from (select @i as a, @i := @i+1 as b from t) t order by a desc limit 1",
"select * from t t1 left join t t2 on t1.b = t2.b order by t1.b limit 1",
"select * from t t1 left join t t2 on t1.b = t2.b order by t1.a, t1.c limit 1",
"select * from t t1 left join t t2 on t1.b = t2.b order by t2.a, t2.c limit 1",
"select * from t t1 left join t t2 on t1.b = t2.b order by t1.a, t2.c limit 1",
"select * from t t1 right join t t2 on t1.b = t2.b order by t1.a, t1.c limit 1",
"select * from t t1 right join t t2 on t1.b = t2.b order by t2.a, t2.c limit 1",
"select * from t t1 right join t t2 on t1.b = t2.b order by t1.a, t2.c limit 1",
"select * from t t1 left join t t2 on t1.b = t2.b limit 1",
"select * from t t1 left join t t2 on t1.b = t2.b limit 5 offset 4",
"select * from t t1 right join t t2 on t1.b = t2.b limit 1",
"select * from t t1 right join t t2 on t1.b = t2.b limit 5 offset 4",
"(select a from t) union all (select b from t) order by a limit 2;",
"(select a from t) union all (select b from t) limit 2;",
"(select a from t) union all (select b from t) limit 2 offset 5;",
"(select a from t) union all (select sum(a) from t where a > 2 group by b) order by a limit 2;",
"(select a from t) union all (select sum(a) from t where a > 2 group by b) order by a limit 1, 2;",
"(select a from t where a = 1) union all (select b from t where a = 2) union all (select c from t where a = 3) order by a limit 2;"
]
},
{
"name": "TestProjectionElimination",
"cases": [
"select a, b from (select a, b from t) as t2",
"select a+b from (select a, b from t) as t2",
"select a from (select floor(a) as a from t) as t2",
"select a from (select a, b from (select a, b, c from t) as t2) as t3",
"select a+c from (select floor(a) as a, b, c from t) as t2"
]
},
{
"name": "TestEliminateMaxMin",
"cases": [
"select max(a) from t;",
"select min(a) from t;"
]
},
{
"name": "TestMergeAggregationProjection",
"cases": [
"select b, max(a) from (select a, c+d as b from t as t1) as t2 group by b",
"select max(a) from (select c+d as b, a+c as a from t as t1) as t2",
"select b, max(a) from (select a, c+d as b, @i:=0 from t as t1) as t2 group by b"
]
},
{
"name": "TestMergeAdjacentLimit",
"cases": [
"select b from (select b from t limit 5) as t1 limit 10",
"select b from (select b from t limit 20) as t1 limit 10",
"select b from (select b from t limit 10) as t1 limit 10",
"select b from (select b from t limit 10 offset 10) as t1 limit 10 offset 5",
"select b from (select b from t limit 10 offset 2) as t1 limit 3 offset 5",
"select b from (select b from t limit 10 offset 5) as t1 limit 5 offset 5",
"select a from (select a from t limit 3 offset 5) t1 limit 3 offset 5"
]
},
{
"name": "TestMergeAdjacentTopN",
"cases": [
"select b from (select b from t where c > 1 order by b limit 3) as t1 order by b limit 2",
"select a from (select a from t where b > 2 order by a limit 3 offset 1) as t1 order by a limit 2 offset 1",
"select * from (select * from t order by a limit 3) as t1 order by a limit 5",
"select b from (select b from t order by b limit 5) as t1 order by b limit 10",
"select b from (select b from t order by b limit 20) as t1 order by b limit 10",
"select b from (select b from t order by b limit 10) as t1 order by b limit 10",
"select b from (select b from t order by b limit 10 offset 10) as t1 order by b limit 10 offset 5",
"select b from (select b from t order by b limit 10 offset 2) as t1 order by b limit 3 offset 5",
"select b from (select b from t order by b limit 10 offset 5) as t1 order by b limit 5 offset 5",
"select a from (select a from t order by a limit 3 offset 5) as t1 order by a limit 3 offset 5",
"select b from (select b from t where c > 1 order by b, a limit 3) as t1 order by b limit 2",
"select a from (select a from t where b > 2 order by a, b limit 3 offset 1) as t1 order by a limit 2 offset 1"
]
},
{
"name": "TestTransformLimitToTableDual",
"cases": [
"select a from t limit 0 offset 1",
"select * from t as t1 left join (select * from t limit 0 offset 1) as t2 on t1.a = t2.a;",
"select a, b from ((select a, b from t) union all(select c as a, d as b from t limit 0 offset 5)) as t1 where a > 1"
]
},
{
"name": "TestPostTransformationRules",
"cases": [
"select b from (select b+10 as b from t) as t1 order by b + 10 limit 10",
"select * from (select a+1 as c, a+b as d from t) as t1 order by c+d limit 10",
"select a from (select a, b from t order by b limit 10) as t1"
]
},
{
"name": "TestPushLimitDownTiKVSingleGather",
"cases": [
"select * from t limit 1",
"select * from t as t1 left join (select * from t limit 2) as t2 on t1.a = t2.a;",
"select a, b from ((select a, b from t) union all(select c as a, d as b from t limit 3)) as t1 where a > 1"
]
},
{
"name": "TestEliminateOuterJoin",
"cases": [
"select t1.a, max(t1.b) from t as t1 left join (select * from t) as t2 on t1.a = t2.a and t1.b = 3 group by t1.a;",
"select t1.a, max(t1.b) from t as t1 left join (select * from t) as t2 on t1.a = t2.a group by t1.a;",
"select t1.a, sum(t1.h) from t as t1 left join (select * from t) as t2 on t1.h = t2.h group by t1.a;",
"select t1.a, sum(distinct t1.h) from t as t1 left join (select * from t) as t2 on t1.h = t2.h group by t1.a;",
"select t1.a, max(t1.b) from t as t1 left join (select * from t) as t2 on t1.a = t2.a group by t2.a;",
"select t1.a, max(t2.b) from t as t1 left join (select * from t) as t2 on t1.a = t2.a group by t1.a;",
"select t2.a, max(t2.b) from t as t1 right join (select * from t) as t2 on t1.a = t2.a group by t2.a;",
"select t2.a, max(t2.b) from t as t1 right join (select * from t) as t2 on t1.a = t2.a and t2.c = 3 group by t2.a;",
"select t2.a, sum(t2.h) from t as t1 right join (select * from t) as t2 on t1.h = t2.h group by t2.a;",
"select t2.a, sum(distinct t2.h) from t as t1 right join (select * from t) as t2 on t1.h = t2.h group by t2.a;",
"select t2.a, max(t2.b) from t as t1 right join (select * from t) as t2 on t1.a = t2.a group by t1.a;",
"select t2.a, max(t1.b) from t as t1 right join (select * from t) as t2 on t1.a = t2.a group by t2.a;",
"select t1.a, t1.b from t as t1 left join t as t2 on t1.a = t2.a and t1.b = 3 and t2.b = 3;",
"select t1.a, t1.b from t as t1 left join t as t2 on t1.b = t2.b and t1.a = 3 and t2.b = 3;",
"select t2.a, t1.b from t as t1 left join t as t2 on t1.a = t2.a and t1.a = 3 and t2.b = 3;",
"select t2.a, t2.b from t as t1 right join t as t2 on t1.a = t2.a and t1.b = 3 and t2.b = 3;",
"select t2.a, t2.b from t as t1 right join t as t2 on t1.b = t2.b and t1.a = 3 and t2.b = 3;",
"select t1.a, t2.b from t as t1 right join t as t2 on t1.a = t2.a and t1.a = 3 and t2.b = 3;",
"select t3.a, max(t3.b) from (select t1.a, t1.b from t as t1 left join t as t2 on t1.a = t2.a) t3 group by t3.a"
]
},
{
"name": "TestTransformAggregateCaseToSelection",
"cases": [
"select count(case when a > 10 then b end) from t",
"select count(case when a > 10 then b end) from t group by c",
"select count(case when a > 10 then b end) from t group by 'a'",
"select count(case when a > 10 then b end) from t group by concat(c, a)",
"select count(case when a > 10 then b end) from t group by concat(c, 'a')",
"select count(case when a > 10 then b end) from t group by concat('a', 'c')",
"select count(case when a > 10 then b else null end) from t",
"select count(case when a > 10 then null else b end) from t",
"select count(case when a > 10 then c else b end) from t",
"select count(case when a > 10 then c else 0 end) from t",
"select sum(case when a > 10 then c else 0 end) from t",
"select sum(case when a > 10 then c else 0.0 end) from t",
"select sum(case when a > 10 then c else 1-1 end) from t",
"select sum(case when a > 0 then (case when a <= 1000 then b end) else 0 end) from t",
"select sum(case when a > 10 then 0 else c end) from t",
"select sum(case when a > 10 then 2 else 1 end) from t",
"select count(DISTINCT case when a > 10 then null else c end) from t",
"select approx_count_distinct(case when a > 10 then null else c end) from t",
"select sum(DISTINCT case when a > 10 then c else 0 end) from t",
"select case when c > 10 then c end from t",
"select count(case when a > 10 then c end), c from t",
"select count(case when a > 10 and d < 5 then b end) from t",
"select count(case when a > 10 and d < 5 then null else b end) from t"
]
},
{
"name": "TestTransformAggToProj",
"cases": [
"select count(b) from t group by a",
"select count(b) from t group by b",
"select count(b) from t",
"select a from t group by a having sum(b) > 4",
"select count(b), sum(b), avg(b), b, max(b), min(b), bit_and(b), bit_or(b), bit_xor(b) from t group by a having sum(b) >= 0 and count(b) >= 0 order by b",
"select count(b), sum(b), avg(b), f, max(c), min(c), bit_and(c), bit_or(d), bit_xor(g) from t group by a",
"select count(b), sum(b), avg(b), f, max(c), min(c), bit_and(c), bit_or(d), bit_xor(g), var_pop(b) from t group by a",
"select count(b), sum(b), avg(b), f, max(c), min(c), bit_and(c), bit_or(d), bit_xor(g), group_concat(b, c, d, f) from t group by a"
]
},
{
"name": "TestDecorrelate",
"cases": [
"select a from t t1 where exists (select 1 from t t2 where t1.a = t2.b)"
]
},
{
"name": "TestInjectProj",
"cases": [
"select * from t order by (a+b) limit 10",
"select max(a), min(b), avg(c) from t group by a+b",
"select max(a), min(b), avg(a / b) from t group by a",
"select max(a), min(b), avg(a / b) from t group by (a+b)"
]
},
{
"name": "TestMergeAdjacentWindow",
"cases": [
"select a, b, max(b) over (partition by c), sum(c) over (partition by c) from t",
"select a, b, max(b) over (partition by c), sum(c) over (partition by a) from t",
"select a, max(b) over (partition by c, d), sum(c) over (partition by c, d) from t",
"select a, max(b) over (partition by c, d), sum(c) over (partition by d, c) from t",
"select a, b, c, d, max(b) over (partition by c order by d desc), sum(d) over (partition by c order by d desc) from t",
"select a, b, c, d, max(b) over (partition by c order by d desc), sum(d) over (partition by c order by d) from t",
"select a, b, c, d, max(b) over (partition by c order by d desc), sum(d) over (partition by a order by b desc) from t",
"select a, b, c, d, max(b) over (partition by c order by d desc rows 2 preceding), sum(d) over (partition by c order by d desc rows 2 preceding) from t",
"select a, b, c, d, max(b) over (partition by c order by d desc rows between 2 preceding and 2 following), sum(d) over (partition by c order by d desc rows between 2 preceding and 2 following) from t",
"select a, b, c, d, max(b) over (partition by c order by d desc rows between 2 preceding and 2 following), sum(d) over (partition by c order by d desc rows between 3 preceding and 2 following) from t",
"select a, b, c, d, max(b) over (partition by c order by d desc rows between 3 preceding and 2 following), sum(d) over (partition by c order by d desc rows between 2 preceding and 2 following) from t",
"select a, b, c, d, max(b) over (partition by c order by d desc rows between 2 preceding and 2 following), sum(d) over (partition by c order by d desc range between 2 preceding and 2 following) from t",
"select a, b, c, d, max(b) over (partition by c order by d desc rows 2 preceding), sum(d) over (partition by c order by d desc range between unbounded preceding and current row) from t",
"select a, b, c, d, max(b) over (partition by c order by d desc rows 2 preceding), sum(d) over (partition by c order by d desc rows 3 preceding) from t",
"select a, b, max(b) over (partition by c), c, d, dd from (select a, b, c, d, sum(d) over (partition by c) as dd from t) as tt",
"select a, b, max(b) over (partition by c), c, d, dd from (select a, b, c, d, sum(d) over (partition by a) as dd from t) as tt",
"select a, b, rank() over (), max(b) over (partition by c), c, d, dd from (select a, b, c, d, sum(d) over (partition by c) as dd from t) as tt",
"select a, b, sum(bb) over (partition by a) as 'sum_bb' from (select a, b, max(b) over (partition by a) as 'bb' from t) as tt",
"select a, b, sum(bb) over (partition by a) as 'sum_bb', c from (select a, b, c, max(b) over (partition by a) as 'bb' from t) as tt",
"select a, b, rank() over (partition by a), sum(bb) over (partition by a) as 'sum_bb', c from (select a, b, c, max(b) over (partition by a) as 'bb' from t) as tt",
"select a, b, sum(bb) over (partition by a) as 'sum_bb', c, rank() over (partition by a) from (select a, b, c, max(b) over (partition by a) as 'bb' from t) as tt",
"select a, b, sum(bb) over (partition by a) as 'sum_bb', c, rank() over () from (select a, b, c, max(b) over (partition by a) as 'bb' from t) as tt"
]
}
]
| planner/cascades/testdata/transformation_rules_suite_in.json | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.00019736617105081677,
0.00017164908058475703,
0.000165039804414846,
0.0001714804384391755,
0.000005844268343935255
] |
{
"id": 0,
"code_window": [
" deps = [\n",
" \"//resourcemanager/scheduler\",\n",
" \"//resourcemanager/util\",\n",
" \"//util\",\n",
" \"//util/cpu\",\n",
" \"@com_github_pingcap_log//:log\",\n",
" \"@org_uber_go_zap//:zap\",\n",
" ],\n",
")"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"@com_github_google_uuid//:uuid\",\n"
],
"file_path": "resourcemanager/BUILD.bazel",
"type": "add",
"edit_start_line_idx": 15
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"context"
"fmt"
"math"
"strings"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/domain/infosync"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/prometheus/client_golang/api"
promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
pmodel "github.com/prometheus/common/model"
"golang.org/x/exp/slices"
)
const promReadTimeout = time.Second * 10
// MetricRetriever is used to read metric data.
type MetricRetriever struct {
dummyCloser
table *model.TableInfo
tblDef *infoschema.MetricTableDef
extractor *plannercore.MetricTableExtractor
retrieved bool
}
func (e *MetricRetriever) retrieve(ctx context.Context, sctx sessionctx.Context) ([][]types.Datum, error) {
if e.retrieved || e.extractor.SkipRequest {
return nil, nil
}
e.retrieved = true
failpoint.InjectContext(ctx, "mockMetricsTableData", func() {
m, ok := ctx.Value("__mockMetricsTableData").(map[string][][]types.Datum)
if ok && m[e.table.Name.L] != nil {
failpoint.Return(m[e.table.Name.L], nil)
}
})
tblDef, err := infoschema.GetMetricTableDef(e.table.Name.L)
if err != nil {
return nil, err
}
e.tblDef = tblDef
queryRange := e.getQueryRange(sctx)
totalRows := make([][]types.Datum, 0)
quantiles := e.extractor.Quantiles
if len(quantiles) == 0 {
quantiles = []float64{tblDef.Quantile}
}
for _, quantile := range quantiles {
var queryValue pmodel.Value
queryValue, err = e.queryMetric(ctx, sctx, queryRange, quantile)
if err != nil {
if err1, ok := err.(*promv1.Error); ok {
return nil, errors.Errorf("query metric error, msg: %v, detail: %v", err1.Msg, err1.Detail)
}
return nil, errors.Errorf("query metric error: %v", err.Error())
}
partRows := e.genRows(queryValue, quantile)
totalRows = append(totalRows, partRows...)
}
return totalRows, nil
}
func (e *MetricRetriever) queryMetric(ctx context.Context, sctx sessionctx.Context, queryRange promv1.Range, quantile float64) (result pmodel.Value, err error) {
failpoint.InjectContext(ctx, "mockMetricsPromData", func() {
failpoint.Return(ctx.Value("__mockMetricsPromData").(pmodel.Matrix), nil)
})
	// Retry a few times to tolerate transient network errors.
var prometheusAddr string
for i := 0; i < 5; i++ {
		//TODO: Prometheus will be integrated into PD; we will then need to query Prometheus through PD directly, which requires changing the query API
prometheusAddr, err = infosync.GetPrometheusAddr()
if err == nil || err == infosync.ErrPrometheusAddrIsNotSet {
break
}
time.Sleep(100 * time.Millisecond)
}
if err != nil {
return nil, err
}
promClient, err := api.NewClient(api.Config{
Address: prometheusAddr,
})
if err != nil {
return nil, err
}
promQLAPI := promv1.NewAPI(promClient)
ctx, cancel := context.WithTimeout(ctx, promReadTimeout)
defer cancel()
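	// GenPromQL renders the metric table's PromQL template, substituting the
	// pushed-down label conditions and the requested quantile (an assumption
	// based on the template-style table definitions; the exact expression,
	// e.g. a histogram_quantile(...) query, varies per table).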
promQL := e.tblDef.GenPromQL(sctx, e.extractor.LabelConditions, quantile)
	// Retry a few times to tolerate transient network errors.
for i := 0; i < 5; i++ {
result, _, err = promQLAPI.QueryRange(ctx, promQL, queryRange)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
return result, err
}
type promQLQueryRange = promv1.Range
func (e *MetricRetriever) getQueryRange(sctx sessionctx.Context) promQLQueryRange {
startTime, endTime := e.extractor.StartTime, e.extractor.EndTime
step := time.Second * time.Duration(sctx.GetSessionVars().MetricSchemaStep)
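	// For example, assuming MetricSchemaStep is 60 (seconds), the range query
	// asks Prometheus for one sample per minute between StartTime and EndTime.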
return promQLQueryRange{Start: startTime, End: endTime, Step: step}
}
func (e *MetricRetriever) genRows(value pmodel.Value, quantile float64) [][]types.Datum {
var rows [][]types.Datum
switch value.Type() {
case pmodel.ValMatrix:
matrix := value.(pmodel.Matrix)
for _, m := range matrix {
for _, v := range m.Values {
record := e.genRecord(m.Metric, v, quantile)
rows = append(rows, record)
}
}
}
return rows
}
func (e *MetricRetriever) genRecord(metric pmodel.Metric, pair pmodel.SamplePair, quantile float64) []types.Datum {
record := make([]types.Datum, 0, 2+len(e.tblDef.Labels)+1)
	// Record order must stay consistent with genColumnInfos.
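	// For a hypothetical table with labels [instance, sql_type] and a quantile
	// column, a row is laid out as: time | instance | sql_type | quantile | value.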
record = append(record, types.NewTimeDatum(types.NewTime(
types.FromGoTime(time.UnixMilli(int64(pair.Timestamp))),
mysql.TypeDatetime,
types.MaxFsp,
)))
for _, label := range e.tblDef.Labels {
v := ""
if metric != nil {
v = string(metric[pmodel.LabelName(label)])
}
if len(v) == 0 {
v = infoschema.GenLabelConditionValues(e.extractor.LabelConditions[strings.ToLower(label)])
}
record = append(record, types.NewStringDatum(v))
}
if e.tblDef.Quantile > 0 {
record = append(record, types.NewFloat64Datum(quantile))
}
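	// Store NaN samples as SQL NULL so downstream consumers never see NaN.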
if math.IsNaN(float64(pair.Value)) {
record = append(record, types.NewDatum(nil))
} else {
record = append(record, types.NewFloat64Datum(float64(pair.Value)))
}
return record
}
// MetricsSummaryRetriever is used to read metric summary data.
type MetricsSummaryRetriever struct {
dummyCloser
table *model.TableInfo
extractor *plannercore.MetricSummaryTableExtractor
timeRange plannercore.QueryTimeRange
retrieved bool
}
func (e *MetricsSummaryRetriever) retrieve(ctx context.Context, sctx sessionctx.Context) ([][]types.Datum, error) {
if !hasPriv(sctx, mysql.ProcessPriv) {
return nil, plannercore.ErrSpecificAccessDenied.GenWithStackByArgs("PROCESS")
}
if e.retrieved || e.extractor.SkipRequest {
return nil, nil
}
e.retrieved = true
totalRows := make([][]types.Datum, 0, len(infoschema.MetricTableMap))
tables := make([]string, 0, len(infoschema.MetricTableMap))
for name := range infoschema.MetricTableMap {
tables = append(tables, name)
}
slices.Sort(tables)
ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnOthers)
filter := inspectionFilter{set: e.extractor.MetricsNames}
condition := e.timeRange.Condition()
for _, name := range tables {
if !filter.enable(name) {
continue
}
def, found := infoschema.MetricTableMap[name]
if !found {
sctx.GetSessionVars().StmtCtx.AppendWarning(fmt.Errorf("metrics table: %s not found", name))
continue
}
var sql string
if def.Quantile > 0 {
var qs []string
if len(e.extractor.Quantiles) > 0 {
for _, q := range e.extractor.Quantiles {
qs = append(qs, fmt.Sprintf("%f", q))
}
} else {
qs = []string{"0.99"}
}
sql = fmt.Sprintf("select sum(value),avg(value),min(value),max(value),quantile from `%[2]s`.`%[1]s` %[3]s and quantile in (%[4]s) group by quantile order by quantile",
name, util.MetricSchemaName.L, condition, strings.Join(qs, ","))
} else {
sql = fmt.Sprintf("select sum(value),avg(value),min(value),max(value) from `%[2]s`.`%[1]s` %[3]s",
name, util.MetricSchemaName.L, condition)
}
exec := sctx.(sqlexec.RestrictedSQLExecutor)
rows, _, err := exec.ExecRestrictedSQL(ctx, nil, sql)
if err != nil {
return nil, errors.Errorf("execute '%s' failed: %v", sql, err)
}
for _, row := range rows {
var quantile interface{}
if def.Quantile > 0 {
quantile = row.GetFloat64(row.Len() - 1)
}
totalRows = append(totalRows, types.MakeDatums(
name,
quantile,
row.GetFloat64(0),
row.GetFloat64(1),
row.GetFloat64(2),
row.GetFloat64(3),
def.Comment,
))
}
}
return totalRows, nil
}
// MetricsSummaryByLabelRetriever is used to read metric detail data.
type MetricsSummaryByLabelRetriever struct {
dummyCloser
table *model.TableInfo
extractor *plannercore.MetricSummaryTableExtractor
timeRange plannercore.QueryTimeRange
retrieved bool
}
func (e *MetricsSummaryByLabelRetriever) retrieve(ctx context.Context, sctx sessionctx.Context) ([][]types.Datum, error) {
if !hasPriv(sctx, mysql.ProcessPriv) {
return nil, plannercore.ErrSpecificAccessDenied.GenWithStackByArgs("PROCESS")
}
if e.retrieved || e.extractor.SkipRequest {
return nil, nil
}
e.retrieved = true
totalRows := make([][]types.Datum, 0, len(infoschema.MetricTableMap))
tables := make([]string, 0, len(infoschema.MetricTableMap))
for name := range infoschema.MetricTableMap {
tables = append(tables, name)
}
slices.Sort(tables)
ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnOthers)
filter := inspectionFilter{set: e.extractor.MetricsNames}
condition := e.timeRange.Condition()
for _, name := range tables {
if !filter.enable(name) {
continue
}
def, found := infoschema.MetricTableMap[name]
if !found {
sctx.GetSessionVars().StmtCtx.AppendWarning(fmt.Errorf("metrics table: %s not found", name))
continue
}
cols := def.Labels
cond := condition
if def.Quantile > 0 {
cols = append(cols, "quantile")
if len(e.extractor.Quantiles) > 0 {
qs := make([]string, len(e.extractor.Quantiles))
for i, q := range e.extractor.Quantiles {
qs[i] = fmt.Sprintf("%f", q)
}
cond += " and quantile in (" + strings.Join(qs, ",") + ")"
} else {
cond += " and quantile=0.99"
}
}
var sql string
if len(cols) > 0 {
sql = fmt.Sprintf("select sum(value),avg(value),min(value),max(value),`%s` from `%s`.`%s` %s group by `%[1]s` order by `%[1]s`",
strings.Join(cols, "`,`"), util.MetricSchemaName.L, name, cond)
} else {
sql = fmt.Sprintf("select sum(value),avg(value),min(value),max(value) from `%s`.`%s` %s",
util.MetricSchemaName.L, name, cond)
}
exec := sctx.(sqlexec.RestrictedSQLExecutor)
rows, _, err := exec.ExecRestrictedSQL(ctx, nil, sql)
if err != nil {
return nil, errors.Errorf("execute '%s' failed: %v", sql, err)
}
nonInstanceLabelIndex := 0
if len(def.Labels) > 0 && def.Labels[0] == "instance" {
nonInstanceLabelIndex = 1
}
// skip sum/avg/min/max
const skipCols = 4
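		// The SELECT above projects sum/avg/min/max first, then the label
		// columns (instance first when present), with quantile last if any.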
for _, row := range rows {
instance := ""
if nonInstanceLabelIndex > 0 {
				instance = row.GetString(skipCols) // the first column after sum/avg/min/max
}
var labels []string
for i, label := range def.Labels[nonInstanceLabelIndex:] {
				// skip the sum/avg/min/max columns plus any leading instance label
val := row.GetString(skipCols + nonInstanceLabelIndex + i)
if label == "store" || label == "store_id" {
val = fmt.Sprintf("store_id:%s", val)
}
labels = append(labels, val)
}
var quantile interface{}
if def.Quantile > 0 {
quantile = row.GetFloat64(row.Len() - 1) // quantile will be the last column
}
totalRows = append(totalRows, types.MakeDatums(
instance,
name,
strings.Join(labels, ", "),
quantile,
row.GetFloat64(0), // sum
row.GetFloat64(1), // avg
row.GetFloat64(2), // min
row.GetFloat64(3), // max
def.Comment,
))
}
}
return totalRows, nil
}
| executor/metrics_reader.go | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.00028202010435052216,
0.00017425719124730676,
0.00016273424262180924,
0.0001709955104161054,
0.000018759821614366956
] |
{
"id": 1,
"code_window": [
"import (\n",
"\t\"time\"\n",
"\n",
"\t\"github.com/pingcap/tidb/resourcemanager/scheduler\"\n",
"\t\"github.com/pingcap/tidb/resourcemanager/util\"\n",
"\ttidbutil \"github.com/pingcap/tidb/util\"\n",
"\t\"github.com/pingcap/tidb/util/cpu\"\n",
")\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/google/uuid\"\n"
],
"file_path": "resourcemanager/rm.go",
"type": "add",
"edit_start_line_idx": 19
} | // Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package resourcemanager
import (
"time"
"github.com/pingcap/tidb/resourcemanager/scheduler"
"github.com/pingcap/tidb/resourcemanager/util"
tidbutil "github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/cpu"
)
// GlobalResourceManager is a global resource manager
var GlobalResourceManager = NewResourceManger()
// ResourceManager is a resource manager
type ResourceManager struct {
poolMap *util.ShardPoolMap
scheduler []scheduler.Scheduler
cpuObserver *cpu.Observer
exitCh chan struct{}
wg tidbutil.WaitGroupWrapper
}
// NewResourceManger creates a new resource manager.
func NewResourceManger() *ResourceManager {
sc := make([]scheduler.Scheduler, 0, 1)
sc = append(sc, scheduler.NewCPUScheduler())
return &ResourceManager{
cpuObserver: cpu.NewCPUObserver(),
exitCh: make(chan struct{}),
poolMap: util.NewShardPoolMap(),
scheduler: sc,
}
}
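// A minimal usage sketch (hypothetical: "pool" stands for any
// util.GorotinuePool implementation and "comp" for a util.Component value,
// neither of which is shown here):
//
//	rm := NewResourceManger()
//	rm.Start()
//	defer rm.Stop()
//	if err := rm.Register(pool, "example-pool", comp); err != nil {
//		// registering a duplicate pool name is expected to fail here
//	}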
// Start starts the resource manager.
func (r *ResourceManager) Start() {
r.wg.Run(r.cpuObserver.Start)
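	// Scheduling goroutine: re-evaluate every registered pool on a fixed
	// 100ms tick until Stop closes exitCh.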
r.wg.Run(func() {
tick := time.NewTicker(100 * time.Millisecond)
defer tick.Stop()
for {
select {
case <-tick.C:
r.schedule()
case <-r.exitCh:
return
}
}
})
}
// Stop stops the resource manager.
func (r *ResourceManager) Stop() {
r.cpuObserver.Stop()
close(r.exitCh)
r.wg.Wait()
}
// Register registers a pool with the resource manager.
func (r *ResourceManager) Register(pool util.GorotinuePool, name string, component util.Component) error {
p := util.PoolContainer{Pool: pool, Component: component}
return r.registerPool(name, &p)
}
func (r *ResourceManager) registerPool(name string, pool *util.PoolContainer) error {
return r.poolMap.Add(name, pool)
}
// Unregister removes a pool from the resource manager.
func (r *ResourceManager) Unregister(name string) {
r.poolMap.Del(name)
}
| resourcemanager/rm.go | 1 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.03756144270300865,
0.004902190063148737,
0.00016496410535182804,
0.0001986486604437232,
0.011592620052397251
] |
{
"id": 1,
"code_window": [
"import (\n",
"\t\"time\"\n",
"\n",
"\t\"github.com/pingcap/tidb/resourcemanager/scheduler\"\n",
"\t\"github.com/pingcap/tidb/resourcemanager/util\"\n",
"\ttidbutil \"github.com/pingcap/tidb/util\"\n",
"\t\"github.com/pingcap/tidb/util/cpu\"\n",
")\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/google/uuid\"\n"
],
"file_path": "resourcemanager/rm.go",
"type": "add",
"edit_start_line_idx": 19
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package variable
import (
"context"
"reflect"
"strconv"
"testing"
"time"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/terror"
"github.com/stretchr/testify/require"
)
func TestTiDBOptOn(t *testing.T) {
table := []struct {
val string
on bool
}{
{"ON", true},
{"on", true},
{"On", true},
{"1", true},
{"off", false},
{"No", false},
{"0", false},
{"1.1", false},
{"", false},
}
for _, tbl := range table {
on := TiDBOptOn(tbl.val)
require.Equal(t, tbl.on, on)
}
}
func TestNewSessionVars(t *testing.T) {
vars := NewSessionVars(nil)
require.Equal(t, DefIndexJoinBatchSize, vars.IndexJoinBatchSize)
require.Equal(t, DefIndexLookupSize, vars.IndexLookupSize)
require.Equal(t, ConcurrencyUnset, vars.indexLookupConcurrency)
require.Equal(t, DefIndexSerialScanConcurrency, vars.indexSerialScanConcurrency)
require.Equal(t, ConcurrencyUnset, vars.indexLookupJoinConcurrency)
require.Equal(t, DefTiDBHashJoinConcurrency, vars.hashJoinConcurrency)
require.Equal(t, DefExecutorConcurrency, vars.IndexLookupConcurrency())
require.Equal(t, DefIndexSerialScanConcurrency, vars.IndexSerialScanConcurrency())
require.Equal(t, DefExecutorConcurrency, vars.IndexLookupJoinConcurrency())
require.Equal(t, DefExecutorConcurrency, vars.HashJoinConcurrency())
require.Equal(t, DefTiDBAllowBatchCop, vars.AllowBatchCop)
require.Equal(t, ConcurrencyUnset, vars.projectionConcurrency)
require.Equal(t, ConcurrencyUnset, vars.hashAggPartialConcurrency)
require.Equal(t, ConcurrencyUnset, vars.hashAggFinalConcurrency)
require.Equal(t, ConcurrencyUnset, vars.windowConcurrency)
require.Equal(t, DefTiDBMergeJoinConcurrency, vars.mergeJoinConcurrency)
require.Equal(t, DefTiDBStreamAggConcurrency, vars.streamAggConcurrency)
require.Equal(t, DefDistSQLScanConcurrency, vars.distSQLScanConcurrency)
require.Equal(t, DefExecutorConcurrency, vars.ProjectionConcurrency())
require.Equal(t, DefExecutorConcurrency, vars.HashAggPartialConcurrency())
require.Equal(t, DefExecutorConcurrency, vars.HashAggFinalConcurrency())
require.Equal(t, DefExecutorConcurrency, vars.WindowConcurrency())
require.Equal(t, DefExecutorConcurrency, vars.IndexMergeIntersectionConcurrency())
require.Equal(t, DefTiDBMergeJoinConcurrency, vars.MergeJoinConcurrency())
require.Equal(t, DefTiDBStreamAggConcurrency, vars.StreamAggConcurrency())
require.Equal(t, DefDistSQLScanConcurrency, vars.DistSQLScanConcurrency())
require.Equal(t, DefExecutorConcurrency, vars.ExecutorConcurrency)
require.Equal(t, DefMaxChunkSize, vars.MaxChunkSize)
require.Equal(t, DefDMLBatchSize, vars.DMLBatchSize)
require.Equal(t, int64(DefTiDBMemQuotaApplyCache), vars.MemQuotaApplyCache)
require.Equal(t, DefOptWriteRowID, vars.AllowWriteRowID)
require.Equal(t, DefTiDBOptJoinReorderThreshold, vars.TiDBOptJoinReorderThreshold)
require.Equal(t, DefTiDBUseFastAnalyze, vars.EnableFastAnalyze)
require.Equal(t, DefTiDBFoundInPlanCache, vars.FoundInPlanCache)
require.Equal(t, DefTiDBFoundInBinding, vars.FoundInBinding)
require.Equal(t, DefTiDBAllowAutoRandExplicitInsert, vars.AllowAutoRandExplicitInsert)
require.Equal(t, int64(DefTiDBShardAllocateStep), vars.ShardAllocateStep)
require.Equal(t, DefTiDBAnalyzeVersion, vars.AnalyzeVersion)
require.Equal(t, DefCTEMaxRecursionDepth, vars.CTEMaxRecursionDepth)
require.Equal(t, int64(DefTiDBTmpTableMaxSize), vars.TMPTableSize)
assertFieldsGreaterThanZero(t, reflect.ValueOf(vars.MemQuota))
assertFieldsGreaterThanZero(t, reflect.ValueOf(vars.BatchSize))
}
func assertFieldsGreaterThanZero(t *testing.T, val reflect.Value) {
for i := 0; i < val.NumField(); i++ {
fieldVal := val.Field(i)
require.Greater(t, fieldVal.Int(), int64(0))
}
}
func TestVarsutil(t *testing.T) {
v := NewSessionVars(nil)
v.GlobalVarsAccessor = NewMockGlobalAccessor4Tests()
err := v.SetSystemVar("autocommit", "1")
require.NoError(t, err)
val, err := v.GetSessionOrGlobalSystemVar(context.Background(), "autocommit")
require.NoError(t, err)
require.Equal(t, "ON", val)
require.NotNil(t, v.SetSystemVar("autocommit", ""))
// 0 converts to OFF
err = v.SetSystemVar("foreign_key_checks", "0")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), "foreign_key_checks")
require.NoError(t, err)
require.Equal(t, "OFF", val)
err = v.SetSystemVar("foreign_key_checks", "1")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), "foreign_key_checks")
require.NoError(t, err)
require.Equal(t, "ON", val)
err = v.SetSystemVar("sql_mode", "strict_trans_tables")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), "sql_mode")
require.NoError(t, err)
require.Equal(t, "STRICT_TRANS_TABLES", val)
require.True(t, v.StrictSQLMode)
err = v.SetSystemVar("sql_mode", "")
require.NoError(t, err)
require.False(t, v.StrictSQLMode)
err = v.SetSystemVar("character_set_connection", "utf8")
require.NoError(t, err)
err = v.SetSystemVar("collation_connection", "utf8_general_ci")
require.NoError(t, err)
charset, collation := v.GetCharsetInfo()
require.Equal(t, "utf8", charset)
require.Equal(t, "utf8_general_ci", collation)
require.Nil(t, v.SetSystemVar("character_set_results", ""))
// Test case for time_zone session variable.
testCases := []struct {
input string
expect string
compareValue bool
diff time.Duration
err error
}{
{"Europe/Helsinki", "Europe/Helsinki", true, -2 * time.Hour, nil},
{"US/Eastern", "US/Eastern", true, 5 * time.Hour, nil},
// TODO: Check it out and reopen this case.
// {"SYSTEM", "Local", false, 0},
{"+10:00", "", true, -10 * time.Hour, nil},
{"-6:00", "", true, 6 * time.Hour, nil},
{"+14:00", "", true, -14 * time.Hour, nil},
{"-12:59", "", true, 12*time.Hour + 59*time.Minute, nil},
{"+14:01", "", false, -14 * time.Hour, ErrUnknownTimeZone.GenWithStackByArgs("+14:01")},
{"-13:00", "", false, 13 * time.Hour, ErrUnknownTimeZone.GenWithStackByArgs("-13:00")},
}
for _, tc := range testCases {
err = v.SetSystemVar(TimeZone, tc.input)
if tc.err != nil {
require.Error(t, err)
continue
}
require.NoError(t, err)
require.Equal(t, tc.expect, v.TimeZone.String())
if tc.compareValue {
err = v.SetSystemVar(TimeZone, tc.input)
require.NoError(t, err)
t1 := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)
t2 := time.Date(2000, 1, 1, 0, 0, 0, 0, v.TimeZone)
require.Equal(t, tc.diff, t2.Sub(t1))
}
}
err = v.SetSystemVar(TimeZone, "6:00")
require.Error(t, err)
require.True(t, terror.ErrorEqual(err, ErrUnknownTimeZone))
// Test case for sql mode.
for str, mode := range mysql.Str2SQLMode {
err = v.SetSystemVar("sql_mode", str)
require.NoError(t, err)
if modeParts, exists := mysql.CombinationSQLMode[str]; exists {
for _, part := range modeParts {
mode |= mysql.Str2SQLMode[part]
}
}
require.Equal(t, mode, v.SQLMode)
}
// Combined sql_mode
err = v.SetSystemVar("sql_mode", "REAL_AS_FLOAT,ANSI_QUOTES")
require.NoError(t, err)
require.Equal(t, mysql.ModeRealAsFloat|mysql.ModeANSIQuotes, v.SQLMode)
// Test case for tidb_index_serial_scan_concurrency.
require.Equal(t, DefIndexSerialScanConcurrency, v.IndexSerialScanConcurrency())
err = v.SetSystemVar(TiDBIndexSerialScanConcurrency, "4")
require.NoError(t, err)
require.Equal(t, 4, v.IndexSerialScanConcurrency())
// Test case for tidb_batch_insert.
require.False(t, v.BatchInsert)
err = v.SetSystemVar(TiDBBatchInsert, "1")
require.NoError(t, err)
require.True(t, v.BatchInsert)
require.Equal(t, 32, v.InitChunkSize)
require.Equal(t, 1024, v.MaxChunkSize)
err = v.SetSystemVar(TiDBMaxChunkSize, "2")
require.NoError(t, err) // converts to min value
err = v.SetSystemVar(TiDBInitChunkSize, "1024")
require.NoError(t, err) // converts to max value
// Test case for TiDBConfig session variable.
err = v.SetSystemVar(TiDBConfig, "abc")
require.True(t, terror.ErrorEqual(err, ErrIncorrectScope))
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBConfig)
require.NoError(t, err)
jsonConfig, err := config.GetJSONConfig()
require.NoError(t, err)
require.Equal(t, jsonConfig, val)
require.Equal(t, DefTiDBOptimizerSelectivityLevel, v.OptimizerSelectivityLevel)
err = v.SetSystemVar(TiDBOptimizerSelectivityLevel, "1")
require.NoError(t, err)
require.Equal(t, 1, v.OptimizerSelectivityLevel)
require.Equal(t, DefTiDBEnableOuterJoinReorder, v.EnableOuterJoinReorder)
err = v.SetSystemVar(TiDBOptimizerEnableOuterJoinReorder, "OFF")
require.NoError(t, err)
require.Equal(t, false, v.EnableOuterJoinReorder)
err = v.SetSystemVar(TiDBOptimizerEnableOuterJoinReorder, "ON")
require.NoError(t, err)
require.Equal(t, true, v.EnableOuterJoinReorder)
require.Equal(t, DefTiDBOptimizerEnableNewOFGB, v.OptimizerEnableNewOnlyFullGroupByCheck)
err = v.SetSystemVar(TiDBOptimizerEnableNewOnlyFullGroupByCheck, "off")
require.NoError(t, err)
require.Equal(t, false, v.OptimizerEnableNewOnlyFullGroupByCheck)
err = v.SetSystemVar(TiDBDDLReorgWorkerCount, "4") // wrong scope global only
require.True(t, terror.ErrorEqual(err, errGlobalVariable))
err = v.SetSystemVar(TiDBRetryLimit, "3")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBRetryLimit)
require.NoError(t, err)
require.Equal(t, "3", val)
require.Equal(t, int64(3), v.RetryLimit)
require.Equal(t, "", v.EnableTablePartition)
err = v.SetSystemVar(TiDBEnableTablePartition, "on")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBEnableTablePartition)
require.NoError(t, err)
require.Equal(t, "ON", val)
require.Equal(t, "ON", v.EnableTablePartition)
require.False(t, v.EnableListTablePartition)
err = v.SetSystemVar(TiDBEnableListTablePartition, "on")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBEnableListTablePartition)
require.NoError(t, err)
require.Equal(t, "ON", val)
require.True(t, v.EnableListTablePartition)
require.Equal(t, DefTiDBOptJoinReorderThreshold, v.TiDBOptJoinReorderThreshold)
err = v.SetSystemVar(TiDBOptJoinReorderThreshold, "5")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBOptJoinReorderThreshold)
require.NoError(t, err)
require.Equal(t, "5", val)
require.Equal(t, 5, v.TiDBOptJoinReorderThreshold)
err = v.SetSystemVar(TiDBLowResolutionTSO, "1")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBLowResolutionTSO)
require.NoError(t, err)
require.Equal(t, "ON", val)
require.True(t, v.LowResolutionTSO)
err = v.SetSystemVar(TiDBLowResolutionTSO, "0")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBLowResolutionTSO)
require.NoError(t, err)
require.Equal(t, "OFF", val)
require.False(t, v.LowResolutionTSO)
require.Equal(t, 0.9, v.CorrelationThreshold)
err = v.SetSystemVar(TiDBOptCorrelationThreshold, "0")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBOptCorrelationThreshold)
require.NoError(t, err)
require.Equal(t, "0", val)
require.Equal(t, float64(0), v.CorrelationThreshold)
require.Equal(t, 3.0, v.GetCPUFactor())
err = v.SetSystemVar(TiDBOptCPUFactor, "5.0")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBOptCPUFactor)
require.NoError(t, err)
require.Equal(t, "5.0", val)
require.Equal(t, 5.0, v.GetCPUFactor())
require.Equal(t, 3.0, v.GetCopCPUFactor())
err = v.SetSystemVar(TiDBOptCopCPUFactor, "5.0")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBOptCopCPUFactor)
require.NoError(t, err)
require.Equal(t, "5.0", val)
require.Equal(t, 5.0, v.GetCopCPUFactor())
require.Equal(t, 24.0, v.CopTiFlashConcurrencyFactor)
err = v.SetSystemVar(TiDBOptTiFlashConcurrencyFactor, "5.0")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBOptTiFlashConcurrencyFactor)
require.NoError(t, err)
require.Equal(t, "5.0", val)
require.Equal(t, 5.0, v.GetCopCPUFactor())
require.Equal(t, 1.0, v.GetNetworkFactor(nil))
err = v.SetSystemVar(TiDBOptNetworkFactor, "3.0")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBOptNetworkFactor)
require.NoError(t, err)
require.Equal(t, "3.0", val)
require.Equal(t, 3.0, v.GetNetworkFactor(nil))
require.Equal(t, 1.5, v.GetScanFactor(nil))
err = v.SetSystemVar(TiDBOptScanFactor, "3.0")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBOptScanFactor)
require.NoError(t, err)
require.Equal(t, "3.0", val)
require.Equal(t, 3.0, v.GetScanFactor(nil))
require.Equal(t, 3.0, v.GetDescScanFactor(nil))
err = v.SetSystemVar(TiDBOptDescScanFactor, "5.0")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBOptDescScanFactor)
require.NoError(t, err)
require.Equal(t, "5.0", val)
require.Equal(t, 5.0, v.GetDescScanFactor(nil))
require.Equal(t, 20.0, v.GetSeekFactor(nil))
err = v.SetSystemVar(TiDBOptSeekFactor, "50.0")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBOptSeekFactor)
require.NoError(t, err)
require.Equal(t, "50.0", val)
require.Equal(t, 50.0, v.GetSeekFactor(nil))
require.Equal(t, 0.001, v.GetMemoryFactor())
err = v.SetSystemVar(TiDBOptMemoryFactor, "1.0")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBOptMemoryFactor)
require.NoError(t, err)
require.Equal(t, "1.0", val)
require.Equal(t, 1.0, v.GetMemoryFactor())
require.Equal(t, 1.5, v.GetDiskFactor())
err = v.SetSystemVar(TiDBOptDiskFactor, "1.1")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBOptDiskFactor)
require.NoError(t, err)
require.Equal(t, "1.1", val)
require.Equal(t, 1.1, v.GetDiskFactor())
require.Equal(t, 3.0, v.GetConcurrencyFactor())
err = v.SetSystemVar(TiDBOptConcurrencyFactor, "5.0")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBOptConcurrencyFactor)
require.NoError(t, err)
require.Equal(t, "5.0", val)
require.Equal(t, 5.0, v.GetConcurrencyFactor())
err = v.SetSystemVar(TiDBReplicaRead, "follower")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBReplicaRead)
require.NoError(t, err)
require.Equal(t, "follower", val)
require.Equal(t, kv.ReplicaReadFollower, v.GetReplicaRead())
err = v.SetSystemVar(TiDBReplicaRead, "leader")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBReplicaRead)
require.NoError(t, err)
require.Equal(t, "leader", val)
require.Equal(t, kv.ReplicaReadLeader, v.GetReplicaRead())
err = v.SetSystemVar(TiDBReplicaRead, "leader-and-follower")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBReplicaRead)
require.NoError(t, err)
require.Equal(t, "leader-and-follower", val)
require.Equal(t, kv.ReplicaReadMixed, v.GetReplicaRead())
err = v.SetSystemVar(TiDBRedactLog, "ON")
require.NoError(t, err)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBRedactLog)
require.NoError(t, err)
require.Equal(t, "ON", val)
err = v.SetSystemVar(TiDBFoundInPlanCache, "1")
require.Error(t, err)
require.Regexp(t, "]Variable 'last_plan_from_cache' is a read only variable$", err.Error())
err = v.SetSystemVar(TiDBFoundInBinding, "1")
require.Error(t, err)
require.Regexp(t, "]Variable 'last_plan_from_binding' is a read only variable$", err.Error())
err = v.SetSystemVar("UnknownVariable", "on")
require.Error(t, err)
require.Regexp(t, "]Unknown system variable 'UnknownVariable'$", err.Error())
// reset warnings
v.StmtCtx.TruncateWarnings(0)
require.Len(t, v.StmtCtx.GetWarnings(), 0)
err = v.SetSystemVar(TiDBAnalyzeVersion, "4")
require.NoError(t, err) // converts to max value
warn := v.StmtCtx.GetWarnings()[0]
require.Error(t, warn.Err)
require.Contains(t, warn.Err.Error(), "Truncated incorrect tidb_analyze_version value")
err = v.SetSystemVar(TiDBTableCacheLease, "123")
require.Error(t, err)
require.Regexp(t, "'tidb_table_cache_lease' is a GLOBAL variable and should be set with SET GLOBAL", err.Error())
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBMinPagingSize)
require.NoError(t, err)
require.Equal(t, strconv.Itoa(DefMinPagingSize), val)
err = v.SetSystemVar(TiDBMinPagingSize, "123")
require.NoError(t, err)
require.Equal(t, v.MinPagingSize, 123)
val, err = v.GetSessionOrGlobalSystemVar(context.Background(), TiDBMaxPagingSize)
require.NoError(t, err)
require.Equal(t, strconv.Itoa(DefMaxPagingSize), val)
err = v.SetSystemVar(TiDBMaxPagingSize, "456")
require.NoError(t, err)
require.Equal(t, v.MaxPagingSize, 456)
err = v.SetSystemVar(TiDBMaxPagingSize, "45678")
require.NoError(t, err)
require.Equal(t, v.MaxPagingSize, 45678)
}
func TestValidate(t *testing.T) {
v := NewSessionVars(nil)
v.GlobalVarsAccessor = NewMockGlobalAccessor4Tests()
v.TimeZone = time.UTC
testCases := []struct {
key string
value string
error bool
}{
{TiDBAutoAnalyzeStartTime, "15:04", false},
{TiDBAutoAnalyzeStartTime, "15:04 -0700", false},
{DelayKeyWrite, "ON", false},
{DelayKeyWrite, "OFF", false},
{DelayKeyWrite, "ALL", false},
{DelayKeyWrite, "3", true},
{ForeignKeyChecks, "3", true},
{MaxSpRecursionDepth, "256", false},
{SessionTrackGtids, "OFF", false},
{SessionTrackGtids, "OWN_GTID", false},
{SessionTrackGtids, "ALL_GTIDS", false},
{SessionTrackGtids, "ON", true},
{EnforceGtidConsistency, "OFF", false},
{EnforceGtidConsistency, "ON", false},
{EnforceGtidConsistency, "WARN", false},
{QueryCacheType, "OFF", false},
{QueryCacheType, "ON", false},
{QueryCacheType, "DEMAND", false},
{QueryCacheType, "3", true},
{SecureAuth, "1", false},
{SecureAuth, "3", true},
{MyISAMUseMmap, "ON", false},
{MyISAMUseMmap, "OFF", false},
{TiDBEnableTablePartition, "ON", false},
{TiDBEnableTablePartition, "OFF", false},
{TiDBEnableTablePartition, "AUTO", false},
{TiDBEnableTablePartition, "UN", true},
{TiDBEnableListTablePartition, "ON", false},
{TiDBEnableListTablePartition, "OFF", false},
{TiDBEnableListTablePartition, "list", true},
{TiDBOptCorrelationExpFactor, "a", true},
{TiDBOptCorrelationExpFactor, "-10", false},
{TiDBOptCorrelationThreshold, "a", true},
{TiDBOptCorrelationThreshold, "-2", false},
{TiDBOptCPUFactor, "a", true},
{TiDBOptCPUFactor, "-2", false},
{TiDBOptTiFlashConcurrencyFactor, "-2", false},
{TiDBOptCopCPUFactor, "a", true},
{TiDBOptCopCPUFactor, "-2", false},
{TiDBOptNetworkFactor, "a", true},
{TiDBOptNetworkFactor, "-2", false},
{TiDBOptScanFactor, "a", true},
{TiDBOptScanFactor, "-2", false},
{TiDBOptDescScanFactor, "a", true},
{TiDBOptDescScanFactor, "-2", false},
{TiDBOptSeekFactor, "a", true},
{TiDBOptSeekFactor, "-2", false},
{TiDBOptMemoryFactor, "a", true},
{TiDBOptMemoryFactor, "-2", false},
{TiDBOptDiskFactor, "a", true},
{TiDBOptDiskFactor, "-2", false},
{TiDBOptConcurrencyFactor, "a", true},
{TiDBOptConcurrencyFactor, "-2", false},
{TxnIsolation, "READ-UNCOMMITTED", true},
{TiDBInitChunkSize, "a", true},
{TiDBInitChunkSize, "-1", false},
{TiDBMaxChunkSize, "a", true},
{TiDBMaxChunkSize, "-1", false},
{TiDBOptJoinReorderThreshold, "a", true},
{TiDBOptJoinReorderThreshold, "-1", false},
{TiDBReplicaRead, "invalid", true},
{TiDBTxnMode, "invalid", true},
{TiDBTxnMode, "pessimistic", false},
{TiDBTxnMode, "optimistic", false},
{TiDBTxnMode, "", false},
{TiDBShardAllocateStep, "ad", true},
{TiDBShardAllocateStep, "-123", false},
{TiDBShardAllocateStep, "128", false},
{TiDBAllowFallbackToTiKV, "", false},
{TiDBAllowFallbackToTiKV, "tiflash", false},
{TiDBAllowFallbackToTiKV, " tiflash ", false},
{TiDBAllowFallbackToTiKV, "tikv", true},
{TiDBAllowFallbackToTiKV, "tidb", true},
{TiDBAllowFallbackToTiKV, "tiflash,tikv,tidb", true},
}
for _, tc := range testCases {
t.Run(tc.key, func(t *testing.T) {
_, err := GetSysVar(tc.key).Validate(v, tc.value, ScopeGlobal)
if tc.error {
require.Errorf(t, err, "%v got err=%v", tc, err)
} else {
require.NoErrorf(t, err, "%v got err=%v", tc, err)
}
})
}
// Test session scoped vars.
testCases = []struct {
key string
value string
error bool
}{
{TiDBEnableListTablePartition, "ON", false},
{TiDBEnableListTablePartition, "OFF", false},
{TiDBEnableListTablePartition, "list", true},
{TiDBIsolationReadEngines, "", true},
{TiDBIsolationReadEngines, "tikv", false},
{TiDBIsolationReadEngines, "TiKV,tiflash", false},
{TiDBIsolationReadEngines, " tikv, tiflash ", false},
}
for _, tc := range testCases {
// copy iterator variable into a new variable, see issue #27779
tc := tc
t.Run(tc.key, func(t *testing.T) {
_, err := GetSysVar(tc.key).Validate(v, tc.value, ScopeSession)
if tc.error {
require.Errorf(t, err, "%v got err=%v", tc, err)
} else {
require.NoErrorf(t, err, "%v got err=%v", tc, err)
}
})
}
}
func TestValidateStmtSummary(t *testing.T) {
v := NewSessionVars(nil)
v.GlobalVarsAccessor = NewMockGlobalAccessor4Tests()
v.TimeZone = time.UTC
testCases := []struct {
key string
value string
error bool
}{
{TiDBEnableStmtSummary, "", true},
{TiDBStmtSummaryInternalQuery, "", true},
{TiDBStmtSummaryRefreshInterval, "", true},
{TiDBStmtSummaryRefreshInterval, "0", false},
{TiDBStmtSummaryRefreshInterval, "99999999999", false},
{TiDBStmtSummaryHistorySize, "", true},
{TiDBStmtSummaryHistorySize, "0", false},
{TiDBStmtSummaryHistorySize, "-1", false},
{TiDBStmtSummaryHistorySize, "99999999", false},
{TiDBStmtSummaryMaxStmtCount, "", true},
{TiDBStmtSummaryMaxStmtCount, "0", false},
{TiDBStmtSummaryMaxStmtCount, "99999999", false},
{TiDBStmtSummaryMaxSQLLength, "", true},
{TiDBStmtSummaryMaxSQLLength, "0", false},
{TiDBStmtSummaryMaxSQLLength, "-1", false},
{TiDBStmtSummaryMaxSQLLength, "99999999999", false},
}
for _, tc := range testCases {
// copy iterator variable into a new variable, see issue #27779
tc := tc
t.Run(tc.key, func(t *testing.T) {
_, err := GetSysVar(tc.key).Validate(v, tc.value, ScopeGlobal)
if tc.error {
require.Errorf(t, err, "%v got err=%v", tc, err)
} else {
require.NoErrorf(t, err, "%v got err=%v", tc, err)
}
})
}
}
func TestConcurrencyVariables(t *testing.T) {
vars := NewSessionVars(nil)
vars.GlobalVarsAccessor = NewMockGlobalAccessor4Tests()
wdConcurrency := 2
require.Equal(t, ConcurrencyUnset, vars.windowConcurrency)
require.Equal(t, DefExecutorConcurrency, vars.WindowConcurrency())
err := vars.SetSystemVar(TiDBWindowConcurrency, strconv.Itoa(wdConcurrency))
require.NoError(t, err)
require.Equal(t, wdConcurrency, vars.windowConcurrency)
require.Equal(t, wdConcurrency, vars.WindowConcurrency())
mjConcurrency := 2
require.Equal(t, DefTiDBMergeJoinConcurrency, vars.mergeJoinConcurrency)
require.Equal(t, DefTiDBMergeJoinConcurrency, vars.MergeJoinConcurrency())
err = vars.SetSystemVar(TiDBMergeJoinConcurrency, strconv.Itoa(mjConcurrency))
require.NoError(t, err)
require.Equal(t, mjConcurrency, vars.mergeJoinConcurrency)
require.Equal(t, mjConcurrency, vars.MergeJoinConcurrency())
saConcurrency := 2
require.Equal(t, DefTiDBStreamAggConcurrency, vars.streamAggConcurrency)
require.Equal(t, DefTiDBStreamAggConcurrency, vars.StreamAggConcurrency())
err = vars.SetSystemVar(TiDBStreamAggConcurrency, strconv.Itoa(saConcurrency))
require.NoError(t, err)
require.Equal(t, saConcurrency, vars.streamAggConcurrency)
require.Equal(t, saConcurrency, vars.StreamAggConcurrency())
require.Equal(t, ConcurrencyUnset, vars.indexLookupConcurrency)
require.Equal(t, DefExecutorConcurrency, vars.IndexLookupConcurrency())
exeConcurrency := DefExecutorConcurrency + 1
err = vars.SetSystemVar(TiDBExecutorConcurrency, strconv.Itoa(exeConcurrency))
require.NoError(t, err)
require.Equal(t, ConcurrencyUnset, vars.indexLookupConcurrency)
require.Equal(t, exeConcurrency, vars.IndexLookupConcurrency())
require.Equal(t, wdConcurrency, vars.WindowConcurrency())
require.Equal(t, mjConcurrency, vars.MergeJoinConcurrency())
require.Equal(t, saConcurrency, vars.StreamAggConcurrency())
}
func TestHelperFuncs(t *testing.T) {
require.Equal(t, "ON", int32ToBoolStr(1))
require.Equal(t, "OFF", int32ToBoolStr(0))
require.Equal(t, ClusteredIndexDefModeOn, TiDBOptEnableClustered("ON"))
require.Equal(t, ClusteredIndexDefModeOff, TiDBOptEnableClustered("OFF"))
require.Equal(t, ClusteredIndexDefModeIntOnly, TiDBOptEnableClustered("bogus")) // default
require.Equal(t, 1234, tidbOptPositiveInt32("1234", 5))
require.Equal(t, 5, tidbOptPositiveInt32("-1234", 5))
require.Equal(t, 5, tidbOptPositiveInt32("bogus", 5))
require.Equal(t, 1234, TidbOptInt("1234", 5))
require.Equal(t, -1234, TidbOptInt("-1234", 5))
require.Equal(t, 5, TidbOptInt("bogus", 5))
}
func TestStmtVars(t *testing.T) {
vars := NewSessionVars(nil)
err := vars.SetStmtVar("bogussysvar", "1")
require.Equal(t, "[variable:1193]Unknown system variable 'bogussysvar'", err.Error())
err = vars.SetStmtVar(MaxExecutionTime, "ACDC")
require.Equal(t, "[variable:1232]Incorrect argument type to variable 'max_execution_time'", err.Error())
err = vars.SetStmtVar(MaxExecutionTime, "100")
require.NoError(t, err)
}
func TestSessionStatesSystemVar(t *testing.T) {
vars := NewSessionVars(nil)
err := vars.SetSystemVar("autocommit", "1")
require.NoError(t, err)
val, keep, err := vars.GetSessionStatesSystemVar("autocommit")
require.NoError(t, err)
require.Equal(t, "ON", val)
require.Equal(t, true, keep)
_, keep, err = vars.GetSessionStatesSystemVar(Timestamp)
require.NoError(t, err)
require.Equal(t, false, keep)
err = vars.SetSystemVar(MaxAllowedPacket, "1024")
require.NoError(t, err)
val, keep, err = vars.GetSessionStatesSystemVar(MaxAllowedPacket)
require.NoError(t, err)
require.Equal(t, "1024", val)
require.Equal(t, true, keep)
}
func TestOnOffHelpers(t *testing.T) {
require.Equal(t, "ON", trueFalseToOnOff("TRUE"))
require.Equal(t, "ON", trueFalseToOnOff("TRue"))
require.Equal(t, "ON", trueFalseToOnOff("true"))
require.Equal(t, "OFF", trueFalseToOnOff("FALSE"))
require.Equal(t, "OFF", trueFalseToOnOff("False"))
require.Equal(t, "OFF", trueFalseToOnOff("false"))
require.Equal(t, "other", trueFalseToOnOff("other"))
require.Equal(t, "true", OnOffToTrueFalse("ON"))
require.Equal(t, "true", OnOffToTrueFalse("on"))
require.Equal(t, "true", OnOffToTrueFalse("On"))
require.Equal(t, "false", OnOffToTrueFalse("OFF"))
require.Equal(t, "false", OnOffToTrueFalse("Off"))
require.Equal(t, "false", OnOffToTrueFalse("off"))
require.Equal(t, "other", OnOffToTrueFalse("other"))
}
func TestAssertionLevel(t *testing.T) {
require.Equal(t, AssertionLevelStrict, tidbOptAssertionLevel(AssertionStrictStr))
require.Equal(t, AssertionLevelOff, tidbOptAssertionLevel(AssertionOffStr))
require.Equal(t, AssertionLevelFast, tidbOptAssertionLevel(AssertionFastStr))
require.Equal(t, AssertionLevelOff, tidbOptAssertionLevel("bogus"))
}
| sessionctx/variable/varsutil_test.go | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.0007662742282263935,
0.0001806207001209259,
0.0001652882929192856,
0.0001729062496451661,
0.00006861367000965402
] |
{
"id": 1,
"code_window": [
"import (\n",
"\t\"time\"\n",
"\n",
"\t\"github.com/pingcap/tidb/resourcemanager/scheduler\"\n",
"\t\"github.com/pingcap/tidb/resourcemanager/util\"\n",
"\ttidbutil \"github.com/pingcap/tidb/util\"\n",
"\t\"github.com/pingcap/tidb/util/cpu\"\n",
")\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/google/uuid\"\n"
],
"file_path": "resourcemanager/rm.go",
"type": "add",
"edit_start_line_idx": 19
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddltest
import (
goctx "context"
"fmt"
"math"
"os"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/pingcap/log"
"github.com/pingcap/tidb/store/gcworker"
"github.com/pingcap/tidb/table"
"github.com/stretchr/testify/require"
)
func getIndex(t table.Table, name string) table.Index {
for _, idx := range t.Indices() {
if idx.Meta().Name.O == name {
return idx
}
}
return nil
}
func (s *ddlSuite) checkDropIndex(t *testing.T, tableName string) {
gcWorker, err := gcworker.NewMockGCWorker(s.store)
require.NoError(t, err)
err = gcWorker.DeleteRanges(goctx.Background(), uint64(math.MaxInt32))
require.NoError(t, err)
s.mustExec(fmt.Sprintf("admin check table %s", tableName))
}
// TestIndex operations on table test_index (c int, c1 bigint, c2 double, c3 varchar(256), primary key(c)).
func TestIndex(t *testing.T) {
err := os.Setenv("tidb_manager_ttl", fmt.Sprintf("%d", *lease+5))
if err != nil {
log.Fatal("set tidb_manager_ttl failed")
}
s := createDDLSuite(t)
defer s.teardown(t)
	// First, insert a large batch of rows.
workerNum := 10
base := *dataNum / workerNum
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < base; j++ {
k := base*i + j
s.execInsert(
fmt.Sprintf("insert into test_index values (%d, %d, %f, '%s')",
k, randomInt(), randomFloat(), randomString(10)))
}
}(i)
}
wg.Wait()
tbl := []struct {
Query string
IndexName string
Add bool
}{
{"create index c1_index on test_index (c1)", "c1_index", true},
{"drop index c1_index on test_index", "c1_index", false},
{"create index c2_index on test_index (c2)", "c2_index", true},
{"drop index c2_index on test_index", "c2_index", false},
{"create index c3_index on test_index (c3)", "c3_index", true},
{"drop index c3_index on test_index", "c3_index", false},
}
insertID := int64(*dataNum)
for _, col := range tbl {
done := s.runDDL(col.Query)
ticker := time.NewTicker(time.Duration(*lease) * time.Second / 2)
//nolint:all_revive,revive
defer ticker.Stop()
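		// While the DDL statement runs, keep inserting/deleting/updating rows
		// every half lease so the schema change races with concurrent DML.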
LOOP:
for {
select {
case err := <-done:
require.NoError(t, err)
break LOOP
case <-ticker.C:
// add count new data
// delete count old data randomly
// update count old data randomly
count := 10
s.execIndexOperations(t, workerNum, count, &insertID)
}
}
tbl := s.getTable(t, "test_index")
index := getIndex(tbl, col.IndexName)
if col.Add {
require.NotNil(t, index)
s.mustExec("admin check table test_index")
} else {
require.Nil(t, index)
s.checkDropIndex(t, "test_index")
}
}
}
func (s *ddlSuite) execIndexOperations(t *testing.T, workerNum, count int, insertID *int64) {
var wg sync.WaitGroup
	// workerNum is 10 when called from TestIndex.
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func() {
defer wg.Done()
for j := 0; j < count; j++ {
id := atomic.AddInt64(insertID, 1)
sql := fmt.Sprintf("insert into test_index values (%d, %d, %f, '%s')", id, randomInt(), randomFloat(), randomString(10))
s.execInsert(sql)
t.Logf("sql %s", sql)
sql = fmt.Sprintf("delete from test_index where c = %d", randomIntn(int(id)))
s.mustExec(sql)
t.Logf("sql %s", sql)
sql = fmt.Sprintf("update test_index set c1 = %d, c2 = %f, c3 = '%s' where c = %d", randomInt(), randomFloat(), randomString(10), randomIntn(int(id)))
s.mustExec(sql)
t.Logf("sql %s", sql)
}
}()
}
wg.Wait()
}
| cmd/ddltest/index_test.go | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.0009033206733874977,
0.00021943160390947014,
0.0001627936726436019,
0.0001710829237708822,
0.00018280134827364236
] |
{
"id": 1,
"code_window": [
"import (\n",
"\t\"time\"\n",
"\n",
"\t\"github.com/pingcap/tidb/resourcemanager/scheduler\"\n",
"\t\"github.com/pingcap/tidb/resourcemanager/util\"\n",
"\ttidbutil \"github.com/pingcap/tidb/util\"\n",
"\t\"github.com/pingcap/tidb/util/cpu\"\n",
")\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/google/uuid\"\n"
],
"file_path": "resourcemanager/rm.go",
"type": "add",
"edit_start_line_idx": 19
} | [
{
"name": "TestIndexMergeJSONMemberOf",
"cases": [
"select /*+ use_index_merge(t, j0_0) */ * from t where (1 member of (j0->'$.path0'))",
"select /*+ use_index_merge(t, j0_1) */ * from t where (1 member of (j0->'$.path1')) and a<10",
"select /*+ use_index_merge(t, j0_1) */ * from t where (1 member of (j0->'$.XXX')) and a<10",
"select /*+ use_index_merge(t, j0_1) */ * from t where (1 member of (j0->'$.path1')) and (2 member of (j1)) and a<10",
"select /*+ use_index(t, j0_0) */ * from t where (1 member of (j0->'$.path0'))",
"select /*+ use_index(t, j0_1) */ * from t where (1 member of (j0->'$.path1')) and a<10",
"select * from t use index(j0_0) where (1 member of (j0->'$.path0'))",
"select * from t use index(j0_1) where (1 member of (j0->'$.path1')) and a<10",
"select * from t force index(j0_0) where (1 member of (j0->'$.path0'))",
"select * from t force index(j0_1) where (1 member of (j0->'$.path1')) and a<10",
"select /*+ use_index_merge(t, j1) */ * from t where (1 member of (j0->'$.path1')) and (2 member of (j1)) and a<10",
"select /*+ use_index_merge(t, j0_0) */ * from t where json_contains((j0->'$.path0'), '[1, 2, 3]')",
"select /*+ use_index_merge(t, j0_0) */ * from t where json_overlaps((j0->'$.path0'), '[1, 2, 3]')",
"select /*+ use_index_merge(t, j0_0) */ * from t where json_overlaps('[1, 2, 3]', (j0->'$.path0'))",
"select /*+ use_index_merge(t, j0_0) */ * from t where json_contains((j0->'$.path0'), '[1, 2, 3]') and a<10",
"select /*+ use_index_merge(t, j0_0) */ * from t where json_overlaps((j0->'$.path0'), '[1, 2, 3]') and a<10",
"select /*+ use_index_merge(t, j0_0) */ * from t where json_overlaps('[1, 2, 3]', (j0->'$.path0')) and a<10",
"select /*+ use_index_merge(t, j0_0) */ * from t where json_contains((j0->'$.path0'), '1')",
"select /*+ use_index_merge(t, j0_0) */ * from t where json_overlaps((j0->'$.path0'), '1')",
"select /*+ use_index_merge(t, j0_0) */ * from t where json_overlaps('1', (j0->'$.path0'))",
"select /*+ use_index_merge(t, j0_0) */ * from t where json_contains((j0->'$.path0'), '1') and a<10",
"select /*+ use_index_merge(t, j0_0) */ * from t where json_overlaps((j0->'$.path0'), '1') and a<10",
"select /*+ use_index_merge(t, j0_0) */ * from t where json_overlaps('1', (j0->'$.path0')) and a<10",
"select /*+ use_index_merge(t, j0_string) */ * from t where (\"a\" member of (j0->'$.path_string'))",
"select /*+ use_index_merge(t, j0_string) */ * from t where (\"a\" member of (j0->'$.path_string')) and a<10",
"select /*+ use_index_merge(t, j0_string) */ * from t where json_contains((j0->'$.path_string'), '[\"a\", \"b\", \"c\"]')",
"select /*+ use_index_merge(t, j0_string) */ * from t where json_contains((j0->'$.path_string'), '[\"a\", \"b\", \"c\"]') and a<10",
"select /*+ use_index_merge(t, j0_string) */ * from t where json_overlaps((j0->'$.path_string'), '[\"a\", \"b\", \"c\"]')",
"select /*+ use_index_merge(t, j0_string) */ * from t where json_overlaps((j0->'$.path_string'), '[\"a\", \"b\", \"c\"]') and a<10",
"select /*+ use_index_merge(t, j0_date) */ * from t where (\"2023-01-01\" member of (j0->'$.path_date'))",
"select /*+ use_index_merge(t, j0_date) */ * from t where (\"2023-01-01\" member of (j0->'$.path_date')) and a<10",
"select /*+ use_index_merge(t, j0_date) */ * from t where json_contains((j0->'$.path_date'), json_array(cast('2023-01-01' as date), cast('2023-01-02' as date), cast('2023-01-03' as date)))",
"select /*+ use_index_merge(t, j0_date) */ * from t where json_contains((j0->'$.path_date'), json_array(cast('2023-01-01' as date), cast('2023-01-02' as date), cast('2023-01-03' as date))) and a<10",
"select /*+ use_index_merge(t, j0_date) */ * from t where json_overlaps((j0->'$.path_date'), json_array(cast('2023-01-01' as date), cast('2023-01-02' as date), cast('2023-01-03' as date)))",
"select /*+ use_index_merge(t, j0_date) */ * from t where json_overlaps((j0->'$.path_date'), json_array(cast('2023-01-01' as date), cast('2023-01-02' as date), cast('2023-01-03' as date))) and a<10"
]
},
{
"name": "TestCompositeMVIndex",
"cases": [
"select /*+ use_index_merge(t, idx) */ * from t where a=1 and b=2 and (3 member of (j)) and c=4",
"select /*+ use_index_merge(t, idx) */ * from t where a=1 and b=2 and (3 member of (j))",
"select /*+ use_index_merge(t, idx) */ * from t where a=1 and b=2",
"select /*+ use_index_merge(t, idx) */ * from t where a=1",
"select /*+ use_index_merge(t, idx2) */ * from t where a=1 and b=2 and ('3' member of (j->'$.str')) and c=4",
"select /*+ use_index_merge(t, idx2) */ * from t where a=1 and b=2 and ('3' member of (j->'$.str'))",
"select /*+ use_index_merge(t, idx2) */ * from t where a=1 and b=2",
"select /*+ use_index_merge(t, idx2) */ * from t where a=1",
"select /*+ use_index(t, idx) */ * from t where a=1 and b=2 and (3 member of (j)) and c=4",
"select * from t use index(idx) where a=1 and b=2 and (3 member of (j))",
"select /*+ use_index(t, idx) */ * from t where a=1 and b=2",
"select * from t use index(idx) where a=1",
"select * from t force index(idx) where a=1 and b=2 and (3 member of (j))",
"select * from t force index(idx) where a=1"
]
},
{
"name": "TestDNFOnMVIndex",
"cases": [
"select /*+ use_index_merge(t, idx1) */ * from t where (1 member of (j)) or (2 member of (j))",
"select /*+ use_index_merge(t, idx1) */ * from t where ((1 member of (j)) or (2 member of (j))) and (a > 10)",
"select /*+ use_index_merge(t, idx1) */ * from t where (json_overlaps(j, '[1, 2]')) or (json_overlaps(j, '[3, 4]'))",
"select /*+ use_index_merge(t, idx1) */ * from t where ((json_overlaps(j, '[1, 2]')) or (json_overlaps(j, '[3, 4]'))) and (a > 10)",
"select /*+ use_index_merge(t, idx1) */ * from t where (json_contains(j, '[1, 2]')) or (json_contains(j, '[3, 4]'))",
"select /*+ use_index_merge(t, idx2) */ * from t where (a=1 and b=2 and (3 member of (j))) or (a=11 and b=12 and (13 member of (j)))",
"select /*+ use_index_merge(t, idx2) */ * from t where (a=1 and b=2 and (3 member of (j))) or (a=11 and b=12 and (13 member of (j)) and c=14)",
"select /*+ use_index_merge(t, idx2) */ * from t where ((a=1 and b=2 and (3 member of (j))) or (a=11 and b=12 and (13 member of (j)))) and (c > 10)"
]
},
{
"name": "TestMVIndexSelection",
"cases": [
"select (j->'$.int') from t where (1 member of (j->'$.int'))",
"select * from t where (1 member of (j->'$.int'))",
"select * from t where (1 member of (j->'$.int')) and a<10",
"select (j->'$.int') from t where json_contains((j->'$.int'), '[1, 2, 3]')",
"select * from t where json_contains((j->'$.int'), '[1, 2, 3]')",
"select * from t where json_contains((j->'$.int'), '[1, 2, 3]') and a<10",
"select (j->'$.int') from t where json_overlaps((j->'$.int'), '[1, 2, 3]')",
"select * from t where json_overlaps((j->'$.int'), '[1, 2, 3]')",
"select * from t where json_overlaps((j->'$.int'), '[1, 2, 3]') and a<10"
]
},
{
"name": "TestIndexMergePathGeneration",
"cases": [
"select * from t",
"select * from t where c < 1",
"select * from t where c < 1 or f > 2",
"select * from t where (c < 1 or f > 2) and (c > 5 or f < 7)",
"select * from t where (c < 1 or f > 2) and (c > 5 or f < 7) and (c < 1 or g > 2)",
"select * from t where (c < 1 or f > 2) and (c > 5 or f < 7) and (e < 1 or f > 2)"
]
},
{
"name": "TestHintForIntersectionIndexMerge",
"cases": [
"set @@tidb_partition_prune_mode = 'dynamic'",
"select * from vh",
"select /*+ qb_name(v, v), use_index_merge(@v t1, ia, ibc, id) */ * from v",
"select /*+ qb_name(v, v@sel_1), use_index_merge(@v t1, ia, ibc, id) */ * from v",
"select /*+ qb_name(v, v@sel_1 .@sel_1), use_index_merge(@v t1, ia, ibc, id) */ * from v",
"select /*+ qb_name(v, v1@sel_1 .@sel_1), use_index_merge(@v t1, ia, ibc, id) */ * from v1 where c < 30 and d in (2,5)",
"select /*+ use_index_merge(t2, ia, ibc, id, ie) */ * from t2 where a > 10 and b = 20 and c < 35 and d < 45 and e = 100",
"select /*+ use_index_merge(t3, ia, ibc, id, ie) */ * from t3 where a > 10 and b = 20 and c < 35 and d < 45 and e = 100",
"select /*+ use_index_merge(t4, ia, ibc, id, ie) */ * from t4 where a > 10 and b = 20 and c < 35 and d in (1,3,8,9) and e = 100",
"select /*+ use_index_merge(t5, is1, is2, is3, is4) */ * from t5 where s1 = 'Abc' and s2 > 'zzz' and s3 < 'B啊a' and s4 = 'CcC'",
"select /*+ use_index_merge(t6, primary, is3, is4) */ * from t6 where s1 = 'Abc' and s2 > 'zzz' and s3 = 'A啊a' and s4 not like 'Cd_'",
"select /*+ use_index_merge(t7, primary,ia,ib,ic,ie,iff,ig) */ * from t7 where a = 100 and b > 5 and c < 12.3 and d > 54.321 and e = '2022-11-22 17:00' and f > '2020-6-23 10:00' and g < 2025",
"select /*+ use_index_merge(t8, primary,is2,is3,is4,is5) */ * from t8 where s1 like '啊A%' and s2 > 'abc' and s3 > 'cba' and s4 in ('aA', '??') and s5 = 'test,2'",
"select (select /*+ use_index_merge(t1,ia,ibc,ic) */ a from t1 where t1.a > 10 and t1.b = 20 and t1.c = t2.a) from t2",
"select (select /*+ use_index_merge(t1,ia,ibc,ic) */ a from t1 where t1.a > 10 and t1.b = 20 and t1.c > t2.a) from t2",
"select (select /*+ use_index_merge(t1,ia,ibc,ic) */ a from t1 where t1.a > 10 and t1.b = 20 and t1.e > t2.a) from t2",
"set @@tidb_partition_prune_mode = 'static'",
"select * from vh",
"select /*+ qb_name(v, v), use_index_merge(@v t1, ia, ibc, id) */ * from v",
"select /*+ qb_name(v, v@sel_1), use_index_merge(@v t1, ia, ibc, id) */ * from v",
"select /*+ qb_name(v, v@sel_1 .@sel_1), use_index_merge(@v t1, ia, ibc, id) */ * from v",
"select /*+ qb_name(v, v@sel_1 .@sel_1), use_index_merge(@v t1, ia, ibc, id) */ * from v",
"select /*+ use_index_merge(t2, ia, ibc, id, ie) */ * from t2 where a > 10 and b = 20 and c < 35 and d < 45 and e = 100",
"select /*+ use_index_merge(t3, ia, ibc, id, ie) */ * from t3 where a > 10 and b = 20 and c < 35 and d < 45 and e = 100",
"select /*+ use_index_merge(t4, ia, ibc, id, ie) */ * from t4 where a > 10 and b = 20 and c < 35 and d in (1,3,8,9) and e = 100"
]
}
]
| planner/core/testdata/index_merge_suite_in.json | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.00017563569417688996,
0.00017027782450895756,
0.0001654502411838621,
0.00016965257236734033,
0.000003139821728836978
] |
{
"id": 2,
"code_window": [
"\n",
"// GlobalResourceManager is a global resource manager\n",
"var GlobalResourceManager = NewResourceManger()\n",
"\n",
"// ResourceManager is a resource manager\n",
"type ResourceManager struct {\n",
"\tpoolMap *util.ShardPoolMap\n",
"\tscheduler []scheduler.Scheduler\n",
"\tcpuObserver *cpu.Observer\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// RandomName is to get a random name for register pool. It is just for test.\n",
"func RandomName() string {\n",
"\treturn uuid.New().String()\n",
"}\n",
"\n"
],
"file_path": "resourcemanager/rm.go",
"type": "add",
"edit_start_line_idx": 28
} | // Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package resourcemanager
import (
"time"
"github.com/pingcap/tidb/resourcemanager/scheduler"
"github.com/pingcap/tidb/resourcemanager/util"
tidbutil "github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/cpu"
)
// GlobalResourceManager is a global resource manager
var GlobalResourceManager = NewResourceManger()
// ResourceManager is a resource manager
type ResourceManager struct {
poolMap *util.ShardPoolMap
scheduler []scheduler.Scheduler
cpuObserver *cpu.Observer
exitCh chan struct{}
wg tidbutil.WaitGroupWrapper
}
// NewResourceManger is to create a new resource manager
func NewResourceManger() *ResourceManager {
sc := make([]scheduler.Scheduler, 0, 1)
sc = append(sc, scheduler.NewCPUScheduler())
return &ResourceManager{
cpuObserver: cpu.NewCPUObserver(),
exitCh: make(chan struct{}),
poolMap: util.NewShardPoolMap(),
scheduler: sc,
}
}
// Start is to start resource manager
func (r *ResourceManager) Start() {
r.wg.Run(r.cpuObserver.Start)
r.wg.Run(func() {
tick := time.NewTicker(100 * time.Millisecond)
defer tick.Stop()
for {
select {
case <-tick.C:
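				// Re-run r.schedule on every 100ms tick until Stop closes exitCh.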
r.schedule()
case <-r.exitCh:
return
}
}
})
}
// Stop is to stop resource manager
func (r *ResourceManager) Stop() {
r.cpuObserver.Stop()
close(r.exitCh)
r.wg.Wait()
}
// Register is to register pool into resource manager
func (r *ResourceManager) Register(pool util.GorotinuePool, name string, component util.Component) error {
p := util.PoolContainer{Pool: pool, Component: component}
return r.registerPool(name, &p)
}
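// registerPool adds the pool container to the sharded pool map under the given name.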
func (r *ResourceManager) registerPool(name string, pool *util.PoolContainer) error {
return r.poolMap.Add(name, pool)
}
// Unregister is to unregister pool into resource manager.
func (r *ResourceManager) Unregister(name string) {
r.poolMap.Del(name)
}
| resourcemanager/rm.go | 1 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.996231734752655,
0.565382719039917,
0.00017706411017570645,
0.9628094434738159,
0.46439823508262634
] |
{
"id": 2,
"code_window": [
"\n",
"// GlobalResourceManager is a global resource manager\n",
"var GlobalResourceManager = NewResourceManger()\n",
"\n",
"// ResourceManager is a resource manager\n",
"type ResourceManager struct {\n",
"\tpoolMap *util.ShardPoolMap\n",
"\tscheduler []scheduler.Scheduler\n",
"\tcpuObserver *cpu.Observer\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// RandomName is to get a random name for register pool. It is just for test.\n",
"func RandomName() string {\n",
"\treturn uuid.New().String()\n",
"}\n",
"\n"
],
"file_path": "resourcemanager/rm.go",
"type": "add",
"edit_start_line_idx": 28
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package charset
import (
"strings"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/charmap"
"golang.org/x/text/encoding/japanese"
"golang.org/x/text/encoding/korean"
"golang.org/x/text/encoding/simplifiedchinese"
"golang.org/x/text/encoding/traditionalchinese"
"golang.org/x/text/encoding/unicode"
)
// Lookup returns the encoding with the specified label, and its canonical
// name. It returns nil and the empty string if label is not one of the
// standard encodings for HTML. Matching is case-insensitive and ignores
// leading and trailing whitespace.
func Lookup(label string) (e encoding.Encoding, name string) {
label = strings.ToLower(strings.Trim(label, "\t\n\r\f "))
return lookup(label)
}
func lookup(label string) (e encoding.Encoding, name string) {
enc := encodings[label]
return enc.e, enc.name
}
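// encodings maps lowercase encoding labels to their implementation and canonical name.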
var encodings = map[string]struct {
e encoding.Encoding
name string
}{
"unicode-1-1-utf-8": {encoding.Nop, "utf-8"},
"utf-8": {encoding.Nop, "utf-8"},
"utf8": {encoding.Nop, "utf-8"},
"utf8mb4": {encoding.Nop, "utf-8"},
"binary": {encoding.Nop, "binary"},
"866": {charmap.CodePage866, "ibm866"},
"cp866": {charmap.CodePage866, "ibm866"},
"csibm866": {charmap.CodePage866, "ibm866"},
"ibm866": {charmap.CodePage866, "ibm866"},
"csisolatin2": {charmap.ISO8859_2, "iso-8859-2"},
"iso-8859-2": {charmap.ISO8859_2, "iso-8859-2"},
"iso-ir-101": {charmap.ISO8859_2, "iso-8859-2"},
"iso8859-2": {charmap.ISO8859_2, "iso-8859-2"},
"iso88592": {charmap.ISO8859_2, "iso-8859-2"},
"iso_8859-2": {charmap.ISO8859_2, "iso-8859-2"},
"iso_8859-2:1987": {charmap.ISO8859_2, "iso-8859-2"},
"l2": {charmap.ISO8859_2, "iso-8859-2"},
"latin2": {charmap.ISO8859_2, "iso-8859-2"},
"csisolatin3": {charmap.ISO8859_3, "iso-8859-3"},
"iso-8859-3": {charmap.ISO8859_3, "iso-8859-3"},
"iso-ir-109": {charmap.ISO8859_3, "iso-8859-3"},
"iso8859-3": {charmap.ISO8859_3, "iso-8859-3"},
"iso88593": {charmap.ISO8859_3, "iso-8859-3"},
"iso_8859-3": {charmap.ISO8859_3, "iso-8859-3"},
"iso_8859-3:1988": {charmap.ISO8859_3, "iso-8859-3"},
"l3": {charmap.ISO8859_3, "iso-8859-3"},
"latin3": {charmap.ISO8859_3, "iso-8859-3"},
"csisolatin4": {charmap.ISO8859_4, "iso-8859-4"},
"iso-8859-4": {charmap.ISO8859_4, "iso-8859-4"},
"iso-ir-110": {charmap.ISO8859_4, "iso-8859-4"},
"iso8859-4": {charmap.ISO8859_4, "iso-8859-4"},
"iso88594": {charmap.ISO8859_4, "iso-8859-4"},
"iso_8859-4": {charmap.ISO8859_4, "iso-8859-4"},
"iso_8859-4:1988": {charmap.ISO8859_4, "iso-8859-4"},
"l4": {charmap.ISO8859_4, "iso-8859-4"},
"latin4": {charmap.ISO8859_4, "iso-8859-4"},
"csisolatincyrillic": {charmap.ISO8859_5, "iso-8859-5"},
"cyrillic": {charmap.ISO8859_5, "iso-8859-5"},
"iso-8859-5": {charmap.ISO8859_5, "iso-8859-5"},
"iso-ir-144": {charmap.ISO8859_5, "iso-8859-5"},
"iso8859-5": {charmap.ISO8859_5, "iso-8859-5"},
"iso88595": {charmap.ISO8859_5, "iso-8859-5"},
"iso_8859-5": {charmap.ISO8859_5, "iso-8859-5"},
"iso_8859-5:1988": {charmap.ISO8859_5, "iso-8859-5"},
"arabic": {charmap.ISO8859_6, "iso-8859-6"},
"asmo-708": {charmap.ISO8859_6, "iso-8859-6"},
"csiso88596e": {charmap.ISO8859_6, "iso-8859-6"},
"csiso88596i": {charmap.ISO8859_6, "iso-8859-6"},
"csisolatinarabic": {charmap.ISO8859_6, "iso-8859-6"},
"ecma-114": {charmap.ISO8859_6, "iso-8859-6"},
"iso-8859-6": {charmap.ISO8859_6, "iso-8859-6"},
"iso-8859-6-e": {charmap.ISO8859_6, "iso-8859-6"},
"iso-8859-6-i": {charmap.ISO8859_6, "iso-8859-6"},
"iso-ir-127": {charmap.ISO8859_6, "iso-8859-6"},
"iso8859-6": {charmap.ISO8859_6, "iso-8859-6"},
"iso88596": {charmap.ISO8859_6, "iso-8859-6"},
"iso_8859-6": {charmap.ISO8859_6, "iso-8859-6"},
"iso_8859-6:1987": {charmap.ISO8859_6, "iso-8859-6"},
"csisolatingreek": {charmap.ISO8859_7, "iso-8859-7"},
"ecma-118": {charmap.ISO8859_7, "iso-8859-7"},
"elot_928": {charmap.ISO8859_7, "iso-8859-7"},
"greek": {charmap.ISO8859_7, "iso-8859-7"},
"greek8": {charmap.ISO8859_7, "iso-8859-7"},
"iso-8859-7": {charmap.ISO8859_7, "iso-8859-7"},
"iso-ir-126": {charmap.ISO8859_7, "iso-8859-7"},
"iso8859-7": {charmap.ISO8859_7, "iso-8859-7"},
"iso88597": {charmap.ISO8859_7, "iso-8859-7"},
"iso_8859-7": {charmap.ISO8859_7, "iso-8859-7"},
"iso_8859-7:1987": {charmap.ISO8859_7, "iso-8859-7"},
"sun_eu_greek": {charmap.ISO8859_7, "iso-8859-7"},
"csiso88598e": {charmap.ISO8859_8, "iso-8859-8"},
"csisolatinhebrew": {charmap.ISO8859_8, "iso-8859-8"},
"hebrew": {charmap.ISO8859_8, "iso-8859-8"},
"iso-8859-8": {charmap.ISO8859_8, "iso-8859-8"},
"iso-8859-8-e": {charmap.ISO8859_8, "iso-8859-8"},
"iso-ir-138": {charmap.ISO8859_8, "iso-8859-8"},
"iso8859-8": {charmap.ISO8859_8, "iso-8859-8"},
"iso88598": {charmap.ISO8859_8, "iso-8859-8"},
"iso_8859-8": {charmap.ISO8859_8, "iso-8859-8"},
"iso_8859-8:1988": {charmap.ISO8859_8, "iso-8859-8"},
"visual": {charmap.ISO8859_8, "iso-8859-8"},
"csiso88598i": {charmap.ISO8859_8, "iso-8859-8-i"},
"iso-8859-8-i": {charmap.ISO8859_8, "iso-8859-8-i"},
"logical": {charmap.ISO8859_8, "iso-8859-8-i"},
"csisolatin6": {charmap.ISO8859_10, "iso-8859-10"},
"iso-8859-10": {charmap.ISO8859_10, "iso-8859-10"},
"iso-ir-157": {charmap.ISO8859_10, "iso-8859-10"},
"iso8859-10": {charmap.ISO8859_10, "iso-8859-10"},
"iso885910": {charmap.ISO8859_10, "iso-8859-10"},
"l6": {charmap.ISO8859_10, "iso-8859-10"},
"latin6": {charmap.ISO8859_10, "iso-8859-10"},
"iso-8859-13": {charmap.ISO8859_13, "iso-8859-13"},
"iso8859-13": {charmap.ISO8859_13, "iso-8859-13"},
"iso885913": {charmap.ISO8859_13, "iso-8859-13"},
"iso-8859-14": {charmap.ISO8859_14, "iso-8859-14"},
"iso8859-14": {charmap.ISO8859_14, "iso-8859-14"},
"iso885914": {charmap.ISO8859_14, "iso-8859-14"},
"csisolatin9": {charmap.ISO8859_15, "iso-8859-15"},
"iso-8859-15": {charmap.ISO8859_15, "iso-8859-15"},
"iso8859-15": {charmap.ISO8859_15, "iso-8859-15"},
"iso885915": {charmap.ISO8859_15, "iso-8859-15"},
"iso_8859-15": {charmap.ISO8859_15, "iso-8859-15"},
"l9": {charmap.ISO8859_15, "iso-8859-15"},
"iso-8859-16": {charmap.ISO8859_16, "iso-8859-16"},
"cskoi8r": {charmap.KOI8R, "koi8-r"},
"koi": {charmap.KOI8R, "koi8-r"},
"koi8": {charmap.KOI8R, "koi8-r"},
"koi8-r": {charmap.KOI8R, "koi8-r"},
"koi8_r": {charmap.KOI8R, "koi8-r"},
"koi8-u": {charmap.KOI8U, "koi8-u"},
"csmacintosh": {charmap.Macintosh, "macintosh"},
"mac": {charmap.Macintosh, "macintosh"},
"macintosh": {charmap.Macintosh, "macintosh"},
"x-mac-roman": {charmap.Macintosh, "macintosh"},
"dos-874": {charmap.Windows874, "windows-874"},
"iso-8859-11": {charmap.Windows874, "windows-874"},
"iso8859-11": {charmap.Windows874, "windows-874"},
"iso885911": {charmap.Windows874, "windows-874"},
"tis-620": {charmap.Windows874, "windows-874"},
"windows-874": {charmap.Windows874, "windows-874"},
"cp1250": {charmap.Windows1250, "windows-1250"},
"windows-1250": {charmap.Windows1250, "windows-1250"},
"x-cp1250": {charmap.Windows1250, "windows-1250"},
"cp1251": {charmap.Windows1251, "windows-1251"},
"windows-1251": {charmap.Windows1251, "windows-1251"},
"x-cp1251": {charmap.Windows1251, "windows-1251"},
"ansi_x3.4-1968": {charmap.Windows1252, "windows-1252"},
"ascii": {charmap.Windows1252, "windows-1252"},
"cp1252": {charmap.Windows1252, "windows-1252"},
"cp819": {charmap.Windows1252, "windows-1252"},
"csisolatin1": {charmap.Windows1252, "windows-1252"},
"ibm819": {charmap.Windows1252, "windows-1252"},
"iso-8859-1": {charmap.Windows1252, "windows-1252"},
"iso-ir-100": {charmap.Windows1252, "windows-1252"},
"iso8859-1": {charmap.Windows1252, "windows-1252"},
"iso88591": {charmap.Windows1252, "windows-1252"},
"iso_8859-1": {charmap.Windows1252, "windows-1252"},
"iso_8859-1:1987": {charmap.Windows1252, "windows-1252"},
"l1": {charmap.Windows1252, "windows-1252"},
"latin1": {charmap.Windows1252, "windows-1252"},
"us-ascii": {charmap.Windows1252, "windows-1252"},
"windows-1252": {charmap.Windows1252, "windows-1252"},
"x-cp1252": {charmap.Windows1252, "windows-1252"},
"cp1253": {charmap.Windows1253, "windows-1253"},
"windows-1253": {charmap.Windows1253, "windows-1253"},
"x-cp1253": {charmap.Windows1253, "windows-1253"},
"cp1254": {charmap.Windows1254, "windows-1254"},
"csisolatin5": {charmap.Windows1254, "windows-1254"},
"iso-8859-9": {charmap.Windows1254, "windows-1254"},
"iso-ir-148": {charmap.Windows1254, "windows-1254"},
"iso8859-9": {charmap.Windows1254, "windows-1254"},
"iso88599": {charmap.Windows1254, "windows-1254"},
"iso_8859-9": {charmap.Windows1254, "windows-1254"},
"iso_8859-9:1989": {charmap.Windows1254, "windows-1254"},
"l5": {charmap.Windows1254, "windows-1254"},
"latin5": {charmap.Windows1254, "windows-1254"},
"windows-1254": {charmap.Windows1254, "windows-1254"},
"x-cp1254": {charmap.Windows1254, "windows-1254"},
"cp1255": {charmap.Windows1255, "windows-1255"},
"windows-1255": {charmap.Windows1255, "windows-1255"},
"x-cp1255": {charmap.Windows1255, "windows-1255"},
"cp1256": {charmap.Windows1256, "windows-1256"},
"windows-1256": {charmap.Windows1256, "windows-1256"},
"x-cp1256": {charmap.Windows1256, "windows-1256"},
"cp1257": {charmap.Windows1257, "windows-1257"},
"windows-1257": {charmap.Windows1257, "windows-1257"},
"x-cp1257": {charmap.Windows1257, "windows-1257"},
"cp1258": {charmap.Windows1258, "windows-1258"},
"windows-1258": {charmap.Windows1258, "windows-1258"},
"x-cp1258": {charmap.Windows1258, "windows-1258"},
"x-mac-cyrillic": {charmap.MacintoshCyrillic, "x-mac-cyrillic"},
"x-mac-ukrainian": {charmap.MacintoshCyrillic, "x-mac-cyrillic"},
"chinese": {simplifiedchinese.GBK, "gbk"},
"csgb2312": {simplifiedchinese.GBK, "gbk"},
"csiso58gb231280": {simplifiedchinese.GBK, "gbk"},
"gb2312": {simplifiedchinese.GBK, "gbk"},
"gb_2312": {simplifiedchinese.GBK, "gbk"},
"gb_2312-80": {simplifiedchinese.GBK, "gbk"},
"gbk": {simplifiedchinese.GBK, "gbk"},
"iso-ir-58": {simplifiedchinese.GBK, "gbk"},
"x-gbk": {simplifiedchinese.GBK, "gbk"},
"gb18030": {simplifiedchinese.GB18030, "gb18030"},
"hz-gb-2312": {simplifiedchinese.HZGB2312, "hz-gb-2312"},
"big5": {traditionalchinese.Big5, "big5"},
"big5-hkscs": {traditionalchinese.Big5, "big5"},
"cn-big5": {traditionalchinese.Big5, "big5"},
"csbig5": {traditionalchinese.Big5, "big5"},
"x-x-big5": {traditionalchinese.Big5, "big5"},
"cseucpkdfmtjapanese": {japanese.EUCJP, "euc-jp"},
"euc-jp": {japanese.EUCJP, "euc-jp"},
"x-euc-jp": {japanese.EUCJP, "euc-jp"},
"csiso2022jp": {japanese.ISO2022JP, "iso-2022-jp"},
"iso-2022-jp": {japanese.ISO2022JP, "iso-2022-jp"},
"csshiftjis": {japanese.ShiftJIS, "shift_jis"},
"ms_kanji": {japanese.ShiftJIS, "shift_jis"},
"shift-jis": {japanese.ShiftJIS, "shift_jis"},
"shift_jis": {japanese.ShiftJIS, "shift_jis"},
"sjis": {japanese.ShiftJIS, "shift_jis"},
"windows-31j": {japanese.ShiftJIS, "shift_jis"},
"x-sjis": {japanese.ShiftJIS, "shift_jis"},
"cseuckr": {korean.EUCKR, "euc-kr"},
"csksc56011987": {korean.EUCKR, "euc-kr"},
"euc-kr": {korean.EUCKR, "euc-kr"},
"iso-ir-149": {korean.EUCKR, "euc-kr"},
"korean": {korean.EUCKR, "euc-kr"},
"ks_c_5601-1987": {korean.EUCKR, "euc-kr"},
"ks_c_5601-1989": {korean.EUCKR, "euc-kr"},
"ksc5601": {korean.EUCKR, "euc-kr"},
"ksc_5601": {korean.EUCKR, "euc-kr"},
"windows-949": {korean.EUCKR, "euc-kr"},
"csiso2022kr": {encoding.Replacement, "replacement"},
"iso-2022-kr": {encoding.Replacement, "replacement"},
"iso-2022-cn": {encoding.Replacement, "replacement"},
"iso-2022-cn-ext": {encoding.Replacement, "replacement"},
"utf-16be": {unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM), "utf-16be"},
"utf-16": {unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM), "utf-16le"},
"utf-16le": {unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM), "utf-16le"},
"x-user-defined": {charmap.XUserDefined, "x-user-defined"},
}
| parser/charset/encoding_table.go | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.00017703507910482585,
0.000169244987773709,
0.00016583276737947017,
0.00016850823885761201,
0.0000027999640224152245
] |
{
"id": 2,
"code_window": [
"\n",
"// GlobalResourceManager is a global resource manager\n",
"var GlobalResourceManager = NewResourceManger()\n",
"\n",
"// ResourceManager is a resource manager\n",
"type ResourceManager struct {\n",
"\tpoolMap *util.ShardPoolMap\n",
"\tscheduler []scheduler.Scheduler\n",
"\tcpuObserver *cpu.Observer\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// RandomName is to get a random name for register pool. It is just for test.\n",
"func RandomName() string {\n",
"\treturn uuid.New().String()\n",
"}\n",
"\n"
],
"file_path": "resourcemanager/rm.go",
"type": "add",
"edit_start_line_idx": 28
} | CREATE TABLE ci(i INT PRIMARY KEY, v varchar(32));
| br/tests/lightning_new_collation/data/nc.ci-schema.sql | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.000165736346389167,
0.000165736346389167,
0.000165736346389167,
0.000165736346389167,
0
] |
{
"id": 2,
"code_window": [
"\n",
"// GlobalResourceManager is a global resource manager\n",
"var GlobalResourceManager = NewResourceManger()\n",
"\n",
"// ResourceManager is a resource manager\n",
"type ResourceManager struct {\n",
"\tpoolMap *util.ShardPoolMap\n",
"\tscheduler []scheduler.Scheduler\n",
"\tcpuObserver *cpu.Observer\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// RandomName is to get a random name for register pool. It is just for test.\n",
"func RandomName() string {\n",
"\treturn uuid.New().String()\n",
"}\n",
"\n"
],
"file_path": "resourcemanager/rm.go",
"type": "add",
"edit_start_line_idx": 28
} | // Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package txntest
import (
"testing"
"github.com/pingcap/tidb/tests/realtikvtest"
)
func TestMain(m *testing.M) {
realtikvtest.RunTestMain(m)
}
| tests/realtikvtest/txntest/main_test.go | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.00017706411017570645,
0.00017341166676487774,
0.00016871854313649237,
0.00017445233243051916,
0.0000034856245747505454
] |
{
"id": 3,
"code_window": [
"\n",
"// Unregister is to unregister pool into resource manager.\n",
"func (r *ResourceManager) Unregister(name string) {\n",
"\tr.poolMap.Del(name)\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"// Reset is to Reset resource manager. it is just for test.\n",
"func (r *ResourceManager) Reset() {\n",
"\tr.poolMap = util.NewShardPoolMap()\n",
"}"
],
"file_path": "resourcemanager/rm.go",
"type": "add",
"edit_start_line_idx": 87
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !codes
package testkit
import (
"flag"
"testing"
"time"
"github.com/pingcap/tidb/ddl/schematracker"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/store/driver"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/util/gctuner"
"github.com/stretchr/testify/require"
"go.opencensus.io/stats/view"
)
// WithTiKV flag is only used for debugging locally with real tikv cluster.
var WithTiKV = flag.String("with-tikv", "", "address of tikv cluster, if set, running test with real tikv cluster")
// CreateMockStore return a new mock kv.Storage.
func CreateMockStore(t testing.TB, opts ...mockstore.MockTiKVStoreOption) kv.Storage {
if *WithTiKV != "" {
var d driver.TiKVDriver
var err error
store, err := d.Open("tikv://" + *WithTiKV)
require.NoError(t, err)
var dom *domain.Domain
dom, err = session.BootstrapSession(store)
t.Cleanup(func() {
dom.Close()
err := store.Close()
require.NoError(t, err)
view.Stop()
})
require.NoError(t, err)
return store
}
t.Cleanup(func() {
view.Stop()
})
gctuner.GlobalMemoryLimitTuner.Stop()
store, _ := CreateMockStoreAndDomain(t, opts...)
return store
}
// CreateMockStoreAndDomain return a new mock kv.Storage and *domain.Domain.
func CreateMockStoreAndDomain(t testing.TB, opts ...mockstore.MockTiKVStoreOption) (kv.Storage, *domain.Domain) {
store, err := mockstore.NewMockStore(opts...)
require.NoError(t, err)
dom := bootstrap(t, store, 500*time.Millisecond)
sm := MockSessionManager{}
dom.InfoSyncer().SetSessionManager(&sm)
t.Cleanup(func() {
view.Stop()
gctuner.GlobalMemoryLimitTuner.Stop()
})
return schematracker.UnwrapStorage(store), dom
}
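// bootstrap boots a session on the given store with the specified schema lease and registers cleanup for test teardown.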
func bootstrap(t testing.TB, store kv.Storage, lease time.Duration) *domain.Domain {
session.SetSchemaLease(lease)
session.DisableStats4Test()
domain.DisablePlanReplayerBackgroundJob4Test()
domain.DisableDumpHistoricalStats4Test()
dom, err := session.BootstrapSession(store)
require.NoError(t, err)
dom.SetStatsUpdating(true)
t.Cleanup(func() {
dom.Close()
err := store.Close()
require.NoError(t, err)
view.Stop()
})
return dom
}
// CreateMockStoreWithSchemaLease return a new mock kv.Storage.
func CreateMockStoreWithSchemaLease(t testing.TB, lease time.Duration, opts ...mockstore.MockTiKVStoreOption) kv.Storage {
store, _ := CreateMockStoreAndDomainWithSchemaLease(t, lease, opts...)
return schematracker.UnwrapStorage(store)
}
// CreateMockStoreAndDomainWithSchemaLease return a new mock kv.Storage and *domain.Domain.
func CreateMockStoreAndDomainWithSchemaLease(t testing.TB, lease time.Duration, opts ...mockstore.MockTiKVStoreOption) (kv.Storage, *domain.Domain) {
store, err := mockstore.NewMockStore(opts...)
require.NoError(t, err)
dom := bootstrap(t, store, lease)
sm := MockSessionManager{}
dom.InfoSyncer().SetSessionManager(&sm)
return schematracker.UnwrapStorage(store), dom
}
| testkit/mockstore.go | 1 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.0034243471454828978,
0.0010013470891863108,
0.00016764011525083333,
0.00017726968508213758,
0.0012411019997671247
] |
{
"id": 3,
"code_window": [
"\n",
"// Unregister is to unregister pool into resource manager.\n",
"func (r *ResourceManager) Unregister(name string) {\n",
"\tr.poolMap.Del(name)\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"// Reset is to Reset resource manager. it is just for test.\n",
"func (r *ResourceManager) Reset() {\n",
"\tr.poolMap = util.NewShardPoolMap()\n",
"}"
],
"file_path": "resourcemanager/rm.go",
"type": "add",
"edit_start_line_idx": 87
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package memory
import (
"testing"
"github.com/pingcap/tidb/testkit/testsetup"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
testsetup.SetupForCommonTest()
goleak.VerifyTestMain(m)
}
| util/memory/main_test.go | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.00017781427595764399,
0.000175463457708247,
0.00017264808411709964,
0.00017592798394616693,
0.0000021345147160900524
] |
{
"id": 3,
"code_window": [
"\n",
"// Unregister is to unregister pool into resource manager.\n",
"func (r *ResourceManager) Unregister(name string) {\n",
"\tr.poolMap.Del(name)\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"// Reset is to Reset resource manager. it is just for test.\n",
"func (r *ResourceManager) Reset() {\n",
"\tr.poolMap = util.NewShardPoolMap()\n",
"}"
],
"file_path": "resourcemanager/rm.go",
"type": "add",
"edit_start_line_idx": 87
} | load("@io_bazel_rules_go//go:def.bzl", "go_test")
go_test(
name = "metadatalocktest_test",
srcs = [
"main_test.go",
"mdl_test.go",
],
flaky = True,
deps = [
"//config",
"//ddl",
"//errno",
"//server",
"//testkit",
"//testkit/testsetup",
"@com_github_stretchr_testify//require",
"@org_uber_go_goleak//:goleak",
],
)
| ddl/metadatalocktest/BUILD.bazel | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.00017455231864005327,
0.00017380637291353196,
0.0001723617169773206,
0.00017450506857130677,
0.0000010217047474725405
] |
{
"id": 3,
"code_window": [
"\n",
"// Unregister is to unregister pool into resource manager.\n",
"func (r *ResourceManager) Unregister(name string) {\n",
"\tr.poolMap.Del(name)\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"// Reset is to Reset resource manager. it is just for test.\n",
"func (r *ResourceManager) Reset() {\n",
"\tr.poolMap = util.NewShardPoolMap()\n",
"}"
],
"file_path": "resourcemanager/rm.go",
"type": "add",
"edit_start_line_idx": 87
} | /*!40014 SET FOREIGN_KEY_CHECKS=0*/;
/*!40101 SET NAMES binary*/;
CREATE TABLE `v`(
`a` int,
`b` int
)ENGINE=MyISAM;
| dumpling/tests/views/data/views.v-schema.sql | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.0001670127676334232,
0.0001670127676334232,
0.0001670127676334232,
0.0001670127676334232,
0
] |
{
"id": 4,
"code_window": [
" \"//kv\",\n",
" \"//parser/ast\",\n",
" \"//parser/terror\",\n",
" \"//planner/core\",\n",
" \"//session\",\n",
" \"//session/txninfo\",\n",
" \"//sessionctx/variable\",\n",
" \"//store/driver\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"//resourcemanager\",\n"
],
"file_path": "testkit/BUILD.bazel",
"type": "add",
"edit_start_line_idx": 23
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !codes
package testkit
import (
"flag"
"testing"
"time"
"github.com/pingcap/tidb/ddl/schematracker"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/store/driver"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/util/gctuner"
"github.com/stretchr/testify/require"
"go.opencensus.io/stats/view"
)
// WithTiKV flag is only used for debugging locally with real tikv cluster.
var WithTiKV = flag.String("with-tikv", "", "address of tikv cluster, if set, running test with real tikv cluster")
// CreateMockStore return a new mock kv.Storage.
func CreateMockStore(t testing.TB, opts ...mockstore.MockTiKVStoreOption) kv.Storage {
if *WithTiKV != "" {
var d driver.TiKVDriver
var err error
store, err := d.Open("tikv://" + *WithTiKV)
require.NoError(t, err)
var dom *domain.Domain
dom, err = session.BootstrapSession(store)
t.Cleanup(func() {
dom.Close()
err := store.Close()
require.NoError(t, err)
view.Stop()
})
require.NoError(t, err)
return store
}
t.Cleanup(func() {
view.Stop()
})
gctuner.GlobalMemoryLimitTuner.Stop()
store, _ := CreateMockStoreAndDomain(t, opts...)
return store
}
// CreateMockStoreAndDomain return a new mock kv.Storage and *domain.Domain.
func CreateMockStoreAndDomain(t testing.TB, opts ...mockstore.MockTiKVStoreOption) (kv.Storage, *domain.Domain) {
store, err := mockstore.NewMockStore(opts...)
require.NoError(t, err)
dom := bootstrap(t, store, 500*time.Millisecond)
sm := MockSessionManager{}
dom.InfoSyncer().SetSessionManager(&sm)
t.Cleanup(func() {
view.Stop()
gctuner.GlobalMemoryLimitTuner.Stop()
})
return schematracker.UnwrapStorage(store), dom
}
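// bootstrap boots a session on the given store with the specified schema lease and registers cleanup for test teardown.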
func bootstrap(t testing.TB, store kv.Storage, lease time.Duration) *domain.Domain {
session.SetSchemaLease(lease)
session.DisableStats4Test()
domain.DisablePlanReplayerBackgroundJob4Test()
domain.DisableDumpHistoricalStats4Test()
dom, err := session.BootstrapSession(store)
require.NoError(t, err)
dom.SetStatsUpdating(true)
t.Cleanup(func() {
dom.Close()
err := store.Close()
require.NoError(t, err)
view.Stop()
})
return dom
}
// CreateMockStoreWithSchemaLease return a new mock kv.Storage.
func CreateMockStoreWithSchemaLease(t testing.TB, lease time.Duration, opts ...mockstore.MockTiKVStoreOption) kv.Storage {
store, _ := CreateMockStoreAndDomainWithSchemaLease(t, lease, opts...)
return schematracker.UnwrapStorage(store)
}
// CreateMockStoreAndDomainWithSchemaLease return a new mock kv.Storage and *domain.Domain.
func CreateMockStoreAndDomainWithSchemaLease(t testing.TB, lease time.Duration, opts ...mockstore.MockTiKVStoreOption) (kv.Storage, *domain.Domain) {
store, err := mockstore.NewMockStore(opts...)
require.NoError(t, err)
dom := bootstrap(t, store, lease)
sm := MockSessionManager{}
dom.InfoSyncer().SetSessionManager(&sm)
return schematracker.UnwrapStorage(store), dom
}
| testkit/mockstore.go | 1 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.00027240588678978384,
0.0002011052711168304,
0.0001675003150012344,
0.00019166059792041779,
0.00003349912367411889
] |
{
"id": 4,
"code_window": [
" \"//kv\",\n",
" \"//parser/ast\",\n",
" \"//parser/terror\",\n",
" \"//planner/core\",\n",
" \"//session\",\n",
" \"//session/txninfo\",\n",
" \"//sessionctx/variable\",\n",
" \"//store/driver\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"//resourcemanager\",\n"
],
"file_path": "testkit/BUILD.bazel",
"type": "add",
"edit_start_line_idx": 23
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statistics
import (
"fmt"
"math"
"math/rand"
"testing"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/stretchr/testify/require"
)
func (c *CMSketch) insert(val *types.Datum) error {
bytes, err := codec.EncodeValue(nil, nil, *val)
if err != nil {
return errors.Trace(err)
}
c.InsertBytes(bytes)
return nil
}
func prepareCMSAndTopN(d, w int32, vals []*types.Datum, n uint32, total uint64) (*CMSketch, *TopN, error) {
data := make([][]byte, 0, len(vals))
for _, v := range vals {
bytes, err := codec.EncodeValue(nil, nil, *v)
if err != nil {
return nil, nil, errors.Trace(err)
}
data = append(data, bytes)
}
cms, topN, _, _ := NewCMSketchAndTopN(d, w, data, n, total)
return cms, topN, nil
}
// buildCMSketchAndMapWithOffset builds a CM sketch using a Zipf distribution; the generated values start from `offset`.
func buildCMSketchAndMapWithOffset(d, w int32, seed int64, total, imax uint64, s float64, offset int64) (*CMSketch, map[int64]uint32, error) {
cms := NewCMSketch(d, w)
mp := make(map[int64]uint32)
zipf := rand.NewZipf(rand.New(rand.NewSource(seed)), s, 1, imax)
for i := uint64(0); i < total; i++ {
val := types.NewIntDatum(int64(zipf.Uint64()) + offset)
err := cms.insert(&val)
if err != nil {
return nil, nil, errors.Trace(err)
}
mp[val.GetInt64()]++
}
return cms, mp, nil
}
func buildCMSketchAndMap(d, w int32, seed int64, total, imax uint64, s float64) (*CMSketch, map[int64]uint32, error) {
return buildCMSketchAndMapWithOffset(d, w, seed, total, imax, s, 0)
}
func buildCMSketchTopNAndMap(d, w, n, sample int32, seed int64, total, imax uint64, s float64) (*CMSketch, *TopN, map[int64]uint32, error) {
mp := make(map[int64]uint32)
zipf := rand.NewZipf(rand.New(rand.NewSource(seed)), s, 1, imax)
vals := make([]*types.Datum, 0)
for i := uint64(0); i < total; i++ {
val := types.NewIntDatum(int64(zipf.Uint64()))
mp[val.GetInt64()]++
if i < uint64(sample) {
vals = append(vals, &val)
}
}
cms, topN, err := prepareCMSAndTopN(d, w, vals, uint32(n), total)
return cms, topN, mp, err
}
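// averageAbsoluteError returns the mean absolute difference between estimated and actual counts over all values in mp.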
func averageAbsoluteError(cms *CMSketch, topN *TopN, mp map[int64]uint32) (uint64, error) {
sc := &stmtctx.StatementContext{TimeZone: time.Local}
var total uint64
for num, count := range mp {
estimate, err := queryValue(sc, cms, topN, types.NewIntDatum(num))
if err != nil {
return 0, errors.Trace(err)
}
var diff uint64
if uint64(count) > estimate {
diff = uint64(count) - estimate
} else {
diff = estimate - uint64(count)
}
total += diff
}
return total / uint64(len(mp)), nil
}
func TestCMSketch(t *testing.T) {
tests := []struct {
zipfFactor float64
avgError uint64
}{
{
zipfFactor: 1.1,
avgError: 3,
},
{
zipfFactor: 2,
avgError: 24,
},
{
zipfFactor: 3,
avgError: 63,
},
}
d, w := int32(5), int32(2048)
total, imax := uint64(100000), uint64(1000000)
for _, tt := range tests {
lSketch, lMap, err := buildCMSketchAndMap(d, w, 0, total, imax, tt.zipfFactor)
require.NoError(t, err)
avg, err := averageAbsoluteError(lSketch, nil, lMap)
require.NoError(t, err)
require.LessOrEqual(t, avg, tt.avgError)
rSketch, rMap, err := buildCMSketchAndMap(d, w, 1, total, imax, tt.zipfFactor)
require.NoError(t, err)
avg, err = averageAbsoluteError(rSketch, nil, rMap)
require.NoError(t, err)
require.LessOrEqual(t, avg, tt.avgError)
err = lSketch.MergeCMSketch(rSketch)
require.NoError(t, err)
for val, count := range rMap {
lMap[val] += count
}
avg, err = averageAbsoluteError(lSketch, nil, lMap)
require.NoError(t, err)
require.Less(t, avg, tt.avgError*2)
}
}
func TestCMSketchCoding(t *testing.T) {
lSketch := NewCMSketch(5, 2048)
lSketch.count = 2048 * math.MaxUint32
for i := range lSketch.table {
for j := range lSketch.table[i] {
lSketch.table[i][j] = math.MaxUint32
}
}
bytes, err := EncodeCMSketchWithoutTopN(lSketch)
require.NoError(t, err)
require.Len(t, bytes, 61457)
rSketch, _, err := DecodeCMSketchAndTopN(bytes, nil)
require.NoError(t, err)
require.True(t, lSketch.Equal(rSketch))
}
func TestCMSketchTopN(t *testing.T) {
tests := []struct {
zipfFactor float64
avgError uint64
}{
		// If there are no significantly frequent items, TopN may produce results worse than the normal algorithm.
		// The first two tests produce almost the same avg.
{
zipfFactor: 1.0000001,
avgError: 30,
},
{
zipfFactor: 1.1,
avgError: 30,
},
{
zipfFactor: 2,
avgError: 89,
},
		// If most of the data lies in a narrow range, our guess may give a better result.
// The error mainly comes from huge numbers.
{
zipfFactor: 5,
avgError: 208,
},
}
d, w := int32(5), int32(2048)
total, imax := uint64(1000000), uint64(1000000)
for _, tt := range tests {
lSketch, topN, lMap, err := buildCMSketchTopNAndMap(d, w, 20, 1000, 0, total, imax, tt.zipfFactor)
require.NoError(t, err)
require.LessOrEqual(t, len(topN.TopN), 40)
avg, err := averageAbsoluteError(lSketch, topN, lMap)
require.NoError(t, err)
require.LessOrEqual(t, avg, tt.avgError)
}
}
func TestMergeCMSketch4IncrementalAnalyze(t *testing.T) {
tests := []struct {
zipfFactor float64
avgError uint64
}{
{
zipfFactor: 1.0000001,
avgError: 48,
},
{
zipfFactor: 1.1,
avgError: 48,
},
{
zipfFactor: 2,
avgError: 128,
},
{
zipfFactor: 5,
avgError: 256,
},
}
d, w := int32(5), int32(2048)
total, imax := uint64(100000), uint64(1000000)
for _, tt := range tests {
lSketch, lMap, err := buildCMSketchAndMap(d, w, 0, total, imax, tt.zipfFactor)
require.NoError(t, err)
avg, err := averageAbsoluteError(lSketch, nil, lMap)
require.NoError(t, err)
require.LessOrEqual(t, avg, tt.avgError)
rSketch, rMap, err := buildCMSketchAndMapWithOffset(d, w, 1, total, imax, tt.zipfFactor, int64(imax))
require.NoError(t, err)
avg, err = averageAbsoluteError(rSketch, nil, rMap)
require.NoError(t, err)
require.LessOrEqual(t, avg, tt.avgError)
for key, val := range rMap {
lMap[key] += val
}
require.NoError(t, lSketch.MergeCMSketch4IncrementalAnalyze(rSketch, 0))
avg, err = averageAbsoluteError(lSketch, nil, lMap)
require.NoError(t, err)
require.LessOrEqual(t, avg, tt.avgError)
width, depth := lSketch.GetWidthAndDepth()
require.Equal(t, int32(2048), width)
require.Equal(t, int32(5), depth)
}
}
func TestCMSketchTopNUniqueData(t *testing.T) {
d, w := int32(5), int32(2048)
total := uint64(1000000)
mp := make(map[int64]uint32)
vals := make([]*types.Datum, 0)
for i := uint64(0); i < total; i++ {
val := types.NewIntDatum(int64(i))
mp[val.GetInt64()]++
if i < uint64(1000) {
vals = append(vals, &val)
}
}
cms, topN, err := prepareCMSAndTopN(d, w, vals, uint32(20), total)
require.NoError(t, err)
avg, err := averageAbsoluteError(cms, topN, mp)
require.NoError(t, err)
require.Equal(t, uint64(1), cms.defaultValue)
require.Equal(t, uint64(0), avg)
require.Nil(t, topN)
}
func TestCMSketchCodingTopN(t *testing.T) {
lSketch := NewCMSketch(5, 2048)
lSketch.count = 2048 * (math.MaxUint32)
for i := range lSketch.table {
for j := range lSketch.table[i] {
lSketch.table[i][j] = math.MaxUint32
}
}
topN := make([]TopNMeta, 20)
unsignedLong := types.NewFieldType(mysql.TypeLonglong)
unsignedLong.AddFlag(mysql.UnsignedFlag)
chk := chunk.New([]*types.FieldType{types.NewFieldType(mysql.TypeBlob), unsignedLong}, 20, 20)
var rows []chunk.Row
for i := 0; i < 20; i++ {
tString := []byte(fmt.Sprintf("%20000d", i))
topN[i] = TopNMeta{tString, math.MaxUint64}
chk.AppendBytes(0, tString)
chk.AppendUint64(1, math.MaxUint64)
rows = append(rows, chk.GetRow(i))
}
bytes, err := EncodeCMSketchWithoutTopN(lSketch)
require.NoError(t, err)
require.Len(t, bytes, 61457)
rSketch, _, err := DecodeCMSketchAndTopN(bytes, rows)
require.NoError(t, err)
require.True(t, lSketch.Equal(rSketch))
// do not panic
_, _, err = DecodeCMSketchAndTopN([]byte{}, rows)
require.NoError(t, err)
}
| statistics/cmsketch_test.go | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.00019286677706986666,
0.00017534785729367286,
0.00016737880650907755,
0.00017489318270236254,
0.000004352322321210522
] |
{
"id": 4,
"code_window": [
" \"//kv\",\n",
" \"//parser/ast\",\n",
" \"//parser/terror\",\n",
" \"//planner/core\",\n",
" \"//session\",\n",
" \"//session/txninfo\",\n",
" \"//sessionctx/variable\",\n",
" \"//store/driver\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"//resourcemanager\",\n"
],
"file_path": "testkit/BUILD.bazel",
"type": "add",
"edit_start_line_idx": 23
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package profile
import (
"testing"
"github.com/pingcap/tidb/testkit/testsetup"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
testsetup.SetupForCommonTest()
opts := []goleak.Option{
goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"),
goleak.IgnoreTopFunction("github.com/lestrrat-go/httprc.runFetchWorker"),
goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"),
}
goleak.VerifyTestMain(m, opts...)
}
| util/profile/main_test.go | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.00017838641360867769,
0.00017543346621096134,
0.00017271822434850037,
0.00017531460616737604,
0.0000021214759726717602
] |
{
"id": 4,
"code_window": [
" \"//kv\",\n",
" \"//parser/ast\",\n",
" \"//parser/terror\",\n",
" \"//planner/core\",\n",
" \"//session\",\n",
" \"//session/txninfo\",\n",
" \"//sessionctx/variable\",\n",
" \"//store/driver\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"//resourcemanager\",\n"
],
"file_path": "testkit/BUILD.bazel",
"type": "add",
"edit_start_line_idx": 23
} | // Copyright 2021 PingCAP, Inc. Licensed under Apache-2.0.
package export
import (
"database/sql"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/br/pkg/utils"
tcontext "github.com/pingcap/tidb/dumpling/context"
"go.uber.org/zap"
)
// BaseConn wraps connection instance.
type BaseConn struct {
DBConn *sql.Conn
backOffer backOfferResettable
rebuildConnFn func(*sql.Conn, bool) (*sql.Conn, error)
}
func newBaseConn(conn *sql.Conn, shouldRetry bool, rebuildConnFn func(*sql.Conn, bool) (*sql.Conn, error)) *BaseConn {
baseConn := &BaseConn{DBConn: conn}
baseConn.backOffer = newRebuildConnBackOffer(shouldRetry)
if shouldRetry {
baseConn.rebuildConnFn = rebuildConnFn
}
return baseConn
}
// QuerySQL defines the query statement, and connects to the real DB.
func (conn *BaseConn) QuerySQL(tctx *tcontext.Context, handleOneRow func(*sql.Rows) error, reset func(), query string, args ...interface{}) error {
retryTime := 0
err := utils.WithRetry(tctx, func() (err error) {
retryTime++
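		// From the second attempt on, rebuild the connection (when a rebuild function is configured) before retrying.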
if retryTime > 1 && conn.rebuildConnFn != nil {
conn.DBConn, err = conn.rebuildConnFn(conn.DBConn, false)
if err != nil {
return
}
}
err = simpleQueryWithArgs(tctx, conn.DBConn, handleOneRow, query, args...)
if err != nil {
tctx.L().Info("cannot execute query", zap.Int("retryTime", retryTime), zap.String("sql", query),
zap.Any("args", args), zap.Error(err))
reset()
return err
}
return nil
}, conn.backOffer)
conn.backOffer.Reset()
return err
}
// QuerySQLWithColumns defines the query statement, connects to the real DB, and gets results for the specified column names.
func (conn *BaseConn) QuerySQLWithColumns(tctx *tcontext.Context, columns []string, query string, args ...interface{}) ([][]string, error) {
retryTime := 0
var results [][]string
err := utils.WithRetry(tctx, func() (err error) {
retryTime++
if retryTime > 1 && conn.rebuildConnFn != nil {
conn.DBConn, err = conn.rebuildConnFn(conn.DBConn, false)
if err != nil {
tctx.L().Warn("rebuild connection failed", zap.Error(err))
return
}
}
rows, err := conn.DBConn.QueryContext(tctx, query, args...)
if err != nil {
tctx.L().Info("cannot execute query", zap.Int("retryTime", retryTime), zap.String("sql", query),
zap.Any("args", args), zap.Error(err))
return errors.Annotatef(err, "sql: %s", query)
}
results, err = GetSpecifiedColumnValuesAndClose(rows, columns...)
if err != nil {
tctx.L().Info("cannot execute query", zap.Int("retryTime", retryTime), zap.String("sql", query),
zap.Any("args", args), zap.Error(err))
results = nil
return errors.Annotatef(err, "sql: %s", query)
}
return err
}, conn.backOffer)
conn.backOffer.Reset()
return results, err
}
// ExecSQL defines the exec statement, and connects to the real DB.
func (conn *BaseConn) ExecSQL(tctx *tcontext.Context, canRetryFunc func(sql.Result, error) error, query string, args ...interface{}) error {
retryTime := 0
err := utils.WithRetry(tctx, func() (err error) {
retryTime++
if retryTime > 1 && conn.rebuildConnFn != nil {
conn.DBConn, err = conn.rebuildConnFn(conn.DBConn, false)
if err != nil {
return
}
}
res, err := conn.DBConn.ExecContext(tctx, query, args...)
if err = canRetryFunc(res, err); err != nil {
tctx.L().Info("cannot execute query", zap.Int("retryTime", retryTime), zap.String("sql", query),
zap.Any("args", args), zap.Error(err))
return err
}
return nil
}, conn.backOffer)
conn.backOffer.Reset()
return err
}
| dumpling/export/conn.go | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.0007961750961840153,
0.0002654602867551148,
0.000166788071510382,
0.00017340337217319757,
0.00018701728549785912
] |
{
"id": 5,
"code_window": [
"\t\"github.com/pingcap/tidb/ddl/schematracker\"\n",
"\t\"github.com/pingcap/tidb/domain\"\n",
"\t\"github.com/pingcap/tidb/kv\"\n",
"\t\"github.com/pingcap/tidb/session\"\n",
"\t\"github.com/pingcap/tidb/store/driver\"\n",
"\t\"github.com/pingcap/tidb/store/mockstore\"\n",
"\t\"github.com/pingcap/tidb/util/gctuner\"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/resourcemanager\"\n"
],
"file_path": "testkit/mockstore.go",
"type": "add",
"edit_start_line_idx": 26
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "testkit",
srcs = [
"asynctestkit.go",
"dbtestkit.go",
"mocksessionmanager.go",
"mockstore.go",
"result.go",
"stepped.go",
"testkit.go",
],
importpath = "github.com/pingcap/tidb/testkit",
visibility = ["//visibility:public"],
deps = [
"//ddl/schematracker",
"//domain",
"//expression",
"//kv",
"//parser/ast",
"//parser/terror",
"//planner/core",
"//session",
"//session/txninfo",
"//sessionctx/variable",
"//store/driver",
"//store/mockstore",
"//util",
"//util/breakpoint",
"//util/chunk",
"//util/gctuner",
"//util/mathutil",
"//util/sqlexec",
"@com_github_pingcap_errors//:errors",
"@com_github_pingcap_failpoint//:failpoint",
"@com_github_stretchr_testify//assert",
"@com_github_stretchr_testify//require",
"@com_github_tikv_client_go_v2//tikv",
"@com_github_tikv_client_go_v2//tikvrpc",
"@io_opencensus_go//stats/view",
"@org_golang_x_exp//slices",
"@org_uber_go_atomic//:atomic",
],
)
| testkit/BUILD.bazel | 1 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.007024479564279318,
0.0015550687676295638,
0.0001683816808508709,
0.00017683228361420333,
0.0027348049916327
] |
{
"id": 5,
"code_window": [
"\t\"github.com/pingcap/tidb/ddl/schematracker\"\n",
"\t\"github.com/pingcap/tidb/domain\"\n",
"\t\"github.com/pingcap/tidb/kv\"\n",
"\t\"github.com/pingcap/tidb/session\"\n",
"\t\"github.com/pingcap/tidb/store/driver\"\n",
"\t\"github.com/pingcap/tidb/store/mockstore\"\n",
"\t\"github.com/pingcap/tidb/util/gctuner\"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/resourcemanager\"\n"
],
"file_path": "testkit/mockstore.go",
"type": "add",
"edit_start_line_idx": 26
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"context"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/executor/aggfuncs"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/mathutil"
)
type dataInfo struct {
chk *chunk.Chunk
remaining uint64
accumulated uint64
}
// PipelinedWindowExec is the executor for window functions.
type PipelinedWindowExec struct {
baseExecutor
numWindowFuncs int
windowFuncs []aggfuncs.AggFunc
slidingWindowFuncs []aggfuncs.SlidingWindowAggFunc
partialResults []aggfuncs.PartialResult
start *core.FrameBound
end *core.FrameBound
groupChecker *vecGroupChecker
	// childResult stores the child chunk. Note that even if remaining is 0, e.rows might still reference rows in
	// data[0].chk after it has been returned to the upper executor: there is no guarantee what the upper executor
	// will do with the returned chunk, and it might destroy the data (as in the benchmark test, which reused the
	// chunk to pull data and called chk.Reset(), causing a panic). So dataIdx, accumulated and dropped are added
	// to ensure that a chunk is only returned once no row references it.
childResult *chunk.Chunk
data []dataInfo
dataIdx int
// done indicates the child executor is drained or something unexpected happened.
done bool
accumulated uint64
dropped uint64
rowToConsume uint64
newPartition bool
curRowIdx uint64
// curStartRow and curEndRow defines the current frame range
lastStartRow uint64
lastEndRow uint64
stagedStartRow uint64
stagedEndRow uint64
rowStart uint64
orderByCols []*expression.Column
// expectedCmpResult is used to decide if one value is included in the frame.
expectedCmpResult int64
// rows keeps rows starting from curStartRow
rows []chunk.Row
rowCnt uint64
whole bool
isRangeFrame bool
emptyFrame bool
initializedSlidingWindow bool
}
// Close implements the Executor Close interface.
func (e *PipelinedWindowExec) Close() error {
return errors.Trace(e.baseExecutor.Close())
}
// Open implements the Executor Open interface
func (e *PipelinedWindowExec) Open(ctx context.Context) (err error) {
e.rowToConsume = 0
e.done = false
e.accumulated = 0
e.dropped = 0
e.data = make([]dataInfo, 0)
e.dataIdx = 0
e.slidingWindowFuncs = make([]aggfuncs.SlidingWindowAggFunc, len(e.windowFuncs))
for i, windowFunc := range e.windowFuncs {
if slidingWindowAggFunc, ok := windowFunc.(aggfuncs.SlidingWindowAggFunc); ok {
e.slidingWindowFuncs[i] = slidingWindowAggFunc
}
}
e.rows = make([]chunk.Row, 0)
return e.baseExecutor.Open(ctx)
}
func (e *PipelinedWindowExec) firstResultChunkNotReady() bool {
if !e.done && len(e.data) == 0 {
return true
}
	// A chunk can't be ready unless: 1. all of the rows in the chunk are filled, and 2. e.rows doesn't reference rows in the chunk.
return len(e.data) > 0 && (e.data[0].remaining != 0 || e.data[0].accumulated > e.dropped)
}
// Next implements the Executor Next interface.
func (e *PipelinedWindowExec) Next(ctx context.Context, chk *chunk.Chunk) (err error) {
chk.Reset()
for e.firstResultChunkNotReady() {
// we firstly gathering enough rows and consume them, until we are able to produce.
// for unbounded frame, it needs consume the whole partition before being able to produce, in this case
// e.p.enoughToProduce will be false until so.
var enough bool
enough, err = e.enoughToProduce(e.ctx)
if err != nil {
return
}
if !enough {
if !e.done && e.rowToConsume == 0 {
err = e.getRowsInPartition(ctx)
if err != nil {
return err
}
}
if e.done || e.newPartition {
e.finish()
// if we continued, the rows will not be consumed, so next time we should consume it instead of calling e.getRowsInPartition
enough, err = e.enoughToProduce(e.ctx)
if err != nil {
return
}
if enough {
continue
}
e.newPartition = false
e.reset()
if e.rowToConsume == 0 {
// no more data
break
}
}
e.rowCnt += e.rowToConsume
e.rowToConsume = 0
}
// e.p is ready to produce data
if len(e.data) > e.dataIdx && e.data[e.dataIdx].remaining != 0 {
produced, err := e.produce(e.ctx, e.data[e.dataIdx].chk, e.data[e.dataIdx].remaining)
if err != nil {
return err
}
e.data[e.dataIdx].remaining -= produced
if e.data[e.dataIdx].remaining == 0 {
e.dataIdx++
}
}
}
if len(e.data) > 0 {
chk.SwapColumns(e.data[0].chk)
e.data = e.data[1:]
e.dataIdx--
}
return nil
}
func (e *PipelinedWindowExec) getRowsInPartition(ctx context.Context) (err error) {
e.newPartition = true
if len(e.rows) == 0 {
// if getRowsInPartition is called for the first time, we ignore it as a new partition
e.newPartition = false
}
if e.groupChecker.isExhausted() {
var drained, samePartition bool
drained, err = e.fetchChild(ctx)
if err != nil {
return errors.Trace(err)
}
		// We return immediately here; the combination of newPartition being true and e.rowToConsume being 0 indicates that the data source is drained.
if drained {
e.done = true
return nil
}
samePartition, err = e.groupChecker.splitIntoGroups(e.childResult)
if samePartition {
// the only case that when getRowsInPartition gets called, it is not a new partition.
e.newPartition = false
}
if err != nil {
return errors.Trace(err)
}
}
begin, end := e.groupChecker.getNextGroup()
e.rowToConsume += uint64(end - begin)
for i := begin; i < end; i++ {
e.rows = append(e.rows, e.childResult.GetRow(i))
}
return
}
func (e *PipelinedWindowExec) fetchChild(ctx context.Context) (EOF bool, err error) {
// TODO: reuse chunks
childResult := tryNewCacheChunk(e.children[0])
err = Next(ctx, e.children[0], childResult)
if err != nil {
return false, errors.Trace(err)
}
// No more data.
numRows := childResult.NumRows()
if numRows == 0 {
return true, nil
}
// TODO: reuse chunks
resultChk := e.ctx.GetSessionVars().GetNewChunkWithCapacity(e.retFieldTypes, 0, numRows, e.AllocPool)
err = e.copyChk(childResult, resultChk)
if err != nil {
return false, err
}
e.accumulated += uint64(numRows)
e.data = append(e.data, dataInfo{chk: resultChk, remaining: uint64(numRows), accumulated: e.accumulated})
e.childResult = childResult
return false, nil
}
func (e *PipelinedWindowExec) copyChk(src, dst *chunk.Chunk) error {
columns := e.Schema().Columns[:len(e.Schema().Columns)-e.numWindowFuncs]
for i, col := range columns {
if err := dst.MakeRefTo(i, src, col.Index); err != nil {
return err
}
}
return nil
}
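// getRow returns the row at absolute index i; e.rows is a sliding buffer of rows starting at e.rowStart.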
func (e *PipelinedWindowExec) getRow(i uint64) chunk.Row {
return e.rows[i-e.rowStart]
}
func (e *PipelinedWindowExec) getRows(start, end uint64) []chunk.Row {
return e.rows[start-e.rowStart : end-e.rowStart]
}
// finish is called once a whole partition has been consumed
func (e *PipelinedWindowExec) finish() {
e.whole = true
}
func (e *PipelinedWindowExec) getStart(ctx sessionctx.Context) (uint64, error) {
if e.start.UnBounded {
return 0, nil
}
if e.isRangeFrame {
var start uint64
for start = mathutil.Max(e.lastStartRow, e.stagedStartRow); start < e.rowCnt; start++ {
var res int64
var err error
for i := range e.orderByCols {
res, _, err = e.start.CmpFuncs[i](ctx, e.orderByCols[i], e.start.CalcFuncs[i], e.getRow(start), e.getRow(e.curRowIdx))
if err != nil {
return 0, err
}
if res != 0 {
break
}
}
// For asc, break when the calculated result is greater than the current value.
// For desc, break when the calculated result is less than the current value.
if res != e.expectedCmpResult {
break
}
}
e.stagedStartRow = start
return start, nil
}
switch e.start.Type {
case ast.Preceding:
if e.curRowIdx > e.start.Num {
return e.curRowIdx - e.start.Num, nil
}
return 0, nil
case ast.Following:
return e.curRowIdx + e.start.Num, nil
default: // ast.CurrentRow
return e.curRowIdx, nil
}
}
func (e *PipelinedWindowExec) getEnd(ctx sessionctx.Context) (uint64, error) {
if e.end.UnBounded {
return e.rowCnt, nil
}
if e.isRangeFrame {
var end uint64
for end = mathutil.Max(e.lastEndRow, e.stagedEndRow); end < e.rowCnt; end++ {
var res int64
var err error
for i := range e.orderByCols {
res, _, err = e.end.CmpFuncs[i](ctx, e.end.CalcFuncs[i], e.orderByCols[i], e.getRow(e.curRowIdx), e.getRow(end))
if err != nil {
return 0, err
}
if res != 0 {
break
}
}
// For asc, break when the calculated result is greater than the current value.
// For desc, break when the calculated result is less than the current value.
if res == e.expectedCmpResult {
break
}
}
e.stagedEndRow = end
return end, nil
}
switch e.end.Type {
case ast.Preceding:
if e.curRowIdx >= e.end.Num {
return e.curRowIdx - e.end.Num + 1, nil
}
return 0, nil
case ast.Following:
return e.curRowIdx + e.end.Num + 1, nil
default: // ast.CurrentRow:
return e.curRowIdx + 1, nil
}
}
// produce produces rows and appends them to chk. The returned produced is the number of rows appended to the
// chunk; rows that have been processed but not yet fetched remain in e.rows.
func (e *PipelinedWindowExec) produce(ctx sessionctx.Context, chk *chunk.Chunk, remained uint64) (produced uint64, err error) {
var (
start uint64
end uint64
enough bool
)
for remained > 0 {
enough, err = e.enoughToProduce(ctx)
if err != nil {
return
}
if !enough {
break
}
start, err = e.getStart(ctx)
if err != nil {
return
}
end, err = e.getEnd(ctx)
if err != nil {
return
}
if end > e.rowCnt {
end = e.rowCnt
}
if start >= e.rowCnt {
start = e.rowCnt
}
		// If start >= end, the frame is empty: we return a default value and reset the frame to empty.
if start >= end {
for i, wf := range e.windowFuncs {
if !e.emptyFrame {
wf.ResetPartialResult(e.partialResults[i])
}
err = wf.AppendFinalResult2Chunk(ctx, e.partialResults[i], chk)
if err != nil {
return
}
}
if !e.emptyFrame {
e.emptyFrame = true
e.initializedSlidingWindow = false
}
} else {
e.emptyFrame = false
for i, wf := range e.windowFuncs {
slidingWindowAggFunc := e.slidingWindowFuncs[i]
if e.lastStartRow != start || e.lastEndRow != end {
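					// A sliding-window implementation updates incrementally with the
					// rows that entered and left the frame; otherwise the whole frame
					// is recomputed from scratch below.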
if slidingWindowAggFunc != nil && e.initializedSlidingWindow {
err = slidingWindowAggFunc.Slide(ctx, e.getRow, e.lastStartRow, e.lastEndRow, start-e.lastStartRow, end-e.lastEndRow, e.partialResults[i])
} else {
						// MaxMinSlidingWindowAggFunc needs the absolute start position of each window
						// to decide whether elements inside its deque have fallen out of the current window.
if minMaxSlidingWindowAggFunc, ok := wf.(aggfuncs.MaxMinSlidingWindowAggFunc); ok {
// Store start inside MaxMinSlidingWindowAggFunc.windowInfo
minMaxSlidingWindowAggFunc.SetWindowStart(start)
}
// TODO(zhifeng): track memory usage here
wf.ResetPartialResult(e.partialResults[i])
_, err = wf.UpdatePartialResult(ctx, e.getRows(start, end), e.partialResults[i])
}
}
if err != nil {
return
}
err = wf.AppendFinalResult2Chunk(ctx, e.partialResults[i], chk)
if err != nil {
return
}
}
e.initializedSlidingWindow = true
}
e.curRowIdx++
e.lastStartRow, e.lastEndRow = start, end
produced++
remained--
}
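	// Rows below min(curRowIdx, lastEndRow, lastStartRow) can no longer be
	// referenced by any future frame, so drop them to bound memory usage.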
extend := mathutil.Min(e.curRowIdx, e.lastEndRow, e.lastStartRow)
if extend > e.rowStart {
numDrop := extend - e.rowStart
e.dropped += numDrop
e.rows = e.rows[numDrop:]
e.rowStart = extend
}
return
}
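// enoughToProduce reports whether the buffered rows suffice to compute the
// frame of the current row.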
func (e *PipelinedWindowExec) enoughToProduce(ctx sessionctx.Context) (enough bool, err error) {
if e.curRowIdx >= e.rowCnt {
return false, nil
}
if e.whole {
return true, nil
}
start, err := e.getStart(ctx)
if err != nil {
return
}
end, err := e.getEnd(ctx)
if err != nil {
return
}
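	// Without the whole partition buffered, a frame bound that reaches rowCnt
	// may still move once more rows arrive, so only frames strictly inside the
	// buffered rows are safe to produce.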
return end < e.rowCnt && start < e.rowCnt, nil
}
// reset resets the processor
func (e *PipelinedWindowExec) reset() {
e.lastStartRow = 0
e.lastEndRow = 0
e.stagedStartRow = 0
e.stagedEndRow = 0
e.emptyFrame = false
e.curRowIdx = 0
e.whole = false
numDrop := e.rowCnt - e.rowStart
e.dropped += numDrop
e.rows = e.rows[numDrop:]
e.rowStart = 0
e.rowCnt = 0
e.initializedSlidingWindow = false
for i, windowFunc := range e.windowFuncs {
windowFunc.ResetPartialResult(e.partialResults[i])
}
}
| executor/pipelined_window.go | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.005730411969125271,
0.00029175900272093713,
0.00016422412591055036,
0.00017025426495820284,
0.0008019965025596321
] |
{
"id": 5,
"code_window": [
"\t\"github.com/pingcap/tidb/ddl/schematracker\"\n",
"\t\"github.com/pingcap/tidb/domain\"\n",
"\t\"github.com/pingcap/tidb/kv\"\n",
"\t\"github.com/pingcap/tidb/session\"\n",
"\t\"github.com/pingcap/tidb/store/driver\"\n",
"\t\"github.com/pingcap/tidb/store/mockstore\"\n",
"\t\"github.com/pingcap/tidb/util/gctuner\"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/resourcemanager\"\n"
],
"file_path": "testkit/mockstore.go",
"type": "add",
"edit_start_line_idx": 26
} | load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "session",
srcs = ["session.go"],
importpath = "github.com/pingcap/tidb/ttl/session",
visibility = ["//visibility:public"],
deps = [
"//infoschema",
"//kv",
"//parser/terror",
"//sessionctx",
"//sessionctx/variable",
"//sessiontxn",
"//ttl/metrics",
"//util/chunk",
"//util/sqlexec",
"@com_github_pingcap_errors//:errors",
],
)
go_test(
name = "session_test",
srcs = [
"main_test.go",
"session_test.go",
"sysvar_test.go",
],
flaky = True,
deps = [
":session",
"//sessionctx/variable",
"//testkit",
"//testkit/testsetup",
"@com_github_pingcap_errors//:errors",
"@com_github_stretchr_testify//require",
"@org_uber_go_goleak//:goleak",
],
)
| ttl/session/BUILD.bazel | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.0015644774539396167,
0.0005587111227214336,
0.00018577031733002514,
0.00024229835253208876,
0.0005812508170492947
] |
{
"id": 5,
"code_window": [
"\t\"github.com/pingcap/tidb/ddl/schematracker\"\n",
"\t\"github.com/pingcap/tidb/domain\"\n",
"\t\"github.com/pingcap/tidb/kv\"\n",
"\t\"github.com/pingcap/tidb/session\"\n",
"\t\"github.com/pingcap/tidb/store/driver\"\n",
"\t\"github.com/pingcap/tidb/store/mockstore\"\n",
"\t\"github.com/pingcap/tidb/util/gctuner\"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/resourcemanager\"\n"
],
"file_path": "testkit/mockstore.go",
"type": "add",
"edit_start_line_idx": 26
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"fmt"
"testing"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/charset"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/planner/property"
"github.com/pingcap/tidb/planner/util"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/ranger"
"github.com/stretchr/testify/require"
)
func rewriteSimpleExpr(ctx sessionctx.Context, str string, schema *expression.Schema, names types.NameSlice) ([]expression.Expression, error) {
if str == "" {
return nil, nil
}
filters, err := expression.ParseSimpleExprsWithNames(ctx, str, schema, names)
if err != nil {
return nil, err
}
if sf, ok := filters[0].(*expression.ScalarFunction); ok && sf.FuncName.L == ast.LogicAnd {
filters = expression.FlattenCNFConditions(sf)
}
return filters, nil
}
type indexJoinContext struct {
dataSourceNode *DataSource
dsNames types.NameSlice
path *util.AccessPath
joinNode *LogicalJoin
joinColNames types.NameSlice
}
func prepareForAnalyzeLookUpFilters() *indexJoinContext {
ctx := MockContext()
ctx.GetSessionVars().PlanID = -1
joinNode := LogicalJoin{}.Init(ctx, 0)
dataSourceNode := DataSource{}.Init(ctx, 0)
dsSchema := expression.NewSchema()
var dsNames types.NameSlice
dsSchema.Append(&expression.Column{
UniqueID: ctx.GetSessionVars().AllocPlanColumnID(),
RetType: types.NewFieldType(mysql.TypeLonglong),
})
dsNames = append(dsNames, &types.FieldName{
ColName: model.NewCIStr("a"),
TblName: model.NewCIStr("t"),
DBName: model.NewCIStr("test"),
})
dsSchema.Append(&expression.Column{
UniqueID: ctx.GetSessionVars().AllocPlanColumnID(),
RetType: types.NewFieldType(mysql.TypeLonglong),
})
dsNames = append(dsNames, &types.FieldName{
ColName: model.NewCIStr("b"),
TblName: model.NewCIStr("t"),
DBName: model.NewCIStr("test"),
})
dsSchema.Append(&expression.Column{
UniqueID: ctx.GetSessionVars().AllocPlanColumnID(),
RetType: types.NewFieldTypeWithCollation(mysql.TypeVarchar, mysql.DefaultCollationName, types.UnspecifiedLength),
})
dsNames = append(dsNames, &types.FieldName{
ColName: model.NewCIStr("c"),
TblName: model.NewCIStr("t"),
DBName: model.NewCIStr("test"),
})
dsSchema.Append(&expression.Column{
UniqueID: ctx.GetSessionVars().AllocPlanColumnID(),
RetType: types.NewFieldType(mysql.TypeLonglong),
})
dsNames = append(dsNames, &types.FieldName{
ColName: model.NewCIStr("d"),
TblName: model.NewCIStr("t"),
DBName: model.NewCIStr("test"),
})
dsSchema.Append(&expression.Column{
UniqueID: ctx.GetSessionVars().AllocPlanColumnID(),
RetType: types.NewFieldTypeWithCollation(mysql.TypeVarchar, charset.CollationASCII, types.UnspecifiedLength),
})
dsNames = append(dsNames, &types.FieldName{
ColName: model.NewCIStr("c_ascii"),
TblName: model.NewCIStr("t"),
DBName: model.NewCIStr("test"),
})
dataSourceNode.schema = dsSchema
dataSourceNode.stats = &property.StatsInfo{StatsVersion: statistics.PseudoVersion}
path := &util.AccessPath{
IdxCols: append(make([]*expression.Column, 0, 5), dsSchema.Columns...),
IdxColLens: []int{types.UnspecifiedLength, types.UnspecifiedLength, 2, types.UnspecifiedLength, 2},
}
outerChildSchema := expression.NewSchema()
var outerChildNames types.NameSlice
outerChildSchema.Append(&expression.Column{
UniqueID: ctx.GetSessionVars().AllocPlanColumnID(),
RetType: types.NewFieldType(mysql.TypeLonglong),
})
outerChildNames = append(outerChildNames, &types.FieldName{
ColName: model.NewCIStr("e"),
TblName: model.NewCIStr("t1"),
DBName: model.NewCIStr("test"),
})
outerChildSchema.Append(&expression.Column{
UniqueID: ctx.GetSessionVars().AllocPlanColumnID(),
RetType: types.NewFieldType(mysql.TypeLonglong),
})
outerChildNames = append(outerChildNames, &types.FieldName{
ColName: model.NewCIStr("f"),
TblName: model.NewCIStr("t1"),
DBName: model.NewCIStr("test"),
})
outerChildSchema.Append(&expression.Column{
UniqueID: ctx.GetSessionVars().AllocPlanColumnID(),
RetType: types.NewFieldTypeWithCollation(mysql.TypeVarchar, mysql.DefaultCollationName, types.UnspecifiedLength),
})
outerChildNames = append(outerChildNames, &types.FieldName{
ColName: model.NewCIStr("g"),
TblName: model.NewCIStr("t1"),
DBName: model.NewCIStr("test"),
})
outerChildSchema.Append(&expression.Column{
UniqueID: ctx.GetSessionVars().AllocPlanColumnID(),
RetType: types.NewFieldType(mysql.TypeLonglong),
})
outerChildNames = append(outerChildNames, &types.FieldName{
ColName: model.NewCIStr("h"),
TblName: model.NewCIStr("t1"),
DBName: model.NewCIStr("test"),
})
joinNode.SetSchema(expression.MergeSchema(dsSchema, outerChildSchema))
joinColNames := append(dsNames.Shallow(), outerChildNames...)
return &indexJoinContext{
dataSourceNode: dataSourceNode,
dsNames: dsNames,
path: path,
joinNode: joinNode,
joinColNames: joinColNames,
}
}
type indexJoinTestCase struct {
// input
innerKeys []*expression.Column
pushedDownConds string
otherConds string
rangeMaxSize int64
rebuildMode bool
// expected output
ranges string
idxOff2KeyOff string
accesses string
remained string
compareFilters string
}
func testAnalyzeLookUpFilters(t *testing.T, testCtx *indexJoinContext, testCase *indexJoinTestCase, msgAndArgs ...interface{}) *indexJoinBuildHelper {
ctx := testCtx.dataSourceNode.ctx
ctx.GetSessionVars().RangeMaxSize = testCase.rangeMaxSize
dataSourceNode := testCtx.dataSourceNode
joinNode := testCtx.joinNode
pushed, err := rewriteSimpleExpr(ctx, testCase.pushedDownConds, dataSourceNode.schema, testCtx.dsNames)
require.NoError(t, err)
dataSourceNode.pushedDownConds = pushed
others, err := rewriteSimpleExpr(ctx, testCase.otherConds, joinNode.schema, testCtx.joinColNames)
require.NoError(t, err)
joinNode.OtherConditions = others
helper := &indexJoinBuildHelper{join: joinNode, lastColManager: nil, innerPlan: dataSourceNode}
_, err = helper.analyzeLookUpFilters(testCtx.path, dataSourceNode, testCase.innerKeys, testCase.innerKeys, testCase.rebuildMode)
if helper.chosenRanges == nil {
helper.chosenRanges = ranger.Ranges{}
}
require.NoError(t, err)
if testCase.rebuildMode {
require.Equal(t, testCase.ranges, fmt.Sprintf("%v", helper.chosenRanges.Range()), msgAndArgs)
} else {
require.Equal(t, testCase.accesses, fmt.Sprintf("%v", helper.chosenAccess), msgAndArgs)
require.Equal(t, testCase.ranges, fmt.Sprintf("%v", helper.chosenRanges.Range()), msgAndArgs)
require.Equal(t, testCase.idxOff2KeyOff, fmt.Sprintf("%v", helper.idxOff2KeyOff), msgAndArgs)
require.Equal(t, testCase.remained, fmt.Sprintf("%v", helper.chosenRemained), msgAndArgs)
require.Equal(t, testCase.compareFilters, fmt.Sprintf("%v", helper.lastColManager), msgAndArgs)
}
return helper
}
func TestIndexJoinAnalyzeLookUpFilters(t *testing.T) {
indexJoinCtx := prepareForAnalyzeLookUpFilters()
dsSchema := indexJoinCtx.dataSourceNode.schema
tests := []indexJoinTestCase{
		// Join keys are not continuous and there is no pushed filter to match.
{
innerKeys: []*expression.Column{dsSchema.Columns[0], dsSchema.Columns[2]},
pushedDownConds: "",
otherConds: "",
ranges: "[[NULL,NULL]]",
idxOff2KeyOff: "[0 -1 -1 -1 -1]",
accesses: "[]",
remained: "[]",
compareFilters: "<nil>",
},
		// Join key and pushed eq filter are not continuous.
{
innerKeys: []*expression.Column{dsSchema.Columns[2]},
pushedDownConds: "a = 1",
otherConds: "",
ranges: "[]",
idxOff2KeyOff: "[]",
accesses: "[]",
remained: "[]",
compareFilters: "<nil>",
},
// Keys are continuous.
{
innerKeys: []*expression.Column{dsSchema.Columns[1]},
pushedDownConds: "a = 1",
otherConds: "",
ranges: "[[1 NULL,1 NULL]]",
idxOff2KeyOff: "[-1 0 -1 -1 -1]",
accesses: "[eq(Column#1, 1)]",
remained: "[]",
compareFilters: "<nil>",
},
		// Keys are continuous and there are correlated filters.
{
innerKeys: []*expression.Column{dsSchema.Columns[1]},
pushedDownConds: "a = 1",
otherConds: "c > g and c < concat(g, \"ab\")",
ranges: "[[1 NULL NULL,1 NULL NULL]]",
idxOff2KeyOff: "[-1 0 -1 -1 -1]",
accesses: "[eq(Column#1, 1) gt(Column#3, Column#8) lt(Column#3, concat(Column#8, ab))]",
remained: "[]",
compareFilters: "gt(Column#3, Column#8) lt(Column#3, concat(Column#8, ab))",
},
// cast function won't be involved.
{
innerKeys: []*expression.Column{dsSchema.Columns[1]},
pushedDownConds: "a = 1",
otherConds: "c > g and c < g + 10",
ranges: "[[1 NULL NULL,1 NULL NULL]]",
idxOff2KeyOff: "[-1 0 -1 -1 -1]",
accesses: "[eq(Column#1, 1) gt(Column#3, Column#8)]",
remained: "[]",
compareFilters: "gt(Column#3, Column#8)",
},
// Can deal with prefix index correctly.
{
innerKeys: []*expression.Column{dsSchema.Columns[1]},
pushedDownConds: "a = 1 and c > 'a' and c < 'aaaaaa'",
otherConds: "",
ranges: "[(1 NULL \"a\",1 NULL \"aa\"]]",
idxOff2KeyOff: "[-1 0 -1 -1 -1]",
accesses: "[eq(Column#1, 1) gt(Column#3, a) lt(Column#3, aaaaaa)]",
remained: "[gt(Column#3, a) lt(Column#3, aaaaaa)]",
compareFilters: "<nil>",
},
{
innerKeys: []*expression.Column{dsSchema.Columns[1], dsSchema.Columns[2], dsSchema.Columns[3]},
pushedDownConds: "a = 1 and c_ascii > 'a' and c_ascii < 'aaaaaa'",
otherConds: "",
ranges: "[(1 NULL NULL NULL \"a\",1 NULL NULL NULL \"aa\"]]",
idxOff2KeyOff: "[-1 0 1 2 -1]",
accesses: "[eq(Column#1, 1) gt(Column#5, a) lt(Column#5, aaaaaa)]",
remained: "[gt(Column#5, a) lt(Column#5, aaaaaa)]",
compareFilters: "<nil>",
},
// Can generate correct ranges for in functions.
{
innerKeys: []*expression.Column{dsSchema.Columns[1]},
pushedDownConds: "a in (1, 2, 3) and c in ('a', 'b', 'c')",
otherConds: "",
ranges: "[[1 NULL \"a\",1 NULL \"a\"] [1 NULL \"b\",1 NULL \"b\"] [1 NULL \"c\",1 NULL \"c\"] [2 NULL \"a\",2 NULL \"a\"] [2 NULL \"b\",2 NULL \"b\"] [2 NULL \"c\",2 NULL \"c\"] [3 NULL \"a\",3 NULL \"a\"] [3 NULL \"b\",3 NULL \"b\"] [3 NULL \"c\",3 NULL \"c\"]]",
idxOff2KeyOff: "[-1 0 -1 -1 -1]",
accesses: "[in(Column#1, 1, 2, 3) in(Column#3, a, b, c)]",
remained: "[in(Column#3, a, b, c)]",
compareFilters: "<nil>",
},
		// Can generate correct ranges for in functions with correlated filters.
{
innerKeys: []*expression.Column{dsSchema.Columns[1]},
pushedDownConds: "a in (1, 2, 3) and c in ('a', 'b', 'c')",
otherConds: "d > h and d < h + 100",
ranges: "[[1 NULL \"a\" NULL,1 NULL \"a\" NULL] [1 NULL \"b\" NULL,1 NULL \"b\" NULL] [1 NULL \"c\" NULL,1 NULL \"c\" NULL] [2 NULL \"a\" NULL,2 NULL \"a\" NULL] [2 NULL \"b\" NULL,2 NULL \"b\" NULL] [2 NULL \"c\" NULL,2 NULL \"c\" NULL] [3 NULL \"a\" NULL,3 NULL \"a\" NULL] [3 NULL \"b\" NULL,3 NULL \"b\" NULL] [3 NULL \"c\" NULL,3 NULL \"c\" NULL]]",
idxOff2KeyOff: "[-1 0 -1 -1 -1]",
accesses: "[in(Column#1, 1, 2, 3) in(Column#3, a, b, c) gt(Column#4, Column#9) lt(Column#4, plus(Column#9, 100))]",
remained: "[in(Column#3, a, b, c)]",
compareFilters: "gt(Column#4, Column#9) lt(Column#4, plus(Column#9, 100))",
},
		// Join keys are not continuous, and the pushed filter connects the keys but is not an eq/in function.
{
innerKeys: []*expression.Column{dsSchema.Columns[0], dsSchema.Columns[2]},
pushedDownConds: "b > 1",
otherConds: "",
ranges: "[(NULL 1,NULL +inf]]",
idxOff2KeyOff: "[0 -1 -1 -1 -1]",
accesses: "[gt(Column#2, 1)]",
remained: "[]",
compareFilters: "<nil>",
},
{
innerKeys: []*expression.Column{dsSchema.Columns[1]},
pushedDownConds: "a = 1 and c > 'a' and c < '一二三'",
otherConds: "",
ranges: "[(1 NULL \"a\",1 NULL \"一二\"]]",
idxOff2KeyOff: "[-1 0 -1 -1 -1]",
accesses: "[eq(Column#1, 1) gt(Column#3, a) lt(Column#3, 一二三)]",
remained: "[gt(Column#3, a) lt(Column#3, 一二三)]",
compareFilters: "<nil>",
},
}
for i, tt := range tests {
testAnalyzeLookUpFilters(t, indexJoinCtx, &tt, fmt.Sprintf("test case: %v", i))
}
}
func checkRangeFallbackAndReset(t *testing.T, ctx sessionctx.Context, expectedRangeFallback bool) {
require.Equal(t, expectedRangeFallback, ctx.GetSessionVars().StmtCtx.RangeFallback)
ctx.GetSessionVars().StmtCtx.RangeFallback = false
}
func TestRangeFallbackForAnalyzeLookUpFilters(t *testing.T) {
ijCtx := prepareForAnalyzeLookUpFilters()
ctx := ijCtx.dataSourceNode.ctx
dsSchema := ijCtx.dataSourceNode.schema
type testOutput struct {
ranges string
idxOff2KeyOff string
accesses string
remained string
compareFilters string
}
tests := []struct {
innerKeys []*expression.Column
pushedDownConds string
otherConds string
outputs []testOutput
}{
{
innerKeys: []*expression.Column{dsSchema.Columns[1], dsSchema.Columns[3]},
pushedDownConds: "a in (1, 3) and c in ('aaa', 'bbb')",
otherConds: "",
outputs: []testOutput{
{
ranges: "[[1 NULL \"aa\" NULL,1 NULL \"aa\" NULL] [1 NULL \"bb\" NULL,1 NULL \"bb\" NULL] [3 NULL \"aa\" NULL,3 NULL \"aa\" NULL] [3 NULL \"bb\" NULL,3 NULL \"bb\" NULL]]",
idxOff2KeyOff: "[-1 0 -1 1 -1]",
accesses: "[in(Column#1, 1, 3) in(Column#3, aaa, bbb)]",
remained: "[in(Column#3, aaa, bbb)]",
compareFilters: "<nil>",
},
{
ranges: "[[1 NULL \"aa\",1 NULL \"aa\"] [1 NULL \"bb\",1 NULL \"bb\"] [3 NULL \"aa\",3 NULL \"aa\"] [3 NULL \"bb\",3 NULL \"bb\"]]",
idxOff2KeyOff: "[-1 0 -1 -1 -1]",
accesses: "[in(Column#1, 1, 3) in(Column#3, aaa, bbb)]",
remained: "[in(Column#3, aaa, bbb)]",
compareFilters: "<nil>",
},
{
ranges: "[[1 NULL,1 NULL] [3 NULL,3 NULL]]",
idxOff2KeyOff: "[-1 0 -1 -1 -1]",
accesses: "[in(Column#1, 1, 3)]",
remained: "[in(Column#3, aaa, bbb)]",
compareFilters: "<nil>",
},
{
ranges: "[]",
idxOff2KeyOff: "[]",
accesses: "[]",
remained: "[]",
compareFilters: "<nil>",
},
},
},
{
// test haveExtraCol
innerKeys: []*expression.Column{dsSchema.Columns[0]},
pushedDownConds: "b in (1, 3, 5)",
otherConds: "c > g and c < concat(g, 'aaa')",
outputs: []testOutput{
{
ranges: "[[NULL 1 NULL,NULL 1 NULL] [NULL 3 NULL,NULL 3 NULL] [NULL 5 NULL,NULL 5 NULL]]",
idxOff2KeyOff: "[0 -1 -1 -1 -1]",
accesses: "[in(Column#2, 1, 3, 5) gt(Column#3, Column#8) lt(Column#3, concat(Column#8, aaa))]",
remained: "[]",
compareFilters: "gt(Column#3, Column#8) lt(Column#3, concat(Column#8, aaa))",
},
{
ranges: "[[NULL 1,NULL 1] [NULL 3,NULL 3] [NULL 5,NULL 5]]",
idxOff2KeyOff: "[0 -1 -1 -1 -1]",
accesses: "[in(Column#2, 1, 3, 5)]",
remained: "[]",
compareFilters: "<nil>",
},
{
ranges: "[[NULL,NULL]]",
idxOff2KeyOff: "[0 -1 -1 -1 -1]",
accesses: "[]",
remained: "[in(Column#2, 1, 3, 5)]",
compareFilters: "<nil>",
},
},
},
{
// test nextColRange
innerKeys: []*expression.Column{dsSchema.Columns[1]},
pushedDownConds: "a in (1, 3) and c > 'aaa' and c < 'bbb'",
otherConds: "",
outputs: []testOutput{
{
ranges: "[[1 NULL \"aa\",1 NULL \"bb\"] [3 NULL \"aa\",3 NULL \"bb\"]]",
idxOff2KeyOff: "[-1 0 -1 -1 -1]",
accesses: "[in(Column#1, 1, 3) gt(Column#3, aaa) lt(Column#3, bbb)]",
remained: "[gt(Column#3, aaa) lt(Column#3, bbb)]",
compareFilters: "<nil>",
},
{
ranges: "[[1 NULL,1 NULL] [3 NULL,3 NULL]]",
idxOff2KeyOff: "[-1 0 -1 -1 -1]",
accesses: "[in(Column#1, 1, 3)]",
remained: "[gt(Column#3, aaa) lt(Column#3, bbb)]",
compareFilters: "<nil>",
},
},
},
}
for _, tt := range tests {
ijCase := &indexJoinTestCase{
innerKeys: tt.innerKeys,
pushedDownConds: tt.pushedDownConds,
otherConds: tt.otherConds,
rangeMaxSize: 0,
}
for i, res := range tt.outputs {
ijCase.ranges = res.ranges
ijCase.idxOff2KeyOff = res.idxOff2KeyOff
ijCase.accesses = res.accesses
ijCase.remained = res.remained
ijCase.compareFilters = res.compareFilters
ijHelper := testAnalyzeLookUpFilters(t, ijCtx, ijCase)
checkRangeFallbackAndReset(t, ctx, i > 0)
ijCase.rangeMaxSize = ijHelper.chosenRanges.Range().MemUsage() - 1
}
}
	// Test that building ranges is not subject to the memory limit under rebuild mode.
ijCase := &indexJoinTestCase{
innerKeys: []*expression.Column{dsSchema.Columns[0], dsSchema.Columns[2]},
pushedDownConds: "b in (1, 3) and d in (2, 4)",
otherConds: "",
rangeMaxSize: 1,
rebuildMode: true,
ranges: "[[NULL 1 NULL 2,NULL 1 NULL 2] [NULL 1 NULL 4,NULL 1 NULL 4] [NULL 3 NULL 2,NULL 3 NULL 2] [NULL 3 NULL 4,NULL 3 NULL 4]]",
}
ijHelper := testAnalyzeLookUpFilters(t, ijCtx, ijCase)
checkRangeFallbackAndReset(t, ctx, false)
require.Greater(t, ijHelper.chosenRanges.Range().MemUsage(), ijCase.rangeMaxSize)
}
| planner/core/exhaust_physical_plans_test.go | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.007563144434243441,
0.00038976356154307723,
0.0001620654365979135,
0.00016919313929975033,
0.001131669501774013
] |
{
"id": 6,
"code_window": [
"\tt.Cleanup(func() {\n",
"\t\tdom.Close()\n",
"\t\terr := store.Close()\n",
"\t\trequire.NoError(t, err)\n",
"\t\tview.Stop()\n",
"\t})\n",
"\treturn dom\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tresourcemanager.GlobalResourceManager.Reset()\n"
],
"file_path": "testkit/mockstore.go",
"type": "add",
"edit_start_line_idx": 93
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !codes
package testkit
import (
"flag"
"testing"
"time"
"github.com/pingcap/tidb/ddl/schematracker"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/store/driver"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/util/gctuner"
"github.com/stretchr/testify/require"
"go.opencensus.io/stats/view"
)
// WithTiKV flag is only used for debugging locally with real tikv cluster.
var WithTiKV = flag.String("with-tikv", "", "address of tikv cluster, if set, running test with real tikv cluster")
// CreateMockStore return a new mock kv.Storage.
func CreateMockStore(t testing.TB, opts ...mockstore.MockTiKVStoreOption) kv.Storage {
if *WithTiKV != "" {
var d driver.TiKVDriver
var err error
store, err := d.Open("tikv://" + *WithTiKV)
require.NoError(t, err)
var dom *domain.Domain
dom, err = session.BootstrapSession(store)
t.Cleanup(func() {
dom.Close()
err := store.Close()
require.NoError(t, err)
view.Stop()
})
require.NoError(t, err)
return store
}
t.Cleanup(func() {
view.Stop()
})
gctuner.GlobalMemoryLimitTuner.Stop()
store, _ := CreateMockStoreAndDomain(t, opts...)
return store
}
// CreateMockStoreAndDomain return a new mock kv.Storage and *domain.Domain.
func CreateMockStoreAndDomain(t testing.TB, opts ...mockstore.MockTiKVStoreOption) (kv.Storage, *domain.Domain) {
store, err := mockstore.NewMockStore(opts...)
require.NoError(t, err)
dom := bootstrap(t, store, 500*time.Millisecond)
sm := MockSessionManager{}
dom.InfoSyncer().SetSessionManager(&sm)
t.Cleanup(func() {
view.Stop()
gctuner.GlobalMemoryLimitTuner.Stop()
})
return schematracker.UnwrapStorage(store), dom
}
func bootstrap(t testing.TB, store kv.Storage, lease time.Duration) *domain.Domain {
session.SetSchemaLease(lease)
session.DisableStats4Test()
domain.DisablePlanReplayerBackgroundJob4Test()
domain.DisableDumpHistoricalStats4Test()
dom, err := session.BootstrapSession(store)
require.NoError(t, err)
dom.SetStatsUpdating(true)
t.Cleanup(func() {
dom.Close()
err := store.Close()
require.NoError(t, err)
view.Stop()
})
return dom
}
// CreateMockStoreWithSchemaLease return a new mock kv.Storage.
func CreateMockStoreWithSchemaLease(t testing.TB, lease time.Duration, opts ...mockstore.MockTiKVStoreOption) kv.Storage {
store, _ := CreateMockStoreAndDomainWithSchemaLease(t, lease, opts...)
return schematracker.UnwrapStorage(store)
}
// CreateMockStoreAndDomainWithSchemaLease return a new mock kv.Storage and *domain.Domain.
func CreateMockStoreAndDomainWithSchemaLease(t testing.TB, lease time.Duration, opts ...mockstore.MockTiKVStoreOption) (kv.Storage, *domain.Domain) {
store, err := mockstore.NewMockStore(opts...)
require.NoError(t, err)
dom := bootstrap(t, store, lease)
sm := MockSessionManager{}
dom.InfoSyncer().SetSessionManager(&sm)
return schematracker.UnwrapStorage(store), dom
}
| testkit/mockstore.go | 1 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.9907049536705017,
0.14306211471557617,
0.0001712735538603738,
0.0018371595069766045,
0.32165539264678955
] |
{
"id": 6,
"code_window": [
"\tt.Cleanup(func() {\n",
"\t\tdom.Close()\n",
"\t\terr := store.Close()\n",
"\t\trequire.NoError(t, err)\n",
"\t\tview.Stop()\n",
"\t})\n",
"\treturn dom\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tresourcemanager.GlobalResourceManager.Reset()\n"
],
"file_path": "testkit/mockstore.go",
"type": "add",
"edit_start_line_idx": 93
} | create database routes_a1;
| br/tests/lightning_ignore_columns/data/routes_a1-schema-create.sql | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.00016753879026509821,
0.00016753879026509821,
0.00016753879026509821,
0.00016753879026509821,
0
] |
{
"id": 6,
"code_window": [
"\tt.Cleanup(func() {\n",
"\t\tdom.Close()\n",
"\t\terr := store.Close()\n",
"\t\trequire.NoError(t, err)\n",
"\t\tview.Stop()\n",
"\t})\n",
"\treturn dom\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tresourcemanager.GlobalResourceManager.Reset()\n"
],
"file_path": "testkit/mockstore.go",
"type": "add",
"edit_start_line_idx": 93
} | #!/bin/sh
#
# Copyright 2019 PingCAP, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eux
bin/cdc \
--ca "$TEST_DIR/certs/ca.pem" \
--cert "$TEST_DIR/certs/ticdc.pem" \
--key "$TEST_DIR/certs/ticdc.key" \
"$@"
| br/tests/_utils/run_cdc | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.00017852695600595325,
0.00017735963047016412,
0.00017643699538893998,
0.00017711494001559913,
8.705894174454443e-7
] |
{
"id": 6,
"code_window": [
"\tt.Cleanup(func() {\n",
"\t\tdom.Close()\n",
"\t\terr := store.Close()\n",
"\t\trequire.NoError(t, err)\n",
"\t\tview.Stop()\n",
"\t})\n",
"\treturn dom\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tresourcemanager.GlobalResourceManager.Reset()\n"
],
"file_path": "testkit/mockstore.go",
"type": "add",
"edit_start_line_idx": 93
} | // Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package session
import (
"context"
"fmt"
"math"
"strings"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/format"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/opcode"
"github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/types"
driver "github.com/pingcap/tidb/types/parser_driver"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/collate"
"github.com/pingcap/tidb/util/dbterror"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/mathutil"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
)
// ErrNonTransactionalJobFailure is the error when a non-transactional job fails. The error is returned and following jobs are canceled.
var ErrNonTransactionalJobFailure = dbterror.ClassSession.NewStd(errno.ErrNonTransactionalJobFailure)
var (
nonTransactionalDeleteCount = metrics.NonTransactionalDMLCount.With(prometheus.Labels{metrics.LblType: "delete"})
nonTransactionalInsertCount = metrics.NonTransactionalDMLCount.With(prometheus.Labels{metrics.LblType: "insert"})
nonTransactionalUpdateCount = metrics.NonTransactionalDMLCount.With(prometheus.Labels{metrics.LblType: "update"})
)
// job: handle keys in [start, end]
type job struct {
start types.Datum
end types.Datum
err error
jobID int
jobSize int // it can be inaccurate if there are concurrent writes
sql string
}
// statementBuildInfo contains information that is needed to build the split statement in a job
type statementBuildInfo struct {
stmt *ast.NonTransactionalDMLStmt
shardColumnType types.FieldType
shardColumnRefer *ast.ResultField
originalCondition ast.ExprNode
}
func (j job) String(redacted bool) string {
if redacted {
return fmt.Sprintf("job id: %d, estimated size: %d", j.jobID, j.jobSize)
}
return fmt.Sprintf("job id: %d, estimated size: %d, sql: %s", j.jobID, j.jobSize, j.sql)
}
// HandleNonTransactionalDML is the entry point for a non-transactional DML statement
func HandleNonTransactionalDML(ctx context.Context, stmt *ast.NonTransactionalDMLStmt, se Session) (sqlexec.RecordSet, error) {
sessVars := se.GetSessionVars()
originalReadStaleness := se.GetSessionVars().ReadStaleness
// NT-DML is a write operation, and should not be affected by read_staleness that is supposed to affect only SELECT.
sessVars.ReadStaleness = 0
defer func() {
sessVars.ReadStaleness = originalReadStaleness
}()
err := core.Preprocess(ctx, se, stmt)
if err != nil {
return nil, err
}
if err := checkConstraint(stmt, se); err != nil {
return nil, err
}
tableName, selectSQL, shardColumnInfo, tableSources, err := buildSelectSQL(stmt, se)
if err != nil {
return nil, err
}
if err := checkConstraintWithShardColumn(se, stmt, tableName, shardColumnInfo, tableSources); err != nil {
return nil, err
}
if stmt.DryRun == ast.DryRunQuery {
return buildDryRunResults(stmt.DryRun, []string{selectSQL}, se.GetSessionVars().BatchSize.MaxChunkSize)
}
// TODO: choose an appropriate quota.
	// Use the mem-quota-query as a workaround. As a result, an NT-DML may consume 2x the memory quota.
memTracker := memory.NewTracker(memory.LabelForNonTransactionalDML, -1)
memTracker.AttachTo(se.GetSessionVars().MemTracker)
se.GetSessionVars().MemTracker.SetBytesLimit(se.GetSessionVars().MemQuotaQuery)
defer memTracker.Detach()
jobs, err := buildShardJobs(ctx, stmt, se, selectSQL, shardColumnInfo, memTracker)
if err != nil {
return nil, err
}
splitStmts, err := runJobs(ctx, jobs, stmt, tableName, se, stmt.DMLStmt.WhereExpr())
if err != nil {
return nil, err
}
if stmt.DryRun == ast.DryRunSplitDml {
return buildDryRunResults(stmt.DryRun, splitStmts, se.GetSessionVars().BatchSize.MaxChunkSize)
}
return buildExecuteResults(ctx, jobs, se.GetSessionVars().BatchSize.MaxChunkSize, se.GetSessionVars().EnableRedactLog)
}
// we require:
// (1) in an update statement, shard column cannot be updated
//
// Note: this is not a comprehensive check.
// We do this to help users avoid some easy mistakes, at an acceptable maintenance cost.
func checkConstraintWithShardColumn(se Session, stmt *ast.NonTransactionalDMLStmt,
tableName *ast.TableName, shardColumnInfo *model.ColumnInfo, tableSources []*ast.TableSource) error {
switch s := stmt.DMLStmt.(type) {
case *ast.UpdateStmt:
if err := checkUpdateShardColumn(se, s.List, shardColumnInfo, tableName, tableSources, true); err != nil {
return err
}
case *ast.InsertStmt:
// FIXME: is it possible to happen?
// `insert into t select * from t on duplicate key update id = id + 1` will return an ambiguous column error?
if err := checkUpdateShardColumn(se, s.OnDuplicate, shardColumnInfo, tableName, tableSources, false); err != nil {
return err
}
default:
}
return nil
}
// shard column should not be updated.
func checkUpdateShardColumn(se Session, assignments []*ast.Assignment, shardColumnInfo *model.ColumnInfo,
tableName *ast.TableName, tableSources []*ast.TableSource, isUpdate bool) error {
// if the table has alias, the alias is used in assignments, and we should use aliased name to compare
aliasedShardColumnTableName := tableName.Name.L
for _, tableSource := range tableSources {
if tableSource.Source.(*ast.TableName).Name.L == aliasedShardColumnTableName && tableSource.AsName.L != "" {
aliasedShardColumnTableName = tableSource.AsName.L
}
}
if shardColumnInfo == nil {
return nil
}
for _, assignment := range assignments {
sameDB := (assignment.Column.Schema.L == tableName.Schema.L) ||
(assignment.Column.Schema.L == "" && tableName.Schema.L == se.GetSessionVars().CurrentDB)
if !sameDB {
continue
}
sameTable := (assignment.Column.Table.L == aliasedShardColumnTableName) || (isUpdate && len(tableSources) == 1)
if !sameTable {
continue
}
if assignment.Column.Name.L == shardColumnInfo.Name.L {
return errors.New("Non-transactional DML, shard column cannot be updated")
}
}
return nil
}
func checkConstraint(stmt *ast.NonTransactionalDMLStmt, se Session) error {
sessVars := se.GetSessionVars()
if !(sessVars.IsAutocommit() && !sessVars.InTxn()) {
return errors.Errorf("non-transactional DML can only run in auto-commit mode. auto-commit:%v, inTxn:%v",
se.GetSessionVars().IsAutocommit(), se.GetSessionVars().InTxn())
}
if variable.EnableBatchDML.Load() && sessVars.DMLBatchSize > 0 && (sessVars.BatchDelete || sessVars.BatchInsert) {
return errors.Errorf("can't run non-transactional DML with batch-dml")
}
if sessVars.ReadConsistency.IsWeak() {
return errors.New("can't run non-transactional under weak read consistency")
}
if sessVars.SnapshotTS != 0 {
return errors.New("can't do non-transactional DML when tidb_snapshot is set")
}
switch s := stmt.DMLStmt.(type) {
case *ast.DeleteStmt:
if err := checkTableRef(s.TableRefs, true); err != nil {
return err
}
if err := checkReadClauses(s.Limit, s.Order); err != nil {
return err
}
nonTransactionalDeleteCount.Inc()
case *ast.UpdateStmt:
if err := checkTableRef(s.TableRefs, true); err != nil {
return err
}
if err := checkReadClauses(s.Limit, s.Order); err != nil {
return err
}
nonTransactionalUpdateCount.Inc()
case *ast.InsertStmt:
if s.Select == nil {
return errors.New("Non-transactional insert supports insert select stmt only")
}
selectStmt, ok := s.Select.(*ast.SelectStmt)
if !ok {
return errors.New("Non-transactional insert doesn't support non-select source")
}
if err := checkTableRef(selectStmt.From, true); err != nil {
return err
}
if err := checkReadClauses(selectStmt.Limit, selectStmt.OrderBy); err != nil {
return err
}
nonTransactionalInsertCount.Inc()
default:
return errors.New("Unsupported DML type for non-transactional DML")
}
return nil
}
func checkTableRef(t *ast.TableRefsClause, allowMultipleTables bool) error {
if t == nil || t.TableRefs == nil || t.TableRefs.Left == nil {
return errors.New("table reference is nil")
}
if !allowMultipleTables && t.TableRefs.Right != nil {
return errors.New("Non-transactional statements don't support multiple tables")
}
return nil
}
func checkReadClauses(limit *ast.Limit, order *ast.OrderByClause) error {
if limit != nil {
return errors.New("Non-transactional statements don't support limit")
}
if order != nil {
return errors.New("Non-transactional statements don't support order by")
}
return nil
}
// runJobs is a single-threaded worker; each job works on the key range [start, end].
func runJobs(ctx context.Context, jobs []job, stmt *ast.NonTransactionalDMLStmt,
tableName *ast.TableName, se Session, originalCondition ast.ExprNode) ([]string, error) {
// prepare for the construction of statement
var shardColumnRefer *ast.ResultField
var shardColumnType types.FieldType
for _, col := range tableName.TableInfo.Columns {
if col.Name.L == stmt.ShardColumn.Name.L {
shardColumnRefer = &ast.ResultField{
Column: col,
Table: tableName.TableInfo,
DBName: tableName.Schema,
TableName: tableName,
}
shardColumnType = col.FieldType
}
}
if shardColumnRefer == nil && stmt.ShardColumn.Name.L != model.ExtraHandleName.L {
return nil, errors.New("Non-transactional DML, shard column not found")
}
splitStmts := make([]string, 0, len(jobs))
for i := range jobs {
select {
case <-ctx.Done():
failedJobs := make([]string, 0)
for _, job := range jobs {
if job.err != nil {
failedJobs = append(failedJobs, fmt.Sprintf("job:%s, error: %s", job.String(se.GetSessionVars().EnableRedactLog), job.err.Error()))
}
}
if len(failedJobs) == 0 {
logutil.Logger(ctx).Warn("Non-transactional DML worker exit because context canceled. No errors",
zap.Int("finished", i), zap.Int("total", len(jobs)))
} else {
logutil.Logger(ctx).Warn("Non-transactional DML worker exit because context canceled. Errors found",
zap.Int("finished", i), zap.Int("total", len(jobs)), zap.Strings("errors found", failedJobs))
}
return nil, ctx.Err()
default:
}
		// The shard column is the hidden _tidb_rowid handle; synthesize its type and ResultField.
if shardColumnRefer == nil {
shardColumnType = *types.NewFieldType(mysql.TypeLonglong)
shardColumnRefer = &ast.ResultField{
Column: model.NewExtraHandleColInfo(),
Table: tableName.TableInfo,
DBName: tableName.Schema,
TableName: tableName,
}
}
stmtBuildInfo := statementBuildInfo{
stmt: stmt,
shardColumnType: shardColumnType,
shardColumnRefer: shardColumnRefer,
originalCondition: originalCondition,
}
if stmt.DryRun == ast.DryRunSplitDml {
if i > 0 && i < len(jobs)-1 {
continue
}
splitStmt := doOneJob(ctx, &jobs[i], len(jobs), stmtBuildInfo, se, true)
splitStmts = append(splitStmts, splitStmt)
} else {
doOneJob(ctx, &jobs[i], len(jobs), stmtBuildInfo, se, false)
}
// if the first job failed, there is a large chance that all jobs will fail. So return early.
if i == 0 && jobs[i].err != nil {
return nil, errors.Annotate(jobs[i].err, "Early return: error occurred in the first job. All jobs are canceled")
}
if jobs[i].err != nil && !se.GetSessionVars().NonTransactionalIgnoreError {
return nil, ErrNonTransactionalJobFailure.GenWithStackByArgs(jobs[i].jobID, len(jobs), jobs[i].start.String(), jobs[i].end.String(), jobs[i].String(se.GetSessionVars().EnableRedactLog), jobs[i].err.Error())
}
}
return splitStmts, nil
}
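// doOneJob builds the WHERE condition covering this job's key range, restores
// the full DML statement, and executes it; in dry-run mode it only returns the
// restored SQL without executing.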
func doOneJob(ctx context.Context, job *job, totalJobCount int, options statementBuildInfo, se Session, dryRun bool) string {
var whereCondition ast.ExprNode
if job.start.IsNull() {
isNullCondition := &ast.IsNullExpr{
Expr: &ast.ColumnNameExpr{
Name: options.stmt.ShardColumn,
Refer: options.shardColumnRefer,
},
Not: false,
}
if job.end.IsNull() {
// `where x is null`
whereCondition = isNullCondition
} else {
// `where (x <= job.end) || (x is null)`
right := &driver.ValueExpr{}
right.Type = options.shardColumnType
right.Datum = job.end
leCondition := &ast.BinaryOperationExpr{
Op: opcode.LE,
L: &ast.ColumnNameExpr{
Name: options.stmt.ShardColumn,
Refer: options.shardColumnRefer,
},
R: right,
}
whereCondition = &ast.BinaryOperationExpr{
Op: opcode.LogicOr,
L: leCondition,
R: isNullCondition,
}
}
} else {
// a normal between condition: `where x between start and end`
left := &driver.ValueExpr{}
left.Type = options.shardColumnType
left.Datum = job.start
right := &driver.ValueExpr{}
right.Type = options.shardColumnType
right.Datum = job.end
whereCondition = &ast.BetweenExpr{
Expr: &ast.ColumnNameExpr{
Name: options.stmt.ShardColumn,
Refer: options.shardColumnRefer,
},
Left: left,
Right: right,
Not: false,
}
}
if options.originalCondition == nil {
options.stmt.DMLStmt.SetWhereExpr(whereCondition)
} else {
options.stmt.DMLStmt.SetWhereExpr(&ast.BinaryOperationExpr{
Op: opcode.LogicAnd,
L: whereCondition,
R: options.originalCondition,
})
}
var sb strings.Builder
err := options.stmt.DMLStmt.Restore(format.NewRestoreCtx(format.DefaultRestoreFlags|
format.RestoreNameBackQuotes|
format.RestoreSpacesAroundBinaryOperation|
format.RestoreBracketAroundBinaryOperation|
format.RestoreStringWithoutCharset, &sb))
if err != nil {
logutil.Logger(ctx).Error("Non-transactional DML, failed to restore the DML statement", zap.Error(err))
job.err = errors.New("Failed to restore the DML statement, probably because of unsupported type of the shard column")
return ""
}
dmlSQL := sb.String()
if dryRun {
return dmlSQL
}
job.sql = dmlSQL
logutil.Logger(ctx).Info("start a Non-transactional DML",
zap.String("job", job.String(se.GetSessionVars().EnableRedactLog)), zap.Int("totalJobCount", totalJobCount))
var dmlSQLInLog string
if se.GetSessionVars().EnableRedactLog {
dmlSQLInLog = parser.Normalize(dmlSQL)
} else {
dmlSQLInLog = dmlSQL
}
options.stmt.DMLStmt.SetText(nil, fmt.Sprintf("/* job %v/%v */ %s", job.jobID, totalJobCount, dmlSQL))
rs, err := se.ExecuteStmt(ctx, options.stmt.DMLStmt)
// collect errors
failpoint.Inject("batchDMLError", func(val failpoint.Value) {
if val.(bool) {
err = errors.New("injected batch(non-transactional) DML error")
}
})
if err != nil {
logutil.Logger(ctx).Error("Non-transactional DML SQL failed", zap.String("job", dmlSQLInLog), zap.Error(err), zap.Int("jobID", job.jobID), zap.Int("jobSize", job.jobSize))
job.err = err
} else {
logutil.Logger(ctx).Info("Non-transactional DML SQL finished successfully", zap.Int("jobID", job.jobID),
zap.Int("jobSize", job.jobSize), zap.String("dmlSQL", dmlSQLInLog))
}
if rs != nil {
_ = rs.Close()
}
return ""
}
func buildShardJobs(ctx context.Context, stmt *ast.NonTransactionalDMLStmt, se Session,
selectSQL string, shardColumnInfo *model.ColumnInfo, memTracker *memory.Tracker) ([]job, error) {
var shardColumnCollate string
if shardColumnInfo != nil {
shardColumnCollate = shardColumnInfo.GetCollate()
} else {
shardColumnCollate = ""
}
// A NT-DML is not a SELECT. We ignore the SelectLimit for selectSQL so that it can read all values.
originalSelectLimit := se.GetSessionVars().SelectLimit
se.GetSessionVars().SelectLimit = math.MaxUint64
// NT-DML is a write operation, and should not be affected by read_staleness that is supposed to affect only SELECT.
rss, err := se.Execute(ctx, selectSQL)
se.GetSessionVars().SelectLimit = originalSelectLimit
if err != nil {
return nil, err
}
if len(rss) != 1 {
return nil, errors.Errorf("Non-transactional DML, expecting 1 record set, but got %d", len(rss))
}
rs := rss[0]
defer func() {
_ = rs.Close()
}()
batchSize := int(stmt.Limit)
if batchSize <= 0 {
return nil, errors.New("Non-transactional DML, batch size should be positive")
}
jobCount := 0
jobs := make([]job, 0)
currentSize := 0
var currentStart, currentEnd types.Datum
chk := rs.NewChunk(nil)
for {
err = rs.Next(ctx, chk)
if err != nil {
return nil, err
}
// last chunk
if chk.NumRows() == 0 {
if currentSize > 0 {
// there's remaining work
jobs = appendNewJob(jobs, jobCount+1, currentStart, currentEnd, currentSize, memTracker)
}
break
}
if len(jobs) > 0 && chk.NumRows()+currentSize < batchSize {
// not enough data for a batch
currentSize += chk.NumRows()
newEnd := chk.GetRow(chk.NumRows()-1).GetDatum(0, &rs.Fields()[0].Column.FieldType)
currentEnd = *newEnd.Clone()
continue
}
iter := chunk.NewIterator4Chunk(chk)
for row := iter.Begin(); row != iter.End(); row = iter.Next() {
if currentSize == 0 {
newStart := row.GetDatum(0, &rs.Fields()[0].Column.FieldType)
currentStart = *newStart.Clone()
}
newEnd := row.GetDatum(0, &rs.Fields()[0].Column.FieldType)
if currentSize >= batchSize {
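				// Only cut a new job at a shard-column value boundary: rows with
				// equal shard values must stay in the same job so that the key
				// ranges of adjacent jobs do not overlap.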
cmp, err := newEnd.Compare(se.GetSessionVars().StmtCtx, ¤tEnd, collate.GetCollator(shardColumnCollate))
if err != nil {
return nil, err
}
if cmp != 0 {
jobCount++
jobs = appendNewJob(jobs, jobCount, *currentStart.Clone(), *currentEnd.Clone(), currentSize, memTracker)
currentSize = 0
currentStart = newEnd
}
}
currentEnd = newEnd
currentSize++
}
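		// Clone the boundary datums: chk is reused by the next call to Next,
		// which would otherwise overwrite the memory they reference.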
currentEnd = *currentEnd.Clone()
currentStart = *currentStart.Clone()
}
return jobs, nil
}
func appendNewJob(jobs []job, id int, start types.Datum, end types.Datum, size int, tracker *memory.Tracker) []job {
jobs = append(jobs, job{jobID: id, start: start, end: end, jobSize: size})
tracker.Consume(start.EstimatedMemUsage() + end.EstimatedMemUsage() + 64)
return jobs
}
func buildSelectSQL(stmt *ast.NonTransactionalDMLStmt, se Session) (
*ast.TableName, string, *model.ColumnInfo, []*ast.TableSource, error) {
// only use the first table
join, ok := stmt.DMLStmt.TableRefsJoin()
if !ok {
return nil, "", nil, nil, errors.New("Non-transactional DML, table source not found")
}
tableSources := make([]*ast.TableSource, 0)
tableSources, err := collectTableSourcesInJoin(join, tableSources)
if err != nil {
return nil, "", nil, nil, err
}
if len(tableSources) == 0 {
return nil, "", nil, nil, errors.New("Non-transactional DML, no tables found in table refs")
}
leftMostTableSource := tableSources[0]
leftMostTableName, ok := leftMostTableSource.Source.(*ast.TableName)
if !ok {
return nil, "", nil, nil, errors.New("Non-transactional DML, table name not found")
}
shardColumnInfo, tableName, err := selectShardColumn(stmt, se, tableSources, leftMostTableName, leftMostTableSource)
if err != nil {
return nil, "", nil, nil, err
}
var sb strings.Builder
if stmt.DMLStmt.WhereExpr() != nil {
err := stmt.DMLStmt.WhereExpr().Restore(format.NewRestoreCtx(format.DefaultRestoreFlags|
format.RestoreNameBackQuotes|
format.RestoreSpacesAroundBinaryOperation|
format.RestoreBracketAroundBinaryOperation|
format.RestoreStringWithoutCharset, &sb),
)
if err != nil {
return nil, "", nil, nil, errors.Annotate(err, "Failed to restore where clause in non-transactional DML")
}
} else {
sb.WriteString("TRUE")
}
	// Ensure NULL values are placed first.
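	// NULL shard values sort first so they are all covered by the first job,
	// whose range then starts from a NULL boundary (see doOneJob).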
selectSQL := fmt.Sprintf("SELECT `%s` FROM `%s`.`%s` WHERE %s ORDER BY IF(ISNULL(`%s`),0,1),`%s`",
stmt.ShardColumn.Name.O, tableName.DBInfo.Name.O, tableName.Name.O, sb.String(), stmt.ShardColumn.Name.O, stmt.ShardColumn.Name.O)
return tableName, selectSQL, shardColumnInfo, tableSources, nil
}
func selectShardColumn(stmt *ast.NonTransactionalDMLStmt, se Session, tableSources []*ast.TableSource,
leftMostTableName *ast.TableName, leftMostTableSource *ast.TableSource) (
*model.ColumnInfo, *ast.TableName, error) {
var indexed bool
var shardColumnInfo *model.ColumnInfo
var selectedTableName *ast.TableName
if len(tableSources) == 1 {
// single table
leftMostTable, err := domain.GetDomain(se).InfoSchema().TableByName(leftMostTableName.Schema, leftMostTableName.Name)
if err != nil {
return nil, nil, err
}
selectedTableName = leftMostTableName
indexed, shardColumnInfo, err = selectShardColumnFromTheOnlyTable(
stmt, leftMostTableName, leftMostTableSource.AsName, leftMostTable)
if err != nil {
return nil, nil, err
}
} else {
// multi table join
if stmt.ShardColumn == nil {
leftMostTable, err := domain.GetDomain(se).InfoSchema().TableByName(leftMostTableName.Schema, leftMostTableName.Name)
if err != nil {
return nil, nil, err
}
selectedTableName = leftMostTableName
indexed, shardColumnInfo, err = selectShardColumnAutomatically(stmt, leftMostTable, leftMostTableName, leftMostTableSource.AsName)
if err != nil {
return nil, nil, err
}
} else if stmt.ShardColumn.Schema.L != "" && stmt.ShardColumn.Table.L != "" && stmt.ShardColumn.Name.L != "" {
specifiedDbName := stmt.ShardColumn.Schema
specifiedTableName := stmt.ShardColumn.Table
specifiedColName := stmt.ShardColumn.Name
// the specified table must be in the join
tableInJoin := false
var chosenTableName model.CIStr
for _, tableSource := range tableSources {
tableSourceName := tableSource.Source.(*ast.TableName)
tableSourceFinalTableName := tableSource.AsName // precedence: alias name, then table name
if tableSourceFinalTableName.O == "" {
tableSourceFinalTableName = tableSourceName.Name
}
if tableSourceName.Schema.L == specifiedDbName.L && tableSourceFinalTableName.L == specifiedTableName.L {
tableInJoin = true
selectedTableName = tableSourceName
chosenTableName = tableSourceName.Name
break
}
}
if !tableInJoin {
return nil, nil,
errors.Errorf(
"Non-transactional DML, shard column %s.%s.%s is not in the tables involved in the join",
specifiedDbName.L, specifiedTableName.L, specifiedColName.L,
)
}
tbl, err := domain.GetDomain(se).InfoSchema().TableByName(specifiedDbName, chosenTableName)
if err != nil {
return nil, nil, err
}
indexed, shardColumnInfo, err = selectShardColumnByGivenName(specifiedColName.L, tbl)
if err != nil {
return nil, nil, err
}
} else {
return nil, nil, errors.New(
"Non-transactional DML, shard column must be fully specified (i.e. `BATCH ON dbname.tablename.colname`) when multiple tables are involved",
)
}
}
if !indexed {
return nil, nil, errors.Errorf("Non-transactional DML, shard column %s is not indexed", stmt.ShardColumn.Name.L)
}
return shardColumnInfo, selectedTableName, nil
}
func collectTableSourcesInJoin(node ast.ResultSetNode, tableSources []*ast.TableSource) ([]*ast.TableSource, error) {
if node == nil {
return tableSources, nil
}
switch x := node.(type) {
case *ast.Join:
var err error
tableSources, err = collectTableSourcesInJoin(x.Left, tableSources)
if err != nil {
return nil, err
}
tableSources, err = collectTableSourcesInJoin(x.Right, tableSources)
if err != nil {
return nil, err
}
case *ast.TableSource:
// assert it's a table name
if _, ok := x.Source.(*ast.TableName); !ok {
return nil, errors.New("Non-transactional DML, table name not found in join")
}
tableSources = append(tableSources, x)
default:
return nil, errors.Errorf("Non-transactional DML, unknown type %T in table refs", node)
}
return tableSources, nil
}
// It attempts to auto-select a shard column from the handle if one is not specified, and fills the
// corresponding info back into the stmt, making it transparent to the following steps.
func selectShardColumnFromTheOnlyTable(stmt *ast.NonTransactionalDMLStmt, tableName *ast.TableName,
tableAsName model.CIStr, tbl table.Table) (
indexed bool, shardColumnInfo *model.ColumnInfo, err error) {
if stmt.ShardColumn == nil {
return selectShardColumnAutomatically(stmt, tbl, tableName, tableAsName)
}
return selectShardColumnByGivenName(stmt.ShardColumn.Name.L, tbl)
}
func selectShardColumnByGivenName(shardColumnName string, tbl table.Table) (
indexed bool, shardColumnInfo *model.ColumnInfo, err error) {
tableInfo := tbl.Meta()
if shardColumnName == model.ExtraHandleName.L && !tableInfo.HasClusteredIndex() {
return true, nil, nil
}
for _, col := range tbl.Cols() {
if col.Name.L == shardColumnName {
shardColumnInfo = col.ColumnInfo
break
}
}
if shardColumnInfo == nil {
return false, nil, errors.Errorf("shard column %s not found", shardColumnName)
}
// is int handle
if mysql.HasPriKeyFlag(shardColumnInfo.GetFlag()) && tableInfo.PKIsHandle {
return true, shardColumnInfo, nil
}
for _, index := range tbl.Indices() {
if index.Meta().State != model.StatePublic || index.Meta().Invisible {
continue
}
indexColumns := index.Meta().Columns
// check only the first column
if len(indexColumns) > 0 && indexColumns[0].Name.L == shardColumnName {
indexed = true
break
}
}
return indexed, shardColumnInfo, nil
}
func selectShardColumnAutomatically(stmt *ast.NonTransactionalDMLStmt, tbl table.Table,
tableName *ast.TableName, tableAsName model.CIStr) (bool, *model.ColumnInfo, error) {
// auto-detect shard column
var shardColumnInfo *model.ColumnInfo
tableInfo := tbl.Meta()
if tbl.Meta().PKIsHandle {
shardColumnInfo = tableInfo.GetPkColInfo()
} else if tableInfo.IsCommonHandle {
for _, index := range tableInfo.Indices {
if index.Primary {
if len(index.Columns) == 1 {
shardColumnInfo = tableInfo.Columns[index.Columns[0].Offset]
break
}
// if the clustered index contains multiple columns, we cannot automatically choose a column as the shard column
return false, nil, errors.New("Non-transactional DML, the clustered index contains multiple columns. Please specify a shard column")
}
}
if shardColumnInfo == nil {
return false, nil, errors.New("Non-transactional DML, the clustered index is not found")
}
}
shardColumnName := model.ExtraHandleName.L
if shardColumnInfo != nil {
shardColumnName = shardColumnInfo.Name.L
}
outputTableName := tableName.Name
if tableAsName.L != "" {
outputTableName = tableAsName
}
stmt.ShardColumn = &ast.ColumnName{
Schema: tableName.Schema,
Table: outputTableName, // so that table alias works
Name: model.NewCIStr(shardColumnName),
}
return true, shardColumnInfo, nil
}
func buildDryRunResults(dryRunOption int, results []string, maxChunkSize int) (sqlexec.RecordSet, error) {
var fieldName string
if dryRunOption == ast.DryRunSplitDml {
fieldName = "split statement examples"
} else {
fieldName = "query statement"
}
resultFields := []*ast.ResultField{{
Column: &model.ColumnInfo{
FieldType: *types.NewFieldType(mysql.TypeString),
},
ColumnAsName: model.NewCIStr(fieldName),
}}
rows := make([][]interface{}, 0, len(results))
for _, result := range results {
row := make([]interface{}, 1)
row[0] = result
rows = append(rows, row)
}
return &sqlexec.SimpleRecordSet{
ResultFields: resultFields,
Rows: rows,
MaxChunkSize: maxChunkSize,
}, nil
}
func buildExecuteResults(ctx context.Context, jobs []job, maxChunkSize int, redactLog bool) (sqlexec.RecordSet, error) {
failedJobs := make([]job, 0)
for _, job := range jobs {
if job.err != nil {
failedJobs = append(failedJobs, job)
}
}
if len(failedJobs) == 0 {
resultFields := []*ast.ResultField{
{
Column: &model.ColumnInfo{
FieldType: *types.NewFieldType(mysql.TypeLong),
},
ColumnAsName: model.NewCIStr("number of jobs"),
},
{
Column: &model.ColumnInfo{
FieldType: *types.NewFieldType(mysql.TypeString),
},
ColumnAsName: model.NewCIStr("job status"),
},
}
rows := make([][]interface{}, 1)
row := make([]interface{}, 2)
row[0] = len(jobs)
row[1] = "all succeeded"
rows[0] = row
return &sqlexec.SimpleRecordSet{
ResultFields: resultFields,
Rows: rows,
MaxChunkSize: maxChunkSize,
}, nil
}
// ignoreError must be set.
var sb strings.Builder
for _, job := range failedJobs {
sb.WriteString(fmt.Sprintf("%s, %s;\n", job.String(redactLog), job.err.Error()))
}
errStr := sb.String()
// log errors here in case the output is too long. There can be thousands of errors.
logutil.Logger(ctx).Error("Non-transactional DML failed",
zap.Int("num_failed_jobs", len(failedJobs)), zap.String("failed_jobs", errStr))
return nil, fmt.Errorf("%d/%d jobs failed in the non-transactional DML: %s, ...(more in logs)",
len(failedJobs), len(jobs), errStr[:mathutil.Min(500, len(errStr)-1)])
}
| session/nontransactional.go | 0 | https://github.com/pingcap/tidb/commit/90e727194074b352ffe826fca411c1776b5562cd | [
0.0012193958973512053,
0.00019071252609137446,
0.0001599358656676486,
0.0001718998682918027,
0.00011936615919694304
] |
{
"id": 0,
"code_window": [
"\t}\n",
"\n",
"\tif idx, ok := table.ColIdxMap.Get(colID); ok {\n",
"\t\tif rf.traceKV {\n",
"\t\t\tprettyKey = fmt.Sprintf(\"%s/%s\", prettyKey, table.desc.DeletableColumns()[idx].GetName())\n",
"\t\t}\n",
"\t\tval := rf.machine.nextKV.Value\n",
"\t\tif len(val.RawBytes) == 0 {\n",
"\t\t\treturn prettyKey, \"\", nil\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tprettyKey = fmt.Sprintf(\"%s/%s\", prettyKey, table.cols[idx].GetName())\n"
],
"file_path": "pkg/sql/colfetcher/cfetcher.go",
"type": "replace",
"edit_start_line_idx": 1000
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colfetcher
import (
"context"
"sync"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecargs"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/physicalplan"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util"
)
// cFetcherTableArgs describes the information about the index we're fetching
// from. Note that only columns that need to be fetched (i.e. requested by the
// caller) are included in the internal state.
type cFetcherTableArgs struct {
desc catalog.TableDescriptor
index catalog.Index
// ColIdxMap is a mapping from ColumnID of each column to its ordinal. Only
// needed columns are present.
ColIdxMap catalog.TableColMap
isSecondaryIndex bool
// cols are all needed columns of the table that are present in the index.
// The system columns, if requested, are at the end of cols.
cols []catalog.Column
// typs are the types of only needed columns from the table.
typs []*types.T
}
var cFetcherTableArgsPool = sync.Pool{
New: func() interface{} {
return &cFetcherTableArgs{}
},
}
func (a *cFetcherTableArgs) Release() {
// Deeply reset the column descriptors.
for i := range a.cols {
a.cols[i] = nil
}
*a = cFetcherTableArgs{
cols: a.cols[:0],
// The types are small objects, so we don't bother deeply resetting this
// slice.
typs: a.typs[:0],
}
cFetcherTableArgsPool.Put(a)
}
func (a *cFetcherTableArgs) populateTypes(cols []catalog.Column) {
if cap(a.typs) < len(cols) {
a.typs = make([]*types.T, len(cols))
} else {
a.typs = a.typs[:len(cols)]
}
for i := range cols {
a.typs[i] = cols[i].GetType()
}
}
// populateTableArgs fills all fields of the cFetcherTableArgs except for
// ColIdxMap. Note that all columns accessible from the index (i.e. present in
// the key or value part) will be included in the result. In order to prune
// the unnecessary columns away, use keepOnlyNeededColumns.
//
// If index is a secondary index, then all inaccessible columns are pruned away.
// In such a scenario, a non-nil idxMap is returned that allows remapping ordinals
// referring to columns from the whole table to the correct positions among only
// accessible columns. post will be adjusted automatically. Columns that are
// not accessible from the secondary index have an undefined value corresponding
// to them if idxMap is non-nil.
//
// For example, say the table has 4 columns (@1, @2, @3, @4), but only 2 columns
// are present in the index we're reading from (@3, @1). In this case, the
// returned table args only contains columns (@1, @3) and we get an index map as
// idxMap = [0, x, 1, x] (where 'x' indicates an undefined value).
// Note that although @3 appears earlier than @1 in the index, because we
// iterate over all columns of the table according to their column ordinals, we
// will see @1 first, so it gets the 0th slot, and @3 second, so it gets the 1st
// slot.
func populateTableArgs(
ctx context.Context,
flowCtx *execinfra.FlowCtx,
table catalog.TableDescriptor,
index catalog.Index,
invertedCol catalog.Column,
visibility execinfrapb.ScanVisibility,
hasSystemColumns bool,
post *execinfrapb.PostProcessSpec,
helper *colexecargs.ExprHelper,
) (_ *cFetcherTableArgs, idxMap []int, _ error) {
args := cFetcherTableArgsPool.Get().(*cFetcherTableArgs)
// First, find all columns present in the table and possibly include the
// system columns (when requested).
cols := args.cols[:0]
if visibility == execinfra.ScanVisibilityPublicAndNotPublic {
cols = append(cols, table.ReadableColumns()...)
} else {
cols = append(cols, table.PublicColumns()...)
}
if invertedCol != nil {
for i, col := range cols {
if col.GetID() == invertedCol.GetID() {
cols[i] = invertedCol
break
}
}
}
numSystemCols := 0
if hasSystemColumns {
systemCols := table.SystemColumns()
numSystemCols = len(systemCols)
cols = append(cols, systemCols...)
}
if !index.Primary() {
// If we have a secondary index, not all columns might be available from
// the index, so we'll prune the unavailable columns away.
colIDs := index.CollectKeyColumnIDs()
colIDs.UnionWith(index.CollectSecondaryStoredColumnIDs())
colIDs.UnionWith(index.CollectKeySuffixColumnIDs())
if colIDs.Len() < len(cols)-numSystemCols {
needTypesBeforeRemapping := post.RenderExprs != nil
if needTypesBeforeRemapping {
args.populateTypes(cols)
}
idxMap = make([]int, len(cols))
colIdx := 0
for i := range cols {
//gcassert:bce
id := cols[i].GetID()
if colIDs.Contains(id) || (hasSystemColumns && i >= len(cols)-numSystemCols) {
idxMap[i] = colIdx
cols[colIdx] = cols[i]
colIdx++
}
}
cols = cols[:colIdx]
if err := remapPostProcessSpec(
flowCtx, post, idxMap, helper, args.typs,
); err != nil {
return nil, nil, err
}
}
}
*args = cFetcherTableArgs{
desc: table,
index: index,
isSecondaryIndex: !index.Primary(),
cols: cols,
typs: args.typs,
}
args.populateTypes(cols)
// Before we can safely use types from the table descriptor, we need to
// make sure they are hydrated. In row execution engine it is done during
// the processor initialization, but neither ColBatchScan nor cFetcher are
// processors, so we need to do the hydration ourselves.
resolver := flowCtx.TypeResolverFactory.NewTypeResolver(flowCtx.Txn)
return args, idxMap, resolver.HydrateTypeSlice(ctx, args.typs)
}
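// The following is a self-contained sketch (hypothetical, for illustration
// only) of the idxMap construction described above: given which ordinals of
// a 4-column table are accessible from the index, it produces
// idxMap = [0, x, 1, x], where the 'x' slots are never read.
func exampleIdxMapConstruction() []int {
	// Only @1 and @3 are accessible from the index.
	accessible := []bool{true, false, true, false}
	idxMap := make([]int, len(accessible))
	colIdx := 0
	for i, ok := range accessible {
		if ok {
			idxMap[i] = colIdx
			colIdx++
		}
	}
	// idxMap is now [0, 0, 1, 0]; positions 1 and 3 hold undefined values.
	return idxMap
}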
// keepOnlyNeededColumns updates the tableArgs to prune all unnecessary columns
// away based on the neededColumns slice. If we're reading from a secondary
// index that does not cover all columns, idxMap must be non-nil, describing
// the remapping to use for column ordinals from neededColumns. post is
// updated accordingly to refer to the new ordinals of columns. The method
// also populates tableArgs.ColIdxMap.
//
// If traceKV is true, then all columns are considered as needed, and
// neededColumns is ignored.
func keepOnlyNeededColumns(
flowCtx *execinfra.FlowCtx,
tableArgs *cFetcherTableArgs,
idxMap []int,
neededColumns []uint32,
post *execinfrapb.PostProcessSpec,
helper *colexecargs.ExprHelper,
) error {
if !flowCtx.TraceKV && len(neededColumns) < len(tableArgs.cols) {
		// If tracing is not enabled and we don't need all of the available
		// columns, we will prune all of the unneeded columns away.
// First, populate a set of needed columns.
var neededColumnsSet util.FastIntSet
for _, neededColumn := range neededColumns {
neededColIdx := int(neededColumn)
if idxMap != nil {
neededColIdx = idxMap[neededColIdx]
}
neededColumnsSet.Add(neededColIdx)
}
// When idxMap is non-nil, we can reuse that. Note that in this case
// the length of idxMap is equal to the number of columns in the
// whole table, and we are reading from the secondary index, so the
		// slice will have sufficient size. We also don't need to reset
// it since we'll update the needed positions below.
if idxMap == nil {
idxMap = make([]int, len(tableArgs.typs))
}
// Iterate over all needed columns, populate the idxMap, and adjust
// the post-processing spec to refer only to the needed columns
// directly.
//
// If non-nil idxMap was passed into this method, we have to update it
// by essentially applying a projection on top of the already present
// projection. Consider the following example:
// idxMap = [0, x, 1, x] (where 'x' indicates an undefined value)
// and
// neededColumns = [2].
// Such a setup means that only columns with ordinals @1 and @3 are
// present in the secondary index while only @3 is actually needed.
// Above, we have already remapped neededColIdx = 2 to be 1, so now
// neededColumnsSet only contains 1. The post-processing already refers
// to this column as having index 1.
// However, since we are pruning the column with index 0 away, the
// post-processing stage will see a single column. Thus, we have to
// update the index map to be
// idxMap = [x, 0, x, x]
// and then remap the post-processing spec below so that it refers to
// the single needed column with the correct ordinal.
neededColIdx := 0
for idx, ok := neededColumnsSet.Next(0); ok; idx, ok = neededColumnsSet.Next(idx + 1) {
idxMap[idx] = neededColIdx
neededColIdx++
}
if err := remapPostProcessSpec(
flowCtx, post, idxMap, helper, tableArgs.typs,
); err != nil {
return err
}
// Now we have to actually prune out the unnecessary columns.
neededColIdx = 0
for idx, ok := neededColumnsSet.Next(0); ok; idx, ok = neededColumnsSet.Next(idx + 1) {
tableArgs.cols[neededColIdx] = tableArgs.cols[idx]
tableArgs.typs[neededColIdx] = tableArgs.typs[idx]
neededColIdx++
}
tableArgs.cols = tableArgs.cols[:neededColIdx]
tableArgs.typs = tableArgs.typs[:neededColIdx]
}
// Populate the ColIdxMap.
for i := range tableArgs.cols {
tableArgs.ColIdxMap.Set(tableArgs.cols[i].GetID(), i)
}
return nil
}
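// A standalone sketch (hypothetical, for illustration only) of the
// projection-on-top-of-projection update described in the comments above:
// starting from idxMap = [0, x, 1, x] with only remapped ordinal 1 needed,
// the map is updated so that the single needed column gets ordinal 0.
func exampleIdxMapUpdate() []int {
	idxMap := []int{0, 0, 1, 0} // the 'x' slots hold stale values
	var neededColumnsSet util.FastIntSet
	neededColumnsSet.Add(1)
	neededColIdx := 0
	for idx, ok := neededColumnsSet.Next(0); ok; idx, ok = neededColumnsSet.Next(idx + 1) {
		idxMap[idx] = neededColIdx
		neededColIdx++
	}
	// Only idxMap[1] is meaningful now and holds 0.
	return idxMap
}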
// remapPostProcessSpec updates post so that all IndexedVars refer to the new
// ordinals according to idxMap.
//
// For example, say we have idxMap = [0, 0, 1, 2, 0, 0] and a render expression
// like '(@1 + @4) / @3`, then it'll be updated into '(@1 + @3) / @2'. Such an
// idxMap indicates that the table has 6 columns and only 3 of them (0th, 2nd,
// 3rd) are needed.
//
// typsBeforeRemapping need to contain all the types of columns before the
// mapping of idxMap was applied. These will only be used if post.RenderExprs is
// not nil.
//
// If preserveFlowSpecs is true, then this method updates post to store the
// original output columns or render expressions. Notably, in order to not
// corrupt the flow specs that have been scheduled to run on the remote nodes,
// this method will allocate fresh slices instead of updating the old slices in
// place (the flow specs for the remote nodes have shallow copies of this
// PostProcessSpec).
// NB: it is ok that we're modifying the specs - we are in the flow setup path
// which occurs **after** we have sent out SetupFlowRequest RPCs. In other
// words, every node must have gotten the unmodified version of the spec and is
// now free to modify it as it pleases.
func remapPostProcessSpec(
flowCtx *execinfra.FlowCtx,
post *execinfrapb.PostProcessSpec,
idxMap []int,
helper *colexecargs.ExprHelper,
typsBeforeRemapping []*types.T,
) error {
if post.Projection {
outputColumns := post.OutputColumns
if flowCtx.PreserveFlowSpecs && post.OriginalOutputColumns == nil {
// This is the first time we're modifying this PostProcessSpec, but
// we've been asked to preserve the specs, so we have to set the
// original output columns. We are also careful to allocate a new
// slice to populate the updated projection.
post.OriginalOutputColumns = outputColumns
post.OutputColumns = make([]uint32, len(outputColumns))
}
for i, colIdx := range outputColumns {
post.OutputColumns[i] = uint32(idxMap[colIdx])
}
} else if post.RenderExprs != nil {
renderExprs := post.RenderExprs
if flowCtx.PreserveFlowSpecs && post.OriginalRenderExprs == nil {
// This is the first time we're modifying this PostProcessSpec, but
// we've been asked to preserve the specs, so we have to set the
// original render expressions. We are also careful to allocate a
// new slice to populate the updated render expressions.
post.OriginalRenderExprs = renderExprs
post.RenderExprs = make([]execinfrapb.Expression, len(renderExprs))
}
var err error
for i := range renderExprs {
// Make sure that the render expression is deserialized if we
// are on the remote node.
//
// It is ok to use the evalCtx of the flowCtx since it won't be
// mutated (we are not evaluating the expressions).
post.RenderExprs[i].LocalExpr, err = helper.ProcessExpr(renderExprs[i], flowCtx.EvalCtx, typsBeforeRemapping)
if err != nil {
return err
}
post.RenderExprs[i].LocalExpr = physicalplan.RemapIVarsInTypedExpr(renderExprs[i].LocalExpr, idxMap)
}
}
return nil
}
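// A minimal sketch (hypothetical, for illustration only) of the ordinal
// remapping described above for the projection case: with
// idxMap = [0, 0, 1, 2, 0, 0], a projection on columns @1, @4, @3 (0-based
// ordinals 0, 3, 2) becomes a projection on ordinals 0, 2, 1 of the pruned
// column set.
func exampleProjectionRemap() []uint32 {
	idxMap := []int{0, 0, 1, 2, 0, 0}
	outputColumns := []uint32{0, 3, 2}
	remapped := make([]uint32, len(outputColumns))
	for i, colIdx := range outputColumns {
		remapped[i] = uint32(idxMap[colIdx])
	}
	return remapped // [0, 2, 1]
}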
| pkg/sql/colfetcher/cfetcher_setup.go | 1 | https://github.com/cockroachdb/cockroach/commit/a519265872c58fcfc2cc815c9dc58660534f9cd1 | [
0.01864415407180786,
0.002155474154278636,
0.00016217457596212626,
0.000433408742537722,
0.0037769523914903402
] |
{
"id": 0,
"code_window": [
"\t}\n",
"\n",
"\tif idx, ok := table.ColIdxMap.Get(colID); ok {\n",
"\t\tif rf.traceKV {\n",
"\t\t\tprettyKey = fmt.Sprintf(\"%s/%s\", prettyKey, table.desc.DeletableColumns()[idx].GetName())\n",
"\t\t}\n",
"\t\tval := rf.machine.nextKV.Value\n",
"\t\tif len(val.RawBytes) == 0 {\n",
"\t\t\treturn prettyKey, \"\", nil\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tprettyKey = fmt.Sprintf(\"%s/%s\", prettyKey, table.cols[idx].GetName())\n"
],
"file_path": "pkg/sql/colfetcher/cfetcher.go",
"type": "replace",
"edit_start_line_idx": 1000
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"fmt"
"strings"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice"
"github.com/cockroachdb/cockroach/pkg/sql/privilege"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqlerrors"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/util/log/eventpb"
"github.com/cockroachdb/errors"
)
type dropIndexNode struct {
n *tree.DropIndex
idxNames []fullIndexName
}
// DropIndex drops an index.
// Privileges: CREATE on table.
// Notes: postgres allows only the index owner to DROP an index.
// mysql requires the INDEX privilege on the table.
func (p *planner) DropIndex(ctx context.Context, n *tree.DropIndex) (planNode, error) {
if err := checkSchemaChangeEnabled(
ctx,
p.ExecCfg(),
"DROP INDEX",
); err != nil {
return nil, err
}
// Keep a track of the indexes that exist to check. When the IF EXISTS
// options are provided, we will simply not include any indexes that
// don't exist and continue execution.
idxNames := make([]fullIndexName, 0, len(n.IndexList))
for _, index := range n.IndexList {
tn, tableDesc, err := expandMutableIndexName(ctx, p, index, !n.IfExists /* requireTable */)
if err != nil {
// Error or table did not exist.
return nil, err
}
if tableDesc == nil {
// IfExists specified and table did not exist.
continue
}
if err := p.CheckPrivilege(ctx, tableDesc, privilege.CREATE); err != nil {
return nil, err
}
idxNames = append(idxNames, fullIndexName{tn: tn, idxName: index.Index})
}
return &dropIndexNode{n: n, idxNames: idxNames}, nil
}
// ReadingOwnWrites implements the planNodeReadingOwnWrites interface.
// This is because DROP INDEX performs multiple KV operations on descriptors
// and expects to see its own writes.
func (n *dropIndexNode) ReadingOwnWrites() {}
func (n *dropIndexNode) startExec(params runParams) error {
telemetry.Inc(sqltelemetry.SchemaChangeDropCounter("index"))
if n.n.Concurrently {
params.p.BufferClientNotice(
params.ctx,
pgnotice.Newf("CONCURRENTLY is not required as all indexes are dropped concurrently"),
)
}
ctx := params.ctx
for _, index := range n.idxNames {
// Need to retrieve the descriptor again for each index name in
// the list: when two or more index names refer to the same table,
// the mutation list and new version number created by the first
// drop need to be visible to the second drop.
_, tableDesc, err := params.p.ResolveMutableTableDescriptor(
ctx, index.tn, true /*required*/, tree.ResolveRequireTableOrViewDesc)
if sqlerrors.IsUndefinedRelationError(err) {
// Somehow the descriptor we had during planning is not there
// any more.
return errors.NewAssertionErrorWithWrappedErrf(err,
"table descriptor for %q became unavailable within same txn",
tree.ErrString(index.tn))
}
if err != nil {
return err
}
if tableDesc.IsView() && !tableDesc.MaterializedView() {
return pgerror.Newf(pgcode.WrongObjectType, "%q is not a table or materialized view", tableDesc.Name)
}
// If we couldn't find the index by name, this is either a legitimate error or
// this statement contains an 'IF EXISTS' qualifier. Both of these cases are
// handled by `dropIndexByName()` below so we just ignore the error here.
idx, _ := tableDesc.FindIndexWithName(string(index.idxName))
var shardColName string
// If we're dropping a sharded index, record the name of its shard column to
// potentially drop it if no other index refers to it.
if idx != nil && idx.IsSharded() && !idx.Dropped() {
shardColName = idx.GetShardColumnName()
}
// Drop inaccessible indexed columns. They are created for expression
// indexes. They cannot be referenced in constraints, computed columns,
// or other indexes, so they are safe to drop.
columnsDropped := false
if idx != nil {
for i, count := 0, idx.NumKeyColumns(); i < count; i++ {
id := idx.GetKeyColumnID(i)
col, err := tableDesc.FindColumnWithID(id)
if err != nil {
return err
}
if col.IsExpressionIndexColumn() {
n.queueDropColumn(tableDesc, col)
columnsDropped = true
}
}
}
// CAUTION: After dropIndexByName returns, idx will be a pointer to a
// different index than the one being dropped.
if err := params.p.dropIndexByName(
ctx, index.tn, index.idxName, tableDesc, n.n.IfExists, n.n.DropBehavior, checkIdxConstraint,
tree.AsStringWithFQNames(n.n, params.Ann()),
); err != nil {
return err
}
if shardColName != "" {
ok, err := n.maybeQueueDropShardColumn(tableDesc, shardColName)
if err != nil {
return err
}
columnsDropped = columnsDropped || ok
}
if columnsDropped {
if err := n.finalizeDropColumn(params, tableDesc); err != nil {
return err
}
}
}
return nil
}
// queueDropColumn queues a column to be dropped. Once all columns to drop are
// queued, call finalizeDropColumn.
func (n *dropIndexNode) queueDropColumn(tableDesc *tabledesc.Mutable, col catalog.Column) {
tableDesc.AddColumnMutation(col.ColumnDesc(), descpb.DescriptorMutation_DROP)
for i := range tableDesc.Columns {
if tableDesc.Columns[i].ID == col.GetID() {
// Note the third slice parameter which will force a copy of the backing
// array if the column being removed is not the last column.
tableDesc.Columns = append(tableDesc.Columns[:i:i],
tableDesc.Columns[i+1:]...)
break
}
}
}
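// The full slice expression above (s[:i:i]) caps the capacity at i, so the
// append cannot clobber the elements after i in the shared backing array.
// Below is a standalone sketch of the idiom (a hypothetical helper, for
// illustration only):
func exampleRemoveAt(xs []int, i int) []int {
	// The third index forces append to allocate a copy instead of writing
	// into xs[i+1:].
	return append(xs[:i:i], xs[i+1:]...)
}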
// maybeQueueDropShardColumn queues the given shard column to be dropped if
// there aren't any other indexes referring to it. It returns true if the
// column was queued to be dropped.
//
// Assumes that the given index is sharded.
func (n *dropIndexNode) maybeQueueDropShardColumn(
tableDesc *tabledesc.Mutable, shardColName string,
) (bool, error) {
shardColDesc, err := tableDesc.FindColumnWithName(tree.Name(shardColName))
if err != nil {
return false, err
}
if shardColDesc.Dropped() {
return false, nil
}
if catalog.FindNonDropIndex(tableDesc, func(otherIdx catalog.Index) bool {
colIDs := otherIdx.CollectKeyColumnIDs()
if !otherIdx.Primary() {
colIDs.UnionWith(otherIdx.CollectSecondaryStoredColumnIDs())
colIDs.UnionWith(otherIdx.CollectKeySuffixColumnIDs())
}
return colIDs.Contains(shardColDesc.GetID())
}) != nil {
return false, nil
}
if err := n.dropShardColumnAndConstraint(tableDesc, shardColDesc); err != nil {
return false, err
}
return true, nil
}
// dropShardColumnAndConstraint drops the given shard column and its associated check
// constraint.
func (n *dropIndexNode) dropShardColumnAndConstraint(
tableDesc *tabledesc.Mutable, shardCol catalog.Column,
) error {
validChecks := tableDesc.Checks[:0]
for _, check := range tableDesc.AllActiveAndInactiveChecks() {
if used, err := tableDesc.CheckConstraintUsesColumn(check, shardCol.GetID()); err != nil {
return err
} else if used {
if check.Validity == descpb.ConstraintValidity_Validating {
return pgerror.Newf(pgcode.ObjectNotInPrerequisiteState,
"referencing constraint %q in the middle of being added, try again later", check.Name)
}
} else {
validChecks = append(validChecks, check)
}
}
if len(validChecks) != len(tableDesc.Checks) {
tableDesc.Checks = validChecks
}
n.queueDropColumn(tableDesc, shardCol)
return nil
}
// finalizeDropColumn finalizes the dropping of one or more columns. It should
// only be called if queueDropColumn has been called at least once.
func (n *dropIndexNode) finalizeDropColumn(params runParams, tableDesc *tabledesc.Mutable) error {
if err := tableDesc.AllocateIDs(params.ctx); err != nil {
return err
}
mutationID := tableDesc.ClusterVersion.NextMutationID
if err := params.p.writeSchemaChange(
params.ctx, tableDesc, mutationID, tree.AsStringWithFQNames(n.n, params.Ann()),
); err != nil {
return err
}
return nil
}
func (*dropIndexNode) Next(runParams) (bool, error) { return false, nil }
func (*dropIndexNode) Values() tree.Datums { return tree.Datums{} }
func (*dropIndexNode) Close(context.Context) {}
type fullIndexName struct {
tn *tree.TableName
idxName tree.UnrestrictedName
}
// dropIndexConstraintBehavior is used when dropping an index to signal whether
// it is okay to do so even if it is in use as a constraint (outbound FK or
// unique). This is a subset of what is implied by DropBehavior CASCADE, which
// implies dropping *all* dependencies. This is used e.g. when the element
// constrained is being dropped anyway.
type dropIndexConstraintBehavior bool
const (
checkIdxConstraint dropIndexConstraintBehavior = true
ignoreIdxConstraint dropIndexConstraintBehavior = false
)
func (p *planner) dropIndexByName(
ctx context.Context,
tn *tree.TableName,
idxName tree.UnrestrictedName,
tableDesc *tabledesc.Mutable,
ifExists bool,
behavior tree.DropBehavior,
constraintBehavior dropIndexConstraintBehavior,
jobDesc string,
) error {
idx, err := tableDesc.FindIndexWithName(string(idxName))
if err != nil {
// Only index names of the form "table@idx" throw an error here if they
// don't exist.
if ifExists {
// Noop.
return nil
}
// Index does not exist, but we want it to: error out.
return pgerror.WithCandidateCode(err, pgcode.UndefinedObject)
}
if idx.Dropped() {
return nil
}
if tableDesc.IsLocalityRegionalByRow() {
if err := p.checkNoRegionChangeUnderway(
ctx,
tableDesc.GetParentID(),
"DROP INDEX on a REGIONAL BY ROW table",
); err != nil {
return err
}
}
if idx.IsUnique() && behavior != tree.DropCascade && constraintBehavior != ignoreIdxConstraint && !idx.IsCreatedExplicitly() {
return errors.WithHint(
pgerror.Newf(pgcode.DependentObjectsStillExist,
"index %q is in use as unique constraint", idx.GetName()),
"use CASCADE if you really want to drop it.",
)
}
	// Check whether a CCL binary is required for eventual zone config
	// removal. This is only required if `system.zones` exists.
if ZonesTableExists(ctx, p.ExecCfg().Codec, p.ExecCfg().Settings.Version) {
_, zone, _, err := GetZoneConfigInTxn(
ctx, p.txn, p.ExecCfg().Codec, tableDesc.ID, nil /* index */, "", false,
)
if err != nil {
return err
}
for _, s := range zone.Subzones {
if s.IndexID != uint32(idx.GetID()) {
_, err = GenerateSubzoneSpans(
p.ExecCfg().Settings,
p.ExecCfg().ClusterID(),
p.ExecCfg().Codec,
tableDesc,
zone.Subzones,
false, /* newSubzones */
)
if sqlerrors.IsCCLRequiredError(err) {
return sqlerrors.NewCCLRequiredError(fmt.Errorf("schema change requires a CCL binary "+
"because table %q has at least one remaining index or partition with a zone config",
tableDesc.Name))
}
break
}
}
}
// Remove all foreign key references and backreferences from the index.
// TODO (lucy): This is incorrect for two reasons: The first is that FKs won't
// be restored if the DROP INDEX is rolled back, and the second is that
// validated constraints should be dropped in the schema changer in multiple
// steps to avoid inconsistencies. We should be queuing a mutation to drop the
// FK instead. The reason why the FK is removed here is to keep the index
// state consistent with the removal of the reference on the other table
// involved in the FK, in case of rollbacks (#38733).
// TODO (rohany): switching all the checks from checking the legacy ID's to
// checking if the index has a prefix of the columns needed for the foreign
// key might result in some false positives for this index while it is in
// a mixed version cluster, but we have to remove all reads of the legacy
// explicit index fields.
// Construct a list of all the remaining indexes, so that we can see if there
// is another index that could replace the one we are deleting for a given
// foreign key constraint.
remainingIndexes := make([]catalog.Index, 1, len(tableDesc.ActiveIndexes()))
remainingIndexes[0] = tableDesc.GetPrimaryIndex()
for _, index := range tableDesc.PublicNonPrimaryIndexes() {
if index.GetID() != idx.GetID() {
remainingIndexes = append(remainingIndexes, index)
}
}
// indexHasReplacementCandidate runs isValidIndex on each index in remainingIndexes and returns
// true if at least one index satisfies isValidIndex.
indexHasReplacementCandidate := func(isValidIndex func(index catalog.Index) bool) bool {
foundReplacement := false
for _, index := range remainingIndexes {
if isValidIndex(index) {
foundReplacement = true
break
}
}
return foundReplacement
}
// Check for foreign key mutations referencing this index.
for _, m := range tableDesc.Mutations {
if c := m.GetConstraint(); c != nil &&
c.ConstraintType == descpb.ConstraintToUpdate_FOREIGN_KEY &&
			// If the index being deleted could be used as an index for this outbound
// foreign key mutation, then make sure that we have another index that
// could be used for this mutation.
idx.IsValidOriginIndex(c.ForeignKey.OriginColumnIDs) &&
!indexHasReplacementCandidate(func(idx catalog.Index) bool {
return idx.IsValidOriginIndex(c.ForeignKey.OriginColumnIDs)
}) {
return pgerror.Newf(pgcode.ObjectNotInPrerequisiteState,
"referencing constraint %q in the middle of being added, try again later", c.ForeignKey.Name)
}
}
// If this index is used on the referencing side of any FK constraints, try
// to remove the references or find an alternate index that will suffice.
candidateConstraints := make([]descpb.UniqueConstraint, len(remainingIndexes))
for i := range remainingIndexes {
// We can't copy directly because of the interface conversion.
candidateConstraints[i] = remainingIndexes[i]
}
if err := p.tryRemoveFKBackReferences(
ctx, tableDesc, idx, behavior, candidateConstraints,
); err != nil {
return err
}
var droppedViews []string
for _, tableRef := range tableDesc.DependedOnBy {
if tableRef.IndexID == idx.GetID() {
// Ensure that we have DROP privilege on all dependent views
err := p.canRemoveDependentViewGeneric(
ctx, "index", idx.GetName(), tableDesc.ParentID, tableRef, behavior)
if err != nil {
return err
}
viewDesc, err := p.getViewDescForCascade(
ctx, "index", idx.GetName(), tableDesc.ParentID, tableRef.ID, behavior,
)
if err != nil {
return err
}
viewJobDesc := fmt.Sprintf("removing view %q dependent on index %q which is being dropped",
viewDesc.Name, idx.GetName())
cascadedViews, err := p.removeDependentView(ctx, tableDesc, viewDesc, viewJobDesc)
if err != nil {
return err
}
qualifiedView, err := p.getQualifiedTableName(ctx, viewDesc)
if err != nil {
return err
}
droppedViews = append(droppedViews, qualifiedView.FQString())
droppedViews = append(droppedViews, cascadedViews...)
}
}
	// Overwriting tableDesc.Index may mess up the idx object we collected above, so make a copy.
idxCopy := *idx.IndexDesc()
idxDesc := &idxCopy
// Currently, a replacement primary index must be specified when dropping the primary index,
// and this cannot be done with DROP INDEX.
if idxDesc.ID == tableDesc.GetPrimaryIndexID() {
return errors.WithHint(
pgerror.Newf(pgcode.FeatureNotSupported, "cannot drop the primary index of a table using DROP INDEX"),
"instead, use ALTER TABLE ... ALTER PRIMARY KEY or"+
"use DROP CONSTRAINT ... PRIMARY KEY followed by ADD CONSTRAINT ... PRIMARY KEY in a transaction",
)
}
foundIndex := catalog.FindPublicNonPrimaryIndex(tableDesc, func(idxEntry catalog.Index) bool {
return idxEntry.GetID() == idxDesc.ID
})
if foundIndex == nil {
return pgerror.Newf(
pgcode.ObjectNotInPrerequisiteState,
"index %q in the middle of being added, try again later",
idxName,
)
}
idxEntry := *foundIndex.IndexDesc()
idxOrdinal := foundIndex.Ordinal()
// Unsplit all manually split ranges in the index so they can be
// automatically merged by the merge queue. Gate this on being the
// system tenant because secondary tenants aren't allowed to scan
// the meta ranges directly.
// TODO(Chengxiong): Remove this range unsplitting in 22.2
st := p.EvalContext().Settings
if p.ExecCfg().Codec.ForSystemTenant() &&
!st.Version.IsActive(ctx, clusterversion.UnsplitRangesInAsyncGCJobs) {
span := tableDesc.IndexSpan(p.ExecCfg().Codec, idxEntry.ID)
txn := p.ExecCfg().DB.NewTxn(ctx, "scan-ranges-for-index-drop")
ranges, err := kvclient.ScanMetaKVs(ctx, txn, span)
if err != nil {
return err
}
for _, r := range ranges {
var desc roachpb.RangeDescriptor
if err := r.ValueProto(&desc); err != nil {
return err
}
// We have to explicitly check that the range descriptor's start key
// lies within the span of the index since ScanMetaKVs returns all
// intersecting spans.
if !desc.GetStickyBit().IsEmpty() && span.Key.Compare(desc.StartKey.AsRawKey()) <= 0 {
// Swallow "key is not the start of a range" errors because it would
// mean that the sticky bit was removed and merged concurrently. DROP
// INDEX should not fail because of this.
if err := p.ExecCfg().DB.AdminUnsplit(ctx, desc.StartKey); err != nil && !strings.Contains(err.Error(), "is not the start of a range") {
return err
}
}
}
}
	// The idx we picked up with FindIndexWithName at the top may no longer
	// contain the same fields due to other schema changes intervening since
	// the initial lookup, so we send the recent copy idxEntry for the drop
	// instead.
if err := tableDesc.AddIndexMutation(&idxEntry, descpb.DescriptorMutation_DROP); err != nil {
return err
}
tableDesc.RemovePublicNonPrimaryIndex(idxOrdinal)
if err := p.removeIndexComment(ctx, tableDesc.ID, idxDesc.ID); err != nil {
return err
}
if err := validateDescriptor(ctx, p, tableDesc); err != nil {
return err
}
mutationID := tableDesc.ClusterVersion.NextMutationID
if err := p.writeSchemaChange(ctx, tableDesc, mutationID, jobDesc); err != nil {
return err
}
p.BufferClientNotice(
ctx,
errors.WithHint(
pgnotice.Newf("the data for dropped indexes is reclaimed asynchronously"),
"The reclamation delay can be customized in the zone configuration for the table.",
),
)
// Record index drop in the event log. This is an auditable log event
// and is recorded in the same transaction as the table descriptor
// update.
return p.logEvent(ctx,
tableDesc.ID,
&eventpb.DropIndex{
TableName: tn.FQString(),
IndexName: string(idxName),
MutationID: uint32(mutationID),
CascadeDroppedViews: droppedViews,
})
}
| pkg/sql/drop_index.go | 0 | https://github.com/cockroachdb/cockroach/commit/a519265872c58fcfc2cc815c9dc58660534f9cd1 | [
0.02871927246451378,
0.001115484512411058,
0.00016163577674888074,
0.00017888916772790253,
0.0038531755562871695
] |
{
"id": 0,
"code_window": [
"\t}\n",
"\n",
"\tif idx, ok := table.ColIdxMap.Get(colID); ok {\n",
"\t\tif rf.traceKV {\n",
"\t\t\tprettyKey = fmt.Sprintf(\"%s/%s\", prettyKey, table.desc.DeletableColumns()[idx].GetName())\n",
"\t\t}\n",
"\t\tval := rf.machine.nextKV.Value\n",
"\t\tif len(val.RawBytes) == 0 {\n",
"\t\t\treturn prettyKey, \"\", nil\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tprettyKey = fmt.Sprintf(\"%s/%s\", prettyKey, table.cols[idx].GetName())\n"
],
"file_path": "pkg/sql/colfetcher/cfetcher.go",
"type": "replace",
"edit_start_line_idx": 1000
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import { Modal, Button } from "antd";
import React, { Fragment } from "react";
import "./styles.styl";
import { ModalProps } from "antd/lib/modal";
interface ICustomModalProps extends ModalProps {
children?: React.ReactNode;
trigger?: React.ReactChildren | React.ReactNode;
triggerStyle?: string;
triggerTitle?: string;
}
interface ICustomModalState {
visible: boolean;
}
class CustomModal extends React.Component<
ICustomModalProps,
ICustomModalState
> {
state = { visible: false };
showModal = () => {
this.setState({
visible: true,
});
};
handleOk = () => {
this.setState({
visible: false,
});
};
handleCancel = () => {
this.setState({
visible: false,
});
};
render() {
const {
trigger,
visible,
children,
triggerStyle,
triggerTitle,
} = this.props;
return (
<Fragment>
{trigger ? (
trigger
) : (
<a onClick={this.showModal} className={`${triggerStyle}`}>
{triggerTitle}
</a>
)}
<Modal
visible={trigger ? visible : this.state.visible}
onOk={this.handleOk}
onCancel={this.handleCancel}
className="custom--modal"
maskStyle={{
background: "rgba(71, 88, 114, 0.73)",
}}
footer={
<Button
type="link"
className="custom--modal__close--button"
onClick={this.handleCancel}
>
Done
</Button>
}
{...this.props}
>
{children}
</Modal>
</Fragment>
);
}
}
export default CustomModal;
| pkg/ui/workspaces/db-console/src/views/app/components/modal/index.tsx | 0 | https://github.com/cockroachdb/cockroach/commit/a519265872c58fcfc2cc815c9dc58660534f9cd1 | [
0.00017798205954022706,
0.0001736252597765997,
0.00016661097470205277,
0.00017453522013965994,
0.000003044079903702368
] |
{
"id": 0,
"code_window": [
"\t}\n",
"\n",
"\tif idx, ok := table.ColIdxMap.Get(colID); ok {\n",
"\t\tif rf.traceKV {\n",
"\t\t\tprettyKey = fmt.Sprintf(\"%s/%s\", prettyKey, table.desc.DeletableColumns()[idx].GetName())\n",
"\t\t}\n",
"\t\tval := rf.machine.nextKV.Value\n",
"\t\tif len(val.RawBytes) == 0 {\n",
"\t\t\treturn prettyKey, \"\", nil\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tprettyKey = fmt.Sprintf(\"%s/%s\", prettyKey, table.cols[idx].GetName())\n"
],
"file_path": "pkg/sql/colfetcher/cfetcher.go",
"type": "replace",
"edit_start_line_idx": 1000
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "sa1010",
srcs = ["analyzer.go"],
importpath = "github.com/cockroachdb/cockroach/build/bazelutil/staticcheckanalyzers/sa1010",
visibility = ["//visibility:public"],
deps = [
"//pkg/testutils/lint/passes/staticcheck",
"@co_honnef_go_tools//staticcheck",
"@org_golang_x_tools//go/analysis",
],
)
| build/bazelutil/staticcheckanalyzers/sa1010/BUILD.bazel | 0 | https://github.com/cockroachdb/cockroach/commit/a519265872c58fcfc2cc815c9dc58660534f9cd1 | [
0.00017341742932330817,
0.00017259441665373743,
0.0001717714185360819,
0.00017259441665373743,
8.230053936131299e-7
] |
{
"id": 1,
"code_window": [
"\t\t\tcontinue\n",
"\t\t}\n",
"\n",
"\t\tif rf.traceKV {\n",
"\t\t\tprettyKey = fmt.Sprintf(\"%s/%s\", prettyKey, table.desc.DeletableColumns()[vecIdx].GetName())\n",
"\t\t}\n",
"\n",
"\t\tvalueBytes, err = colencoding.DecodeTableValueToCol(\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tprettyKey = fmt.Sprintf(\"%s/%s\", prettyKey, table.cols[vecIdx].GetName())\n"
],
"file_path": "pkg/sql/colfetcher/cfetcher.go",
"type": "replace",
"edit_start_line_idx": 1097
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colfetcher
import (
"bytes"
"context"
"fmt"
"sort"
"strings"
"sync"
"time"
"github.com/cockroachdb/apd/v2"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/colconv"
"github.com/cockroachdb/cockroach/pkg/sql/colencoding"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/colmem"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/row"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/rowinfra"
"github.com/cockroachdb/cockroach/pkg/sql/scrub"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/errors"
)
type cTableInfo struct {
// -- Fields initialized once --
*cFetcherTableArgs
indexColumnDirs []descpb.IndexDescriptor_Direction
// The set of required value-component column ordinals among only needed
// columns.
neededValueColsByIdx util.FastIntSet
// Map used to get the column index based on the descpb.ColumnID.
// It's kept as a pointer so we don't have to re-allocate to sort it each
// time.
orderedColIdxMap *colIdxMap
// One value per column that is part of the key; each value is a column
// ordinal among only needed columns; -1 if we don't need the value for
// that column.
//
	// Note that if tracing is enabled on the cFetcher (traceKV == true),
// then values for all columns are needed and, thus, there will be no -1 in
// indexColOrdinals.
indexColOrdinals []int
// The set of column ordinals which are both composite and part of the index
// key.
compositeIndexColOrdinals util.FastIntSet
// One number per column coming from the "key suffix" that is part of the
// value; each number is a column ordinal among only needed columns; -1 if
// we don't need the value for that column.
//
// The "key suffix" columns are only used for secondary indexes:
// - for non-unique indexes, these columns are appended to the key (and will
// be included in indexColOrdinals instead);
// - for unique indexes, these columns are stored in the value (unless the
// key contains a NULL value: then the extra columns are appended to the key
// to unique-ify it).
//
	// Note that if tracing is enabled on the cFetcher (traceKV == true),
// then values for all columns are needed and, thus, there will be no -1 in
// extraValColOrdinals.
extraValColOrdinals []int
// invertedColOrdinal is a column ordinal among only needed columns,
// indicating the inverted column; -1 if there is no inverted column or we
// don't need the value for that column.
invertedColOrdinal int
// maxColumnFamilyID is the maximum possible family id for the configured
// table.
maxColumnFamilyID descpb.FamilyID
// knownPrefixLength is the number of bytes in the index key prefix this
// Fetcher is configured for. The index key prefix is the table id, index
// id pair at the start of the key.
knownPrefixLength int
// The following fields contain MVCC metadata for each row and may be
// returned to users of cFetcher immediately after NextBatch returns.
//
// rowLastModified is the timestamp of the last time any family in the row
// was modified in any way.
rowLastModified hlc.Timestamp
// timestampOutputIdx controls at what column ordinal in the output batch to
// write the timestamp for the MVCC timestamp system column.
timestampOutputIdx int
// oidOutputIdx controls at what column ordinal in the output batch to write
// the value for the tableoid system column.
oidOutputIdx int
keyValTypes []*types.T
extraTypes []*types.T
// extraValDirections contains len(extraTypes) ASC directions. This will
// only be used for unique secondary indexes.
extraValDirections []descpb.IndexDescriptor_Direction
da rowenc.DatumAlloc
}
var _ execinfra.Releasable = &cTableInfo{}
var cTableInfoPool = sync.Pool{
New: func() interface{} {
return &cTableInfo{
orderedColIdxMap: &colIdxMap{},
}
},
}
func newCTableInfo() *cTableInfo {
return cTableInfoPool.Get().(*cTableInfo)
}
// Release implements the execinfra.Releasable interface.
func (c *cTableInfo) Release() {
c.cFetcherTableArgs.Release()
// Note that all slices are being reused, but there is no need to deeply
// reset them since all of the slices are of Go native types.
c.orderedColIdxMap.ords = c.orderedColIdxMap.ords[:0]
c.orderedColIdxMap.vals = c.orderedColIdxMap.vals[:0]
*c = cTableInfo{
orderedColIdxMap: c.orderedColIdxMap,
indexColOrdinals: c.indexColOrdinals[:0],
extraValColOrdinals: c.extraValColOrdinals[:0],
keyValTypes: c.keyValTypes[:0],
extraTypes: c.extraTypes[:0],
extraValDirections: c.extraValDirections[:0],
}
cTableInfoPool.Put(c)
}
// colIdxMap is a "map" that contains the ordinals for each ColumnID among the
// columns that need to be fetched. This map is used to figure out what index
// within a row a particular value-component column goes into. Value-component
// columns are encoded with a column id prefix, with the guarantee that within
// any given row, the column ids are always increasing. Because of this
// guarantee, we can store this map as two sorted lists that the fetcher keeps
// an index into, giving fast access during decoding.
//
// It implements sort.Interface to be sortable on vals, while keeping ords
// matched up to the order of vals.
type colIdxMap struct {
// vals is the sorted list of descpb.ColumnIDs in the table to fetch.
vals descpb.ColumnIDs
// ords is the list of ordinals into all columns of the table for each
// column in vals. The ith entry in ords is the ordinal among all columns of
// the table for the ith column in vals.
ords []int
}
// Len implements sort.Interface.
func (m colIdxMap) Len() int {
return len(m.vals)
}
// Less implements sort.Interface.
func (m colIdxMap) Less(i, j int) bool {
return m.vals[i] < m.vals[j]
}
// Swap implements sort.Interface.
func (m colIdxMap) Swap(i, j int) {
m.vals[i], m.vals[j] = m.vals[j], m.vals[i]
m.ords[i], m.ords[j] = m.ords[j], m.ords[i]
}
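// A small sketch (hypothetical, not used by the fetcher) showing how the
// parallel slices stay aligned under sort.Sort: column IDs 5, 2, 9 at table
// ordinals 0, 1, 2 end up as vals = [2, 5, 9] and ords = [1, 0, 2], so a
// search over the sorted vals yields the matching table ordinal from ords.
func exampleColIdxMapSort() colIdxMap {
	m := colIdxMap{
		vals: descpb.ColumnIDs{5, 2, 9},
		ords: []int{0, 1, 2},
	}
	sort.Sort(m)
	return m
}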
type cFetcherArgs struct {
// lockStrength represents the row-level locking mode to use when fetching
// rows.
lockStrength descpb.ScanLockingStrength
// lockWaitPolicy represents the policy to be used for handling conflicting
// locks held by other active transactions.
lockWaitPolicy descpb.ScanLockingWaitPolicy
// lockTimeout specifies the maximum amount of time that the fetcher will
// wait while attempting to acquire a lock on a key or while blocking on an
// existing lock in order to perform a non-locking read on a key.
lockTimeout time.Duration
// memoryLimit determines the maximum memory footprint of the output batch.
memoryLimit int64
// estimatedRowCount is the optimizer-derived number of expected rows that
// this fetch will produce, if non-zero.
estimatedRowCount uint64
// reverse denotes whether or not the spans should be read in reverse or not
// when StartScan is invoked.
reverse bool
// traceKV indicates whether or not session tracing is enabled. It is set
// when initializing the fetcher.
traceKV bool
}
// noOutputColumn is a sentinel value to denote that a system column is not
// part of the output.
const noOutputColumn = -1
// cFetcher handles fetching kvs and forming table rows for a single table,
// as configured by Init.
// Usage:
// var rf cFetcher
// err := rf.Init(..)
// // Handle err
// err := rf.StartScan(..)
// // Handle err
// for {
// res, err := rf.NextBatch()
// // Handle err
// if res.colBatch.Length() == 0 {
// // Done
// break
// }
// // Process res.colBatch
// }
// rf.Close(ctx)
type cFetcher struct {
cFetcherArgs
// table is the table that's configured for fetching.
table *cTableInfo
// maxKeysPerRow memoizes the maximum number of keys per row in the index
// we're fetching from. This is used to calculate the kvBatchFetcher's
// firstBatchLimit.
maxKeysPerRow int
// True if the index key must be decoded. This is only false if there are no
	// needed columns and tracing is not enabled.
mustDecodeIndexKey bool
// mvccDecodeStrategy controls whether or not MVCC timestamps should
// be decoded from KV's fetched. It is set if any of the requested tables
// are required to produce an MVCC timestamp system column.
mvccDecodeStrategy row.MVCCDecodingStrategy
// fetcher is the underlying fetcher that provides KVs.
fetcher *row.KVFetcher
// machine contains fields that get updated during the run of the fetcher.
machine struct {
// state is the queue of next states of the state machine. The 0th entry
// is the next state.
state [3]fetcherState
// rowIdx is always set to the ordinal of the row we're currently writing to
// within the current batch. It's incremented as soon as we detect that a row
// is finished.
rowIdx int
// nextKV is the kv to process next.
nextKV roachpb.KeyValue
// limitHint is a hint as to the number of rows that the caller expects
// to be returned from this fetch. It will be decremented whenever a
// batch is returned by the length of the batch so that it tracks the
// hint for the rows remaining to be returned. It might become negative
// indicating that the hint is no longer applicable.
limitHint int
// remainingValueColsByIdx is the set of value columns that are yet to be
// seen during the decoding of the current row.
remainingValueColsByIdx util.FastIntSet
// lastRowPrefix is the row prefix for the last row we saw a key for. New
// keys are compared against this prefix to determine whether they're part
// of a new row or not.
lastRowPrefix roachpb.Key
// prettyValueBuf is a temp buffer used to create strings for tracing.
prettyValueBuf *bytes.Buffer
// batch is the output batch the fetcher writes to.
batch coldata.Batch
// colvecs are the vectors of batch that have been converted to the well
// typed columns to avoid expensive type casts on each row.
colvecs coldata.TypedVecs
// timestampCol is the underlying ColVec for the timestamp output column,
// or nil if the timestamp column was not requested. It is pulled out from
// colvecs to avoid having to cast the vec to decimal on every write.
timestampCol []apd.Decimal
// tableoidCol is the same as timestampCol but for the tableoid system column.
tableoidCol coldata.DatumVec
}
// scratch is a scratch space used when decoding bytes-like and decimal
// keys.
scratch []byte
accountingHelper colmem.SetAccountingHelper
// kvFetcherMemAcc is a memory account that will be used by the underlying
// KV fetcher.
kvFetcherMemAcc *mon.BoundAccount
// maxCapacity if non-zero indicates the target capacity of the output
// batch. It is set when at the row finalization we realize that the output
// batch has exceeded the memory limit.
maxCapacity int
}
func (rf *cFetcher) resetBatch() {
var reallocated bool
var minDesiredCapacity int
if rf.maxCapacity > 0 {
// If we have already exceeded the memory limit for the output batch, we
// will only be using the same batch from now on.
minDesiredCapacity = rf.maxCapacity
} else if rf.machine.limitHint > 0 && (rf.estimatedRowCount == 0 || uint64(rf.machine.limitHint) < rf.estimatedRowCount) {
// If we have a limit hint, and either
// 1) we don't have an estimate, or
// 2) we have a soft limit,
// use the hint to size the batch. Note that if it exceeds
// coldata.BatchSize, ResetMaybeReallocate will chop it down.
minDesiredCapacity = rf.machine.limitHint
} else {
// Otherwise, use the estimate. Note that if the estimate is not
// present, it'll be 0 and ResetMaybeReallocate will allocate the
// initial batch of capacity 1 which is the desired behavior.
//
// We need to transform our rf.estimatedRowCount, which is a uint64,
// into an int. We have to be careful: if we just cast it directly, a
// giant estimate will wrap around and become negative.
if rf.estimatedRowCount > uint64(coldata.BatchSize()) {
minDesiredCapacity = coldata.BatchSize()
} else {
minDesiredCapacity = int(rf.estimatedRowCount)
}
}
rf.machine.batch, reallocated = rf.accountingHelper.ResetMaybeReallocate(
rf.table.typs, rf.machine.batch, minDesiredCapacity, rf.memoryLimit,
)
if reallocated {
rf.machine.colvecs.SetBatch(rf.machine.batch)
// Pull out any requested system column output vecs.
if rf.table.timestampOutputIdx != noOutputColumn {
rf.machine.timestampCol = rf.machine.colvecs.DecimalCols[rf.machine.colvecs.ColsMap[rf.table.timestampOutputIdx]]
}
if rf.table.oidOutputIdx != noOutputColumn {
rf.machine.tableoidCol = rf.machine.colvecs.DatumCols[rf.machine.colvecs.ColsMap[rf.table.oidOutputIdx]]
}
// Change the allocation size to be the same as the capacity of the
// batch we allocated above.
rf.table.da.AllocSize = rf.machine.batch.Capacity()
}
}
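// A tiny standalone sketch (hypothetical, for illustration only) of the
// clamping above: casting a huge uint64 estimate straight to int could wrap
// around to a negative value, so the estimate is compared against the batch
// size first.
func exampleClampEstimate(estimatedRowCount uint64, batchSize int) int {
	if estimatedRowCount > uint64(batchSize) {
		return batchSize
	}
	return int(estimatedRowCount)
}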
// Init sets up a Fetcher based on the table args. Only columns present in
// tableArgs.cols will be fetched.
func (rf *cFetcher) Init(
codec keys.SQLCodec,
allocator *colmem.Allocator,
kvFetcherMemAcc *mon.BoundAccount,
tableArgs *cFetcherTableArgs,
hasSystemColumns bool,
) error {
rf.kvFetcherMemAcc = kvFetcherMemAcc
table := newCTableInfo()
nCols := tableArgs.ColIdxMap.Len()
if cap(table.orderedColIdxMap.vals) < nCols {
table.orderedColIdxMap.vals = make(descpb.ColumnIDs, 0, nCols)
table.orderedColIdxMap.ords = make([]int, 0, nCols)
}
colDescriptors := tableArgs.cols
for i := range colDescriptors {
//gcassert:bce
id := colDescriptors[i].GetID()
table.orderedColIdxMap.vals = append(table.orderedColIdxMap.vals, id)
table.orderedColIdxMap.ords = append(table.orderedColIdxMap.ords, tableArgs.ColIdxMap.GetDefault(id))
}
sort.Sort(table.orderedColIdxMap)
*table = cTableInfo{
cFetcherTableArgs: tableArgs,
orderedColIdxMap: table.orderedColIdxMap,
indexColOrdinals: table.indexColOrdinals[:0],
extraValColOrdinals: table.extraValColOrdinals[:0],
keyValTypes: table.keyValTypes[:0],
extraTypes: table.extraTypes[:0],
extraValDirections: table.extraValDirections[:0],
timestampOutputIdx: noOutputColumn,
oidOutputIdx: noOutputColumn,
}
if nCols > 0 {
table.neededValueColsByIdx.AddRange(0 /* start */, nCols-1)
}
if hasSystemColumns {
// System columns, if present, are at the end of colDescriptors.
nonSystemColOffset := nCols - len(colinfo.AllSystemColumnDescs)
if nonSystemColOffset < 0 {
nonSystemColOffset = 0
}
for idx := nonSystemColOffset; idx < nCols; idx++ {
col := colDescriptors[idx].GetID()
// Set up extra metadata for system columns, if this is a system
// column.
//
// Currently the system columns are present in neededValueColsByIdx,
// but we don't want to include them in that set because the
// handling of system columns is separate from the standard value
// decoding process.
switch colinfo.GetSystemColumnKindFromColumnID(col) {
case descpb.SystemColumnKind_MVCCTIMESTAMP:
table.timestampOutputIdx = idx
rf.mvccDecodeStrategy = row.MVCCDecodingRequired
table.neededValueColsByIdx.Remove(idx)
case descpb.SystemColumnKind_TABLEOID:
table.oidOutputIdx = idx
table.neededValueColsByIdx.Remove(idx)
}
}
}
table.knownPrefixLength = len(rowenc.MakeIndexKeyPrefix(codec, table.desc, table.index.GetID()))
var indexColumnIDs []descpb.ColumnID
indexColumnIDs, table.indexColumnDirs = catalog.FullIndexColumnIDs(table.index)
compositeColumnIDs := util.MakeFastIntSet()
for i := 0; i < table.index.NumCompositeColumns(); i++ {
id := table.index.GetCompositeColumnID(i)
compositeColumnIDs.Add(int(id))
}
nIndexCols := len(indexColumnIDs)
if cap(table.indexColOrdinals) >= nIndexCols {
table.indexColOrdinals = table.indexColOrdinals[:nIndexCols]
} else {
table.indexColOrdinals = make([]int, nIndexCols)
}
indexColOrdinals := table.indexColOrdinals
_ = indexColOrdinals[len(indexColumnIDs)-1]
needToDecodeDecimalKey := false
for i, id := range indexColumnIDs {
colIdx, ok := tableArgs.ColIdxMap.Get(id)
if ok {
//gcassert:bce
indexColOrdinals[i] = colIdx
rf.mustDecodeIndexKey = true
needToDecodeDecimalKey = needToDecodeDecimalKey || tableArgs.typs[colIdx].Family() == types.DecimalFamily
// A composite column might also have a value encoding which must be
// decoded. Others can be removed from neededValueColsByIdx.
if compositeColumnIDs.Contains(int(id)) {
table.compositeIndexColOrdinals.Add(colIdx)
} else {
table.neededValueColsByIdx.Remove(colIdx)
}
} else {
//gcassert:bce
indexColOrdinals[i] = -1
}
}
if needToDecodeDecimalKey && cap(rf.scratch) < 64 {
// If we need to decode the decimal key encoding, it might use a scratch
// byte slice internally, so we'll allocate such a space to be reused
// for every decimal.
// TODO(yuzefovich): 64 was chosen arbitrarily, tune it.
rf.scratch = make([]byte, 64)
}
table.invertedColOrdinal = -1
if table.index.GetType() == descpb.IndexDescriptor_INVERTED {
id := table.index.InvertedColumnID()
colIdx, ok := tableArgs.ColIdxMap.Get(id)
if ok {
table.invertedColOrdinal = colIdx
// TODO(yuzefovich): for some reason the setup of ColBatchScan
// sometimes doesn't find the inverted column, so we have to be a
// bit tricky here and overwrite the type to what we need for the
// inverted column. Figure it out.
table.typs[colIdx] = types.Bytes
}
}
// Unique secondary indexes contain the extra column IDs as part of
// the value component. We process these separately, so we need to know
	// which extra columns are composite and which are not.
if table.isSecondaryIndex && table.index.IsUnique() {
for i := 0; i < table.index.NumKeySuffixColumns(); i++ {
id := table.index.GetKeySuffixColumnID(i)
colIdx, ok := tableArgs.ColIdxMap.Get(id)
if ok {
if compositeColumnIDs.Contains(int(id)) {
table.compositeIndexColOrdinals.Add(colIdx)
table.neededValueColsByIdx.Remove(colIdx)
}
}
}
}
// Prepare our index key vals slice.
table.keyValTypes = colinfo.GetColumnTypesFromColDescs(
colDescriptors, indexColumnIDs, table.keyValTypes,
)
if table.index.NumKeySuffixColumns() > 0 {
// Unique secondary indexes have a value that is the
// primary index key.
// Primary indexes only contain ascendingly-encoded
// values. If this ever changes, we'll probably have to
// figure out the directions here too.
table.extraTypes = colinfo.GetColumnTypesFromColDescs(
colDescriptors, table.index.IndexDesc().KeySuffixColumnIDs, table.extraTypes,
)
nExtraColumns := table.index.NumKeySuffixColumns()
if cap(table.extraValColOrdinals) >= nExtraColumns {
table.extraValColOrdinals = table.extraValColOrdinals[:nExtraColumns]
} else {
table.extraValColOrdinals = make([]int, nExtraColumns)
}
// Note that for extraValDirections we only need to make sure that the
// slice has the correct length set since the ASC direction is the zero
// value and we don't modify the elements of this slice.
if cap(table.extraValDirections) >= nExtraColumns {
table.extraValDirections = table.extraValDirections[:nExtraColumns]
} else {
table.extraValDirections = make([]descpb.IndexDescriptor_Direction, nExtraColumns)
}
extraValColOrdinals := table.extraValColOrdinals
_ = extraValColOrdinals[nExtraColumns-1]
for i := 0; i < nExtraColumns; i++ {
id := table.index.GetKeySuffixColumnID(i)
idx, ok := tableArgs.ColIdxMap.Get(id)
if ok {
//gcassert:bce
extraValColOrdinals[i] = idx
} else {
//gcassert:bce
extraValColOrdinals[i] = -1
}
}
}
// Keep track of the maximum keys per row to accommodate a
// limitHint when StartScan is invoked.
var err error
rf.maxKeysPerRow, err = table.desc.KeysPerRow(table.index.GetID())
if err != nil {
return err
}
_ = table.desc.ForeachFamily(func(family *descpb.ColumnFamilyDescriptor) error {
id := family.ID
if id > table.maxColumnFamilyID {
table.maxColumnFamilyID = id
}
return nil
})
rf.table = table
rf.accountingHelper.Init(allocator, rf.table.typs)
return nil
}
// StartScan initializes and starts the key-value scan. Can be used multiple
// times.
//
// The fetcher takes ownership of the spans slice - it can modify the slice and
// will perform the memory accounting accordingly. The caller can only reuse the
// spans slice after the fetcher has been closed (which happens when the fetcher
// emits the first zero batch), and if the caller does, it becomes responsible
// for the memory accounting.
func (rf *cFetcher) StartScan(
ctx context.Context,
txn *kv.Txn,
spans roachpb.Spans,
bsHeader *roachpb.BoundedStalenessHeader,
limitBatches bool,
batchBytesLimit rowinfra.BytesLimit,
limitHint rowinfra.RowLimit,
forceProductionKVBatchSize bool,
) error {
if len(spans) == 0 {
return errors.AssertionFailedf("no spans")
}
if !limitBatches && batchBytesLimit != rowinfra.NoBytesLimit {
return errors.AssertionFailedf("batchBytesLimit set without limitBatches")
}
// If we have a limit hint, we limit the first batch size. Subsequent
// batches get larger to avoid making things too slow (e.g. in case we have
// a very restrictive filter and actually have to retrieve a lot of rows).
firstBatchLimit := rowinfra.KeyLimit(limitHint)
if firstBatchLimit != 0 {
// The limitHint is a row limit, but each row could be made up of more
// than one key. We take the maximum possible keys per row out of all
// the table rows we could potentially scan over.
//
// Note that unlike for the row.Fetcher, we don't need an extra key to
// form the last row in the cFetcher because we are eagerly finalizing
// each row once we know that all KVs comprising that row have been
// fetched. Consider several cases:
// - the table has only one column family - then we can finalize each
// row right after the first KV is decoded;
// - the table has multiple column families:
// - KVs for all column families are present for all rows - then for
// each row, when its last KV is fetched, the row can be finalized
// (and firstBatchLimit asks exactly for the correct number of KVs);
// - KVs for some column families are omitted for some rows - then we
// will actually fetch more KVs than necessary, but we'll decode
// limitHint number of rows.
firstBatchLimit = rowinfra.KeyLimit(int(limitHint) * rf.maxKeysPerRow)
}
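	// For intuition, with hypothetical numbers: a limitHint of 10 rows on a
	// table whose rows span at most 3 keys (maxKeysPerRow == 3) yields a
	// firstBatchLimit of 30 keys. If some rows omit column families, those 30
	// keys may cover more than 10 rows' worth of data, but the decoder still
	// stops after limitHint rows.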
f, err := row.NewKVFetcher(
ctx,
txn,
spans,
bsHeader,
rf.reverse,
batchBytesLimit,
firstBatchLimit,
rf.lockStrength,
rf.lockWaitPolicy,
rf.lockTimeout,
rf.kvFetcherMemAcc,
forceProductionKVBatchSize,
)
if err != nil {
return err
}
rf.fetcher = f
rf.machine.lastRowPrefix = nil
rf.machine.limitHint = int(limitHint)
rf.machine.state[0] = stateResetBatch
rf.machine.state[1] = stateInitFetch
return nil
}
// fetcherState is the state enum for NextBatch.
type fetcherState int
//go:generate stringer -type=fetcherState
const (
stateInvalid fetcherState = iota
// stateInitFetch is the empty state of a fetcher: there is no current KV to
// look at, and there's no current row, either because the fetcher has just
// started, or because the last row was already finalized.
//
// 1. fetch next kv into nextKV buffer
// -> decodeFirstKVOfRow
stateInitFetch
// stateResetBatch resets the batch of a fetcher, removing nulls and the
// selection vector.
stateResetBatch
	// stateDecodeFirstKVOfRow is the state of looking at a key that is part of
	// a row that the fetcher hasn't processed before. rf.machine.nextKV must
	// be set.
// 1. skip common prefix
// 2. parse key (past common prefix) into row buffer, setting last row prefix buffer
// 3. parse value into row buffer.
// 4. 1-cf or secondary index?
// -> doneRow(initFetch)
// else:
// -> fetchNextKVWithUnfinishedRow
stateDecodeFirstKVOfRow
// stateFetchNextKVWithUnfinishedRow is the state of getting a new key for
// the current row. The machine will read a new key from the underlying
// fetcher, process it, and either add the results to the current row, or
// shift to a new row.
// 1. fetch next kv into nextKV buffer
// 2. skip common prefix
// 3. check equality to last row prefix buffer
// 4. no?
// -> finalizeRow(decodeFirstKVOfRow)
// 5. skip to end of last row prefix buffer
// 6. parse value into row buffer
// 7. -> fetchNextKVWithUnfinishedRow
stateFetchNextKVWithUnfinishedRow
// stateFinalizeRow is the state of finalizing a row. It assumes that no more
// keys for the current row are present.
// state[1] must be set, and stateFinalizeRow will transition to that state
// once it finishes finalizing the row.
// 1. fill missing nulls
// 2. bump rowIdx
// -> nextState and optionally return if row-by-row or batch full
stateFinalizeRow
// stateEmitLastBatch emits the current batch and then transitions to
// stateFinished.
stateEmitLastBatch
// stateFinished is the end state of the state machine - it causes NextBatch
// to return empty batches forever.
stateFinished
)
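// As a rough sketch of the transitions (assuming a table with two column
// families), a single row typically drives the machine through:
//
//   stateInitFetch -> stateDecodeFirstKVOfRow
//     -> stateFetchNextKVWithUnfinishedRow (once per additional KV)
//     -> stateFinalizeRow -> stateInitFetch (or stateEmitLastBatch)
//
// For a single-family table, stateFetchNextKVWithUnfinishedRow is skipped
// entirely because each KV is known to start a new row.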
// Turn this on to enable super verbose logging of the fetcher state machine.
const debugState = false
func (rf *cFetcher) setEstimatedRowCount(estimatedRowCount uint64) {
rf.estimatedRowCount = estimatedRowCount
}
// setNextKV sets the next KV to process to the input KV. needsCopy, if true,
// causes the input KV to be deep copied. needsCopy should be set to true if
// the input KV is pointing to the last KV of a batch, so that the batch can
// be garbage collected before fetching the next one.
//gcassert:inline
func (rf *cFetcher) setNextKV(kv roachpb.KeyValue, needsCopy bool) {
if !needsCopy {
rf.machine.nextKV = kv
return
}
// If we've made it to the very last key in the batch, copy out the key
// so that the GC can reclaim the large backing slice before we call
// NextKV() again.
kvCopy := roachpb.KeyValue{}
kvCopy.Key = make(roachpb.Key, len(kv.Key))
copy(kvCopy.Key, kv.Key)
kvCopy.Value.RawBytes = make([]byte, len(kv.Value.RawBytes))
copy(kvCopy.Value.RawBytes, kv.Value.RawBytes)
kvCopy.Value.Timestamp = kv.Value.Timestamp
rf.machine.nextKV = kvCopy
}
// NextBatch processes keys until we complete one batch of rows (subject to the
// limit hint and the memory limit while being max coldata.BatchSize() in
// length), which are returned in columnar format as a coldata.Batch. The batch
// contains one Vec per table column, regardless of the index used; columns that
// are not needed (as per neededCols) are filled with nulls. The Batch should
// not be modified and is only valid until the next call. When there are no more
// rows, the Batch.Length is 0.
func (rf *cFetcher) NextBatch(ctx context.Context) (coldata.Batch, error) {
for {
if debugState {
log.Infof(ctx, "State %s", rf.machine.state[0])
}
switch rf.machine.state[0] {
case stateInvalid:
return nil, errors.New("invalid fetcher state")
case stateInitFetch:
moreKVs, kv, finalReferenceToBatch, err := rf.fetcher.NextKV(ctx, rf.mvccDecodeStrategy)
if err != nil {
return nil, rf.convertFetchError(ctx, err)
}
if !moreKVs {
rf.machine.state[0] = stateEmitLastBatch
continue
}
// TODO(jordan): parse the logical longest common prefix of the span
// into a buffer. The logical longest common prefix is the longest
// common prefix that contains only full key components. For example,
// the keys /Table/53/1/foo/bar/10 and /Table/53/1/foo/bop/10 would
			// have an LLCP of /Table/53/1/foo, even though they also share
			// the 'b' prefix of the next key component, since that partial
			// prefix isn't a complete key component.
/*
if newSpan {
lcs := rf.fetcher.span.LongestCommonPrefix()
// parse lcs into stuff
key, matches, err := rowenc.DecodeIndexKeyWithoutTableIDIndexIDPrefix(
rf.table.desc, rf.table.info.index, rf.table.info.keyValTypes,
rf.table.keyVals, rf.table.info.indexColumnDirs, kv.Key[rf.table.info.knownPrefixLength:],
)
if err != nil {
// This is expected - the longest common prefix of the keyspan might
// end half way through a key. Suppress the error and set the actual
// LCS we'll use later to the decodable components of the key.
}
}
*/
rf.setNextKV(kv, finalReferenceToBatch)
rf.machine.state[0] = stateDecodeFirstKVOfRow
case stateResetBatch:
rf.resetBatch()
rf.shiftState()
case stateDecodeFirstKVOfRow:
// Reset MVCC metadata for the table, since this is the first KV of a row.
rf.table.rowLastModified = hlc.Timestamp{}
			// foundNull is set when decoding the index key of a new row
			// finds a NULL value in the key. This is used when decoding
			// unique secondary indexes in order to tell whether they have
			// extra columns appended to the key.
var foundNull bool
if rf.mustDecodeIndexKey {
if debugState {
log.Infof(ctx, "decoding first key %s", rf.machine.nextKV.Key)
}
var (
key []byte
err error
)
// For unique secondary indexes on tables with multiple column
// families, we must check all columns for NULL values in order
// to determine whether a KV belongs to the same row as the
// previous KV or a different row.
checkAllColsForNull := rf.table.isSecondaryIndex && rf.table.index.IsUnique() && rf.table.desc.NumFamilies() != 1
key, foundNull, rf.scratch, err = colencoding.DecodeKeyValsToCols(
&rf.table.da,
&rf.machine.colvecs,
rf.machine.rowIdx,
rf.table.indexColOrdinals,
checkAllColsForNull,
rf.table.keyValTypes,
rf.table.indexColumnDirs,
nil, /* unseen */
rf.machine.nextKV.Key[rf.table.knownPrefixLength:],
rf.table.invertedColOrdinal,
rf.scratch,
)
if err != nil {
return nil, err
}
prefix := rf.machine.nextKV.Key[:len(rf.machine.nextKV.Key)-len(key)]
rf.machine.lastRowPrefix = prefix
} else {
prefixLen, err := keys.GetRowPrefixLength(rf.machine.nextKV.Key)
if err != nil {
return nil, err
}
rf.machine.lastRowPrefix = rf.machine.nextKV.Key[:prefixLen]
}
// For unique secondary indexes on tables with multiple column
// families, the index-key does not distinguish one row from the
// next if both rows contain identical values along with a NULL.
// Consider the keys:
//
// /test/unique_idx/NULL/0
// /test/unique_idx/NULL/1
//
// The index-key extracted from the above keys is
// /test/unique_idx/NULL. The trailing /0 and /1 are the primary key
// used to unique-ify the keys when a NULL is present. When a null
// is present in the index key, we include the primary key columns
// in lastRowPrefix.
//
// Note that we do not need to do this for non-unique secondary
// indexes because the extra columns in the primary key will
// _always_ be there, so we can decode them when processing the
// index. The difference with unique secondary indexes is that the
// extra columns are not always there, and are used to unique-ify
// the index key, rather than provide the primary key column values.
//
// We also do not need to do this when a table has only one column
// family because it is guaranteed that there is only one KV per
// row. We entirely skip the check that determines if the row is
// unfinished.
if foundNull && rf.table.isSecondaryIndex && rf.table.index.IsUnique() && rf.table.desc.NumFamilies() != 1 {
// We get the remaining bytes after the computed prefix, and then
// slice off the extra encoded columns from those bytes. We calculate
// how many bytes were sliced away, and then extend lastRowPrefix
// by that amount.
prefixLen := len(rf.machine.lastRowPrefix)
remainingBytes := rf.machine.nextKV.Key[prefixLen:]
origRemainingBytesLen := len(remainingBytes)
for i := 0; i < rf.table.index.NumKeySuffixColumns(); i++ {
var err error
// Slice off an extra encoded column from remainingBytes.
remainingBytes, err = rowenc.SkipTableKey(remainingBytes)
if err != nil {
return nil, err
}
}
rf.machine.lastRowPrefix = rf.machine.nextKV.Key[:prefixLen+(origRemainingBytesLen-len(remainingBytes))]
}
familyID, err := rf.getCurrentColumnFamilyID()
if err != nil {
return nil, err
}
rf.machine.remainingValueColsByIdx.CopyFrom(rf.table.neededValueColsByIdx)
// Process the current KV's value component.
if err := rf.processValue(ctx, familyID); err != nil {
return nil, err
}
// Update the MVCC values for this row.
if rf.table.rowLastModified.Less(rf.machine.nextKV.Value.Timestamp) {
rf.table.rowLastModified = rf.machine.nextKV.Value.Timestamp
}
// If the table has only one column family, then the next KV will
// always belong to a different row than the current KV.
if rf.table.desc.NumFamilies() == 1 {
rf.machine.state[0] = stateFinalizeRow
rf.machine.state[1] = stateInitFetch
continue
}
// If the table has more than one column family, then the next KV
// may belong to the same row as the current KV.
rf.machine.state[0] = stateFetchNextKVWithUnfinishedRow
case stateFetchNextKVWithUnfinishedRow:
moreKVs, kv, finalReferenceToBatch, err := rf.fetcher.NextKV(ctx, rf.mvccDecodeStrategy)
if err != nil {
return nil, rf.convertFetchError(ctx, err)
}
if !moreKVs {
// No more data. Finalize the row and exit.
rf.machine.state[0] = stateFinalizeRow
rf.machine.state[1] = stateEmitLastBatch
continue
}
// TODO(jordan): if nextKV returns newSpan = true, set the new span
// prefix and indicate that it needs decoding.
rf.setNextKV(kv, finalReferenceToBatch)
if debugState {
log.Infof(ctx, "decoding next key %s", rf.machine.nextKV.Key)
}
			// TODO(yuzefovich): optimize this prefix check by skipping the
			// logical longest common span prefix.
if !bytes.HasPrefix(kv.Key[rf.table.knownPrefixLength:], rf.machine.lastRowPrefix[rf.table.knownPrefixLength:]) {
// The kv we just found is from a different row.
rf.machine.state[0] = stateFinalizeRow
rf.machine.state[1] = stateDecodeFirstKVOfRow
continue
}
familyID, err := rf.getCurrentColumnFamilyID()
if err != nil {
return nil, err
}
// Process the current KV's value component.
if err := rf.processValue(ctx, familyID); err != nil {
return nil, err
}
// Update the MVCC values for this row.
if rf.table.rowLastModified.Less(rf.machine.nextKV.Value.Timestamp) {
rf.table.rowLastModified = rf.machine.nextKV.Value.Timestamp
}
if familyID == rf.table.maxColumnFamilyID {
// We know the row can't have any more keys, so finalize the row.
rf.machine.state[0] = stateFinalizeRow
rf.machine.state[1] = stateInitFetch
} else {
// Continue with current state.
rf.machine.state[0] = stateFetchNextKVWithUnfinishedRow
}
case stateFinalizeRow:
// Populate the timestamp system column if needed. We have to do it
// on a per row basis since each row can be modified at a different
// time.
if rf.table.timestampOutputIdx != noOutputColumn {
rf.machine.timestampCol[rf.machine.rowIdx] = tree.TimestampToDecimal(rf.table.rowLastModified)
}
// We're finished with a row. Fill the row in with nulls if
// necessary, perform the memory accounting for the row, bump the
// row index, emit the batch if necessary, and move to the next
// state.
if err := rf.fillNulls(); err != nil {
return nil, err
}
// Note that we haven't set the tableoid value (if that system
// column is requested) yet, but it is ok for the purposes of the
// memory accounting - oids are fixed length values and, thus, have
// already been accounted for when the batch was allocated.
rf.accountingHelper.AccountForSet(rf.machine.rowIdx)
rf.machine.rowIdx++
rf.shiftState()
var emitBatch bool
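			// If the batch's accumulated memory footprint has just reached
			// the limit, remember the current row count and use it as the
			// effective batch capacity from here on.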
if rf.maxCapacity == 0 && rf.accountingHelper.Allocator.Used() >= rf.memoryLimit {
rf.maxCapacity = rf.machine.rowIdx
}
if rf.machine.rowIdx >= rf.machine.batch.Capacity() ||
(rf.maxCapacity > 0 && rf.machine.rowIdx >= rf.maxCapacity) ||
(rf.machine.limitHint > 0 && rf.machine.rowIdx >= rf.machine.limitHint) {
// We either
// 1. have no more room in our batch, so output it immediately
// or
// 2. we made it to our limit hint, so output our batch early
// to make sure that we don't bother filling in extra data
// if we don't need to.
emitBatch = true
// Update the limit hint to track the expected remaining rows to
// be fetched.
//
// Note that limitHint might become negative at which point we
// will start ignoring it.
rf.machine.limitHint -= rf.machine.rowIdx
}
if emitBatch {
rf.pushState(stateResetBatch)
rf.finalizeBatch()
return rf.machine.batch, nil
}
case stateEmitLastBatch:
rf.machine.state[0] = stateFinished
rf.finalizeBatch()
return rf.machine.batch, nil
case stateFinished:
// Close the fetcher eagerly so that its memory could be GCed.
rf.Close(ctx)
return coldata.ZeroBatch, nil
}
}
}
// shiftState shifts the state queue to the left, removing the first element and
// clearing the last element.
func (rf *cFetcher) shiftState() {
copy(rf.machine.state[:2], rf.machine.state[1:])
rf.machine.state[2] = stateInvalid
}
func (rf *cFetcher) pushState(state fetcherState) {
copy(rf.machine.state[1:], rf.machine.state[:2])
rf.machine.state[0] = state
}
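// To illustrate the two queue helpers with a hypothetical state: if state is
// [stateFinalizeRow, stateInitFetch, stateInvalid], shiftState yields
// [stateInitFetch, stateInvalid, stateInvalid], while pushState(stateResetBatch)
// on the original queue yields [stateResetBatch, stateFinalizeRow, stateInitFetch].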
// getDatumAt returns the converted datum object at the given (colIdx, rowIdx).
// This function is meant for tracing and should not be used in hot paths.
func (rf *cFetcher) getDatumAt(colIdx int, rowIdx int) tree.Datum {
res := []tree.Datum{nil}
colconv.ColVecToDatumAndDeselect(res, rf.machine.colvecs.Vecs[colIdx], 1 /* length */, []int{rowIdx}, &rf.table.da)
return res[0]
}
// processValue processes the state machine's current value component, setting
// columns in the rowIdx'th tuple in the current batch depending on what data
// is found in the current value component.
func (rf *cFetcher) processValue(ctx context.Context, familyID descpb.FamilyID) (err error) {
table := rf.table
var prettyKey, prettyValue string
if rf.traceKV {
defer func() {
if err == nil {
log.VEventf(ctx, 2, "fetched: %s -> %s", prettyKey, prettyValue)
}
}()
var buf strings.Builder
buf.WriteByte('/')
buf.WriteString(rf.table.desc.GetName())
buf.WriteByte('/')
buf.WriteString(rf.table.index.GetName())
// Note that because rf.traceKV is true, rf.table.indexColOrdinals will
// not include any -1, so idx values will all be valid.
for _, idx := range rf.table.indexColOrdinals {
buf.WriteByte('/')
buf.WriteString(rf.getDatumAt(idx, rf.machine.rowIdx).String())
}
prettyKey = buf.String()
}
if len(table.cols) == 0 {
		// We don't need to decode any values. Note that this branch can only
		// be executed if tracing is disabled (if it were enabled, we would
		// decode values from all columns).
return nil
}
val := rf.machine.nextKV.Value
if !table.isSecondaryIndex || table.index.GetEncodingType() == descpb.PrimaryIndexEncoding {
// If familyID is 0, kv.Value contains values for composite key columns.
// These columns already have a table.row value assigned above, but that value
// (obtained from the key encoding) might not be correct (e.g. for decimals,
// it might not contain the right number of trailing 0s; for collated
// strings, it is one of potentially many strings with the same collation
// key).
//
// In these cases, the correct value will be present in family 0 and the
// table.row value gets overwritten.
switch val.GetTag() {
case roachpb.ValueType_TUPLE:
// In this case, we don't need to decode the column family ID, because
// the ValueType_TUPLE encoding includes the column id with every encoded
// column value.
var tupleBytes []byte
tupleBytes, err = val.GetTuple()
if err != nil {
break
}
prettyKey, prettyValue, err = rf.processValueBytes(ctx, table, tupleBytes, prettyKey)
default:
var family *descpb.ColumnFamilyDescriptor
family, err = table.desc.FindFamilyByID(familyID)
if err != nil {
return scrub.WrapError(scrub.IndexKeyDecodingError, err)
}
prettyKey, prettyValue, err = rf.processValueSingle(ctx, table, family, prettyKey)
}
if err != nil {
return scrub.WrapError(scrub.IndexValueDecodingError, err)
}
} else {
tag := val.GetTag()
var valueBytes []byte
switch tag {
case roachpb.ValueType_BYTES:
// If we have the ValueType_BYTES on a secondary index, then we know we
// are looking at column family 0. Column family 0 stores the extra primary
// key columns if they are present, so we decode them here.
valueBytes, err = val.GetBytes()
if err != nil {
return scrub.WrapError(scrub.IndexValueDecodingError, err)
}
if table.isSecondaryIndex && table.index.IsUnique() {
// This is a unique secondary index; decode the extra
// column values from the value.
valueBytes, _, rf.scratch, err = colencoding.DecodeKeyValsToCols(
&table.da,
&rf.machine.colvecs,
rf.machine.rowIdx,
table.extraValColOrdinals,
false, /* checkAllColsForNull */
table.extraTypes,
table.extraValDirections,
&rf.machine.remainingValueColsByIdx,
valueBytes,
rf.table.invertedColOrdinal,
rf.scratch,
)
if err != nil {
return scrub.WrapError(scrub.SecondaryIndexKeyExtraValueDecodingError, err)
}
if rf.traceKV {
var buf strings.Builder
for _, idx := range table.extraValColOrdinals {
buf.WriteByte('/')
buf.WriteString(rf.getDatumAt(idx, rf.machine.rowIdx).String())
}
prettyValue = buf.String()
}
}
case roachpb.ValueType_TUPLE:
valueBytes, err = val.GetTuple()
if err != nil {
return scrub.WrapError(scrub.IndexValueDecodingError, err)
}
}
if len(valueBytes) > 0 {
prettyKey, prettyValue, err = rf.processValueBytes(
ctx, table, valueBytes, prettyKey,
)
if err != nil {
return scrub.WrapError(scrub.IndexValueDecodingError, err)
}
}
}
if rf.traceKV && prettyValue == "" {
prettyValue = tree.DNull.String()
}
return nil
}
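// To make the encodings handled above concrete (hypothetical row): on the
// primary index, a KV's value is either a ValueType_TUPLE of (column id,
// datum) pairs or a single marshaled value for the family's default column,
// while on a unique secondary index, the family-0 value is ValueType_BYTES
// holding the key-encoded primary key columns that unique-ify the row.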
// processValueSingle processes the given value (of column
// family.DefaultColumnID), setting the corresponding column of the current
// batch row accordingly. The key is only used for logging.
func (rf *cFetcher) processValueSingle(
ctx context.Context,
table *cTableInfo,
family *descpb.ColumnFamilyDescriptor,
prettyKeyPrefix string,
) (prettyKey string, prettyValue string, err error) {
prettyKey = prettyKeyPrefix
// If this is the row sentinel (in the legacy pre-family format),
// a value is not expected, so we're done.
if family.ID == 0 {
return "", "", nil
}
colID := family.DefaultColumnID
if colID == 0 {
return "", "", errors.Errorf("single entry value with no default column id")
}
if idx, ok := table.ColIdxMap.Get(colID); ok {
if rf.traceKV {
prettyKey = fmt.Sprintf("%s/%s", prettyKey, table.desc.DeletableColumns()[idx].GetName())
}
val := rf.machine.nextKV.Value
if len(val.RawBytes) == 0 {
return prettyKey, "", nil
}
typ := rf.table.typs[idx]
err := colencoding.UnmarshalColumnValueToCol(
&table.da, &rf.machine.colvecs, idx, rf.machine.rowIdx, typ, val,
)
if err != nil {
return "", "", err
}
rf.machine.remainingValueColsByIdx.Remove(idx)
if rf.traceKV {
prettyValue = rf.getDatumAt(idx, rf.machine.rowIdx).String()
}
if row.DebugRowFetch {
log.Infof(ctx, "Scan %s -> %v", rf.machine.nextKV.Key, "?")
}
return prettyKey, prettyValue, nil
}
// No need to unmarshal the column value. Either the column was part of
// the index key or it isn't needed.
if row.DebugRowFetch {
log.Infof(ctx, "Scan %s -> [%d] (skipped)", rf.machine.nextKV.Key, colID)
}
return "", "", nil
}
func (rf *cFetcher) processValueBytes(
ctx context.Context, table *cTableInfo, valueBytes []byte, prettyKeyPrefix string,
) (prettyKey string, prettyValue string, err error) {
prettyKey = prettyKeyPrefix
if rf.traceKV {
if rf.machine.prettyValueBuf == nil {
rf.machine.prettyValueBuf = &bytes.Buffer{}
}
rf.machine.prettyValueBuf.Reset()
}
	// Composite columns that are key-encoded in the value (like the primary
	// key columns in a unique secondary index) were removed from the set of
	// remaining value columns, so we add them back here in case they also
	// have fully value-encoded composite values.
rf.table.compositeIndexColOrdinals.ForEach(func(i int) {
rf.machine.remainingValueColsByIdx.Add(i)
})
var (
colIDDiff uint32
lastColID descpb.ColumnID
dataOffset int
typ encoding.Type
lastColIDIndex int
)
// Continue reading data until there's none left or we've finished
// populating the data for all of the requested columns.
for len(valueBytes) > 0 && rf.machine.remainingValueColsByIdx.Len() > 0 {
_, dataOffset, colIDDiff, typ, err = encoding.DecodeValueTag(valueBytes)
if err != nil {
return "", "", err
}
colID := lastColID + descpb.ColumnID(colIDDiff)
lastColID = colID
vecIdx := -1
// Find the ordinal into table.cols for the column ID we just decoded,
// by advancing through the sorted list of needed value columns until
// there's a match, or we passed the column ID we're looking for.
for ; lastColIDIndex < len(table.orderedColIdxMap.vals); lastColIDIndex++ {
nextID := table.orderedColIdxMap.vals[lastColIDIndex]
if nextID == colID {
vecIdx = table.orderedColIdxMap.ords[lastColIDIndex]
// Since the next value part (if it exists) will belong to the
// column after the current one, we can advance the index.
lastColIDIndex++
break
} else if nextID > colID {
break
}
}
if vecIdx == -1 {
// This column wasn't requested, so read its length and skip it.
len, err := encoding.PeekValueLengthWithOffsetsAndType(valueBytes, dataOffset, typ)
if err != nil {
return "", "", err
}
valueBytes = valueBytes[len:]
if row.DebugRowFetch {
log.Infof(ctx, "Scan %s -> [%d] (skipped)", rf.machine.nextKV.Key, colID)
}
continue
}
if rf.traceKV {
prettyKey = fmt.Sprintf("%s/%s", prettyKey, table.desc.DeletableColumns()[vecIdx].GetName())
}
valueBytes, err = colencoding.DecodeTableValueToCol(
&table.da, &rf.machine.colvecs, vecIdx, rf.machine.rowIdx, typ,
dataOffset, rf.table.typs[vecIdx], valueBytes,
)
if err != nil {
return "", "", err
}
rf.machine.remainingValueColsByIdx.Remove(vecIdx)
if rf.traceKV {
dVal := rf.getDatumAt(vecIdx, rf.machine.rowIdx)
if _, err := fmt.Fprintf(rf.machine.prettyValueBuf, "/%v", dVal.String()); err != nil {
return "", "", err
}
}
}
if rf.traceKV {
prettyValue = rf.machine.prettyValueBuf.String()
}
return prettyKey, prettyValue, nil
}
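// As a worked example of the column-ID-diff walk above (hypothetical IDs): if
// a row's value encodes columns 2, 5, and 9, the tuple stores the diffs 2, 3,
// and 4; starting from lastColID == 0, decoding accumulates 0+2 = 2, 2+3 = 5,
// and 5+4 = 9. Because column IDs increase within a row, lastColIDIndex only
// ever advances through orderedColIdxMap, keeping each lookup amortized O(1).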
func (rf *cFetcher) fillNulls() error {
table := rf.table
if rf.machine.remainingValueColsByIdx.Empty() {
return nil
}
for i, ok := rf.machine.remainingValueColsByIdx.Next(0); ok; i, ok = rf.machine.remainingValueColsByIdx.Next(i + 1) {
// Composite index columns may have a key but no value. Ignore them so we
// don't incorrectly mark them as null.
if table.compositeIndexColOrdinals.Contains(i) {
continue
}
if !table.cols[i].IsNullable() {
var indexColValues []string
for _, idx := range table.indexColOrdinals {
if idx != -1 {
indexColValues = append(indexColValues, rf.getDatumAt(idx, rf.machine.rowIdx).String())
} else {
indexColValues = append(indexColValues, "?")
}
}
return scrub.WrapError(scrub.UnexpectedNullValueError, errors.Errorf(
"non-nullable column \"%s:%s\" with no value! Index scanned was %q with the index key columns (%s) and the values (%s)",
table.desc.GetName(), table.cols[i].GetName(), table.index.GetName(),
strings.Join(table.index.IndexDesc().KeyColumnNames, ","), strings.Join(indexColValues, ",")))
}
rf.machine.colvecs.Nulls[i].SetNull(rf.machine.rowIdx)
}
return nil
}
func (rf *cFetcher) finalizeBatch() {
// Populate the tableoid system column for the whole batch if necessary.
if rf.table.oidOutputIdx != noOutputColumn {
id := rf.table.desc.GetID()
for i := 0; i < rf.machine.rowIdx; i++ {
// Note that we don't need to update the memory accounting because
// oids are fixed length values and have already been accounted for
// when finalizing each row.
rf.machine.tableoidCol.Set(i, rf.table.da.NewDOid(tree.MakeDOid(tree.DInt(id))))
}
}
rf.machine.batch.SetLength(rf.machine.rowIdx)
rf.machine.rowIdx = 0
}
// getCurrentColumnFamilyID returns the column family id of the key in
// rf.machine.nextKV.Key.
func (rf *cFetcher) getCurrentColumnFamilyID() (descpb.FamilyID, error) {
	// If the maximum family ID is 0, the table has a single column family
	// with ID 0, so the key must belong to that family.
if rf.table.maxColumnFamilyID == 0 {
return 0, nil
}
// The column family is encoded in the final bytes of the key. The last
// byte of the key is the length of the column family id encoding
// itself. See encoding.md for more details, and see MakeFamilyKey for
// the routine that performs this encoding.
var id uint64
_, id, err := encoding.DecodeUvarintAscending(rf.machine.nextKV.Key[len(rf.machine.lastRowPrefix):])
if err != nil {
return 0, scrub.WrapError(scrub.IndexKeyDecodingError, err)
}
return descpb.FamilyID(id), nil
}
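// For intuition on the key layout above (hypothetical key): a row with
// primary key /Table/53/1/42 and column family 3 roughly pretty-prints as
// /Table/53/1/42/3/1, where the trailing /1 is the length byte for the
// encoded family ID. Slicing off lastRowPrefix leaves the family suffix, and
// the uvarint decode recovers family ID 3.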
// convertFetchError converts an error generated during a key-value fetch to a
// storage error that will propagate through the exec subsystem unchanged. The
// error may also undergo a mapping to make it more user friendly for SQL
// consumers.
func (rf *cFetcher) convertFetchError(ctx context.Context, err error) error {
err = row.ConvertFetchError(ctx, rf, err)
err = colexecerror.NewStorageError(err)
return err
}
// KeyToDesc implements the KeyToDescTranslator interface. The implementation is
// used by convertFetchError.
func (rf *cFetcher) KeyToDesc(key roachpb.Key) (catalog.TableDescriptor, bool) {
if len(key) < rf.table.knownPrefixLength {
return nil, false
}
nIndexCols := rf.table.index.NumKeyColumns() + rf.table.index.NumKeySuffixColumns()
tableKeyVals := make([]rowenc.EncDatum, nIndexCols)
_, _, err := rowenc.DecodeKeyVals(
rf.table.keyValTypes,
tableKeyVals,
rf.table.indexColumnDirs,
key[rf.table.knownPrefixLength:],
)
if err != nil {
return nil, false
}
return rf.table.desc, true
}
var cFetcherPool = sync.Pool{
New: func() interface{} {
return &cFetcher{}
},
}
func (rf *cFetcher) Release() {
rf.accountingHelper.Release()
if rf.table != nil {
rf.table.Release()
}
colvecs := rf.machine.colvecs
colvecs.Reset()
*rf = cFetcher{
scratch: rf.scratch[:0],
}
rf.machine.colvecs = colvecs
cFetcherPool.Put(rf)
}
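// A typical (hypothetical) lifecycle for a pooled fetcher looks like:
//
//   rf := cFetcherPool.Get().(*cFetcher)
//   // ... initialize and use rf ...
//   rf.Release() // resets rf and returns it to the pool
//
// Retaining scratch and colvecs across Release lets the next user of the
// pooled object reuse those allocations.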
func (rf *cFetcher) Close(ctx context.Context) {
if rf != nil && rf.fetcher != nil {
rf.fetcher.Close(ctx)
rf.fetcher = nil
}
}
| pkg/sql/colfetcher/cfetcher.go | 1 | https://github.com/cockroachdb/cockroach/commit/a519265872c58fcfc2cc815c9dc58660534f9cd1 | [
0.9985877275466919,
0.062399424612522125,
0.00016192470502573997,
0.0003835405223071575,
0.22512079775333405
] |
{
"id": 1,
"code_window": [
"\t\t\tcontinue\n",
"\t\t}\n",
"\n",
"\t\tif rf.traceKV {\n",
"\t\t\tprettyKey = fmt.Sprintf(\"%s/%s\", prettyKey, table.desc.DeletableColumns()[vecIdx].GetName())\n",
"\t\t}\n",
"\n",
"\t\tvalueBytes, err = colencoding.DecodeTableValueToCol(\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tprettyKey = fmt.Sprintf(\"%s/%s\", prettyKey, table.cols[vecIdx].GetName())\n"
],
"file_path": "pkg/sql/colfetcher/cfetcher.go",
"type": "replace",
"edit_start_line_idx": 1097
} | # LogicTest: 5node
# First, we set up two data tables:
# - NumToSquare maps integers from 1 to 100 to their squares
# - NumToStr maps integers from 1 to 100*100 to strings; this table is
# split and distributed to all nodes.
statement ok
CREATE TABLE NumToSquare (x INT PRIMARY KEY, xsquared INT)
statement ok
INSERT INTO NumToSquare SELECT i, i*i FROM generate_series(1, 100) AS g(i)
statement ok
CREATE TABLE NumToStr (y INT PRIMARY KEY, str STRING)
# Split into five parts.
statement ok
ALTER TABLE NumToStr SPLIT AT SELECT (i * 100 * 100 / 5)::int FROM generate_series(1, 4) AS g(i)
# Relocate the five parts to the five nodes.
statement ok
ALTER TABLE NumToStr EXPERIMENTAL_RELOCATE
SELECT ARRAY[i+1], (i * 100 * 100 / 5)::int FROM generate_series(0, 4) AS g(i)
# Verify data placement.
query TTTI colnames
SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE NumToSquare]
----
start_key end_key replicas lease_holder
NULL NULL {1} 1
query TTTI colnames,rowsort
SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE NumToStr]
----
start_key end_key replicas lease_holder
NULL /2000 {1} 1
/2000 /4000 {2} 2
/4000 /6000 {3} 3
/6000 /8000 {4} 4
/8000 NULL {5} 5
#
# -- Basic tests --
#
# Query with a restricted span.
query T
EXPLAIN (DISTSQL) SELECT 5, 2+y, * FROM NumToStr WHERE y <= 10 ORDER BY str
----
distribution: local
vectorized: true
·
• render
│
└── • sort
│ order: +str
│
└── • scan
missing stats
table: numtostr@numtostr_pkey
spans: [ - /10]
·
Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyUkFFL60AQhd_vrxjm6d7bkWYjBVkQojZioLY1CahoHmIylEC6G3c3YCn579IEsRUq-njO7DnnY7doX2uUGD4sZxfRHP5OoyRN7mb_IAln4VUKEwIfRrAh-A_X8eIWVLt22joD9zdhHMIGnlvPOy3OQXiwiKdhDJePYJ1BQqVLnudrtiifUGBG2BhdsLXa7Kxt_yAq31B6hJVqWrezM8JCG0a5RVe5mlFimr_UHHNeshl7SFiyy6u6r_3ACRpTrXOzQcKkyZWVcDIWArOOULfus9q6fMUoRUc_n0-0cWzG4nA58EdIGLMq2UiYSCmjeXpGEIiRvycIAv8ohv8bjJhto5XlA4xjzV6XEXK54uGnrW5NwUuji35mkIs-1xslWzdcxSAiNZx2gPth8W3Y_xLOuj_vAQAA__8hFMCN
# Query which requires a full table scan.
query T
EXPLAIN (DISTSQL) SELECT 5, 2 + y, * FROM NumToStr WHERE y % 1000 = 0 ORDER BY str
----
distribution: full
vectorized: true
·
• render
│
└── • sort
│ order: +str
│
└── • filter
│ filter: (y % 1000) = 0
│
└── • scan
missing stats
table: numtostr@numtostr_pkey
spans: FULL SCAN
·
Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJy0lWGL2jAYx9_vU4QHBne7SJu09TQwcNv1mOD0VoVtDF90NhyCNl0SYSJ-99FWvVM0yXS-s63_5_nlxx-yAvV7Bgzi70-9D90-unnoDkfDr71bNIx78acRijCi6A4tMXqHHpPBF5Qv5looLdG3z3ESo5sleouI7_u36D3y0SB5iBP08QdSWgKGXGS8n865AvYTCGCggCEADCFgiGCMoZBiwpUSsvzLqgp0sz_AfAzTvFjo8vUYw0RIDmwFeqpnHBiM0l8znvA049LzAUPGdTqdVWu2gJ1CTuepXAKGYZHmiqGGR33fh_Eag1jol-FKp88cGFljd4DH6UxzyaUX7W-v3zN00yEbMYyxbn_Uqvxsfp9EoP-CMBRSc-mRg_N36N3J-cHJ-S9jhcy45NmxoUcg-qIhCo8cWEh4npUWos2BMeqQO_rqAaMOPUkZ7lES9yYQ1yZURWh4oXsdLBS7OjSvVwcLwrYO5Nw6UHfR1Fl0WIluuou2UOxE319PtAVhK5qeKzpwFx04i25Wolvuoi0UO9Gt64m2IGxFB-eKDt1Fh86iS8UNR8cWgJ3j9vUcWxC2jsP_cYkcmZ9wVYhc8b3xpyb75Q3Ds2de30hKLOSEP0kxqdbUj4MqV73IuNL1V1I_dPP6Uwn4OkyMYWoOU2M42AuTw3Bgxm6aV4fGdGQOR8awZXPzkkPfG8Mt8-aWMdw2h9uXYBNLx2wlM7eMWGpGLuoZsRQttCw3N41YqkbMXTtkH6_f_A0AAP__W-z62Q==
# Query with a restricted span + filter.
query T
EXPLAIN (DISTSQL) SELECT str FROM NumToStr WHERE y < 10 AND str LIKE '%e%' ORDER BY y
----
distribution: local
vectorized: true
·
• filter
│ filter: str LIKE '%e%'
│
└── • scan
missing stats
table: numtostr@numtostr_pkey
spans: [ - /9]
·
Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyUUE9L-0AQvf8-xTBQ2sL-aFJve2q1qQZjUpOAiuawJkMJpNm4uwFLyXeXTRBboaLH9968P8wB9VuFHL3HTbD0Q5is_CRN7oMpJF7gXaWgjYJ1HN1B3e6MtOjhxos9mOzhpXWcixxcZwrLcAUTKwb-rQfjEY3GU4jilRfD5RPskWEtCwrFjjTyZ3QxY9gomZPWUlnq0B_4xTtyh2FZN62xdMYwl4qQH9CUpiLkmIrXimISBamZgwwLMqKs-tjPiYtGlTuhbG3SiFpz-D9zHcw6hrI1X9HaiC0hdzv2-_p1WRlSpGbuaffAc1jMj37AOU_S2A-vkWHUGqueXTH_y4qYdCNrTScbziU7XcaQii0Nj9ayVTltlMz7mgFGva8nCtJmUN0B-PUg2YHHZvdH8_ybOev-fQQAAP__0Z3FQw==
# Query which requires a full table scan.
query T
EXPLAIN (DISTSQL) SELECT str FROM NumToStr WHERE y % 1000 = 0 AND str LIKE '%i%' ORDER BY y
----
distribution: full
vectorized: true
·
• filter
│ filter: ((y % 1000) = 0) AND (str LIKE '%i%')
│
└── • scan
missing stats
table: numtostr@numtostr_pkey
spans: FULL SCAN
·
Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJzMlGGL2jAYx9_vU4QH5CqLmNTqeYGBt9nbyly9VWEbwxedfTgK2nRJhB3idx9tN--Usw34wntnEv_9__p7Sragf69AgP_9fnIbhMQZB7P57OukTWb-xP8wJ9oochdNv5BsszayWH375Ec-cZxH0iKcMdYm7whrk9twTJzifBJ89slVK21dtck0GvsRef-DPAKFTCYYxmvUIH4CBwouUOgBBQ8o9GFBIVdyiVpLVfxlWwaC5A8IRiHN8o0pthcUllIhiC2Y1KwQBMzjXyuMME5QdRlQSNDE6aqs-U89ylW6jlWBMcvjTAvS6bqMMVjsKMiNeXq4NvEDguA7ag9wl64MKlTd_mF7tS-I44z4P1tCiCCcD0tp-9-lu5H7TJ0QYjaPgvBj-yShe5LwCUyqBBUmh1Qj_hYWuxdeI5QdmXf5kcLpxggyck9y9A44uP2ouO2oykl1up79vBoo9vMaXGxerr0n19qTV3oa2HtqoNh7ur6Yp569p561p0HpaWjvqYFi72l4MU-evSfP2lNhqGOpqAFgr-jmVVyRLxBGqHOZaTzgO_VkVtyfmDxgdd9quVFLvFdyWdZUy2mZKzcS1KY65dUiyKqjAvB5mNeG3YMwPw679c0N1b3atFcf9s7h7teGB_XNg3Oar2vDw_rm4TnNN_WzYg2fSf1Hdty92L35GwAA__-sYBlG
#
# -- Join tests --
#
query T
EXPLAIN (DISTSQL) SELECT x, str FROM NumToSquare JOIN NumToStr ON y = xsquared
----
distribution: full
vectorized: true
·
• hash join
│ equality: (xsquared) = (y)
│ right cols are key
│
├── • scan
│ missing stats
│ table: numtosquare@numtosquare_pkey
│ spans: FULL SCAN
│
└── • scan
missing stats
table: numtostr@numtostr_pkey
spans: FULL SCAN
·
Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJy8ld-K2kAUh-_7FMO52oVZzCRR18BCSmupi9WtelEouciaqQY0k52ZgCK-e0my4KrpTNJRL_Pnm3PO94M5OxBvK_Cg_-tl-HkwQndfB9PZ9OfwHk37w_6XGdpgJCRH3ybjHyjJ1pKJtyzkFD2PB6P3F5Kj8Qht0RPalB8jwJCwiI7CNRXg_QYCGGzA4AAGFzC0IcCQcjanQjCe_7IrgEG0Ac_CECdpJvPXAYY54xS8HchYrih4MAtfV3RCw4jylgUYIirDeFWU-dCfn_J4HfItYJimYSI89ADBHgPL5PvRhxNft2gZiuXxWb4NwT7AIGS4oOCRPf6_FklVi5JX9NeyLctq2CQ5atL-Z5OHc7KE8YiWGR1OCnJS90vFpN9DsXxmcUJ5q3Pc2or-kXe-ff_E48VS3vnkHjCMM-khn2DfPZnzMINjMENFgyP2wNIWIafTVtZ2j2qT-iHbdUMuMn5ouZdNmtw26e41krbr23Zq23YL253L2rZva_vxGrad-rbd2rY7he3Hy9p2bmu7dw3bbn3b7dq2c89Nd5pKtHtb0cS69r6oqD-hImWJoLW2gZVPQKMFLY0IlvE5feFsXpQpH8cFV9zAERWy_GqXD4Ok_JQ3WB9um8BdE7hnAhOipkkDY3YzuG0Cd03gngl8YuyMtk9p6yPtqHU7Spgc-7ZOadckLDWsCUsNa8JSw5qw1LAurLZJWB0T3WpYo1sNa3SrYY1uNazT3TXR_WiiWw1rdKthjW41rNGthnW6eya6SZNleX6HNtmWTWnd5d9kXzaldc7J2fZQSg_2n_4GAAD__8eN5D4=
query T
EXPLAIN (VERBOSE) SELECT x, str FROM NumToSquare JOIN NumToStr ON x = y WHERE x % 2 = 0
----
distribution: full
vectorized: true
·
• project
│ columns: (x, str)
│ estimated row count: 333 (missing stats)
│
└── • merge join (inner)
│ columns: (x, y, str)
│ estimated row count: 333 (missing stats)
│ equality: (x) = (y)
│ left cols are key
│ right cols are key
│ merge ordering: +"(x=y)"
│
├── • filter
│ │ columns: (x)
│ │ ordering: +x
│ │ estimated row count: 333 (missing stats)
│ │ filter: (x % 2) = 0
│ │
│ └── • scan
│ columns: (x)
│ ordering: +x
│ estimated row count: 1,000 (missing stats)
│ table: numtosquare@numtosquare_pkey
│ spans: FULL SCAN
│
└── • filter
│ columns: (y, str)
│ ordering: +y
│ estimated row count: 333 (missing stats)
│ filter: (y % 2) = 0
│
└── • scan
columns: (y, str)
ordering: +y
estimated row count: 1,000 (missing stats)
table: numtostr@numtostr_pkey
spans: FULL SCAN
query T
EXPLAIN (DISTSQL) SELECT x, str FROM NumToSquare JOIN NumToStr ON x = y WHERE x % 2 = 0
----
distribution: full
vectorized: true
·
• merge join
│ equality: (x) = (y)
│ left cols are key
│ right cols are key
│
├── • filter
│ │ filter: (x % 2) = 0
│ │
│ └── • scan
│ missing stats
│ table: numtosquare@numtosquare_pkey
│ spans: FULL SCAN
│
└── • filter
│ filter: (y % 2) = 0
│
└── • scan
missing stats
table: numtostr@numtostr_pkey
spans: FULL SCAN
·
Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJzEluFr2kAYxr_vrzheGFh6Yi6JmgYKjs0yS6udChsMP6TmZgM2sXcXsJT-7yNJ12jUu8sO67fkkufe557f62tegD8twYf-r7ubL4MhanwbTKaTHzdnaNK_6X-dojVGXDB0NR7dojh9FAl_SgNG0fVoMHxbEAyNhmiNLtEz-vm9P-6jxhp9RvYZukQWYIiTkA6DR8rB_w0EMNiAwQEMLmBowwzDiiVzynnCsldecsEgXINvYYjiVSqy5RmGecIo-C8gIrGk4MM0uF_SMQ1CylpZoZCKIFrmZTas9lYsegzYM2CYrIKY-6gJGEap8FGPwOwVQ5KKsggXwYKCT16xvpGraCkoo6xFtl0U6z5q9EgWiO_7g-HUy3N5u67UL7e8f0YPAX_Y3i_3W3q063jcDMveF5Zge5Jq2ZZlHQzJ-a-Quh8aknvQY7lPGicspIyGWzvNMuW_V_a9AD1yXtSqnvWWsgW9TqI464lK2kv6RzR65PzskkWLh-KybEjccw7G3TY4yh6Tw6SZrFqkWz303tqdrdpEv9cc3V7LW63ZcvUbTuHiveG8EzYcOUHDOcdpOFsfuqsN3c2hd_ShK1y8Q784IXT7BNDd40B39KG3taF3cuiePnSFi_L_1zohdecE1NvHoe7qU-9oU894NzWBKwyUwD_2i8s9aPKjgHeO_zGxx8KY8lUSc6r1qWBlh6Dhgha58CRlc3rHknleprgd5bp8IaRcFE9JcTOIi0eZwU0xqYrJptjdEpN6Ys9ETIiR2sg46crVtjRwRx64UyNwu57YMxFXAq-rNjJeCXxH7UoDb8tpteU_D0-OqyNVd-XirglruVjBWi5WsVaojYyrWHsmrC_krC3FLNyZpHV4KdQKYAq1iphKbuZdxYzszNM60Ih8nhLFQCVGE1WhVlEzm6kquZl3JTWjsUoUc7WjoLYzWGtRk6tV1ORqJTWF3My7ktrOeJVSm71--hsAAP__o1BXlA==
#
# -- Aggregation tests --
#
# Sum the numbers in the NumToStr table.
query T
EXPLAIN (DISTSQL) SELECT sum(y) FROM NumToStr
----
distribution: full
vectorized: true
·
• group (scalar)
│
└── • scan
missing stats
table: numtostr@numtostr_pkey
spans: FULL SCAN
·
Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyslF1r2zAUhu_3K8y5SkAhlu24qa9StgwCadPFGQyGL7ToYAKJ5UkyrIT892GZUTu0shrv0h-vnveRxDmD-n2EBJY_ntcPqydv9GWV7tJv67GXLtfLzztPVafRy9j7ut08ekV10kJpCQQKwfGJnVBB8hMoEAiAQAgEIiAwg4xAKcUelRKy_uVsAiv-BxKfwKEoK12_zgjshURIzqAP-oiQwI79OuIWGUc59YEAR80OR4P5h1-U8nBi8gUIpCUrVOJNpoHv139vKp14CwrZhYCo9CtGaZYjJPRC3Ks85LnEnGkhp7Nuk_T742hBx-9igncxr6tXhZAcJfLO0tnFXoT6H2sSdppQ972nrntvtn4yjW45gJ4-Le94yAEE7tqBs3ZktONbtHv6tLTvhmiH7tqhs3ZstOe3aPf0aWnPh2hH7tqRs3YtPPmwcU-VlvH9_xowb2C2qEpRKLwaNG-v7NcDCHmOzbRSopJ7fJZibzDN48bkzAuOSjdfafOwKppPdcF2mFrDQSdMr8OBndyDDq3pyB6OhvSeWcOxnRwPId9Zw3M7eT6EfG8_K7_nmtgv2TU7u3z6GwAA___56Ne0
# Count the rows in the NumToStr table.
query T
EXPLAIN (DISTSQL) SELECT count(*) FROM NumToStr
----
distribution: full
vectorized: true
·
• group (scalar)
│
└── • scan
missing stats
table: numtostr@numtostr_pkey
spans: FULL SCAN
·
Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyslF2Lm0AUhu_7K-RcJWVC_Iqb9WqXbQpCVrfRpYUii9WDBJIZOzNCS8h_LyplTUjGIe6lH6_P-3iGcwDxewc-rH68rB-D0Jh8CeIk_raeGvFqvXpKjJzVVE4-T42vm-jZoPVeMiE5EKCswDDbowD_J1hAwAYCDhBwgcACUgIVZzkKwXjzyqENBMUf8E0CW1rVsrmdEsgZR_APILdyh-BDkv3a4QazAvncBAIFymy7azH_8Q8V3-4z_hcIxFVGhW_M5rZpNm9HtfSNkFGE9EiA1fIdJGRWIvjWkeiXeSxLjmUmGZ8vTrs8Ra9h8raJvseT6VWWfZX1jqgp4wVyLE6-nx7VbayzXxO_Pr8FYTJ5sK63cU7aWPpjsHTH0E5hNndvm8VAo569N3oWtr69rW3vtvbebfYDjXr2d6PtHX17R9vea-2Xt9kPNOrZL0fbu_r2rrZ94z27QXygTE_8_kMX0AXWBkXFqMCzRXT5y2azoLAosdtmgtU8xxfO8hbTXUZtrr1RoJDdU6u7CGj3qCnYD1vKsH0Sts7Dtpo8gHaUaVcddsf0XijDnprsjSHfKcNLNXk5hnyvnpU5cEzUh-ycnR4__QsAAP__YDroHw==
# Count how many numbers contain the digit 5.
query T
EXPLAIN (DISTSQL) SELECT count(*) FROM NumToStr WHERE str LIKE '%five%'
----
distribution: full
vectorized: true
·
• group (scalar)
│
└── • filter
│ filter: str LIKE '%five%'
│
└── • scan
missing stats
table: numtostr@numtostr_pkey
spans: FULL SCAN
·
Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJy0lVFv2jwUhu-_XxEdqSr9ZAROAgVfUXXpFo2GLknVSROqMnKGIkHMHGfahPjvUxKVAip2SsYdcfL6PPh5Ja8h-7kABs7Xh_GN6xmtD24QBl_GV0bgjJ3b0JjxPJWt_6-MO39yb6T5UvJMCuPpk-M7RvFr7H52jMuLH8kvvLgEAimP0YuWmAH7BhQImEDAAgI2EOjBlMBK8BlmGRfFJ-sy4Ma_gXUJJOkql8XylMCMCwS2BpnIBQKDMPq-QB-jGEWnCwRilFGyKMe8YI1WIllG4g8QCFZRmjGj3TG73eLrSS6ZMTJhuiHAc_k6JpPRHIHRDamPcpcsJAoUnd4-R7XOjBHdPxbGWBD6rvfxBcTjKR5FMd-DcjOfC5xHkosOPTiV28mjFz77k6egdXV0mHV02OuMPOUiRoHx3v7TjQbn4HCCx_tn1wtbI3qcxt6jofULQesWouxDu2Of0goNz7YV_fO3QoOyq4E2boVZ34NZ24Ndeuif4kHDs_VwfX4PGpRdD2ZjD1Z9D1ZtD_3Sw-AUDxqerYfB-T1oUHY9WI092PU92LU9FAba71agQdkqGJ5fgQZlV4H9Ty-qN4b5mK14muHBhfX2zt3iIsN4jtWtl_FczPBB8Fk5pnqclLlyIcZMVm9p9eCm1asCcDdMlWFTHTaVYWsvTA_Dlhq7rx5tK9M9dbinDGsm95v86WtleKCePFCGh-rwsAk21XRMVzJ1y6imZrRRz6imaLZmuLppVFM1qu7aIft089_fAAAA__-w0Sxc
#
# -- Limit tests --
#
query T
EXPLAIN (DISTSQL) SELECT y FROM NumToStr LIMIT 5
----
distribution: local
vectorized: true
·
• scan
missing stats
table: numtostr@numtostr_pkey
spans: LIMITED SCAN
limit: 5
·
Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyMj0FL_DAUxO__TxHm9Bcibg97yWlFVwh0t2vbgyA9xOaxBNqkJq_gUvrdZVtEPAgeZyaZ37wJ6b2Dwv7llN_ro_j_qKu6es5vRLXP9w-1uIinsjgIP_YcEkeR64OuxRYSPlg6mp4S1CsyNBJDDC2lFOLVmpYH2n5AbSScH0a-2o1EGyJBTWDHHUGhNm8dlWQsxbsNJCyxcd1S-4XdDdH1Jl4gUQ3GJyVuIVGMrMQug0Tuesdii2aWCCN_sxKbM0Fls_z7npLSEHyiH1N-a97MjQTZM603pzDGlk4xtAtmlcXybzEsJV7TbBXar9HczP8-AwAA___5z4BX
query T
EXPLAIN (DISTSQL) SELECT y FROM NumToStr ORDER BY y LIMIT 5
----
distribution: local
vectorized: true
·
• scan
missing stats
table: numtostr@numtostr_pkey
spans: LIMITED SCAN
limit: 5
·
Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyMj0FLxDAUhO_-isecFCJuD3vJadWtUOhu17YHRXqIzWMJtElNUnBZ-t9lW0Q8CB5nJpn53hnho4NE-nLI77M9XW-zqq6e8xuq0jx9rOlET2WxIzv20YXoqSi3aUkPr3SiPNtlNa0hYJ3mveo5QL4hQSMweNdyCM5frPP8INOfkCsBY4cxXuxGoHWeIc-IJnYMiVq9d1yy0uzvVhDQHJXp5tpvgs3gTa_8CQLVoGyQdAuBYoySNgkEctObSGs0k4Ab489WiOrIkMkk_s9TchicDfwL5a_m1dQIsD7ycnNwo2_54F07zyyymP_NhuYQlzRZRGaXaGqmq68AAAD__6v2g0c=
# Test that the correct node is chosen in a reverse scan with multiple spans.
query T
EXPLAIN (DISTSQL) SELECT y FROM NumToStr WHERE y < 1000 OR y > 9000 ORDER BY y DESC LIMIT 5
----
distribution: full
vectorized: true
·
• revscan
missing stats
table: numtostr@numtostr_pkey
spans: [ - /999] [/9001 - ]
limit: 5
·
Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyMkE9vozAQxe_7KUZzSiRHMVnlsD5lN2G1SCRkAamtWg4ujFIkwNQ2UlHEd6-A_kkqNe3Rb-b3_OYd0TwWKNC93vu_vR1MNl4UR__9KUSu765jaOFvGGyhakqrjNVw9c8NXZi0cNdw_jMFh3M-hSB8kwh-vUgbN4Q_N9DCxo3W4HtbL4YlMqxURjtZkkFxiw4yXGLCsNYqJWOU7uXjsORlTyg4w7yqG9vLCcNUaUJxRJvbglDgTs1UPe9dMrIyL4a1jqFq7DtkrDwQikXHToydy8axvC8oJJmRnvMze3wtY1XrvJS6RYZRLSsjYDbv-wBZZeCAsg-kkWHQWAGrPqKfl7mFJX4W0DkL-MXlIZlaVYa-dTrvEoaUHWhs16hGp7TXKh2-GZ_BwA1CRsaO08X48Kpx1Ac8hZ2LMP8AJ92P5wAAAP__9MXFMQ==
query T
EXPLAIN (VERBOSE) SELECT x FROM (SELECT x, 2*x, x+1 FROM NumToSquare)
----
distribution: local
vectorized: true
·
• scan
columns: (x)
estimated row count: 1,000 (missing stats)
table: numtosquare@numtosquare_pkey
spans: FULL SCAN
# Verifies that unused renders don't cause us to do rendering instead of a
# simple projection.
query T
EXPLAIN (DISTSQL) SELECT x FROM (SELECT x, 2*x, x+1 FROM NumToSquare)
----
distribution: local
vectorized: true
·
• scan
missing stats
table: numtosquare@numtosquare_pkey
spans: FULL SCAN
·
Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyMT81K9EAQvH9PMdRp93PEjcc5regKgbhZkxwEyWHMNEsgmcnOD0RC3l2SQdSD4Kmpqu6q6gnu0kHg8HLK7tIj2zykZVU-Z1tWHrLDfcVG9ljkT2zzCTm7Zf-XMbIrlkRRh94bdwnS0hYc2ig6yp4cxCsS1ByDNQ05Z-xCTetCqkaIHUerh-AXuuZojCWICb71HUGgkm8dFSQV2ZsdOBR52Xar7bfI_WDbXtp3cJSD1E6wa3DkwQu2T1DPHCb4rxDn5Zkgkpn_vUhBbjDa0Y8Ovznv5pqD1Jnis84E29DJmmaNiTBf71ZCkfNRTSJIdZTmev73EQAA___Tc4Tr
query T
EXPLAIN (VERBOSE) SELECT y, str, repeat('test', y) AS res FROM NumToStr ORDER BY res
----
distribution: full
vectorized: true
·
• sort
│ columns: (y, str, res)
│ ordering: +res
│ estimated row count: 1,000 (missing stats)
│ order: +res
│
└── • render
│ columns: (res, y, str)
│ estimated row count: 1,000 (missing stats)
│ render res: repeat('test', y)
│ render y: y
│ render str: str
│
└── • scan
columns: (y, str)
estimated row count: 1,000 (missing stats)
table: numtostr@numtostr_pkey
spans: FULL SCAN
query T
EXPLAIN (VERBOSE) SELECT y, str, repeat('test', y) AS res FROM NumToStr ORDER BY res LIMIT 10
----
distribution: full
vectorized: true
·
• top-k
│ columns: (y, str, res)
│ ordering: +res
│ estimated row count: 10 (missing stats)
│ order: +res
│ k: 10
│
└── • render
│ columns: (res, y, str)
│ estimated row count: 1,000 (missing stats)
│ render res: repeat('test', y)
│ render y: y
│ render str: str
│
└── • scan
columns: (y, str)
estimated row count: 1,000 (missing stats)
table: numtostr@numtostr_pkey
spans: FULL SCAN
# Regression test for #20481.
query T
EXPLAIN (DISTSQL) SELECT count(*) FROM (SELECT 1 AS one FROM NumToSquare WHERE x > 10 ORDER BY xsquared LIMIT 10)
----
distribution: local
vectorized: true
·
• group (scalar)
│
└── • top-k
│ order: +xsquared
│ k: 10
│
└── • scan
missing stats
table: numtosquare@numtosquare_pkey
spans: [/11 - ]
·
Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyUkU2L1EAQhu_-iqJOM9qSdObWpx13IwazyZpEVtEgbVKEQKY72x8wMuS_SxLBHWEG99hv1VPPC31C-zSgwPjLQ7pPMtjcJWVVfkq3UMZpfFtBo71ym9dbeF_k97D5k3LYl6AVranyB6ftk5eG4PFDXMRwhO8-DHcEPIS8uIsLePcVjutKC2lyn1TAwy0yVLqlTB7IoviGHGuGo9ENWavNHJ2WhaQ9oggZ9mr0bo5rho02hOKErncDocBK_hyoINmSCUJk2JKT_bCcfdbvZjT9QZpfyLAcpbICAs7fYj0x1N79vW6d7AgFn9j_Nyi1cWQCfi6_id4gw0qPHwXwuVjunYBMK7pojV5i3XedoU46bYLo3Hybf86qH0X-WG62F127l7gKsqNWls48ly6HU82Q2o7Wf7Tam4YejG4WzfrMF24JWrJunfL1kah1NBd8DvOrcHQdjq7Cu3_genr1OwAA__-JxwGY
| pkg/sql/opt/exec/execbuilder/testdata/distsql_numtables | 0 | https://github.com/cockroachdb/cockroach/commit/a519265872c58fcfc2cc815c9dc58660534f9cd1 | [
0.00375432544387877,
0.0005717643653042614,
0.00016033729480113834,
0.00017579051200300455,
0.0007953550666570663
] |
{
"id": 1,
"code_window": [
"\t\t\tcontinue\n",
"\t\t}\n",
"\n",
"\t\tif rf.traceKV {\n",
"\t\t\tprettyKey = fmt.Sprintf(\"%s/%s\", prettyKey, table.desc.DeletableColumns()[vecIdx].GetName())\n",
"\t\t}\n",
"\n",
"\t\tvalueBytes, err = colencoding.DecodeTableValueToCol(\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tprettyKey = fmt.Sprintf(\"%s/%s\", prettyKey, table.cols[vecIdx].GetName())\n"
],
"file_path": "pkg/sql/colfetcher/cfetcher.go",
"type": "replace",
"edit_start_line_idx": 1097
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "errors",
srcs = ["errors.go"],
importpath = "github.com/cockroachdb/cockroach/pkg/roachprod/errors",
visibility = ["//visibility:public"],
deps = ["@com_github_cockroachdb_errors//:errors"],
)
| pkg/roachprod/errors/BUILD.bazel | 0 | https://github.com/cockroachdb/cockroach/commit/a519265872c58fcfc2cc815c9dc58660534f9cd1 | [
0.0001747988280840218,
0.0001747988280840218,
0.0001747988280840218,
0.0001747988280840218,
0
] |
{
"id": 1,
"code_window": [
"\t\t\tcontinue\n",
"\t\t}\n",
"\n",
"\t\tif rf.traceKV {\n",
"\t\t\tprettyKey = fmt.Sprintf(\"%s/%s\", prettyKey, table.desc.DeletableColumns()[vecIdx].GetName())\n",
"\t\t}\n",
"\n",
"\t\tvalueBytes, err = colencoding.DecodeTableValueToCol(\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tprettyKey = fmt.Sprintf(\"%s/%s\", prettyKey, table.cols[vecIdx].GetName())\n"
],
"file_path": "pkg/sql/colfetcher/cfetcher.go",
"type": "replace",
"edit_start_line_idx": 1097
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package settings
import (
"bytes"
"context"
"fmt"
"sort"
"strconv"
"strings"
"github.com/cockroachdb/errors"
)
// EnumSetting is a StringSetting that restricts the values to be one of the `enumValues`
type EnumSetting struct {
IntSetting
enumValues map[int64]string
}
var _ numericSetting = &EnumSetting{}
// Typ returns the short (1 char) string denoting the type of setting.
func (e *EnumSetting) Typ() string {
return "e"
}
// String returns the enum's string value.
func (e *EnumSetting) String(sv *Values) string {
enumID := e.Get(sv)
if str, ok := e.enumValues[enumID]; ok {
return str
}
return fmt.Sprintf("unknown(%d)", enumID)
}
// ParseEnum returns the enum value and a boolean indicating whether the raw
// input was parseable.
func (e *EnumSetting) ParseEnum(raw string) (int64, bool) {
rawLower := strings.ToLower(raw)
for k, v := range e.enumValues {
if v == rawLower {
return k, true
}
}
// Attempt to parse the string as an integer since it isn't a valid enum string.
v, err := strconv.ParseInt(raw, 10, 64)
if err != nil {
return 0, false
}
_, ok := e.enumValues[v]
return v, ok
}
// GetAvailableValuesAsHint returns the possible enum settings as a string that
// can be provided as an error hint to a user.
func (e *EnumSetting) GetAvailableValuesAsHint() string {
// First stabilize output by sorting by key.
valIdxs := make([]int, 0, len(e.enumValues))
for i := range e.enumValues {
valIdxs = append(valIdxs, int(i))
}
sort.Ints(valIdxs)
	// Now use those indices to emit the values in a stable order.
vals := make([]string, 0, len(e.enumValues))
for _, enumIdx := range valIdxs {
vals = append(vals, fmt.Sprintf("%d: %s", enumIdx, e.enumValues[int64(enumIdx)]))
}
return "Available values: " + strings.Join(vals, ", ")
}
func (e *EnumSetting) set(ctx context.Context, sv *Values, k int64) error {
if _, ok := e.enumValues[k]; !ok {
return errors.Errorf("unrecognized value %d", k)
}
return e.IntSetting.set(ctx, sv, k)
}
func enumValuesToDesc(enumValues map[int64]string) string {
var buffer bytes.Buffer
values := make([]int64, 0, len(enumValues))
for k := range enumValues {
values = append(values, k)
}
sort.Slice(values, func(i, j int) bool { return values[i] < values[j] })
buffer.WriteString("[")
for i, k := range values {
if i > 0 {
buffer.WriteString(", ")
}
fmt.Fprintf(&buffer, "%s = %d", strings.ToLower(enumValues[k]), k)
}
buffer.WriteString("]")
return buffer.String()
}
// WithPublic sets public visibility and can be chained.
func (e *EnumSetting) WithPublic() *EnumSetting {
e.SetVisibility(Public)
return e
}
// RegisterEnumSetting defines a new setting whose value must be one of a
// fixed set of named enum values, stored internally as an int.
func RegisterEnumSetting(
class Class, key, desc string, defaultValue string, enumValues map[int64]string,
) *EnumSetting {
enumValuesLower := make(map[int64]string)
var i int64
var found bool
for k, v := range enumValues {
enumValuesLower[k] = strings.ToLower(v)
if v == defaultValue {
i = k
found = true
}
}
if !found {
panic(fmt.Sprintf("enum registered with default value %s not in map %s", defaultValue, enumValuesToDesc(enumValuesLower)))
}
setting := &EnumSetting{
IntSetting: IntSetting{defaultValue: i},
enumValues: enumValuesLower,
}
register(class, key, fmt.Sprintf("%s %s", desc, enumValuesToDesc(enumValues)), setting)
return setting
}
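// As a hypothetical usage sketch (the key and values are invented for
// illustration):
//
//   var modeSetting = RegisterEnumSetting(
//       TenantWritable, "example.mode", "operating mode for the example",
//       "off", map[int64]string{0: "off", 1: "on", 2: "auto"},
//   )
//
// The default value must exactly match one of the map's strings (the match is
// case-sensitive); otherwise registration panics at init time.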
| pkg/settings/enum.go | 0 | https://github.com/cockroachdb/cockroach/commit/a519265872c58fcfc2cc815c9dc58660534f9cd1 | [
0.00023814174346625805,
0.0001743106113281101,
0.00016090627468656749,
0.00017021066742017865,
0.000018441409338265657
] |
{
"id": 2,
"code_window": [
"// from. Note that only columns that need to be fetched (i.e. requested by the\n",
"// caller) are included in the internal state.\n",
"type cFetcherTableArgs struct {\n",
"\tdesc catalog.TableDescriptor\n",
"\tindex catalog.Index\n",
"\t// ColIdxMap is a mapping from ColumnID of each column to its ordinal. Only\n",
"\t// needed columns are present.\n",
"\tColIdxMap catalog.TableColMap\n",
"\tisSecondaryIndex bool\n",
"\t// cols are all needed columns of the table that are present in the index.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// ColIdxMap is a mapping from ColumnID to the ordinal of the corresponding\n",
"\t// column within the cols field. Only needed columns are present.\n"
],
"file_path": "pkg/sql/colfetcher/cfetcher_setup.go",
"type": "replace",
"edit_start_line_idx": 31
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colfetcher
import (
"bytes"
"context"
"fmt"
"sort"
"strings"
"sync"
"time"
"github.com/cockroachdb/apd/v2"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/colconv"
"github.com/cockroachdb/cockroach/pkg/sql/colencoding"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/colmem"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/row"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/rowinfra"
"github.com/cockroachdb/cockroach/pkg/sql/scrub"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/errors"
)
type cTableInfo struct {
// -- Fields initialized once --
*cFetcherTableArgs
indexColumnDirs []descpb.IndexDescriptor_Direction
// The set of required value-component column ordinals among only needed
// columns.
neededValueColsByIdx util.FastIntSet
// Map used to get the column index based on the descpb.ColumnID.
// It's kept as a pointer so we don't have to re-allocate to sort it each
// time.
orderedColIdxMap *colIdxMap
// One value per column that is part of the key; each value is a column
// ordinal among only needed columns; -1 if we don't need the value for
// that column.
//
// Note that if the tracing is enabled on the cFetcher (traceKV == true),
// then values for all columns are needed and, thus, there will be no -1 in
// indexColOrdinals.
indexColOrdinals []int
// The set of column ordinals which are both composite and part of the index
// key.
compositeIndexColOrdinals util.FastIntSet
// One number per column coming from the "key suffix" that is part of the
// value; each number is a column ordinal among only needed columns; -1 if
// we don't need the value for that column.
//
// The "key suffix" columns are only used for secondary indexes:
// - for non-unique indexes, these columns are appended to the key (and will
// be included in indexColOrdinals instead);
// - for unique indexes, these columns are stored in the value (unless the
// key contains a NULL value: then the extra columns are appended to the key
// to unique-ify it).
//
// Note that if the tracing is enabled on the cFetcher (traceKV == true),
// then values for all columns are needed and, thus, there will be no -1 in
// extraValColOrdinals.
extraValColOrdinals []int
// invertedColOrdinal is a column ordinal among only needed columns,
// indicating the inverted column; -1 if there is no inverted column or we
// don't need the value for that column.
invertedColOrdinal int
// maxColumnFamilyID is the maximum possible family id for the configured
// table.
maxColumnFamilyID descpb.FamilyID
// knownPrefixLength is the number of bytes in the index key prefix this
// Fetcher is configured for. The index key prefix is the table id, index
// id pair at the start of the key.
knownPrefixLength int
// The following fields contain MVCC metadata for each row and may be
// returned to users of cFetcher immediately after NextBatch returns.
//
// rowLastModified is the timestamp of the last time any family in the row
// was modified in any way.
rowLastModified hlc.Timestamp
// timestampOutputIdx controls at what column ordinal in the output batch to
// write the timestamp for the MVCC timestamp system column.
timestampOutputIdx int
// oidOutputIdx controls at what column ordinal in the output batch to write
// the value for the tableoid system column.
oidOutputIdx int
keyValTypes []*types.T
extraTypes []*types.T
// extraValDirections contains len(extraTypes) ASC directions. This will
// only be used for unique secondary indexes.
extraValDirections []descpb.IndexDescriptor_Direction
da rowenc.DatumAlloc
}
var _ execinfra.Releasable = &cTableInfo{}
var cTableInfoPool = sync.Pool{
New: func() interface{} {
return &cTableInfo{
orderedColIdxMap: &colIdxMap{},
}
},
}
func newCTableInfo() *cTableInfo {
return cTableInfoPool.Get().(*cTableInfo)
}
// Release implements the execinfra.Releasable interface.
func (c *cTableInfo) Release() {
c.cFetcherTableArgs.Release()
// Note that all slices are being reused, but there is no need to deeply
// reset them since all of the slices are of Go native types.
c.orderedColIdxMap.ords = c.orderedColIdxMap.ords[:0]
c.orderedColIdxMap.vals = c.orderedColIdxMap.vals[:0]
*c = cTableInfo{
orderedColIdxMap: c.orderedColIdxMap,
indexColOrdinals: c.indexColOrdinals[:0],
extraValColOrdinals: c.extraValColOrdinals[:0],
keyValTypes: c.keyValTypes[:0],
extraTypes: c.extraTypes[:0],
extraValDirections: c.extraValDirections[:0],
}
cTableInfoPool.Put(c)
}
// colIdxMap is a "map" that contains the ordinals for each ColumnID among the
// columns that need to be fetched. This map is used to figure out what index
// within a row a particular value-component column goes into. Value-component
// columns are encoded with a column id prefix, with the guarantee that within
// any given row, the column ids are always increasing. Because of this
// guarantee, we can store this map as two sorted lists that the fetcher keeps
// an index into, giving fast access during decoding.
//
// It implements sort.Interface to be sortable on vals, while keeping ords
// matched up to the order of vals.
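//
// For example (illustrative values): if the fetched columns have IDs 5, 2,
// and 9 at table ordinals 0, 1, and 2 respectively, then after sorting
// vals = [2, 5, 9] and ords = [1, 0, 2].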
type colIdxMap struct {
// vals is the sorted list of descpb.ColumnIDs in the table to fetch.
vals descpb.ColumnIDs
// ords is the list of ordinals into all columns of the table for each
// column in vals. The ith entry in ords is the ordinal among all columns of
// the table for the ith column in vals.
ords []int
}
// Len implements sort.Interface.
func (m colIdxMap) Len() int {
return len(m.vals)
}
// Less implements sort.Interface.
func (m colIdxMap) Less(i, j int) bool {
return m.vals[i] < m.vals[j]
}
// Swap implements sort.Interface.
func (m colIdxMap) Swap(i, j int) {
m.vals[i], m.vals[j] = m.vals[j], m.vals[i]
m.ords[i], m.ords[j] = m.ords[j], m.ords[i]
}
type cFetcherArgs struct {
// lockStrength represents the row-level locking mode to use when fetching
// rows.
lockStrength descpb.ScanLockingStrength
// lockWaitPolicy represents the policy to be used for handling conflicting
// locks held by other active transactions.
lockWaitPolicy descpb.ScanLockingWaitPolicy
// lockTimeout specifies the maximum amount of time that the fetcher will
// wait while attempting to acquire a lock on a key or while blocking on an
// existing lock in order to perform a non-locking read on a key.
lockTimeout time.Duration
// memoryLimit determines the maximum memory footprint of the output batch.
memoryLimit int64
// estimatedRowCount is the optimizer-derived number of expected rows that
// this fetch will produce, if non-zero.
estimatedRowCount uint64
	// reverse denotes whether the spans should be read in reverse when
	// StartScan is invoked.
reverse bool
// traceKV indicates whether or not session tracing is enabled. It is set
// when initializing the fetcher.
traceKV bool
}
// noOutputColumn is a sentinel value to denote that a system column is not
// part of the output.
const noOutputColumn = -1
// cFetcher handles fetching kvs and forming table rows for an
// arbitrary number of tables.
// Usage:
// var rf cFetcher
// err := rf.Init(..)
// // Handle err
//   err = rf.StartScan(..)
// // Handle err
// for {
// res, err := rf.NextBatch()
// // Handle err
// if res.colBatch.Length() == 0 {
// // Done
// break
// }
// // Process res.colBatch
// }
// rf.Close(ctx)
type cFetcher struct {
cFetcherArgs
// table is the table that's configured for fetching.
table *cTableInfo
// maxKeysPerRow memoizes the maximum number of keys per row in the index
// we're fetching from. This is used to calculate the kvBatchFetcher's
// firstBatchLimit.
maxKeysPerRow int
// True if the index key must be decoded. This is only false if there are no
// needed columns and the tracing is not enabled.
mustDecodeIndexKey bool
	// mvccDecodeStrategy controls whether or not MVCC timestamps should be
	// decoded from the fetched KVs. It is set if any of the requested tables
	// are required to produce an MVCC timestamp system column.
mvccDecodeStrategy row.MVCCDecodingStrategy
// fetcher is the underlying fetcher that provides KVs.
fetcher *row.KVFetcher
// machine contains fields that get updated during the run of the fetcher.
machine struct {
// state is the queue of next states of the state machine. The 0th entry
// is the next state.
state [3]fetcherState
// rowIdx is always set to the ordinal of the row we're currently writing to
// within the current batch. It's incremented as soon as we detect that a row
// is finished.
rowIdx int
// nextKV is the kv to process next.
nextKV roachpb.KeyValue
// limitHint is a hint as to the number of rows that the caller expects
// to be returned from this fetch. It will be decremented whenever a
// batch is returned by the length of the batch so that it tracks the
// hint for the rows remaining to be returned. It might become negative
// indicating that the hint is no longer applicable.
limitHint int
// remainingValueColsByIdx is the set of value columns that are yet to be
// seen during the decoding of the current row.
remainingValueColsByIdx util.FastIntSet
// lastRowPrefix is the row prefix for the last row we saw a key for. New
// keys are compared against this prefix to determine whether they're part
// of a new row or not.
lastRowPrefix roachpb.Key
// prettyValueBuf is a temp buffer used to create strings for tracing.
prettyValueBuf *bytes.Buffer
// batch is the output batch the fetcher writes to.
batch coldata.Batch
// colvecs are the vectors of batch that have been converted to the well
// typed columns to avoid expensive type casts on each row.
colvecs coldata.TypedVecs
// timestampCol is the underlying ColVec for the timestamp output column,
// or nil if the timestamp column was not requested. It is pulled out from
// colvecs to avoid having to cast the vec to decimal on every write.
timestampCol []apd.Decimal
// tableoidCol is the same as timestampCol but for the tableoid system column.
tableoidCol coldata.DatumVec
}
// scratch is a scratch space used when decoding bytes-like and decimal
// keys.
scratch []byte
accountingHelper colmem.SetAccountingHelper
// kvFetcherMemAcc is a memory account that will be used by the underlying
// KV fetcher.
kvFetcherMemAcc *mon.BoundAccount
// maxCapacity if non-zero indicates the target capacity of the output
	// batch. It is set when, at row finalization, we realize that the output
	// batch has exceeded the memory limit.
maxCapacity int
}
func (rf *cFetcher) resetBatch() {
var reallocated bool
var minDesiredCapacity int
if rf.maxCapacity > 0 {
// If we have already exceeded the memory limit for the output batch, we
// will only be using the same batch from now on.
minDesiredCapacity = rf.maxCapacity
} else if rf.machine.limitHint > 0 && (rf.estimatedRowCount == 0 || uint64(rf.machine.limitHint) < rf.estimatedRowCount) {
// If we have a limit hint, and either
// 1) we don't have an estimate, or
// 2) we have a soft limit,
// use the hint to size the batch. Note that if it exceeds
		// coldata.BatchSize(), ResetMaybeReallocate will chop it down.
minDesiredCapacity = rf.machine.limitHint
} else {
// Otherwise, use the estimate. Note that if the estimate is not
// present, it'll be 0 and ResetMaybeReallocate will allocate the
// initial batch of capacity 1 which is the desired behavior.
//
// We need to transform our rf.estimatedRowCount, which is a uint64,
// into an int. We have to be careful: if we just cast it directly, a
// giant estimate will wrap around and become negative.
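		// For example, an estimate of 1<<63 cast directly to int would come
		// out negative on a 64-bit platform.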
if rf.estimatedRowCount > uint64(coldata.BatchSize()) {
minDesiredCapacity = coldata.BatchSize()
} else {
minDesiredCapacity = int(rf.estimatedRowCount)
}
}
rf.machine.batch, reallocated = rf.accountingHelper.ResetMaybeReallocate(
rf.table.typs, rf.machine.batch, minDesiredCapacity, rf.memoryLimit,
)
if reallocated {
rf.machine.colvecs.SetBatch(rf.machine.batch)
// Pull out any requested system column output vecs.
if rf.table.timestampOutputIdx != noOutputColumn {
rf.machine.timestampCol = rf.machine.colvecs.DecimalCols[rf.machine.colvecs.ColsMap[rf.table.timestampOutputIdx]]
}
if rf.table.oidOutputIdx != noOutputColumn {
rf.machine.tableoidCol = rf.machine.colvecs.DatumCols[rf.machine.colvecs.ColsMap[rf.table.oidOutputIdx]]
}
// Change the allocation size to be the same as the capacity of the
// batch we allocated above.
rf.table.da.AllocSize = rf.machine.batch.Capacity()
}
}
// Init sets up a Fetcher based on the table args. Only columns present in
// tableArgs.cols will be fetched.
func (rf *cFetcher) Init(
codec keys.SQLCodec,
allocator *colmem.Allocator,
kvFetcherMemAcc *mon.BoundAccount,
tableArgs *cFetcherTableArgs,
hasSystemColumns bool,
) error {
rf.kvFetcherMemAcc = kvFetcherMemAcc
table := newCTableInfo()
nCols := tableArgs.ColIdxMap.Len()
if cap(table.orderedColIdxMap.vals) < nCols {
table.orderedColIdxMap.vals = make(descpb.ColumnIDs, 0, nCols)
table.orderedColIdxMap.ords = make([]int, 0, nCols)
}
colDescriptors := tableArgs.cols
for i := range colDescriptors {
//gcassert:bce
id := colDescriptors[i].GetID()
table.orderedColIdxMap.vals = append(table.orderedColIdxMap.vals, id)
table.orderedColIdxMap.ords = append(table.orderedColIdxMap.ords, tableArgs.ColIdxMap.GetDefault(id))
}
sort.Sort(table.orderedColIdxMap)
*table = cTableInfo{
cFetcherTableArgs: tableArgs,
orderedColIdxMap: table.orderedColIdxMap,
indexColOrdinals: table.indexColOrdinals[:0],
extraValColOrdinals: table.extraValColOrdinals[:0],
keyValTypes: table.keyValTypes[:0],
extraTypes: table.extraTypes[:0],
extraValDirections: table.extraValDirections[:0],
timestampOutputIdx: noOutputColumn,
oidOutputIdx: noOutputColumn,
}
if nCols > 0 {
table.neededValueColsByIdx.AddRange(0 /* start */, nCols-1)
}
if hasSystemColumns {
// System columns, if present, are at the end of colDescriptors.
nonSystemColOffset := nCols - len(colinfo.AllSystemColumnDescs)
if nonSystemColOffset < 0 {
nonSystemColOffset = 0
}
for idx := nonSystemColOffset; idx < nCols; idx++ {
col := colDescriptors[idx].GetID()
// Set up extra metadata for system columns, if this is a system
// column.
//
// Currently the system columns are present in neededValueColsByIdx,
// but we don't want to include them in that set because the
// handling of system columns is separate from the standard value
// decoding process.
switch colinfo.GetSystemColumnKindFromColumnID(col) {
case descpb.SystemColumnKind_MVCCTIMESTAMP:
table.timestampOutputIdx = idx
rf.mvccDecodeStrategy = row.MVCCDecodingRequired
table.neededValueColsByIdx.Remove(idx)
case descpb.SystemColumnKind_TABLEOID:
table.oidOutputIdx = idx
table.neededValueColsByIdx.Remove(idx)
}
}
}
table.knownPrefixLength = len(rowenc.MakeIndexKeyPrefix(codec, table.desc, table.index.GetID()))
var indexColumnIDs []descpb.ColumnID
indexColumnIDs, table.indexColumnDirs = catalog.FullIndexColumnIDs(table.index)
compositeColumnIDs := util.MakeFastIntSet()
for i := 0; i < table.index.NumCompositeColumns(); i++ {
id := table.index.GetCompositeColumnID(i)
compositeColumnIDs.Add(int(id))
}
nIndexCols := len(indexColumnIDs)
if cap(table.indexColOrdinals) >= nIndexCols {
table.indexColOrdinals = table.indexColOrdinals[:nIndexCols]
} else {
table.indexColOrdinals = make([]int, nIndexCols)
}
indexColOrdinals := table.indexColOrdinals
_ = indexColOrdinals[len(indexColumnIDs)-1]
needToDecodeDecimalKey := false
for i, id := range indexColumnIDs {
colIdx, ok := tableArgs.ColIdxMap.Get(id)
if ok {
//gcassert:bce
indexColOrdinals[i] = colIdx
rf.mustDecodeIndexKey = true
needToDecodeDecimalKey = needToDecodeDecimalKey || tableArgs.typs[colIdx].Family() == types.DecimalFamily
// A composite column might also have a value encoding which must be
// decoded. Others can be removed from neededValueColsByIdx.
if compositeColumnIDs.Contains(int(id)) {
table.compositeIndexColOrdinals.Add(colIdx)
} else {
table.neededValueColsByIdx.Remove(colIdx)
}
} else {
//gcassert:bce
indexColOrdinals[i] = -1
}
}
if needToDecodeDecimalKey && cap(rf.scratch) < 64 {
// If we need to decode the decimal key encoding, it might use a scratch
// byte slice internally, so we'll allocate such a space to be reused
// for every decimal.
// TODO(yuzefovich): 64 was chosen arbitrarily, tune it.
rf.scratch = make([]byte, 64)
}
table.invertedColOrdinal = -1
if table.index.GetType() == descpb.IndexDescriptor_INVERTED {
id := table.index.InvertedColumnID()
colIdx, ok := tableArgs.ColIdxMap.Get(id)
if ok {
table.invertedColOrdinal = colIdx
// TODO(yuzefovich): for some reason the setup of ColBatchScan
// sometimes doesn't find the inverted column, so we have to be a
// bit tricky here and overwrite the type to what we need for the
// inverted column. Figure it out.
table.typs[colIdx] = types.Bytes
}
}
// Unique secondary indexes contain the extra column IDs as part of
// the value component. We process these separately, so we need to know
// what extra columns are composite or not.
if table.isSecondaryIndex && table.index.IsUnique() {
for i := 0; i < table.index.NumKeySuffixColumns(); i++ {
id := table.index.GetKeySuffixColumnID(i)
colIdx, ok := tableArgs.ColIdxMap.Get(id)
if ok {
if compositeColumnIDs.Contains(int(id)) {
table.compositeIndexColOrdinals.Add(colIdx)
table.neededValueColsByIdx.Remove(colIdx)
}
}
}
}
// Prepare our index key vals slice.
table.keyValTypes = colinfo.GetColumnTypesFromColDescs(
colDescriptors, indexColumnIDs, table.keyValTypes,
)
if table.index.NumKeySuffixColumns() > 0 {
// Unique secondary indexes have a value that is the
// primary index key.
// Primary indexes only contain ascendingly-encoded
// values. If this ever changes, we'll probably have to
// figure out the directions here too.
table.extraTypes = colinfo.GetColumnTypesFromColDescs(
colDescriptors, table.index.IndexDesc().KeySuffixColumnIDs, table.extraTypes,
)
nExtraColumns := table.index.NumKeySuffixColumns()
if cap(table.extraValColOrdinals) >= nExtraColumns {
table.extraValColOrdinals = table.extraValColOrdinals[:nExtraColumns]
} else {
table.extraValColOrdinals = make([]int, nExtraColumns)
}
// Note that for extraValDirections we only need to make sure that the
// slice has the correct length set since the ASC direction is the zero
// value and we don't modify the elements of this slice.
if cap(table.extraValDirections) >= nExtraColumns {
table.extraValDirections = table.extraValDirections[:nExtraColumns]
} else {
table.extraValDirections = make([]descpb.IndexDescriptor_Direction, nExtraColumns)
}
extraValColOrdinals := table.extraValColOrdinals
_ = extraValColOrdinals[nExtraColumns-1]
for i := 0; i < nExtraColumns; i++ {
id := table.index.GetKeySuffixColumnID(i)
idx, ok := tableArgs.ColIdxMap.Get(id)
if ok {
//gcassert:bce
extraValColOrdinals[i] = idx
} else {
//gcassert:bce
extraValColOrdinals[i] = -1
}
}
}
// Keep track of the maximum keys per row to accommodate a
// limitHint when StartScan is invoked.
var err error
rf.maxKeysPerRow, err = table.desc.KeysPerRow(table.index.GetID())
if err != nil {
return err
}
_ = table.desc.ForeachFamily(func(family *descpb.ColumnFamilyDescriptor) error {
id := family.ID
if id > table.maxColumnFamilyID {
table.maxColumnFamilyID = id
}
return nil
})
rf.table = table
rf.accountingHelper.Init(allocator, rf.table.typs)
return nil
}
// StartScan initializes and starts the key-value scan. Can be used multiple
// times.
//
// The fetcher takes ownership of the spans slice - it can modify the slice and
// will perform the memory accounting accordingly. The caller can only reuse the
// spans slice after the fetcher has been closed (which happens when the fetcher
// emits the first zero batch), and if the caller does, it becomes responsible
// for the memory accounting.
func (rf *cFetcher) StartScan(
ctx context.Context,
txn *kv.Txn,
spans roachpb.Spans,
bsHeader *roachpb.BoundedStalenessHeader,
limitBatches bool,
batchBytesLimit rowinfra.BytesLimit,
limitHint rowinfra.RowLimit,
forceProductionKVBatchSize bool,
) error {
if len(spans) == 0 {
return errors.AssertionFailedf("no spans")
}
if !limitBatches && batchBytesLimit != rowinfra.NoBytesLimit {
return errors.AssertionFailedf("batchBytesLimit set without limitBatches")
}
// If we have a limit hint, we limit the first batch size. Subsequent
// batches get larger to avoid making things too slow (e.g. in case we have
// a very restrictive filter and actually have to retrieve a lot of rows).
firstBatchLimit := rowinfra.KeyLimit(limitHint)
if firstBatchLimit != 0 {
// The limitHint is a row limit, but each row could be made up of more
// than one key. We take the maximum possible keys per row out of all
// the table rows we could potentially scan over.
//
// Note that unlike for the row.Fetcher, we don't need an extra key to
// form the last row in the cFetcher because we are eagerly finalizing
// each row once we know that all KVs comprising that row have been
// fetched. Consider several cases:
// - the table has only one column family - then we can finalize each
// row right after the first KV is decoded;
// - the table has multiple column families:
// - KVs for all column families are present for all rows - then for
// each row, when its last KV is fetched, the row can be finalized
// (and firstBatchLimit asks exactly for the correct number of KVs);
// - KVs for some column families are omitted for some rows - then we
// will actually fetch more KVs than necessary, but we'll decode
// limitHint number of rows.
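		// For example (illustrative numbers): with limitHint = 10 rows and
		// maxKeysPerRow = 3, firstBatchLimit is set to 30 keys.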
firstBatchLimit = rowinfra.KeyLimit(int(limitHint) * rf.maxKeysPerRow)
}
f, err := row.NewKVFetcher(
ctx,
txn,
spans,
bsHeader,
rf.reverse,
batchBytesLimit,
firstBatchLimit,
rf.lockStrength,
rf.lockWaitPolicy,
rf.lockTimeout,
rf.kvFetcherMemAcc,
forceProductionKVBatchSize,
)
if err != nil {
return err
}
rf.fetcher = f
rf.machine.lastRowPrefix = nil
rf.machine.limitHint = int(limitHint)
rf.machine.state[0] = stateResetBatch
rf.machine.state[1] = stateInitFetch
return nil
}
// fetcherState is the state enum for NextBatch.
type fetcherState int
//go:generate stringer -type=fetcherState
const (
stateInvalid fetcherState = iota
// stateInitFetch is the empty state of a fetcher: there is no current KV to
// look at, and there's no current row, either because the fetcher has just
// started, or because the last row was already finalized.
//
// 1. fetch next kv into nextKV buffer
// -> decodeFirstKVOfRow
stateInitFetch
// stateResetBatch resets the batch of a fetcher, removing nulls and the
// selection vector.
stateResetBatch
// stateDecodeFirstKVOfRow is the state of looking at a key that is part of
// a row that the fetcher hasn't processed before. s.machine.nextKV must be
// set.
// 1. skip common prefix
// 2. parse key (past common prefix) into row buffer, setting last row prefix buffer
// 3. parse value into row buffer.
// 4. 1-cf or secondary index?
// -> doneRow(initFetch)
// else:
// -> fetchNextKVWithUnfinishedRow
stateDecodeFirstKVOfRow
// stateFetchNextKVWithUnfinishedRow is the state of getting a new key for
// the current row. The machine will read a new key from the underlying
// fetcher, process it, and either add the results to the current row, or
// shift to a new row.
// 1. fetch next kv into nextKV buffer
// 2. skip common prefix
// 3. check equality to last row prefix buffer
// 4. no?
// -> finalizeRow(decodeFirstKVOfRow)
// 5. skip to end of last row prefix buffer
// 6. parse value into row buffer
// 7. -> fetchNextKVWithUnfinishedRow
stateFetchNextKVWithUnfinishedRow
// stateFinalizeRow is the state of finalizing a row. It assumes that no more
// keys for the current row are present.
// state[1] must be set, and stateFinalizeRow will transition to that state
// once it finishes finalizing the row.
// 1. fill missing nulls
// 2. bump rowIdx
// -> nextState and optionally return if row-by-row or batch full
stateFinalizeRow
// stateEmitLastBatch emits the current batch and then transitions to
// stateFinished.
stateEmitLastBatch
// stateFinished is the end state of the state machine - it causes NextBatch
// to return empty batches forever.
stateFinished
)
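// For illustration, a scan over a table with a single column family typically
// cycles through:
//   stateResetBatch -> stateInitFetch -> stateDecodeFirstKVOfRow ->
//   stateFinalizeRow -> stateInitFetch -> ... -> stateEmitLastBatch ->
//   stateFinished.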
// Turn this on to enable super verbose logging of the fetcher state machine.
const debugState = false
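// setEstimatedRowCount sets the optimizer-derived estimate of the number of
// rows that the current scan is expected to produce.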
func (rf *cFetcher) setEstimatedRowCount(estimatedRowCount uint64) {
rf.estimatedRowCount = estimatedRowCount
}
// setNextKV sets the next KV to process to the input KV. needsCopy, if true,
// causes the input kv to be deep copied. needsCopy should be set to true if
// the input KV is pointing to the last KV of a batch, so that the batch can
// be garbage collected before fetching the next one.
//gcassert:inline
func (rf *cFetcher) setNextKV(kv roachpb.KeyValue, needsCopy bool) {
if !needsCopy {
rf.machine.nextKV = kv
return
}
// If we've made it to the very last key in the batch, copy out the key
// so that the GC can reclaim the large backing slice before we call
// NextKV() again.
kvCopy := roachpb.KeyValue{}
kvCopy.Key = make(roachpb.Key, len(kv.Key))
copy(kvCopy.Key, kv.Key)
kvCopy.Value.RawBytes = make([]byte, len(kv.Value.RawBytes))
copy(kvCopy.Value.RawBytes, kv.Value.RawBytes)
kvCopy.Value.Timestamp = kv.Value.Timestamp
rf.machine.nextKV = kvCopy
}
// NextBatch processes keys until we complete one batch of rows (subject to
// the limit hint and the memory limit, and capped at coldata.BatchSize() in
// length), which are returned in columnar format as a coldata.Batch. The batch
// contains one Vec per table column, regardless of the index used; columns that
// are not needed (as per neededCols) are filled with nulls. The Batch should
// not be modified and is only valid until the next call. When there are no more
// rows, the Batch.Length is 0.
func (rf *cFetcher) NextBatch(ctx context.Context) (coldata.Batch, error) {
for {
if debugState {
log.Infof(ctx, "State %s", rf.machine.state[0])
}
switch rf.machine.state[0] {
case stateInvalid:
return nil, errors.New("invalid fetcher state")
case stateInitFetch:
moreKVs, kv, finalReferenceToBatch, err := rf.fetcher.NextKV(ctx, rf.mvccDecodeStrategy)
if err != nil {
return nil, rf.convertFetchError(ctx, err)
}
if !moreKVs {
rf.machine.state[0] = stateEmitLastBatch
continue
}
// TODO(jordan): parse the logical longest common prefix of the span
// into a buffer. The logical longest common prefix is the longest
// common prefix that contains only full key components. For example,
// the keys /Table/53/1/foo/bar/10 and /Table/53/1/foo/bop/10 would
			// have an LLCP of /Table/53/1/foo, even though they share a "b"
			// prefix of the next key component, since that prefix isn't a
			// complete key component.
/*
if newSpan {
lcs := rf.fetcher.span.LongestCommonPrefix()
// parse lcs into stuff
key, matches, err := rowenc.DecodeIndexKeyWithoutTableIDIndexIDPrefix(
rf.table.desc, rf.table.info.index, rf.table.info.keyValTypes,
rf.table.keyVals, rf.table.info.indexColumnDirs, kv.Key[rf.table.info.knownPrefixLength:],
)
if err != nil {
// This is expected - the longest common prefix of the keyspan might
// end half way through a key. Suppress the error and set the actual
// LCS we'll use later to the decodable components of the key.
}
}
*/
rf.setNextKV(kv, finalReferenceToBatch)
rf.machine.state[0] = stateDecodeFirstKVOfRow
case stateResetBatch:
rf.resetBatch()
rf.shiftState()
case stateDecodeFirstKVOfRow:
// Reset MVCC metadata for the table, since this is the first KV of a row.
rf.table.rowLastModified = hlc.Timestamp{}
// foundNull is set when decoding a new index key for a row finds a NULL value
// in the index key. This is used when decoding unique secondary indexes in order
// to tell whether they have extra columns appended to the key.
var foundNull bool
if rf.mustDecodeIndexKey {
if debugState {
log.Infof(ctx, "decoding first key %s", rf.machine.nextKV.Key)
}
var (
key []byte
err error
)
// For unique secondary indexes on tables with multiple column
// families, we must check all columns for NULL values in order
// to determine whether a KV belongs to the same row as the
// previous KV or a different row.
checkAllColsForNull := rf.table.isSecondaryIndex && rf.table.index.IsUnique() && rf.table.desc.NumFamilies() != 1
key, foundNull, rf.scratch, err = colencoding.DecodeKeyValsToCols(
&rf.table.da,
&rf.machine.colvecs,
rf.machine.rowIdx,
rf.table.indexColOrdinals,
checkAllColsForNull,
rf.table.keyValTypes,
rf.table.indexColumnDirs,
nil, /* unseen */
rf.machine.nextKV.Key[rf.table.knownPrefixLength:],
rf.table.invertedColOrdinal,
rf.scratch,
)
if err != nil {
return nil, err
}
prefix := rf.machine.nextKV.Key[:len(rf.machine.nextKV.Key)-len(key)]
rf.machine.lastRowPrefix = prefix
} else {
prefixLen, err := keys.GetRowPrefixLength(rf.machine.nextKV.Key)
if err != nil {
return nil, err
}
rf.machine.lastRowPrefix = rf.machine.nextKV.Key[:prefixLen]
}
// For unique secondary indexes on tables with multiple column
// families, the index-key does not distinguish one row from the
// next if both rows contain identical values along with a NULL.
// Consider the keys:
//
// /test/unique_idx/NULL/0
// /test/unique_idx/NULL/1
//
// The index-key extracted from the above keys is
// /test/unique_idx/NULL. The trailing /0 and /1 are the primary key
// used to unique-ify the keys when a NULL is present. When a null
// is present in the index key, we include the primary key columns
// in lastRowPrefix.
//
// Note that we do not need to do this for non-unique secondary
// indexes because the extra columns in the primary key will
// _always_ be there, so we can decode them when processing the
// index. The difference with unique secondary indexes is that the
// extra columns are not always there, and are used to unique-ify
// the index key, rather than provide the primary key column values.
//
// We also do not need to do this when a table has only one column
// family because it is guaranteed that there is only one KV per
// row. We entirely skip the check that determines if the row is
// unfinished.
if foundNull && rf.table.isSecondaryIndex && rf.table.index.IsUnique() && rf.table.desc.NumFamilies() != 1 {
// We get the remaining bytes after the computed prefix, and then
// slice off the extra encoded columns from those bytes. We calculate
// how many bytes were sliced away, and then extend lastRowPrefix
// by that amount.
prefixLen := len(rf.machine.lastRowPrefix)
remainingBytes := rf.machine.nextKV.Key[prefixLen:]
origRemainingBytesLen := len(remainingBytes)
for i := 0; i < rf.table.index.NumKeySuffixColumns(); i++ {
var err error
// Slice off an extra encoded column from remainingBytes.
remainingBytes, err = rowenc.SkipTableKey(remainingBytes)
if err != nil {
return nil, err
}
}
rf.machine.lastRowPrefix = rf.machine.nextKV.Key[:prefixLen+(origRemainingBytesLen-len(remainingBytes))]
}
familyID, err := rf.getCurrentColumnFamilyID()
if err != nil {
return nil, err
}
rf.machine.remainingValueColsByIdx.CopyFrom(rf.table.neededValueColsByIdx)
// Process the current KV's value component.
if err := rf.processValue(ctx, familyID); err != nil {
return nil, err
}
// Update the MVCC values for this row.
if rf.table.rowLastModified.Less(rf.machine.nextKV.Value.Timestamp) {
rf.table.rowLastModified = rf.machine.nextKV.Value.Timestamp
}
// If the table has only one column family, then the next KV will
// always belong to a different row than the current KV.
if rf.table.desc.NumFamilies() == 1 {
rf.machine.state[0] = stateFinalizeRow
rf.machine.state[1] = stateInitFetch
continue
}
// If the table has more than one column family, then the next KV
// may belong to the same row as the current KV.
rf.machine.state[0] = stateFetchNextKVWithUnfinishedRow
case stateFetchNextKVWithUnfinishedRow:
moreKVs, kv, finalReferenceToBatch, err := rf.fetcher.NextKV(ctx, rf.mvccDecodeStrategy)
if err != nil {
return nil, rf.convertFetchError(ctx, err)
}
if !moreKVs {
// No more data. Finalize the row and exit.
rf.machine.state[0] = stateFinalizeRow
rf.machine.state[1] = stateEmitLastBatch
continue
}
// TODO(jordan): if nextKV returns newSpan = true, set the new span
// prefix and indicate that it needs decoding.
rf.setNextKV(kv, finalReferenceToBatch)
if debugState {
log.Infof(ctx, "decoding next key %s", rf.machine.nextKV.Key)
}
// TODO(yuzefovich): optimize this prefix check by skipping logical
// longest common span prefix.
if !bytes.HasPrefix(kv.Key[rf.table.knownPrefixLength:], rf.machine.lastRowPrefix[rf.table.knownPrefixLength:]) {
// The kv we just found is from a different row.
rf.machine.state[0] = stateFinalizeRow
rf.machine.state[1] = stateDecodeFirstKVOfRow
continue
}
familyID, err := rf.getCurrentColumnFamilyID()
if err != nil {
return nil, err
}
// Process the current KV's value component.
if err := rf.processValue(ctx, familyID); err != nil {
return nil, err
}
// Update the MVCC values for this row.
if rf.table.rowLastModified.Less(rf.machine.nextKV.Value.Timestamp) {
rf.table.rowLastModified = rf.machine.nextKV.Value.Timestamp
}
if familyID == rf.table.maxColumnFamilyID {
// We know the row can't have any more keys, so finalize the row.
rf.machine.state[0] = stateFinalizeRow
rf.machine.state[1] = stateInitFetch
} else {
// Continue with current state.
rf.machine.state[0] = stateFetchNextKVWithUnfinishedRow
}
case stateFinalizeRow:
// Populate the timestamp system column if needed. We have to do it
// on a per row basis since each row can be modified at a different
// time.
if rf.table.timestampOutputIdx != noOutputColumn {
rf.machine.timestampCol[rf.machine.rowIdx] = tree.TimestampToDecimal(rf.table.rowLastModified)
}
// We're finished with a row. Fill the row in with nulls if
// necessary, perform the memory accounting for the row, bump the
// row index, emit the batch if necessary, and move to the next
// state.
if err := rf.fillNulls(); err != nil {
return nil, err
}
// Note that we haven't set the tableoid value (if that system
// column is requested) yet, but it is ok for the purposes of the
// memory accounting - oids are fixed length values and, thus, have
// already been accounted for when the batch was allocated.
rf.accountingHelper.AccountForSet(rf.machine.rowIdx)
rf.machine.rowIdx++
rf.shiftState()
var emitBatch bool
if rf.maxCapacity == 0 && rf.accountingHelper.Allocator.Used() >= rf.memoryLimit {
rf.maxCapacity = rf.machine.rowIdx
}
if rf.machine.rowIdx >= rf.machine.batch.Capacity() ||
(rf.maxCapacity > 0 && rf.machine.rowIdx >= rf.maxCapacity) ||
(rf.machine.limitHint > 0 && rf.machine.rowIdx >= rf.machine.limitHint) {
// We either
// 1. have no more room in our batch, so output it immediately
// or
// 2. we made it to our limit hint, so output our batch early
// to make sure that we don't bother filling in extra data
// if we don't need to.
emitBatch = true
// Update the limit hint to track the expected remaining rows to
// be fetched.
//
// Note that limitHint might become negative at which point we
// will start ignoring it.
rf.machine.limitHint -= rf.machine.rowIdx
}
if emitBatch {
rf.pushState(stateResetBatch)
rf.finalizeBatch()
return rf.machine.batch, nil
}
case stateEmitLastBatch:
rf.machine.state[0] = stateFinished
rf.finalizeBatch()
return rf.machine.batch, nil
case stateFinished:
// Close the fetcher eagerly so that its memory could be GCed.
rf.Close(ctx)
return coldata.ZeroBatch, nil
}
}
}
// shiftState shifts the state queue to the left, removing the first element and
// clearing the last element.
func (rf *cFetcher) shiftState() {
copy(rf.machine.state[:2], rf.machine.state[1:])
rf.machine.state[2] = stateInvalid
}
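// pushState pushes a new state to the front of the state queue, shifting the
// existing states to the right (the last state is dropped).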
func (rf *cFetcher) pushState(state fetcherState) {
copy(rf.machine.state[1:], rf.machine.state[:2])
rf.machine.state[0] = state
}
// getDatumAt returns the converted datum object at the given (colIdx, rowIdx).
// This function is meant for tracing and should not be used in hot paths.
func (rf *cFetcher) getDatumAt(colIdx int, rowIdx int) tree.Datum {
res := []tree.Datum{nil}
colconv.ColVecToDatumAndDeselect(res, rf.machine.colvecs.Vecs[colIdx], 1 /* length */, []int{rowIdx}, &rf.table.da)
return res[0]
}
// processValue processes the state machine's current value component, setting
// columns in the rowIdx'th tuple in the current batch depending on what data
// is found in the current value component.
func (rf *cFetcher) processValue(ctx context.Context, familyID descpb.FamilyID) (err error) {
table := rf.table
var prettyKey, prettyValue string
if rf.traceKV {
defer func() {
if err == nil {
log.VEventf(ctx, 2, "fetched: %s -> %s", prettyKey, prettyValue)
}
}()
var buf strings.Builder
buf.WriteByte('/')
buf.WriteString(rf.table.desc.GetName())
buf.WriteByte('/')
buf.WriteString(rf.table.index.GetName())
// Note that because rf.traceKV is true, rf.table.indexColOrdinals will
// not include any -1, so idx values will all be valid.
for _, idx := range rf.table.indexColOrdinals {
buf.WriteByte('/')
buf.WriteString(rf.getDatumAt(idx, rf.machine.rowIdx).String())
}
prettyKey = buf.String()
}
if len(table.cols) == 0 {
// We don't need to decode any values. Note that this branch can only be
// executed if the tracing is disabled (if it was enabled, we would
// decode values from all columns).
return nil
}
val := rf.machine.nextKV.Value
if !table.isSecondaryIndex || table.index.GetEncodingType() == descpb.PrimaryIndexEncoding {
// If familyID is 0, kv.Value contains values for composite key columns.
// These columns already have a table.row value assigned above, but that value
// (obtained from the key encoding) might not be correct (e.g. for decimals,
// it might not contain the right number of trailing 0s; for collated
// strings, it is one of potentially many strings with the same collation
// key).
//
// In these cases, the correct value will be present in family 0 and the
// table.row value gets overwritten.
switch val.GetTag() {
case roachpb.ValueType_TUPLE:
// In this case, we don't need to decode the column family ID, because
// the ValueType_TUPLE encoding includes the column id with every encoded
// column value.
var tupleBytes []byte
tupleBytes, err = val.GetTuple()
if err != nil {
break
}
prettyKey, prettyValue, err = rf.processValueBytes(ctx, table, tupleBytes, prettyKey)
default:
var family *descpb.ColumnFamilyDescriptor
family, err = table.desc.FindFamilyByID(familyID)
if err != nil {
return scrub.WrapError(scrub.IndexKeyDecodingError, err)
}
prettyKey, prettyValue, err = rf.processValueSingle(ctx, table, family, prettyKey)
}
if err != nil {
return scrub.WrapError(scrub.IndexValueDecodingError, err)
}
} else {
tag := val.GetTag()
var valueBytes []byte
switch tag {
case roachpb.ValueType_BYTES:
// If we have the ValueType_BYTES on a secondary index, then we know we
// are looking at column family 0. Column family 0 stores the extra primary
// key columns if they are present, so we decode them here.
valueBytes, err = val.GetBytes()
if err != nil {
return scrub.WrapError(scrub.IndexValueDecodingError, err)
}
if table.isSecondaryIndex && table.index.IsUnique() {
// This is a unique secondary index; decode the extra
// column values from the value.
valueBytes, _, rf.scratch, err = colencoding.DecodeKeyValsToCols(
&table.da,
&rf.machine.colvecs,
rf.machine.rowIdx,
table.extraValColOrdinals,
false, /* checkAllColsForNull */
table.extraTypes,
table.extraValDirections,
&rf.machine.remainingValueColsByIdx,
valueBytes,
rf.table.invertedColOrdinal,
rf.scratch,
)
if err != nil {
return scrub.WrapError(scrub.SecondaryIndexKeyExtraValueDecodingError, err)
}
if rf.traceKV {
var buf strings.Builder
for _, idx := range table.extraValColOrdinals {
buf.WriteByte('/')
buf.WriteString(rf.getDatumAt(idx, rf.machine.rowIdx).String())
}
prettyValue = buf.String()
}
}
case roachpb.ValueType_TUPLE:
valueBytes, err = val.GetTuple()
if err != nil {
return scrub.WrapError(scrub.IndexValueDecodingError, err)
}
}
if len(valueBytes) > 0 {
prettyKey, prettyValue, err = rf.processValueBytes(
ctx, table, valueBytes, prettyKey,
)
if err != nil {
return scrub.WrapError(scrub.IndexValueDecodingError, err)
}
}
}
if rf.traceKV && prettyValue == "" {
prettyValue = tree.DNull.String()
}
return nil
}
// processValueSingle processes the given value (of column
// family.DefaultColumnID), setting values in table.row accordingly. The key is
// only used for logging.
func (rf *cFetcher) processValueSingle(
ctx context.Context,
table *cTableInfo,
family *descpb.ColumnFamilyDescriptor,
prettyKeyPrefix string,
) (prettyKey string, prettyValue string, err error) {
prettyKey = prettyKeyPrefix
// If this is the row sentinel (in the legacy pre-family format),
// a value is not expected, so we're done.
if family.ID == 0 {
return "", "", nil
}
colID := family.DefaultColumnID
if colID == 0 {
return "", "", errors.Errorf("single entry value with no default column id")
}
if idx, ok := table.ColIdxMap.Get(colID); ok {
if rf.traceKV {
prettyKey = fmt.Sprintf("%s/%s", prettyKey, table.desc.DeletableColumns()[idx].GetName())
}
val := rf.machine.nextKV.Value
if len(val.RawBytes) == 0 {
return prettyKey, "", nil
}
typ := rf.table.typs[idx]
err := colencoding.UnmarshalColumnValueToCol(
&table.da, &rf.machine.colvecs, idx, rf.machine.rowIdx, typ, val,
)
if err != nil {
return "", "", err
}
rf.machine.remainingValueColsByIdx.Remove(idx)
if rf.traceKV {
prettyValue = rf.getDatumAt(idx, rf.machine.rowIdx).String()
}
if row.DebugRowFetch {
log.Infof(ctx, "Scan %s -> %v", rf.machine.nextKV.Key, "?")
}
return prettyKey, prettyValue, nil
}
// No need to unmarshal the column value. Either the column was part of
// the index key or it isn't needed.
if row.DebugRowFetch {
log.Infof(ctx, "Scan %s -> [%d] (skipped)", rf.machine.nextKV.Key, colID)
}
return "", "", nil
}
func (rf *cFetcher) processValueBytes(
ctx context.Context, table *cTableInfo, valueBytes []byte, prettyKeyPrefix string,
) (prettyKey string, prettyValue string, err error) {
prettyKey = prettyKeyPrefix
if rf.traceKV {
if rf.machine.prettyValueBuf == nil {
rf.machine.prettyValueBuf = &bytes.Buffer{}
}
rf.machine.prettyValueBuf.Reset()
}
// Composite columns that are key encoded in the value (like the pk columns
	// in a unique secondary index) have already been removed from the set of
	// remaining value columns, so we need to add them back in here in case
// they have full value encoded composite values.
rf.table.compositeIndexColOrdinals.ForEach(func(i int) {
rf.machine.remainingValueColsByIdx.Add(i)
})
var (
colIDDiff uint32
lastColID descpb.ColumnID
dataOffset int
typ encoding.Type
lastColIDIndex int
)
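	// Note that the column IDs in the value are delta-encoded: each tag
	// stores the difference from the previous column ID. For example
	// (illustrative), a row storing columns with IDs 2 and 5 encodes the
	// diffs 2 and then 3.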
// Continue reading data until there's none left or we've finished
// populating the data for all of the requested columns.
for len(valueBytes) > 0 && rf.machine.remainingValueColsByIdx.Len() > 0 {
_, dataOffset, colIDDiff, typ, err = encoding.DecodeValueTag(valueBytes)
if err != nil {
return "", "", err
}
colID := lastColID + descpb.ColumnID(colIDDiff)
lastColID = colID
vecIdx := -1
// Find the ordinal into table.cols for the column ID we just decoded,
// by advancing through the sorted list of needed value columns until
// there's a match, or we passed the column ID we're looking for.
for ; lastColIDIndex < len(table.orderedColIdxMap.vals); lastColIDIndex++ {
nextID := table.orderedColIdxMap.vals[lastColIDIndex]
if nextID == colID {
vecIdx = table.orderedColIdxMap.ords[lastColIDIndex]
// Since the next value part (if it exists) will belong to the
// column after the current one, we can advance the index.
lastColIDIndex++
break
} else if nextID > colID {
break
}
}
if vecIdx == -1 {
// This column wasn't requested, so read its length and skip it.
			valueLen, err := encoding.PeekValueLengthWithOffsetsAndType(valueBytes, dataOffset, typ)
			if err != nil {
				return "", "", err
			}
			valueBytes = valueBytes[valueLen:]
if row.DebugRowFetch {
log.Infof(ctx, "Scan %s -> [%d] (skipped)", rf.machine.nextKV.Key, colID)
}
continue
}
if rf.traceKV {
prettyKey = fmt.Sprintf("%s/%s", prettyKey, table.desc.DeletableColumns()[vecIdx].GetName())
}
valueBytes, err = colencoding.DecodeTableValueToCol(
&table.da, &rf.machine.colvecs, vecIdx, rf.machine.rowIdx, typ,
dataOffset, rf.table.typs[vecIdx], valueBytes,
)
if err != nil {
return "", "", err
}
rf.machine.remainingValueColsByIdx.Remove(vecIdx)
if rf.traceKV {
dVal := rf.getDatumAt(vecIdx, rf.machine.rowIdx)
if _, err := fmt.Fprintf(rf.machine.prettyValueBuf, "/%v", dVal.String()); err != nil {
return "", "", err
}
}
}
if rf.traceKV {
prettyValue = rf.machine.prettyValueBuf.String()
}
return prettyKey, prettyValue, nil
}
func (rf *cFetcher) fillNulls() error {
table := rf.table
if rf.machine.remainingValueColsByIdx.Empty() {
return nil
}
for i, ok := rf.machine.remainingValueColsByIdx.Next(0); ok; i, ok = rf.machine.remainingValueColsByIdx.Next(i + 1) {
// Composite index columns may have a key but no value. Ignore them so we
// don't incorrectly mark them as null.
if table.compositeIndexColOrdinals.Contains(i) {
continue
}
if !table.cols[i].IsNullable() {
var indexColValues []string
for _, idx := range table.indexColOrdinals {
if idx != -1 {
indexColValues = append(indexColValues, rf.getDatumAt(idx, rf.machine.rowIdx).String())
} else {
indexColValues = append(indexColValues, "?")
}
}
return scrub.WrapError(scrub.UnexpectedNullValueError, errors.Errorf(
"non-nullable column \"%s:%s\" with no value! Index scanned was %q with the index key columns (%s) and the values (%s)",
table.desc.GetName(), table.cols[i].GetName(), table.index.GetName(),
strings.Join(table.index.IndexDesc().KeyColumnNames, ","), strings.Join(indexColValues, ",")))
}
rf.machine.colvecs.Nulls[i].SetNull(rf.machine.rowIdx)
}
return nil
}
func (rf *cFetcher) finalizeBatch() {
// Populate the tableoid system column for the whole batch if necessary.
if rf.table.oidOutputIdx != noOutputColumn {
id := rf.table.desc.GetID()
for i := 0; i < rf.machine.rowIdx; i++ {
// Note that we don't need to update the memory accounting because
// oids are fixed length values and have already been accounted for
// when finalizing each row.
rf.machine.tableoidCol.Set(i, rf.table.da.NewDOid(tree.MakeDOid(tree.DInt(id))))
}
}
rf.machine.batch.SetLength(rf.machine.rowIdx)
rf.machine.rowIdx = 0
}
// getCurrentColumnFamilyID returns the column family id of the key in
// rf.machine.nextKV.Key.
func (rf *cFetcher) getCurrentColumnFamilyID() (descpb.FamilyID, error) {
	// If the table only has one column family, with ID 0, we know that the
	// key must belong to column family 0.
if rf.table.maxColumnFamilyID == 0 {
return 0, nil
}
// The column family is encoded in the final bytes of the key. The last
// byte of the key is the length of the column family id encoding
// itself. See encoding.md for more details, and see MakeFamilyKey for
// the routine that performs this encoding.
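	// For example (illustrative), a key for column family 3 ends with the
	// uvarint encoding of 3 followed by a single byte holding the length of
	// that encoding.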
var id uint64
_, id, err := encoding.DecodeUvarintAscending(rf.machine.nextKV.Key[len(rf.machine.lastRowPrefix):])
if err != nil {
return 0, scrub.WrapError(scrub.IndexKeyDecodingError, err)
}
return descpb.FamilyID(id), nil
}
// convertFetchError converts an error generated during a key-value fetch to a
// storage error that will propagate through the exec subsystem unchanged. The
// error may also undergo a mapping to make it more user friendly for SQL
// consumers.
func (rf *cFetcher) convertFetchError(ctx context.Context, err error) error {
err = row.ConvertFetchError(ctx, rf, err)
err = colexecerror.NewStorageError(err)
return err
}
// KeyToDesc implements the KeyToDescTranslator interface. The implementation is
// used by convertFetchError.
func (rf *cFetcher) KeyToDesc(key roachpb.Key) (catalog.TableDescriptor, bool) {
if len(key) < rf.table.knownPrefixLength {
return nil, false
}
nIndexCols := rf.table.index.NumKeyColumns() + rf.table.index.NumKeySuffixColumns()
tableKeyVals := make([]rowenc.EncDatum, nIndexCols)
_, _, err := rowenc.DecodeKeyVals(
rf.table.keyValTypes,
tableKeyVals,
rf.table.indexColumnDirs,
key[rf.table.knownPrefixLength:],
)
if err != nil {
return nil, false
}
return rf.table.desc, true
}
var cFetcherPool = sync.Pool{
New: func() interface{} {
return &cFetcher{}
},
}
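// Release resets the fetcher, retaining its reusable allocations, and returns
// it to cFetcherPool.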
func (rf *cFetcher) Release() {
rf.accountingHelper.Release()
if rf.table != nil {
rf.table.Release()
}
colvecs := rf.machine.colvecs
colvecs.Reset()
*rf = cFetcher{
scratch: rf.scratch[:0],
}
rf.machine.colvecs = colvecs
cFetcherPool.Put(rf)
}
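// Close closes the underlying KVFetcher, if it is still open, releasing its
// resources.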
func (rf *cFetcher) Close(ctx context.Context) {
if rf != nil && rf.fetcher != nil {
rf.fetcher.Close(ctx)
rf.fetcher = nil
}
}
| pkg/sql/colfetcher/cfetcher.go | 1 | https://github.com/cockroachdb/cockroach/commit/a519265872c58fcfc2cc815c9dc58660534f9cd1 | [
0.9976061582565308,
0.07672324031591415,
0.00016205183055717498,
0.0003338756796438247,
0.25980043411254883
] |
{
"id": 2,
"code_window": [
"// from. Note that only columns that need to be fetched (i.e. requested by the\n",
"// caller) are included in the internal state.\n",
"type cFetcherTableArgs struct {\n",
"\tdesc catalog.TableDescriptor\n",
"\tindex catalog.Index\n",
"\t// ColIdxMap is a mapping from ColumnID of each column to its ordinal. Only\n",
"\t// needed columns are present.\n",
"\tColIdxMap catalog.TableColMap\n",
"\tisSecondaryIndex bool\n",
"\t// cols are all needed columns of the table that are present in the index.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// ColIdxMap is a mapping from ColumnID to the ordinal of the corresponding\n",
"\t// column within the cols field. Only needed columns are present.\n"
],
"file_path": "pkg/sql/colfetcher/cfetcher_setup.go",
"type": "replace",
"edit_start_line_idx": 31
} | // Copyright 2014 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package keys_test
import (
"bytes"
"encoding/hex"
"fmt"
"math"
"strconv"
"strings"
"testing"
"time"
"unicode/utf8"
"github.com/cockroachdb/apd/v2"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/bitarray"
"github.com/cockroachdb/cockroach/pkg/util/duration"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/keysutil"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
)
func lockTableKey(key roachpb.Key) roachpb.Key {
k, _ := keys.LockTableSingleKey(key, nil)
return k
}
func TestPrettyPrint(t *testing.T) {
tenSysCodec := keys.SystemSQLCodec
ten5Codec := keys.MakeSQLCodec(roachpb.MakeTenantID(5))
tm, _ := time.Parse(time.RFC3339Nano, "2016-03-30T13:40:35.053725008Z")
duration := duration.MakeDuration(1*time.Second.Nanoseconds(), 1, 1)
durationAsc, _ := encoding.EncodeDurationAscending(nil, duration)
durationDesc, _ := encoding.EncodeDurationDescending(nil, duration)
bitArray := bitarray.MakeBitArrayFromInt64(8, 58, 7)
txnID := uuid.MakeV4()
// Support for asserting that the ugly printer supports a key was added after
// most of the tests here were written.
revertSupportUnknown := false
revertMustSupport := true
// The following test cases encode keys with a mixture of ascending and descending direction,
// but always decode keys in the ascending direction. This is why some of the decoded values
// seem bizarre.
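	// For example, EncodeVarintDescending(nil, 1222) below pretty-prints as
	// /Table/42/-1223 when decoded in the ascending direction.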
testCases := []struct {
key roachpb.Key
exp string
assertRevertSupported bool
}{
// local
{keys.StoreIdentKey(), "/Local/Store/storeIdent", revertSupportUnknown},
{keys.StoreGossipKey(), "/Local/Store/gossipBootstrap", revertSupportUnknown},
{keys.StoreClusterVersionKey(), "/Local/Store/clusterVersion", revertSupportUnknown},
{keys.StoreNodeTombstoneKey(123), "/Local/Store/nodeTombstone/n123", revertSupportUnknown},
{keys.StoreCachedSettingsKey(roachpb.Key("a")), `/Local/Store/cachedSettings/"a"`, revertSupportUnknown},
{keys.AbortSpanKey(roachpb.RangeID(1000001), txnID), fmt.Sprintf(`/Local/RangeID/1000001/r/AbortSpan/%q`, txnID), revertSupportUnknown},
{keys.RangeAppliedStateKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/RangeAppliedState", revertSupportUnknown},
{keys.RaftTruncatedStateKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/u/RaftTruncatedState", revertSupportUnknown},
{keys.RangeLeaseKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/RangeLease", revertSupportUnknown},
{keys.RangePriorReadSummaryKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/RangePriorReadSummary", revertSupportUnknown},
{keys.RangeGCThresholdKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/RangeGCThreshold", revertSupportUnknown},
{keys.RangeVersionKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/RangeVersion", revertSupportUnknown},
{keys.RaftHardStateKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/u/RaftHardState", revertSupportUnknown},
{keys.RangeTombstoneKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/u/RangeTombstone", revertSupportUnknown},
{keys.RaftLogKey(roachpb.RangeID(1000001), uint64(200001)), "/Local/RangeID/1000001/u/RaftLog/logIndex:200001", revertSupportUnknown},
{keys.RangeLastReplicaGCTimestampKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/u/RangeLastReplicaGCTimestamp", revertSupportUnknown},
{keys.MakeRangeKeyPrefix(roachpb.RKey(tenSysCodec.TablePrefix(42))), `/Local/Range/Table/42`, revertSupportUnknown},
{keys.RangeDescriptorKey(roachpb.RKey(tenSysCodec.TablePrefix(42))), `/Local/Range/Table/42/RangeDescriptor`, revertSupportUnknown},
{keys.TransactionKey(tenSysCodec.TablePrefix(42), txnID), fmt.Sprintf(`/Local/Range/Table/42/Transaction/%q`, txnID), revertSupportUnknown},
{keys.RangeProbeKey(roachpb.RKey(tenSysCodec.TablePrefix(42))), `/Local/Range/Table/42/RangeProbe`, revertSupportUnknown},
{keys.QueueLastProcessedKey(roachpb.RKey(tenSysCodec.TablePrefix(42)), "foo"), `/Local/Range/Table/42/QueueLastProcessed/"foo"`, revertSupportUnknown},
{lockTableKey(keys.RangeDescriptorKey(roachpb.RKey(tenSysCodec.TablePrefix(42)))), `/Local/Lock/Intent/Local/Range/Table/42/RangeDescriptor`, revertSupportUnknown},
{lockTableKey(tenSysCodec.TablePrefix(111)), "/Local/Lock/Intent/Table/111", revertSupportUnknown},
{keys.MakeRangeKeyPrefix(roachpb.RKey(ten5Codec.TenantPrefix())), `/Local/Range/Tenant/5`, revertSupportUnknown},
{keys.MakeRangeKeyPrefix(roachpb.RKey(ten5Codec.TablePrefix(42))), `/Local/Range/Tenant/5/Table/42`, revertSupportUnknown},
{keys.RangeDescriptorKey(roachpb.RKey(ten5Codec.TablePrefix(42))), `/Local/Range/Tenant/5/Table/42/RangeDescriptor`, revertSupportUnknown},
{keys.TransactionKey(ten5Codec.TablePrefix(42), txnID), fmt.Sprintf(`/Local/Range/Tenant/5/Table/42/Transaction/%q`, txnID), revertSupportUnknown},
{keys.QueueLastProcessedKey(roachpb.RKey(ten5Codec.TablePrefix(42)), "foo"), `/Local/Range/Tenant/5/Table/42/QueueLastProcessed/"foo"`, revertSupportUnknown},
{lockTableKey(keys.RangeDescriptorKey(roachpb.RKey(ten5Codec.TablePrefix(42)))), `/Local/Lock/Intent/Local/Range/Tenant/5/Table/42/RangeDescriptor`, revertSupportUnknown},
{lockTableKey(ten5Codec.TablePrefix(111)), "/Local/Lock/Intent/Tenant/5/Table/111", revertSupportUnknown},
{keys.LocalMax, `/Meta1/""`, revertSupportUnknown}, // LocalMax == Meta1Prefix
// system
{makeKey(keys.Meta2Prefix, roachpb.Key("foo")), `/Meta2/"foo"`, revertSupportUnknown},
{makeKey(keys.Meta1Prefix, roachpb.Key("foo")), `/Meta1/"foo"`, revertSupportUnknown},
{keys.RangeMetaKey(roachpb.RKey("f")).AsRawKey(), `/Meta2/"f"`, revertSupportUnknown},
{keys.NodeLivenessKey(10033), "/System/NodeLiveness/10033", revertSupportUnknown},
{keys.NodeStatusKey(1111), "/System/StatusNode/1111", revertSupportUnknown},
{keys.SystemMax, "/System/Max", revertSupportUnknown},
// key of key
{keys.RangeMetaKey(roachpb.RKey(keys.MakeRangeKeyPrefix(roachpb.RKey(tenSysCodec.TablePrefix(42))))).AsRawKey(), `/Meta2/Local/Range/Table/42`, revertSupportUnknown},
{keys.RangeMetaKey(roachpb.RKey(makeKey(tenSysCodec.TablePrefix(42), roachpb.RKey("foo")))).AsRawKey(), `/Meta2/Table/42/"foo"`, revertSupportUnknown},
{keys.RangeMetaKey(roachpb.RKey(makeKey(keys.Meta2Prefix, roachpb.Key("foo")))).AsRawKey(), `/Meta1/"foo"`, revertSupportUnknown},
// table
{keys.SystemConfigSpan.Key, "/Table/SystemConfigSpan/Start", revertSupportUnknown},
{tenSysCodec.TablePrefix(111), "/Table/111", revertMustSupport},
{makeKey(tenSysCodec.TablePrefix(42), encoding.EncodeUvarintAscending(nil, 1)), `/Table/42/1`, revertMustSupport},
{makeKey(tenSysCodec.TablePrefix(42), roachpb.RKey("foo")), `/Table/42/"foo"`, revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeFloatAscending(nil, float64(233.221112)))),
"/Table/42/233.221112", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeFloatDescending(nil, float64(-233.221112)))),
"/Table/42/233.221112", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeFloatAscending(nil, math.Inf(1)))),
"/Table/42/+Inf", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeFloatAscending(nil, math.NaN()))),
"/Table/42/NaN", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeVarintAscending(nil, 1222)),
roachpb.RKey(encoding.EncodeStringAscending(nil, "handsome man"))),
`/Table/42/1222/"handsome man"`, revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeVarintAscending(nil, 1222))),
`/Table/42/1222`, revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeVarintDescending(nil, 1222))),
`/Table/42/-1223`, revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBytesAscending(nil, []byte{1, 2, 8, 255}))),
`/Table/42/"\x01\x02\b\xff"`, revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBytesAscending(nil, []byte{1, 2, 8, 255})),
roachpb.RKey("bar")), `/Table/42/"\x01\x02\b\xff"/"bar"`, revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBytesDescending(nil, []byte{1, 2, 8, 255})),
roachpb.RKey("bar")), `/Table/42/"\x01\x02\b\xff"/"bar"`, revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeNullAscending(nil))), "/Table/42/NULL", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeNullDescending(nil))), "/Table/42/NULL", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeNotNullAscending(nil))), "/Table/42/!NULL", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeNotNullDescending(nil))), "/Table/42/!NULL", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeTimeAscending(nil, tm))),
"/Table/42/2016-03-30T13:40:35.053725008Z", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeTimeDescending(nil, tm))),
"/Table/42/1923-10-04T10:19:23.946274991Z", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeDecimalAscending(nil, apd.New(1234, -2)))),
"/Table/42/12.34", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeDecimalDescending(nil, apd.New(1234, -2)))),
"/Table/42/-12.34", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBitArrayAscending(nil, bitArray))),
"/Table/42/B00111010", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBitArrayDescending(nil, bitArray))),
"/Table/42/B00111010", revertSupportUnknown},
// Regression test for #31115.
{roachpb.Key(makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBitArrayAscending(nil, bitarray.MakeZeroBitArray(64))),
)).PrefixEnd(),
"/Table/42/B0000000000000000000000000000000000000000000000000000000000000000/PrefixEnd", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(durationAsc)),
"/Table/42/1 mon 1 day 00:00:01", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42),
roachpb.RKey(durationDesc)),
"/Table/42/-2 mons -2 days +743:59:58.999999+999ns", revertSupportUnknown},
// sequence
{tenSysCodec.SequenceKey(55), `/Table/55/1/0/0`, revertSupportUnknown},
// tenant table
{ten5Codec.TenantPrefix(), "/Tenant/5", revertMustSupport},
{ten5Codec.TablePrefix(0), "/Tenant/5/Table/SystemConfigSpan/Start", revertSupportUnknown},
{ten5Codec.TablePrefix(50), "/Tenant/5/Table/50", revertMustSupport},
{ten5Codec.TablePrefix(111), "/Tenant/5/Table/111", revertMustSupport},
{makeKey(ten5Codec.TablePrefix(42), encoding.EncodeUvarintAscending(nil, 1)), `/Tenant/5/Table/42/1`, revertMustSupport},
{makeKey(ten5Codec.TablePrefix(42), roachpb.RKey("foo")), `/Tenant/5/Table/42/"foo"`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeFloatAscending(nil, float64(233.221112)))),
"/Tenant/5/Table/42/233.221112", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeFloatDescending(nil, float64(-233.221112)))),
"/Tenant/5/Table/42/233.221112", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeFloatAscending(nil, math.Inf(1)))),
"/Tenant/5/Table/42/+Inf", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeFloatAscending(nil, math.NaN()))),
"/Tenant/5/Table/42/NaN", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeVarintAscending(nil, 1222)),
roachpb.RKey(encoding.EncodeStringAscending(nil, "handsome man"))),
`/Tenant/5/Table/42/1222/"handsome man"`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeVarintAscending(nil, 1222))),
`/Tenant/5/Table/42/1222`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeVarintDescending(nil, 1222))),
`/Tenant/5/Table/42/-1223`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBytesAscending(nil, []byte{1, 2, 8, 255}))),
`/Tenant/5/Table/42/"\x01\x02\b\xff"`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBytesAscending(nil, []byte{1, 2, 8, 255})),
roachpb.RKey("bar")), `/Tenant/5/Table/42/"\x01\x02\b\xff"/"bar"`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBytesDescending(nil, []byte{1, 2, 8, 255})),
roachpb.RKey("bar")), `/Tenant/5/Table/42/"\x01\x02\b\xff"/"bar"`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeNullAscending(nil))), "/Tenant/5/Table/42/NULL", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeNullDescending(nil))), "/Tenant/5/Table/42/NULL", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeNotNullAscending(nil))), "/Tenant/5/Table/42/!NULL", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeNotNullDescending(nil))), "/Tenant/5/Table/42/!NULL", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeTimeAscending(nil, tm))),
"/Tenant/5/Table/42/2016-03-30T13:40:35.053725008Z", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeTimeDescending(nil, tm))),
"/Tenant/5/Table/42/1923-10-04T10:19:23.946274991Z", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeDecimalAscending(nil, apd.New(1234, -2)))),
"/Tenant/5/Table/42/12.34", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeDecimalDescending(nil, apd.New(1234, -2)))),
"/Tenant/5/Table/42/-12.34", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBitArrayAscending(nil, bitArray))),
"/Tenant/5/Table/42/B00111010", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBitArrayDescending(nil, bitArray))),
"/Tenant/5/Table/42/B00111010", revertSupportUnknown},
// Regression test for #31115.
{roachpb.Key(makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(encoding.EncodeBitArrayAscending(nil, bitarray.MakeZeroBitArray(64))),
)).PrefixEnd(),
"/Tenant/5/Table/42/B0000000000000000000000000000000000000000000000000000000000000000/PrefixEnd", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(durationAsc)),
"/Tenant/5/Table/42/1 mon 1 day 00:00:01", revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42),
roachpb.RKey(durationDesc)),
"/Tenant/5/Table/42/-2 mons -2 days +743:59:58.999999+999ns", revertSupportUnknown},
// sequence
{ten5Codec.SequenceKey(55), `/Tenant/5/Table/55/1/0/0`, revertSupportUnknown},
// others
{makeKey([]byte("")), "/Min", revertSupportUnknown},
{keys.Meta1KeyMax, "/Meta1/Max", revertSupportUnknown},
{keys.Meta2KeyMax, "/Meta2/Max", revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42), roachpb.RKey([]byte{0xf6})), `/Table/42/109/PrefixEnd`, revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42), roachpb.RKey([]byte{0xf7})), `/Table/42/255/PrefixEnd`, revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42), roachpb.RKey([]byte{0x12, 'a', 0x00, 0x02})), `/Table/42/"a"/PrefixEnd`, revertSupportUnknown},
{makeKey(tenSysCodec.TablePrefix(42), roachpb.RKey([]byte{0x12, 'a', 0x00, 0x03})), `/Table/42/???`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42), roachpb.RKey([]byte{0xf6})), `/Tenant/5/Table/42/109/PrefixEnd`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42), roachpb.RKey([]byte{0xf7})), `/Tenant/5/Table/42/255/PrefixEnd`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42), roachpb.RKey([]byte{0x12, 'a', 0x00, 0x02})), `/Tenant/5/Table/42/"a"/PrefixEnd`, revertSupportUnknown},
{makeKey(ten5Codec.TablePrefix(42), roachpb.RKey([]byte{0x12, 'a', 0x00, 0x03})), `/Tenant/5/Table/42/???`, revertSupportUnknown},
// Special characters.
{makeKey(tenSysCodec.TablePrefix(61),
encoding.EncodeBytesAscending(nil, []byte("☃⚠"))),
`/Table/61/"☃⚠"`, revertSupportUnknown,
},
// Invalid utf-8 sequence.
{makeKey(tenSysCodec.TablePrefix(61),
encoding.EncodeBytesAscending(nil, []byte{0xff, 0xff})),
`/Table/61/"\xff\xff"`, revertSupportUnknown,
},
}
for i, test := range testCases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
keyPP := keys.PrettyPrint(nil /* valDirs */, test.key)
keyInfo := massagePrettyPrintedSpanForTest(keyPP, nil)
exp := massagePrettyPrintedSpanForTest(test.exp, nil)
t.Logf(`---- test case #%d:
input: %q
output: %s
exp: %s
`, i+1, []byte(test.key), keyInfo, exp)
if exp != keyInfo {
t.Errorf("%d: expected:\n%+v\ngot:\n%+v", i, []byte(exp), []byte(keyInfo))
}
if exp != massagePrettyPrintedSpanForTest(test.key.String(), nil) {
t.Errorf("%d: from string expected %s, got %s", i, exp, test.key.String())
}
scanner := keysutil.MakePrettyScanner(nil /* tableParser */)
parsed, err := scanner.Scan(keyInfo)
if err != nil {
if !errors.HasType(err, (*keys.ErrUglifyUnsupported)(nil)) {
t.Errorf("%d: %s: %s", i, keyInfo, err)
} else if !test.assertRevertSupported {
t.Logf("%d: skipping parsing of %s; key is unsupported: %v", i, keyInfo, err)
} else {
					t.Errorf("%d: ugly print expected to be supported, but got unsupported error (%s)", i, test.exp)
}
} else if exp, act := test.key, parsed; !bytes.Equal(exp, act) {
t.Errorf("%d: ugly print expected '%q', got '%q'", i, exp, act)
}
if t.Failed() {
return
}
})
}
}
// massagePrettyPrintedSpanForTest applies a few transformations to pretty-printed spans and keys:
// - if dirs is not nil, replaces all ints with their ones' complement for
// descending-encoded columns.
// - escapes non-printable bytes and invalid UTF-8 as \xNN hex sequences.
func massagePrettyPrintedSpanForTest(span string, dirs []encoding.Direction) string {
var r strings.Builder
colIdx := -1
for i := 0; i < len(span); i++ {
if dirs != nil {
var d int
if _, err := fmt.Sscanf(span[i:], "%d", &d); err == nil {
// We've managed to consume an int.
dir := dirs[colIdx]
i += len(strconv.Itoa(d)) - 1
x := d
if dir == encoding.Descending {
x = ^x
}
r.WriteString(strconv.Itoa(x))
continue
}
}
switch {
case span[i] == '/':
colIdx++
r.WriteByte(span[i])
case span[i] == '-' || span[i] == ' ':
// We're switching from the start constraints to the end constraints,
// or starting another span.
colIdx = -1
r.WriteByte(span[i])
case span[i] < ' ':
fmt.Fprintf(&r, "\\x%02x", span[i])
case span[i] <= utf8.RuneSelf:
r.WriteByte(span[i])
default:
c, width := utf8.DecodeRuneInString(span[i:])
if c == utf8.RuneError {
fmt.Fprintf(&r, "\\x%02x", span[i])
} else {
r.WriteRune(c)
}
i += width - 1
}
}
return r.String()
}
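// A minimal sketch of the descending-column transform above (assumption:
// illustrative only, not part of the original test suite). For a column
// encoded descending, the pretty-printed int is replaced by its ones'
// complement, matching how the descending encoding inverts the value:
//
//	d := 1222
//	fmt.Println(^d) // prints -1223, the value shown for EncodeVarintDescending(nil, 1222)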
func TestPrettyPrintRange(t *testing.T) {
tenSysCodec := keys.SystemSQLCodec
ten5Codec := keys.MakeSQLCodec(roachpb.MakeTenantID(5))
key := makeKey([]byte("a"))
key2 := makeKey([]byte("z"))
tableKey := makeKey(tenSysCodec.TablePrefix(61), encoding.EncodeVarintAscending(nil, 4))
tableKey2 := makeKey(tenSysCodec.TablePrefix(61), encoding.EncodeVarintAscending(nil, 500))
tenTableKey := makeKey(ten5Codec.TablePrefix(61), encoding.EncodeVarintAscending(nil, 999))
specialBytesKeyA := makeKey(tenSysCodec.TablePrefix(61), encoding.EncodeBytesAscending(nil, []byte("☃️")))
specialBytesKeyB := makeKey(tenSysCodec.TablePrefix(61), encoding.EncodeBytesAscending(nil, []byte("☃️⚠")))
specialBytesKeyC := makeKey(tenSysCodec.TablePrefix(61), encoding.EncodeBytesAscending(nil, []byte{0xff, 0x00}))
specialBytesKeyD := makeKey(tenSysCodec.TablePrefix(61), encoding.EncodeBytesAscending(nil, []byte{0xff, 0xfe}))
testCases := []struct {
start, end roachpb.Key
maxChars int
expected string
}{
{key, nil, 20, "a"},
{tableKey, nil, 10, "/Table/61…"},
{tableKey, specialBytesKeyB, 20, `/Table/61/{4-"\xe2…}`},
{tableKey, specialBytesKeyB, 30, `/Table/61/{4-"☃️…}`},
{tableKey, specialBytesKeyB, 50, `/Table/61/{4-"☃️⚠"}`},
{specialBytesKeyA, specialBytesKeyB, 20, `/Table/61/"☃️…`},
{specialBytesKeyA, specialBytesKeyB, 25, `/Table/61/"☃️{"-\xe2…}`},
{specialBytesKeyA, specialBytesKeyB, 30, `/Table/61/"☃️{"-⚠"}`},
// Note: the PrettyPrintRange() algorithm operates on the result
// of PrettyPrint(), which already turns special characters into
// hex sequences. Therefore, it can merge and truncate the hex
// codes. To improve this would require making PrettyPrint() take
// a bool flag to return un-escaped bytes, and let
// PrettyPrintRange() escape the output adequately.
//
// Since all of this is best-effort, we'll accept the status quo
// for now.
{specialBytesKeyC, specialBytesKeyD, 20, `/Table/61/"\xff\x…`},
{specialBytesKeyC, specialBytesKeyD, 30, `/Table/61/"\xff\x{00"-fe"}`},
{specialBytesKeyB, specialBytesKeyD, 20, `/Table/61/"{\xe2\x98…-\x…}`},
{specialBytesKeyB, specialBytesKeyD, 30, `/Table/61/"{☃️\xe2…-\xff\xf…}`},
{specialBytesKeyB, specialBytesKeyD, 50, `/Table/61/"{☃️⚠"-\xff\xfe"}`},
{tenTableKey, nil, 20, "/Tenant/5/Table/61/…"},
{key, key2, 20, "{a-z}"},
{keys.MinKey, tableKey, 8, "/{M…-T…}"},
{keys.MinKey, tableKey, 15, "/{Min-Tabl…}"},
{keys.MinKey, tableKey, 20, "/{Min-Table/6…}"},
{keys.MinKey, tableKey, 25, "/{Min-Table/61/4}"},
{keys.MinKey, tenTableKey, 8, "/{M…-T…}"},
{keys.MinKey, tenTableKey, 15, "/{Min-Tena…}"},
{keys.MinKey, tenTableKey, 20, "/{Min-Tenant/…}"},
{keys.MinKey, tenTableKey, 25, "/{Min-Tenant/5/…}"},
{keys.MinKey, tenTableKey, 30, "/{Min-Tenant/5/Tab…}"},
{tableKey, tableKey2, 8, "/Table/…"},
{tableKey, tableKey2, 15, "/Table/61/…"},
{tableKey, tableKey2, 20, "/Table/61/{4-500}"},
{tableKey, keys.MaxKey, 10, "/{Ta…-Max}"},
{tableKey, keys.MaxKey, 20, "/{Table/6…-Max}"},
{tableKey, keys.MaxKey, 25, "/{Table/61/4-Max}"},
{tenTableKey, keys.MaxKey, 10, "/{Te…-Max}"},
{tenTableKey, keys.MaxKey, 20, "/{Tenant/…-Max}"},
{tenTableKey, keys.MaxKey, 25, "/{Tenant/5/…-Max}"},
{tenTableKey, keys.MaxKey, 30, "/{Tenant/5/Tab…-Max}"},
}
for i, tc := range testCases {
str := keys.PrettyPrintRange(tc.start, tc.end, tc.maxChars)
if str != tc.expected {
t.Errorf("%d: expected:\n%s\ngot:\n%s", i, tc.expected, str)
}
}
}
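// Sketch of the truncation contract exercised above (assumption: summary
// only): PrettyPrintRange prints the common prefix of the two endpoints
// once, braces the differing suffixes, and elides with '…' whenever the
// result would exceed maxChars:
//
//	_ = keys.PrettyPrintRange(tableKey, tableKey2, 20) // "/Table/61/{4-500}"
//	_ = keys.PrettyPrintRange(tableKey, tableKey2, 15) // "/Table/61/…"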
func TestFormatHexKey(t *testing.T) {
	// Verify that we properly handle the 'x' formatting verb in
	// roachpb.Key.Format.
key := keys.StoreIdentKey()
decoded, err := hex.DecodeString(fmt.Sprintf("%x", key))
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(key, decoded) {
t.Fatalf("expected %s, but found %s", key, decoded)
}
}
func makeKey(keys ...[]byte) []byte {
return bytes.Join(keys, nil)
}
| pkg/keys/printer_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/a519265872c58fcfc2cc815c9dc58660534f9cd1 | [
0.0007957416819408536,
0.00019386422354727983,
0.00016486678214278072,
0.0001713879028102383,
0.00010036373714683577
] |
{
"id": 2,
"code_window": [
"// from. Note that only columns that need to be fetched (i.e. requested by the\n",
"// caller) are included in the internal state.\n",
"type cFetcherTableArgs struct {\n",
"\tdesc catalog.TableDescriptor\n",
"\tindex catalog.Index\n",
"\t// ColIdxMap is a mapping from ColumnID of each column to its ordinal. Only\n",
"\t// needed columns are present.\n",
"\tColIdxMap catalog.TableColMap\n",
"\tisSecondaryIndex bool\n",
"\t// cols are all needed columns of the table that are present in the index.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// ColIdxMap is a mapping from ColumnID to the ordinal of the corresponding\n",
"\t// column within the cols field. Only needed columns are present.\n"
],
"file_path": "pkg/sql/colfetcher/cfetcher_setup.go",
"type": "replace",
"edit_start_line_idx": 31
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package kvserver
import (
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/raft/v3/tracker"
)
func TestLastUpdateTimesMap(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
m := make(lastUpdateTimesMap)
t1 := time.Time{}.Add(time.Second)
t2 := t1.Add(time.Second)
m.update(3, t1)
m.update(1, t2)
assert.EqualValues(t, map[roachpb.ReplicaID]time.Time{1: t2, 3: t1}, m)
descs := []roachpb.ReplicaDescriptor{{ReplicaID: 1}, {ReplicaID: 2}, {ReplicaID: 3}, {ReplicaID: 4}}
t3 := t2.Add(time.Second)
m.updateOnBecomeLeader(descs, t3)
assert.EqualValues(t, map[roachpb.ReplicaID]time.Time{1: t3, 2: t3, 3: t3, 4: t3}, m)
t4 := t3.Add(time.Second)
descs = append(descs, []roachpb.ReplicaDescriptor{{ReplicaID: 5}, {ReplicaID: 6}}...)
prs := map[uint64]tracker.Progress{
1: {State: tracker.StateReplicate}, // should be updated
// 2 is missing because why not
3: {State: tracker.StateProbe}, // should be ignored
4: {State: tracker.StateSnapshot}, // should be ignored
5: {State: tracker.StateProbe}, // should be ignored
6: {State: tracker.StateReplicate}, // should be added
7: {State: tracker.StateReplicate}, // ignored, not in descs
}
m.updateOnUnquiesce(descs, prs, t4)
assert.EqualValues(t, map[roachpb.ReplicaID]time.Time{
1: t4,
2: t3,
3: t3,
4: t3,
6: t4,
}, m)
}
| pkg/kv/kvserver/replica_raft_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/a519265872c58fcfc2cc815c9dc58660534f9cd1 | [
0.0001784655178198591,
0.00016891502309590578,
0.00016093527665361762,
0.000168764207046479,
0.000005151030563865788
] |
{
"id": 2,
"code_window": [
"// from. Note that only columns that need to be fetched (i.e. requested by the\n",
"// caller) are included in the internal state.\n",
"type cFetcherTableArgs struct {\n",
"\tdesc catalog.TableDescriptor\n",
"\tindex catalog.Index\n",
"\t// ColIdxMap is a mapping from ColumnID of each column to its ordinal. Only\n",
"\t// needed columns are present.\n",
"\tColIdxMap catalog.TableColMap\n",
"\tisSecondaryIndex bool\n",
"\t// cols are all needed columns of the table that are present in the index.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// ColIdxMap is a mapping from ColumnID to the ordinal of the corresponding\n",
"\t// column within the cols field. Only needed columns are present.\n"
],
"file_path": "pkg/sql/colfetcher/cfetcher_setup.go",
"type": "replace",
"edit_start_line_idx": 31
} | alter_role_stmt ::=
'ALTER' 'ROLE' role_spec 'WITH' role_option ( ( role_option ) )*
| 'ALTER' 'ROLE' role_spec role_option ( ( role_option ) )*
| 'ALTER' 'ROLE' role_spec
| 'ALTER' 'USER' role_spec 'WITH' role_option ( ( role_option ) )*
| 'ALTER' 'USER' role_spec role_option ( ( role_option ) )*
| 'ALTER' 'USER' role_spec
| 'ALTER' 'ROLE' 'IF' 'EXISTS' role_spec 'WITH' role_option ( ( role_option ) )*
| 'ALTER' 'ROLE' 'IF' 'EXISTS' role_spec role_option ( ( role_option ) )*
| 'ALTER' 'ROLE' 'IF' 'EXISTS' role_spec
| 'ALTER' 'USER' 'IF' 'EXISTS' role_spec 'WITH' role_option ( ( role_option ) )*
| 'ALTER' 'USER' 'IF' 'EXISTS' role_spec role_option ( ( role_option ) )*
| 'ALTER' 'USER' 'IF' 'EXISTS' role_spec
| 'ALTER' 'ROLE' role_spec 'IN' 'DATABASE' database_name 'SET' var_name '=' var_value ( ( ',' var_value ) )*
| 'ALTER' 'ROLE' role_spec 'IN' 'DATABASE' database_name 'SET' var_name 'TO' var_value ( ( ',' var_value ) )*
| 'ALTER' 'ROLE' role_spec 'IN' 'DATABASE' database_name 'RESET_ALL' 'ALL'
| 'ALTER' 'ROLE' role_spec 'IN' 'DATABASE' database_name 'RESET' session_var
| 'ALTER' 'ROLE' role_spec 'SET' var_name '=' var_value ( ( ',' var_value ) )*
| 'ALTER' 'ROLE' role_spec 'SET' var_name 'TO' var_value ( ( ',' var_value ) )*
| 'ALTER' 'ROLE' role_spec 'RESET_ALL' 'ALL'
| 'ALTER' 'ROLE' role_spec 'RESET' session_var
| 'ALTER' 'USER' role_spec 'IN' 'DATABASE' database_name 'SET' var_name '=' var_value ( ( ',' var_value ) )*
| 'ALTER' 'USER' role_spec 'IN' 'DATABASE' database_name 'SET' var_name 'TO' var_value ( ( ',' var_value ) )*
| 'ALTER' 'USER' role_spec 'IN' 'DATABASE' database_name 'RESET_ALL' 'ALL'
| 'ALTER' 'USER' role_spec 'IN' 'DATABASE' database_name 'RESET' session_var
| 'ALTER' 'USER' role_spec 'SET' var_name '=' var_value ( ( ',' var_value ) )*
| 'ALTER' 'USER' role_spec 'SET' var_name 'TO' var_value ( ( ',' var_value ) )*
| 'ALTER' 'USER' role_spec 'RESET_ALL' 'ALL'
| 'ALTER' 'USER' role_spec 'RESET' session_var
| 'ALTER' 'ROLE' 'IF' 'EXISTS' role_spec 'IN' 'DATABASE' database_name 'SET' var_name '=' var_value ( ( ',' var_value ) )*
| 'ALTER' 'ROLE' 'IF' 'EXISTS' role_spec 'IN' 'DATABASE' database_name 'SET' var_name 'TO' var_value ( ( ',' var_value ) )*
| 'ALTER' 'ROLE' 'IF' 'EXISTS' role_spec 'IN' 'DATABASE' database_name 'RESET_ALL' 'ALL'
| 'ALTER' 'ROLE' 'IF' 'EXISTS' role_spec 'IN' 'DATABASE' database_name 'RESET' session_var
| 'ALTER' 'ROLE' 'IF' 'EXISTS' role_spec 'SET' var_name '=' var_value ( ( ',' var_value ) )*
| 'ALTER' 'ROLE' 'IF' 'EXISTS' role_spec 'SET' var_name 'TO' var_value ( ( ',' var_value ) )*
| 'ALTER' 'ROLE' 'IF' 'EXISTS' role_spec 'RESET_ALL' 'ALL'
| 'ALTER' 'ROLE' 'IF' 'EXISTS' role_spec 'RESET' session_var
| 'ALTER' 'USER' 'IF' 'EXISTS' role_spec 'IN' 'DATABASE' database_name 'SET' var_name '=' var_value ( ( ',' var_value ) )*
| 'ALTER' 'USER' 'IF' 'EXISTS' role_spec 'IN' 'DATABASE' database_name 'SET' var_name 'TO' var_value ( ( ',' var_value ) )*
| 'ALTER' 'USER' 'IF' 'EXISTS' role_spec 'IN' 'DATABASE' database_name 'RESET_ALL' 'ALL'
| 'ALTER' 'USER' 'IF' 'EXISTS' role_spec 'IN' 'DATABASE' database_name 'RESET' session_var
| 'ALTER' 'USER' 'IF' 'EXISTS' role_spec 'SET' var_name '=' var_value ( ( ',' var_value ) )*
| 'ALTER' 'USER' 'IF' 'EXISTS' role_spec 'SET' var_name 'TO' var_value ( ( ',' var_value ) )*
| 'ALTER' 'USER' 'IF' 'EXISTS' role_spec 'RESET_ALL' 'ALL'
| 'ALTER' 'USER' 'IF' 'EXISTS' role_spec 'RESET' session_var
| 'ALTER' 'ROLE' 'ALL' 'IN' 'DATABASE' database_name 'SET' var_name '=' var_value ( ( ',' var_value ) )*
| 'ALTER' 'ROLE' 'ALL' 'IN' 'DATABASE' database_name 'SET' var_name 'TO' var_value ( ( ',' var_value ) )*
| 'ALTER' 'ROLE' 'ALL' 'IN' 'DATABASE' database_name 'RESET_ALL' 'ALL'
| 'ALTER' 'ROLE' 'ALL' 'IN' 'DATABASE' database_name 'RESET' session_var
| 'ALTER' 'ROLE' 'ALL' 'SET' var_name '=' var_value ( ( ',' var_value ) )*
| 'ALTER' 'ROLE' 'ALL' 'SET' var_name 'TO' var_value ( ( ',' var_value ) )*
| 'ALTER' 'ROLE' 'ALL' 'RESET_ALL' 'ALL'
| 'ALTER' 'ROLE' 'ALL' 'RESET' session_var
| 'ALTER' 'USER' 'ALL' 'IN' 'DATABASE' database_name 'SET' var_name '=' var_value ( ( ',' var_value ) )*
| 'ALTER' 'USER' 'ALL' 'IN' 'DATABASE' database_name 'SET' var_name 'TO' var_value ( ( ',' var_value ) )*
| 'ALTER' 'USER' 'ALL' 'IN' 'DATABASE' database_name 'RESET_ALL' 'ALL'
| 'ALTER' 'USER' 'ALL' 'IN' 'DATABASE' database_name 'RESET' session_var
| 'ALTER' 'USER' 'ALL' 'SET' var_name '=' var_value ( ( ',' var_value ) )*
| 'ALTER' 'USER' 'ALL' 'SET' var_name 'TO' var_value ( ( ',' var_value ) )*
| 'ALTER' 'USER' 'ALL' 'RESET_ALL' 'ALL'
| 'ALTER' 'USER' 'ALL' 'RESET' session_var
| docs/generated/sql/bnf/alter_role_stmt.bnf | 0 | https://github.com/cockroachdb/cockroach/commit/a519265872c58fcfc2cc815c9dc58660534f9cd1 | [
0.0001705642935121432,
0.00016724559827707708,
0.00016537107876501977,
0.0001668968761805445,
0.0000016200185655179666
] |
{
"id": 0,
"code_window": [
"\n",
"\t\"github.com/juju/errors\"\n",
"\t\"github.com/pingcap/tidb/context\"\n",
"\t\"github.com/pingcap/tidb/expression\"\n",
"\t\"github.com/pingcap/tidb/sessionctx/variable\"\n",
"\t\"github.com/pingcap/tidb/util/codec\"\n",
"\t\"github.com/pingcap/tidb/util/mvmap\"\n",
"\t\"github.com/pingcap/tidb/util/types\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/tablecodec\"\n"
],
"file_path": "executor/join.go",
"type": "add",
"edit_start_line_idx": 23
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"sync"
"sync/atomic"
"github.com/juju/errors"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/mvmap"
"github.com/pingcap/tidb/util/types"
)
var (
_ joinExec = &NestedLoopJoinExec{}
_ Executor = &HashJoinExec{}
_ joinExec = &HashSemiJoinExec{}
_ Executor = &ApplyJoinExec{}
)
// HashJoinExec implements the hash join algorithm.
type HashJoinExec struct {
hashTable *mvmap.MVMap
smallHashKey []*expression.Column
bigHashKey []*expression.Column
smallExec Executor
bigExec Executor
prepared bool
ctx context.Context
smallFilter expression.CNFExprs
bigFilter expression.CNFExprs
otherFilter expression.CNFExprs
schema *expression.Schema
outer bool
leftSmall bool
cursor int
defaultValues []types.Datum
	// targetTypes means the target type that both smallHashKey and bigHashKey should be converted to.
targetTypes []*types.FieldType
finished atomic.Value
	// wg is used to synchronize the multiple join workers.
wg sync.WaitGroup
	// closeCh is closed once all join workers finish, so Close can wait on it.
closeCh chan struct{}
rows []*Row
	// concurrency is the number of join workers and of their input channels.
concurrency int
bigTableResultCh []chan *execResult
hashJoinContexts []*hashJoinCtx
// Channels for output.
resultCh chan *execResult
// rowKeyCache is used to store the table and table name from a row.
// Because every row has the same table name and table, we can use a single row key cache.
rowKeyCache []*RowKeyEntry
}
// hashJoinCtx holds the variables needed to do a hash join in one of many concurrent goroutines.
type hashJoinCtx struct {
bigFilter expression.CNFExprs
otherFilter expression.CNFExprs
	// datumBuffer is used for encoding hash keys.
datumBuffer []types.Datum
hashKeyBuffer []byte
}
// Close implements the Executor Close interface.
func (e *HashJoinExec) Close() error {
e.finished.Store(true)
if e.prepared {
for range e.resultCh {
}
<-e.closeCh
}
e.prepared = false
e.cursor = 0
e.rows = nil
return e.smallExec.Close()
}
// makeJoinRow simply creates a new row that appends row b to row a.
func makeJoinRow(a *Row, b *Row) *Row {
ret := &Row{
RowKeys: make([]*RowKeyEntry, 0, len(a.RowKeys)+len(b.RowKeys)),
Data: make([]types.Datum, 0, len(a.Data)+len(b.Data)),
}
ret.RowKeys = append(ret.RowKeys, a.RowKeys...)
ret.RowKeys = append(ret.RowKeys, b.RowKeys...)
ret.Data = append(ret.Data, a.Data...)
ret.Data = append(ret.Data, b.Data...)
return ret
}
// getJoinKey gets the hash key when given a row and hash columns.
// It returns a boolean indicating whether the hash key contains NULL, and a byte slice holding the resulting hash code.
func getJoinKey(sc *variable.StatementContext, cols []*expression.Column, row *Row, targetTypes []*types.FieldType,
vals []types.Datum, bytes []byte) (bool, []byte, error) {
var err error
for i, col := range cols {
vals[i], err = col.Eval(row.Data)
if err != nil {
return false, nil, errors.Trace(err)
}
if vals[i].IsNull() {
return true, nil, nil
}
vals[i], err = vals[i].ConvertTo(sc, targetTypes[i])
if err != nil {
return false, nil, errors.Trace(err)
}
}
if len(vals) == 0 {
return false, nil, nil
}
bytes, err = codec.EncodeValue(bytes, vals...)
return false, bytes, errors.Trace(err)
}
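// Sketch of the NULL short-circuit above (assumption: illustrative only):
// a NULL in any key column makes getJoinKey return hasNull=true before any
// encoding happens, so NULL keys are neither inserted into nor probed
// against the hash table, matching SQL join semantics where NULL never
// equals NULL:
//
//	hasNull, key, err := getJoinKey(sc, e.bigHashKey, bigRow, e.targetTypes, buf, nil)
//	if hasNull {
//		// Skip this row: it can never produce a match.
//	}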
// Schema implements the Executor Schema interface.
func (e *HashJoinExec) Schema() *expression.Schema {
return e.schema
}
var batchSize = 128
// fetchBigExec fetches rows from the big table in a background goroutine
// and sends the rows to multiple channels which will be read by multiple join workers.
func (e *HashJoinExec) fetchBigExec() {
cnt := 0
defer func() {
for _, cn := range e.bigTableResultCh {
close(cn)
}
e.bigExec.Close()
e.wg.Done()
}()
curBatchSize := 1
result := &execResult{rows: make([]*Row, 0, curBatchSize)}
txnCtx := e.ctx.GoCtx()
for {
done := false
idx := cnt % e.concurrency
for i := 0; i < curBatchSize; i++ {
if e.finished.Load().(bool) {
return
}
row, err := e.bigExec.Next()
if err != nil {
result.err = errors.Trace(err)
e.bigTableResultCh[idx] <- result
done = true
break
}
if row == nil {
done = true
break
}
result.rows = append(result.rows, row)
if len(result.rows) >= curBatchSize {
select {
case <-txnCtx.Done():
return
case e.bigTableResultCh[idx] <- result:
result = &execResult{rows: make([]*Row, 0, curBatchSize)}
}
}
}
cnt++
if done {
if len(result.rows) > 0 {
select {
case <-txnCtx.Done():
return
case e.bigTableResultCh[idx] <- result:
}
}
break
}
if curBatchSize < batchSize {
curBatchSize *= 2
}
}
}
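// Sketch of the adaptive batching above (assumption: summary only): the
// fetch goroutine starts with a batch size of 1 and doubles it after each
// round until it reaches batchSize (128), so the first rows reach the join
// workers quickly while long scans amortize channel traffic:
//
//	// curBatchSize progression: 1, 2, 4, 8, ..., 128, 128, ...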
// prepare runs the first time 'Next' is called: it starts one worker goroutine to fetch rows from the big table,
// and reads all data from the small table to build a hash table, then starts multiple join worker goroutines.
func (e *HashJoinExec) prepare() error {
e.closeCh = make(chan struct{})
e.finished.Store(false)
e.bigTableResultCh = make([]chan *execResult, e.concurrency)
e.wg = sync.WaitGroup{}
for i := 0; i < e.concurrency; i++ {
e.bigTableResultCh[i] = make(chan *execResult, e.concurrency)
}
// Start a worker to fetch big table rows.
e.wg.Add(1)
go e.fetchBigExec()
e.hashTable = mvmap.NewMVMap()
e.cursor = 0
sc := e.ctx.GetSessionVars().StmtCtx
var buffer []byte
for {
row, err := e.smallExec.Next()
if err != nil {
return errors.Trace(err)
}
if row == nil {
e.smallExec.Close()
break
}
matched, err := expression.EvalBool(e.smallFilter, row.Data, e.ctx)
if err != nil {
return errors.Trace(err)
}
if !matched {
continue
}
hasNull, joinKey, err := getJoinKey(sc, e.smallHashKey, row, e.targetTypes, e.hashJoinContexts[0].datumBuffer, nil)
if err != nil {
return errors.Trace(err)
}
if hasNull {
continue
}
buffer = buffer[:0]
buffer, err = e.encodeRow(buffer, row)
if err != nil {
return errors.Trace(err)
}
e.hashTable.Put(joinKey, buffer)
}
e.resultCh = make(chan *execResult, e.concurrency)
for i := 0; i < e.concurrency; i++ {
e.wg.Add(1)
go e.runJoinWorker(i)
}
go e.waitJoinWorkersAndCloseResultChan()
e.prepared = true
return nil
}
func (e *HashJoinExec) encodeRow(b []byte, row *Row) ([]byte, error) {
numRowKeys := int64(len(row.RowKeys))
b = codec.EncodeVarint(b, numRowKeys)
for _, rowKey := range row.RowKeys {
b = codec.EncodeVarint(b, rowKey.Handle)
}
if numRowKeys > 0 && e.rowKeyCache == nil {
e.rowKeyCache = make([]*RowKeyEntry, len(row.RowKeys))
for i := 0; i < len(row.RowKeys); i++ {
rk := new(RowKeyEntry)
rk.Tbl = row.RowKeys[i].Tbl
rk.TableName = row.RowKeys[i].TableName
e.rowKeyCache[i] = rk
}
}
b, err := codec.EncodeValue(b, row.Data...)
return b, errors.Trace(err)
}
func (e *HashJoinExec) decodeRow(data []byte) (*Row, error) {
row := new(Row)
data, entryLen, err := codec.DecodeVarint(data)
if err != nil {
return nil, errors.Trace(err)
}
for i := 0; i < int(entryLen); i++ {
entry := new(RowKeyEntry)
data, entry.Handle, err = codec.DecodeVarint(data)
if err != nil {
return nil, errors.Trace(err)
}
entry.Tbl = e.rowKeyCache[i].Tbl
entry.TableName = e.rowKeyCache[i].TableName
row.RowKeys = append(row.RowKeys, entry)
}
values := make([]types.Datum, e.smallExec.Schema().Len())
err = codec.SetRawValues(data, values)
if err != nil {
return nil, errors.Trace(err)
}
err = decodeRawValues(values, e.smallExec.Schema(), e.ctx.GetSessionVars().GetTimeZone())
if err != nil {
return nil, errors.Trace(err)
}
row.Data = values
return row, nil
}
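// Sketch of the framing shared by encodeRow/decodeRow above (assumption:
// illustrative only): a varint count of row keys, then one varint handle
// per key, then the codec-encoded datums. decodeRow must consume fields in
// exactly this order:
//
//	b := codec.EncodeVarint(nil, 2) // two row keys follow
//	b = codec.EncodeVarint(b, 7)    // handle of key #1
//	b = codec.EncodeVarint(b, 9)    // handle of key #2
//	b, _ = codec.EncodeValue(b, types.NewDatum(1), types.NewDatum("x")) // the datums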
func (e *HashJoinExec) waitJoinWorkersAndCloseResultChan() {
e.wg.Wait()
close(e.resultCh)
e.hashTable = nil
close(e.closeCh)
}
// runJoinWorker does join job in one goroutine.
func (e *HashJoinExec) runJoinWorker(idx int) {
maxRowsCnt := 1000
result := &execResult{rows: make([]*Row, 0, maxRowsCnt)}
txnCtx := e.ctx.GoCtx()
for {
var bigTableResult *execResult
var exit bool
select {
case <-txnCtx.Done():
exit = true
case tmp, ok := <-e.bigTableResultCh[idx]:
if !ok {
exit = true
}
bigTableResult = tmp
}
if exit || e.finished.Load().(bool) {
break
}
if bigTableResult.err != nil {
e.resultCh <- &execResult{err: errors.Trace(bigTableResult.err)}
break
}
for _, bigRow := range bigTableResult.rows {
succ := e.joinOneBigRow(e.hashJoinContexts[idx], bigRow, result)
if !succ {
break
}
if len(result.rows) >= maxRowsCnt {
e.resultCh <- result
result = &execResult{rows: make([]*Row, 0, maxRowsCnt)}
}
}
}
if len(result.rows) != 0 || result.err != nil {
e.resultCh <- result
}
e.wg.Done()
}
// joinOneBigRow creates result rows from a row in the big table and appends them to the given result buffer.
// Every matching row generates a result row.
// If there are no matching rows and it is an outer join, a NULL-filled result row is created.
func (e *HashJoinExec) joinOneBigRow(ctx *hashJoinCtx, bigRow *Row, result *execResult) bool {
var (
matchedRows []*Row
err error
)
bigMatched := true
bigMatched, err = expression.EvalBool(ctx.bigFilter, bigRow.Data, e.ctx)
if err != nil {
result.err = errors.Trace(err)
return false
}
if bigMatched {
matchedRows, err = e.constructMatchedRows(ctx, bigRow)
if err != nil {
result.err = errors.Trace(err)
return false
}
}
for _, r := range matchedRows {
result.rows = append(result.rows, r)
}
if len(matchedRows) == 0 && e.outer {
r := e.fillRowWithDefaultValues(bigRow)
result.rows = append(result.rows, r)
}
return true
}
// constructMatchedRows creates matching result rows from a row in the big table.
func (e *HashJoinExec) constructMatchedRows(ctx *hashJoinCtx, bigRow *Row) (matchedRows []*Row, err error) {
sc := e.ctx.GetSessionVars().StmtCtx
hasNull, joinKey, err := getJoinKey(sc, e.bigHashKey, bigRow, e.targetTypes, ctx.datumBuffer, ctx.hashKeyBuffer[0:0:cap(ctx.hashKeyBuffer)])
if err != nil {
return nil, errors.Trace(err)
}
if hasNull {
return
}
values := e.hashTable.Get(joinKey)
if len(values) == 0 {
return
}
// match eq condition
for _, value := range values {
var smallRow *Row
smallRow, err = e.decodeRow(value)
if err != nil {
return nil, errors.Trace(err)
}
var matchedRow *Row
if e.leftSmall {
matchedRow = makeJoinRow(smallRow, bigRow)
} else {
matchedRow = makeJoinRow(bigRow, smallRow)
}
otherMatched, err := expression.EvalBool(ctx.otherFilter, matchedRow.Data, e.ctx)
if err != nil {
return nil, errors.Trace(err)
}
if otherMatched {
matchedRows = append(matchedRows, matchedRow)
}
}
return matchedRows, nil
}
// fillRowWithDefaultValues creates a result row filled with default values from a row in the big table.
// It is used for outer joins, when a row from the outer table doesn't have any matching rows.
func (e *HashJoinExec) fillRowWithDefaultValues(bigRow *Row) (returnRow *Row) {
smallRow := &Row{
Data: make([]types.Datum, e.smallExec.Schema().Len()),
}
copy(smallRow.Data, e.defaultValues)
if e.leftSmall {
returnRow = makeJoinRow(smallRow, bigRow)
} else {
returnRow = makeJoinRow(bigRow, smallRow)
}
return returnRow
}
// Next implements the Executor Next interface.
func (e *HashJoinExec) Next() (*Row, error) {
if !e.prepared {
if err := e.prepare(); err != nil {
return nil, errors.Trace(err)
}
}
txnCtx := e.ctx.GoCtx()
if e.cursor >= len(e.rows) {
var result *execResult
select {
case tmp, ok := <-e.resultCh:
if !ok {
return nil, nil
}
result = tmp
if result.err != nil {
e.finished.Store(true)
return nil, errors.Trace(result.err)
}
case <-txnCtx.Done():
return nil, nil
}
if len(result.rows) == 0 {
return nil, nil
}
e.rows = result.rows
e.cursor = 0
}
row := e.rows[e.cursor]
e.cursor++
return row, nil
}
// joinExec is the common interface of join algorithm except for hash join.
type joinExec interface {
Executor
	// fetchBigRow fetches a valid row from the big executor and returns a bool indicating whether it matched the big-table filter.
fetchBigRow() (*Row, bool, error)
	// prepare reads all records from the small executor and stores them.
prepare() error
	// doJoin takes a row from the big executor and a bool indicating whether it matched the big-table filter,
	// then gets all the rows that match the ON condition.
doJoin(*Row, bool) ([]*Row, error)
}
// NestedLoopJoinExec implements nested-loop algorithm for join.
type NestedLoopJoinExec struct {
innerRows []*Row
cursor int
resultRows []*Row
SmallExec Executor
BigExec Executor
leftSmall bool
prepared bool
Ctx context.Context
SmallFilter expression.CNFExprs
BigFilter expression.CNFExprs
OtherFilter expression.CNFExprs
schema *expression.Schema
outer bool
defaultValues []types.Datum
}
// Schema implements Executor interface.
func (e *NestedLoopJoinExec) Schema() *expression.Schema {
return e.schema
}
// Close implements Executor interface.
func (e *NestedLoopJoinExec) Close() error {
e.resultRows = nil
e.innerRows = nil
e.cursor = 0
e.prepared = false
err := e.BigExec.Close()
if err != nil {
return errors.Trace(err)
}
return e.SmallExec.Close()
}
func (e *NestedLoopJoinExec) fetchBigRow() (*Row, bool, error) {
for {
bigRow, err := e.BigExec.Next()
if err != nil {
return nil, false, errors.Trace(err)
}
if bigRow == nil {
return nil, false, e.BigExec.Close()
}
matched, err := expression.EvalBool(e.BigFilter, bigRow.Data, e.Ctx)
if err != nil {
return nil, false, errors.Trace(err)
}
if matched {
return bigRow, true, nil
} else if e.outer {
return bigRow, false, nil
}
}
}
// prepare runs the first time 'Next' is called; it reads all data from the small table and stores
// the rows in a slice.
func (e *NestedLoopJoinExec) prepare() error {
err := e.SmallExec.Close()
if err != nil {
return errors.Trace(err)
}
e.innerRows = e.innerRows[:0]
e.prepared = true
for {
row, err := e.SmallExec.Next()
if err != nil {
return errors.Trace(err)
}
if row == nil {
return e.SmallExec.Close()
}
matched, err := expression.EvalBool(e.SmallFilter, row.Data, e.Ctx)
if err != nil {
return errors.Trace(err)
}
if matched {
e.innerRows = append(e.innerRows, row)
}
}
}
func (e *NestedLoopJoinExec) fillRowWithDefaultValue(bigRow *Row) (returnRow *Row) {
smallRow := &Row{
Data: make([]types.Datum, e.SmallExec.Schema().Len()),
}
copy(smallRow.Data, e.defaultValues)
if e.leftSmall {
returnRow = makeJoinRow(smallRow, bigRow)
} else {
returnRow = makeJoinRow(bigRow, smallRow)
}
return returnRow
}
func (e *NestedLoopJoinExec) doJoin(bigRow *Row, match bool) ([]*Row, error) {
e.resultRows = e.resultRows[0:0]
if !match && e.outer {
row := e.fillRowWithDefaultValue(bigRow)
e.resultRows = append(e.resultRows, row)
return e.resultRows, nil
}
for _, row := range e.innerRows {
var mergedRow *Row
if e.leftSmall {
mergedRow = makeJoinRow(row, bigRow)
} else {
mergedRow = makeJoinRow(bigRow, row)
}
matched, err := expression.EvalBool(e.OtherFilter, mergedRow.Data, e.Ctx)
if err != nil {
return nil, errors.Trace(err)
}
if !matched {
continue
}
e.resultRows = append(e.resultRows, mergedRow)
}
if len(e.resultRows) == 0 && e.outer {
e.resultRows = append(e.resultRows, e.fillRowWithDefaultValue(bigRow))
}
return e.resultRows, nil
}
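// Sketch of the outer-join padding above (assumption: summary only): when
// no inner row passes OtherFilter and e.outer is set, the big row is still
// emitted once, padded on the small side with defaultValues:
//
//	rows, _ := e.doJoin(bigRow, false) // outer join: one default-padded row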
// Next implements the Executor interface.
func (e *NestedLoopJoinExec) Next() (*Row, error) {
if !e.prepared {
if err := e.prepare(); err != nil {
return nil, errors.Trace(err)
}
}
for {
if e.cursor < len(e.resultRows) {
retRow := e.resultRows[e.cursor]
e.cursor++
return retRow, nil
}
bigRow, match, err := e.fetchBigRow()
if bigRow == nil || err != nil {
return bigRow, errors.Trace(err)
}
e.resultRows, err = e.doJoin(bigRow, match)
if err != nil {
return nil, errors.Trace(err)
}
e.cursor = 0
}
}
// HashSemiJoinExec implements the hash join algorithm for semi join.
type HashSemiJoinExec struct {
hashTable map[string][]*Row
smallHashKey []*expression.Column
bigHashKey []*expression.Column
smallExec Executor
bigExec Executor
prepared bool
ctx context.Context
smallFilter expression.CNFExprs
bigFilter expression.CNFExprs
otherFilter expression.CNFExprs
schema *expression.Schema
resultRows []*Row
	// auxMode indicates that each result row is returned with an extra column storing a boolean
	// or NULL value to indicate whether this row is matched.
auxMode bool
targetTypes []*types.FieldType
smallTableHasNull bool
	// If anti is true, the semi join only outputs the unmatched rows.
anti bool
}
// Close implements the Executor Close interface.
func (e *HashSemiJoinExec) Close() error {
e.prepared = false
e.hashTable = make(map[string][]*Row)
e.smallTableHasNull = false
e.resultRows = nil
err := e.smallExec.Close()
if err != nil {
return errors.Trace(err)
}
return e.bigExec.Close()
}
// Schema implements the Executor Schema interface.
func (e *HashSemiJoinExec) Schema() *expression.Schema {
return e.schema
}
// prepare runs the first time 'Next' is called; it reads all data from the small table and stores
// the rows in a hash table.
func (e *HashSemiJoinExec) prepare() error {
err := e.smallExec.Close()
if err != nil {
return errors.Trace(err)
}
e.hashTable = make(map[string][]*Row)
sc := e.ctx.GetSessionVars().StmtCtx
e.resultRows = make([]*Row, 1)
for {
row, err := e.smallExec.Next()
if err != nil {
return errors.Trace(err)
}
if row == nil {
e.smallExec.Close()
break
}
matched, err := expression.EvalBool(e.smallFilter, row.Data, e.ctx)
if err != nil {
return errors.Trace(err)
}
if !matched {
continue
}
hasNull, hashcode, err := getJoinKey(sc, e.smallHashKey, row, e.targetTypes, make([]types.Datum, len(e.smallHashKey)), nil)
if err != nil {
return errors.Trace(err)
}
if hasNull {
e.smallTableHasNull = true
continue
}
if rows, ok := e.hashTable[string(hashcode)]; !ok {
e.hashTable[string(hashcode)] = []*Row{row}
} else {
e.hashTable[string(hashcode)] = append(rows, row)
}
}
e.prepared = true
return nil
}
func (e *HashSemiJoinExec) rowIsMatched(bigRow *Row) (matched bool, hasNull bool, err error) {
sc := e.ctx.GetSessionVars().StmtCtx
hasNull, hashcode, err := getJoinKey(sc, e.bigHashKey, bigRow, e.targetTypes, make([]types.Datum, len(e.smallHashKey)), nil)
if err != nil {
return false, false, errors.Trace(err)
}
if hasNull {
return false, true, nil
}
rows, ok := e.hashTable[string(hashcode)]
if !ok {
return
}
// match eq condition
for _, smallRow := range rows {
matchedRow := makeJoinRow(bigRow, smallRow)
matched, err = expression.EvalBool(e.otherFilter, matchedRow.Data, e.ctx)
if err != nil {
return false, false, errors.Trace(err)
}
if matched {
return
}
}
return
}
func (e *HashSemiJoinExec) fetchBigRow() (*Row, bool, error) {
for {
bigRow, err := e.bigExec.Next()
if err != nil {
return nil, false, errors.Trace(err)
}
if bigRow == nil {
return nil, false, errors.Trace(e.bigExec.Close())
}
matched, err := expression.EvalBool(e.bigFilter, bigRow.Data, e.ctx)
if err != nil {
return nil, false, errors.Trace(err)
}
if matched {
return bigRow, true, nil
} else if e.auxMode {
return bigRow, false, nil
}
}
}
func (e *HashSemiJoinExec) doJoin(bigRow *Row, match bool) ([]*Row, error) {
if e.auxMode && !match {
bigRow.Data = append(bigRow.Data, types.NewDatum(false))
e.resultRows[0] = bigRow
return e.resultRows, nil
}
matched, isNull, err := e.rowIsMatched(bigRow)
if err != nil {
return nil, errors.Trace(err)
}
if !matched && e.smallTableHasNull {
isNull = true
}
if e.anti && !isNull {
matched = !matched
}
	// For the auxMode subquery, we return the row with a Datum indicating if it's a match.
	// For the non-auxMode subquery, we return the matching row only.
if e.auxMode {
if isNull {
bigRow.Data = append(bigRow.Data, types.NewDatum(nil))
} else {
bigRow.Data = append(bigRow.Data, types.NewDatum(matched))
}
matched = true
}
if matched {
e.resultRows[0] = bigRow
return e.resultRows, nil
}
return nil, nil
}
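// Sketch of the auxMode contract above (assumption: summary only): in
// auxMode the big row is always emitted with one extra Datum appended —
// true/false for matched/unmatched, or NULL when the small side had NULL
// keys — while without auxMode only matching rows (or, with anti, only
// non-matching rows) come back:
//
//	rows, _ := e.doJoin(bigRow, true)
//	if len(rows) > 0 {
//		ind := rows[0].Data[len(rows[0].Data)-1] // match indicator in auxMode
//		_ = ind
//	}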
// Next implements the Executor Next interface.
func (e *HashSemiJoinExec) Next() (*Row, error) {
if !e.prepared {
if err := e.prepare(); err != nil {
return nil, errors.Trace(err)
}
}
for {
bigRow, match, err := e.fetchBigRow()
if bigRow == nil || err != nil {
return bigRow, errors.Trace(err)
}
resultRows, err := e.doJoin(bigRow, match)
if err != nil {
return nil, errors.Trace(err)
}
if len(resultRows) > 0 {
return resultRows[0], nil
}
}
}
// ApplyJoinExec is the new logic of apply.
type ApplyJoinExec struct {
join joinExec
outerSchema []*expression.CorrelatedColumn
cursor int
resultRows []*Row
schema *expression.Schema
}
// Schema implements the Executor interface.
func (e *ApplyJoinExec) Schema() *expression.Schema {
return e.schema
}
// Close implements the Executor interface.
func (e *ApplyJoinExec) Close() error {
e.cursor = 0
e.resultRows = nil
return e.join.Close()
}
// Next implements the Executor interface.
func (e *ApplyJoinExec) Next() (*Row, error) {
for {
if e.cursor < len(e.resultRows) {
row := e.resultRows[e.cursor]
e.cursor++
return row, nil
}
bigRow, match, err := e.join.fetchBigRow()
if bigRow == nil || err != nil {
return nil, errors.Trace(err)
}
for _, col := range e.outerSchema {
*col.Data = bigRow.Data[col.Index]
}
err = e.join.prepare()
if err != nil {
return nil, errors.Trace(err)
}
e.resultRows, err = e.join.doJoin(bigRow, match)
if err != nil {
return nil, errors.Trace(err)
}
e.cursor = 0
}
}
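// Sketch of the apply semantics above (assumption: summary only): for each
// outer row, the correlated columns are rebound in place and the inner join
// is prepared again, i.e. the inner side is re-evaluated once per outer
// row, which is the defining cost profile of the apply operator:
//
//	*col.Data = bigRow.Data[col.Index] // rebind before e.join.prepare()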
| executor/join.go | 1 | https://github.com/pingcap/tidb/commit/436eb2430309bbeaaa400a390cb50c549f6f5537 | [
0.11030992120504379,
0.0014262660406529903,
0.00016072503058239818,
0.00016930401034187526,
0.011607309803366661
] |
{
"id": 0,
"code_window": [
"\n",
"\t\"github.com/juju/errors\"\n",
"\t\"github.com/pingcap/tidb/context\"\n",
"\t\"github.com/pingcap/tidb/expression\"\n",
"\t\"github.com/pingcap/tidb/sessionctx/variable\"\n",
"\t\"github.com/pingcap/tidb/util/codec\"\n",
"\t\"github.com/pingcap/tidb/util/mvmap\"\n",
"\t\"github.com/pingcap/tidb/util/types\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/tablecodec\"\n"
],
"file_path": "executor/join.go",
"type": "add",
"edit_start_line_idx": 23
} | // mksyscall.pl -l32 syscall_bsd.go syscall_freebsd.go syscall_freebsd_386.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
// +build 386,freebsd
package unix
import (
"syscall"
"unsafe"
)
var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setgroups(ngid int, gid *_Gid_t) (err error) {
_, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
wpid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
_, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
_, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func socket(domain int, typ int, proto int) (fd int, err error) {
r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
_, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
_, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
_, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
_, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Shutdown(s int, how int) (err error) {
_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
_, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) {
r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
var _p0 unsafe.Pointer
if len(mib) > 0 {
_p0 = unsafe.Pointer(&mib[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
use(_p0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func utimes(path string, timeval *[2]Timeval) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func futimes(fd int, timeval *[2]Timeval) (err error) {
_, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fcntl(fd int, cmd int, arg int) (val int, err error) {
r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
val = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe() (r int, w int, err error) {
r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0)
r = int(r0)
w = int(r1)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Access(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Adjtime(delta *Timeval, olddelta *Timeval) (err error) {
_, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chflags(path string, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chmod(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chown(path string, uid int, gid int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chroot(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup(fd int) (nfd int, err error) {
r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0)
nfd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup2(from int, to int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Exit(code int) {
Syscall(SYS_EXIT, uintptr(code), 0, 0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(attrname)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0)
use(unsafe.Pointer(_p0))
ret = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(attrname)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0)
use(unsafe.Pointer(_p0))
ret = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(attrname)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_EXTATTR_DELETE_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)))
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error) {
r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FD, uintptr(fd), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0)
ret = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(file)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(attrname)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
ret = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(file)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(attrname)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
ret = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(file)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(attrname)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_EXTATTR_DELETE_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)))
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(file)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0)
use(unsafe.Pointer(_p0))
ret = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(link)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(attrname)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_EXTATTR_GET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
ret = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(link)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(attrname)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_EXTATTR_SET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
ret = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(link)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(attrname)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_EXTATTR_DELETE_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)))
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(link)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0)
use(unsafe.Pointer(_p0))
ret = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fadvise(fd int, offset int64, length int64, advice int) (err error) {
_, _, e1 := Syscall6(SYS_POSIX_FADVISE, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(length), uintptr(length>>32), uintptr(advice))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchdir(fd int) (err error) {
_, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchflags(fd int, flags int) (err error) {
_, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchmod(fd int, mode uint32) (err error) {
_, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchown(fd int, uid int, gid int) (err error) {
_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Flock(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fpathconf(fd int, name int) (val int, err error) {
r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0)
val = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstatfs(fd int, stat *Statfs_t) (err error) {
_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fsync(fd int) (err error) {
_, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Ftruncate(fd int, length int64) (err error) {
_, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), uintptr(length>>32))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getdtablesize() (size int) {
r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0)
size = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getegid() (egid int) {
r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
egid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Geteuid() (uid int) {
r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
uid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getgid() (gid int) {
r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
gid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpgid(pid int) (pgid int, err error) {
r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
pgid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpgrp() (pgrp int) {
r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0)
pgrp = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpid() (pid int) {
r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
pid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getppid() (ppid int) {
r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0)
ppid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpriority(which int, who int) (prio int, err error) {
r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
prio = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrlimit(which int, lim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrusage(who int, rusage *Rusage) (err error) {
_, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getsid(pid int) (sid int, err error) {
r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
sid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Gettimeofday(tv *Timeval) (err error) {
_, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getuid() (uid int) {
r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
uid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Issetugid() (tainted bool) {
r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0)
tainted = bool(r0 != 0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Kill(pid int, signum syscall.Signal) (err error) {
_, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Kqueue() (fd int, err error) {
r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Lchown(path string, uid int, gid int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Link(path string, link string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(link)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Listen(s int, backlog int) (err error) {
_, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Lstat(path string, stat *Stat_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkdir(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkfifo(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mknod(path string, mode uint32, dev int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mlock(b []byte) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mlockall(flags int) (err error) {
_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mprotect(b []byte, prot int) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Munlock(b []byte) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Munlockall() (err error) {
_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
_, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Open(path string, mode int, perm uint32) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
use(unsafe.Pointer(_p0))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Pathconf(path string, name int) (val int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0)
use(unsafe.Pointer(_p0))
val = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Pread(fd int, p []byte, offset int64) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func read(fd int, p []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Readlink(path string, buf []byte) (n int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(buf) > 0 {
_p1 = unsafe.Pointer(&buf[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)))
use(unsafe.Pointer(_p0))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Rename(from string, to string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(from)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(to)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Revoke(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Rmdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0)
newoffset = int64(int64(r1)<<32 | int64(r0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) {
_, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setegid(egid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Seteuid(euid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setgid(gid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setlogin(name string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(name)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setpgid(pid int, pgid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setpriority(which int, who int, prio int) (err error) {
_, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setregid(rgid int, egid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setreuid(ruid int, euid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setresgid(rgid int, egid int, sgid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setresuid(ruid int, euid int, suid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setrlimit(which int, lim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setsid() (pid int, err error) {
r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
pid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Settimeofday(tp *Timeval) (err error) {
_, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setuid(uid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Stat(path string, stat *Stat_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Statfs(path string, stat *Statfs_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Symlink(path string, link string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(link)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Sync() (err error) {
_, _, e1 := Syscall(SYS_SYNC, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Truncate(path string, length int64) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32))
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Umask(newmask int) (oldmask int) {
r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0)
oldmask = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Undelete(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Unlink(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Unmount(path string, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func write(fd int, p []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) {
r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0)
ret = uintptr(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func munmap(addr uintptr, length uintptr) (err error) {
_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) {
r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
nfd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
| _vendor/src/golang.org/x/sys/unix/zsyscall_freebsd_386.go | 0 | https://github.com/pingcap/tidb/commit/436eb2430309bbeaaa400a390cb50c549f6f5537 | [0.0010289796628057957, 0.00018565055506769568, 0.00016165737179107964, 0.00016680890985298902, 0.00009050103835761547] |
{
"id": 0,
"code_window": [
"\n",
"\t\"github.com/juju/errors\"\n",
"\t\"github.com/pingcap/tidb/context\"\n",
"\t\"github.com/pingcap/tidb/expression\"\n",
"\t\"github.com/pingcap/tidb/sessionctx/variable\"\n",
"\t\"github.com/pingcap/tidb/util/codec\"\n",
"\t\"github.com/pingcap/tidb/util/mvmap\"\n",
"\t\"github.com/pingcap/tidb/util/types\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/tablecodec\"\n"
],
"file_path": "executor/join.go",
"type": "add",
"edit_start_line_idx": 23
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mocktikv
import "fmt"
// ErrLocked is returned when trying to Read/Write on a locked key. Client should
// backoff or cleanup the lock then retry.
type ErrLocked struct {
Key MvccKey
Primary []byte
StartTS uint64
TTL uint64
}
// Error formats the lock to a string.
func (e *ErrLocked) Error() string {
return fmt.Sprintf("key is locked, key: %q, primary: %q, startTS: %v", e.Key, e.Primary, e.StartTS)
}
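// A minimal handling sketch (illustrative only; the concrete retry logic
// lives in the tikv client, not in this mock package). The type assertion
// below assumes the error reaches the caller unwrapped:
//
//	if locked, ok := err.(*ErrLocked); ok {
//		// back off, or resolve the lock using locked.Primary and
//		// locked.StartTS, then retry the read/write
//	}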
// ErrRetryable suggests that client may restart the txn. e.g. write conflict.
type ErrRetryable string
func (e ErrRetryable) Error() string {
return fmt.Sprintf("retryable: %s", string(e))
}
// ErrAbort means something is wrong and client should abort the txn.
type ErrAbort string
func (e ErrAbort) Error() string {
return fmt.Sprintf("abort: %s", string(e))
}
// ErrAlreadyCommitted is returned specially when client tries to rollback a
// committed lock.
type ErrAlreadyCommitted uint64
func (e ErrAlreadyCommitted) Error() string {
	return "txn already committed"
}
| store/tikv/mock-tikv/errors.go | 0 | https://github.com/pingcap/tidb/commit/436eb2430309bbeaaa400a390cb50c549f6f5537 | [0.00027878081891685724, 0.00021187633683439344, 0.00016318501729983836, 0.00020487845176830888, 0.00004537736822385341] |
{
"id": 0,
"code_window": [
"\n",
"\t\"github.com/juju/errors\"\n",
"\t\"github.com/pingcap/tidb/context\"\n",
"\t\"github.com/pingcap/tidb/expression\"\n",
"\t\"github.com/pingcap/tidb/sessionctx/variable\"\n",
"\t\"github.com/pingcap/tidb/util/codec\"\n",
"\t\"github.com/pingcap/tidb/util/mvmap\"\n",
"\t\"github.com/pingcap/tidb/util/types\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/tablecodec\"\n"
],
"file_path": "executor/join.go",
"type": "add",
"edit_start_line_idx": 23
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"bytes"
"math"
"sync"
"sync/atomic"
"time"
"github.com/coreos/etcd/pkg/monotime"
"github.com/juju/errors"
"github.com/ngaut/log"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx/binloginfo"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tipb/go-binlog"
goctx "golang.org/x/net/context"
)
type twoPhaseCommitAction int
const (
actionPrewrite twoPhaseCommitAction = 1
actionCommit twoPhaseCommitAction = 2
actionCleanup twoPhaseCommitAction = 3
)
func (ca twoPhaseCommitAction) String() string {
switch ca {
case actionPrewrite:
return "prewrite"
case actionCommit:
return "commit"
case actionCleanup:
return "cleanup"
}
return "unknown"
}
// MetricsTag returns the detailed tag used for metrics.
func (ca twoPhaseCommitAction) MetricsTag() string {
return "2pc_" + ca.String()
}
// twoPhaseCommitter executes a two-phase commit protocol.
type twoPhaseCommitter struct {
store *tikvStore
txn *tikvTxn
startTS uint64
keys [][]byte
mutations map[string]*pb.Mutation
lockTTL uint64
commitTS uint64
mu struct {
sync.RWMutex
writtenKeys [][]byte
committed bool
}
}
// newTwoPhaseCommitter creates a twoPhaseCommitter.
func newTwoPhaseCommitter(txn *tikvTxn) (*twoPhaseCommitter, error) {
var (
keys [][]byte
size int
putCnt int
delCnt int
lockCnt int
)
mutations := make(map[string]*pb.Mutation)
err := txn.us.WalkBuffer(func(k kv.Key, v []byte) error {
if len(v) > 0 {
mutations[string(k)] = &pb.Mutation{
Op: pb.Op_Put,
Key: k,
Value: v,
}
putCnt++
} else {
mutations[string(k)] = &pb.Mutation{
Op: pb.Op_Del,
Key: k,
}
delCnt++
}
keys = append(keys, k)
entrySize := len(k) + len(v)
if entrySize > kv.TxnEntrySizeLimit {
return kv.ErrEntryTooLarge
}
size += entrySize
return nil
})
if err != nil {
return nil, errors.Trace(err)
}
	// Transactions with only Lock operations and no Put/Del are read-only.
	// We can skip the commit entirely.
if len(keys) == 0 {
return nil, nil
}
for _, lockKey := range txn.lockKeys {
if _, ok := mutations[string(lockKey)]; !ok {
mutations[string(lockKey)] = &pb.Mutation{
Op: pb.Op_Lock,
Key: lockKey,
}
lockCnt++
keys = append(keys, lockKey)
size += len(lockKey)
}
}
	entryLimit := atomic.LoadUint64(&kv.TxnEntryCountLimit)
	if len(keys) > int(entryLimit) || size > kv.TxnTotalSizeLimit {
return nil, kv.ErrTxnTooLarge
}
const logEntryCount = 10000
const logSize = 4 * 1024 * 1024 // 4MB
if len(keys) > logEntryCount || size > logSize {
tableID := tablecodec.DecodeTableID(keys[0])
log.Infof("[BIG_TXN] table id:%d size:%d, keys:%d, puts:%d, dels:%d, locks:%d, startTS:%d",
tableID, size, len(keys), putCnt, delCnt, lockCnt, txn.startTS)
}
txnWriteKVCountHistogram.Observe(float64(len(keys)))
txnWriteSizeHistogram.Observe(float64(size / 1024))
return &twoPhaseCommitter{
store: txn.store,
txn: txn,
startTS: txn.StartTS(),
keys: keys,
mutations: mutations,
lockTTL: txnLockTTL(txn.startTime, size),
}, nil
}
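// A minimal usage sketch (assuming the caller is tikvTxn.Commit, which is not
// shown in this file; error handling trimmed). A nil committer means the txn
// is read-only and the commit can be skipped:
//
//	committer, err := newTwoPhaseCommitter(txn)
//	if err != nil {
//		return errors.Trace(err)
//	}
//	if committer == nil {
//		return nil // read-only txn: nothing to commit
//	}
//	return committer.execute()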
func (c *twoPhaseCommitter) primary() []byte {
return c.keys[0]
}
const bytesPerMiB = 1024 * 1024
func txnLockTTL(startTime monotime.Time, txnSize int) uint64 {
// Increase lockTTL for large transactions.
// The formula is `ttl = ttlFactor * sqrt(sizeInMiB)`.
	// When writeSize is less than 256KB, the base ttl is defaultLockTTL (3s);
	// when writeSize is 1MiB, 100MiB, or 400MiB, ttl is 6s, 60s, or 120s respectively.
lockTTL := defaultLockTTL
if txnSize >= txnCommitBatchSize {
sizeMiB := float64(txnSize) / bytesPerMiB
		lockTTL = uint64(float64(ttlFactor) * math.Sqrt(sizeMiB))
if lockTTL < defaultLockTTL {
lockTTL = defaultLockTTL
}
if lockTTL > maxLockTTL {
lockTTL = maxLockTTL
}
}
// Increase lockTTL by the transaction's read time.
// When resolving a lock, we compare current ts and startTS+lockTTL to decide whether to clean up. If a txn
	// takes a long time to read, increasing its TTL helps to prevent it from being aborted soon after prewrite.
elapsed := time.Duration(monotime.Now()-startTime) / time.Millisecond
return lockTTL + uint64(elapsed)
}
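// Worked example for the formula above, assuming defaultLockTTL = 3000ms,
// ttlFactor = 6000 and maxLockTTL = 120000ms (the constants are defined
// elsewhere in this package; the values here are inferred from the comment):
//
//	txnSize = 1 MiB   -> 6000 * sqrt(1)   = 6000ms  (6s)
//	txnSize = 100 MiB -> 6000 * sqrt(100) = 60000ms (60s)
//	txnSize = 400 MiB -> 6000 * sqrt(400) = 120000ms, capped at maxLockTTL
//
// The elapsed read time since startTime is then added on top.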
// doActionOnKeys groups keys into primary batch and secondary batches, if primary batch exists in the key,
// it does action on primary batch first, then on secondary batches. If action is commit, secondary batches
// is done in background goroutine.
func (c *twoPhaseCommitter) doActionOnKeys(bo *Backoffer, action twoPhaseCommitAction, keys [][]byte) error {
if len(keys) == 0 {
return nil
}
groups, firstRegion, err := c.store.regionCache.GroupKeysByRegion(bo, keys)
if err != nil {
return errors.Trace(err)
}
txnRegionsNumHistogram.WithLabelValues(action.MetricsTag()).Observe(float64(len(groups)))
var batches []batchKeys
var sizeFunc = c.keySize
if action == actionPrewrite {
sizeFunc = c.keyValueSize
}
// Make sure the group that contains primary key goes first.
batches = appendBatchBySize(batches, firstRegion, groups[firstRegion], sizeFunc, txnCommitBatchSize)
delete(groups, firstRegion)
for id, g := range groups {
batches = appendBatchBySize(batches, id, g, sizeFunc, txnCommitBatchSize)
}
firstIsPrimary := bytes.Equal(keys[0], c.primary())
if firstIsPrimary && action == actionCommit {
// primary should be committed first.
err = c.doActionOnBatches(bo, action, batches[:1])
if err != nil {
return errors.Trace(err)
}
batches = batches[1:]
}
if action == actionCommit {
// Commit secondary batches in background goroutine to reduce latency.
go func() {
e := c.doActionOnBatches(bo, action, batches)
if e != nil {
log.Debugf("2PC async doActionOnBatches %s err: %v", action, e)
}
}()
} else {
err = c.doActionOnBatches(bo, action, batches)
}
return errors.Trace(err)
}
// doActionOnBatches does action to batches in parallel.
func (c *twoPhaseCommitter) doActionOnBatches(bo *Backoffer, action twoPhaseCommitAction, batches []batchKeys) error {
if len(batches) == 0 {
return nil
}
var singleBatchActionFunc func(bo *Backoffer, batch batchKeys) error
switch action {
case actionPrewrite:
singleBatchActionFunc = c.prewriteSingleBatch
case actionCommit:
singleBatchActionFunc = c.commitSingleBatch
case actionCleanup:
singleBatchActionFunc = c.cleanupSingleBatch
}
if len(batches) == 1 {
e := singleBatchActionFunc(bo, batches[0])
if e != nil {
log.Debugf("2PC doActionOnBatches %s failed: %v, tid: %d", action, e, c.startTS)
}
return errors.Trace(e)
}
// For prewrite, stop sending other requests after receiving first error.
backoffer := bo
var cancel goctx.CancelFunc
if action == actionPrewrite {
backoffer, cancel = bo.Fork()
}
// Concurrently do the work for each batch.
ch := make(chan error, len(batches))
for _, batch := range batches {
go func(batch batchKeys) {
singleBatchBackoffer, singleBatchCancel := backoffer.Fork()
defer singleBatchCancel()
ch <- singleBatchActionFunc(singleBatchBackoffer, batch)
}(batch)
}
var err error
for i := 0; i < len(batches); i++ {
if e := <-ch; e != nil {
log.Debugf("2PC doActionOnBatches %s failed: %v, tid: %d", action, e, c.startTS)
// Cancel other requests and return the first error.
if cancel != nil {
cancel()
}
if err == nil {
err = e
}
}
}
return errors.Trace(err)
}
func (c *twoPhaseCommitter) keyValueSize(key []byte) int {
size := len(key)
if mutation := c.mutations[string(key)]; mutation != nil {
size += len(mutation.Value)
}
return size
}
func (c *twoPhaseCommitter) keySize(key []byte) int {
return len(key)
}
func (c *twoPhaseCommitter) prewriteSingleBatch(bo *Backoffer, batch batchKeys) error {
mutations := make([]*pb.Mutation, len(batch.keys))
for i, k := range batch.keys {
mutations[i] = c.mutations[string(k)]
}
skipCheck := false
optSkipCheck := c.txn.us.GetOption(kv.SkipCheckForWrite)
if skip, ok := optSkipCheck.(bool); ok && skip {
skipCheck = true
}
req := &pb.Request{
Type: pb.MessageType_CmdPrewrite,
CmdPrewriteReq: &pb.CmdPrewriteRequest{
Mutations: mutations,
PrimaryLock: c.primary(),
StartVersion: c.startTS,
LockTtl: c.lockTTL,
SkipConstraintCheck: skipCheck,
},
}
for {
resp, err := c.store.SendKVReq(bo, req, batch.region, readTimeoutShort)
if err != nil {
return errors.Trace(err)
}
if regionErr := resp.GetRegionError(); regionErr != nil {
err = bo.Backoff(boRegionMiss, errors.New(regionErr.String()))
if err != nil {
return errors.Trace(err)
}
err = c.prewriteKeys(bo, batch.keys)
return errors.Trace(err)
}
prewriteResp := resp.GetCmdPrewriteResp()
if prewriteResp == nil {
return errors.Trace(errBodyMissing)
}
keyErrs := prewriteResp.GetErrors()
if len(keyErrs) == 0 {
			// We need to clean up all written keys if the transaction aborts.
c.mu.Lock()
defer c.mu.Unlock()
c.mu.writtenKeys = append(c.mu.writtenKeys, batch.keys...)
return nil
}
var locks []*Lock
for _, keyErr := range keyErrs {
lock, err1 := extractLockFromKeyErr(keyErr)
if err1 != nil {
return errors.Trace(err1)
}
log.Debugf("2PC prewrite encounters lock: %v", lock)
locks = append(locks, lock)
}
ok, err := c.store.lockResolver.ResolveLocks(bo, locks)
if err != nil {
return errors.Trace(err)
}
if !ok {
err = bo.Backoff(boTxnLock, errors.Errorf("2PC prewrite lockedKeys: %d", len(locks)))
if err != nil {
return errors.Trace(err)
}
}
}
}
func (c *twoPhaseCommitter) commitSingleBatch(bo *Backoffer, batch batchKeys) error {
req := &pb.Request{
Type: pb.MessageType_CmdCommit,
CmdCommitReq: &pb.CmdCommitRequest{
StartVersion: c.startTS,
Keys: batch.keys,
CommitVersion: c.commitTS,
},
}
	// If we fail to receive a response for the request that commits the primary key, it is undetermined whether this
	// transaction has been successfully committed.
	// Under this circumstance, we cannot declare the commit complete (that may lead to data loss), nor can we throw
	// an error (that may lead to a duplicate-key error when the upper layer restarts the transaction). Currently the
	// best workaround seems to be an infinite retry until the server recovers and returns a success or failure response.
	if bytes.Equal(batch.keys[0], c.primary()) {
bo = NewBackoffer(commitPrimaryMaxBackoff, bo.ctx)
}
resp, err := c.store.SendKVReq(bo, req, batch.region, readTimeoutShort)
if err != nil {
return errors.Trace(err)
}
if regionErr := resp.GetRegionError(); regionErr != nil {
err = bo.Backoff(boRegionMiss, errors.New(regionErr.String()))
if err != nil {
return errors.Trace(err)
}
// re-split keys and commit again.
err = c.commitKeys(bo, batch.keys)
return errors.Trace(err)
}
commitResp := resp.GetCmdCommitResp()
if commitResp == nil {
return errors.Trace(errBodyMissing)
}
if keyErr := commitResp.GetError(); keyErr != nil {
c.mu.RLock()
defer c.mu.RUnlock()
err = errors.Errorf("2PC commit failed: %v", keyErr.String())
if c.mu.committed {
			// No secondary key can be rolled back after its primary key is committed.
// There must be a serious bug somewhere.
log.Errorf("2PC failed commit key after primary key committed: %v, tid: %d", err, c.startTS)
return errors.Trace(err)
}
		// The transaction may be rolled back by concurrent transactions.
log.Debugf("2PC failed commit primary key: %v, retry later, tid: %d", err, c.startTS)
return errors.Annotate(err, txnRetryableMark)
}
c.mu.Lock()
defer c.mu.Unlock()
	// The group that contains the primary key is always first.
	// We mark the transaction's status as committed when we receive the first success response.
c.mu.committed = true
return nil
}
func (c *twoPhaseCommitter) cleanupSingleBatch(bo *Backoffer, batch batchKeys) error {
req := &pb.Request{
Type: pb.MessageType_CmdBatchRollback,
CmdBatchRollbackReq: &pb.CmdBatchRollbackRequest{
Keys: batch.keys,
StartVersion: c.startTS,
},
}
resp, err := c.store.SendKVReq(bo, req, batch.region, readTimeoutShort)
if err != nil {
return errors.Trace(err)
}
if regionErr := resp.GetRegionError(); regionErr != nil {
err = bo.Backoff(boRegionMiss, errors.New(regionErr.String()))
if err != nil {
return errors.Trace(err)
}
err = c.cleanupKeys(bo, batch.keys)
return errors.Trace(err)
}
if keyErr := resp.GetCmdBatchRollbackResp().GetError(); keyErr != nil {
err = errors.Errorf("2PC cleanup failed: %s", keyErr)
log.Debugf("2PC failed cleanup key: %v, tid: %d", err, c.startTS)
return errors.Trace(err)
}
return nil
}
func (c *twoPhaseCommitter) prewriteKeys(bo *Backoffer, keys [][]byte) error {
return c.doActionOnKeys(bo, actionPrewrite, keys)
}
func (c *twoPhaseCommitter) commitKeys(bo *Backoffer, keys [][]byte) error {
return c.doActionOnKeys(bo, actionCommit, keys)
}
func (c *twoPhaseCommitter) cleanupKeys(bo *Backoffer, keys [][]byte) error {
return c.doActionOnKeys(bo, actionCleanup, keys)
}
// The max time a Txn may use (in ms) from its startTS to commitTS.
// We use it to guarantee the GC worker will not influence any active txn. The value
// should be less than `gcRunInterval`.
const maxTxnTimeUse = 590000
// execute executes the two-phase commit protocol.
func (c *twoPhaseCommitter) execute() error {
defer func() {
// Always clean up all written keys if the txn does not commit.
c.mu.RLock()
writtenKeys := c.mu.writtenKeys
committed := c.mu.committed
c.mu.RUnlock()
if !committed {
go func() {
err := c.cleanupKeys(NewBackoffer(cleanupMaxBackoff, goctx.Background()), writtenKeys)
if err != nil {
log.Infof("2PC cleanup err: %v, tid: %d", err, c.startTS)
} else {
log.Infof("2PC clean up done, tid: %d", c.startTS)
}
}()
}
}()
ctx := goctx.Background()
binlogChan := c.prewriteBinlog()
err := c.prewriteKeys(NewBackoffer(prewriteMaxBackoff, ctx), c.keys)
if binlogChan != nil {
binlogErr := <-binlogChan
if binlogErr != nil {
return errors.Trace(binlogErr)
}
}
if err != nil {
log.Debugf("2PC failed on prewrite: %v, tid: %d", err, c.startTS)
return errors.Trace(err)
}
commitTS, err := c.store.getTimestampWithRetry(NewBackoffer(tsoMaxBackoff, ctx))
if err != nil {
log.Warnf("2PC get commitTS failed: %v, tid: %d", err, c.startTS)
return errors.Trace(err)
}
// check commitTS
if commitTS <= c.startTS {
err = errors.Errorf("Invalid transaction tso with start_ts=%v while commit_ts=%v",
c.startTS,
commitTS)
log.Error(err)
return errors.Trace(err)
}
c.commitTS = commitTS
if err := c.checkSchemaValid(); err != nil {
return errors.Trace(err)
}
if c.store.oracle.IsExpired(c.startTS, maxTxnTimeUse) {
err = errors.Errorf("txn takes too much time, start: %d, commit: %d", c.startTS, c.commitTS)
return errors.Annotate(err, txnRetryableMark)
}
err = c.commitKeys(NewBackoffer(commitMaxBackoff, ctx), c.keys)
if err != nil {
if !c.mu.committed {
log.Debugf("2PC failed on commit: %v, tid: %d", err, c.startTS)
return errors.Trace(err)
}
log.Debugf("2PC succeed with error: %v, tid: %d", err, c.startTS)
}
return nil
}
type schemaLeaseChecker interface {
Check(txnTS uint64) error
}
func (c *twoPhaseCommitter) checkSchemaValid() error {
checker, ok := c.txn.us.GetOption(kv.SchemaLeaseChecker).(schemaLeaseChecker)
if ok {
err := checker.Check(c.commitTS)
if err != nil {
return errors.Trace(err)
}
}
return nil
}
func (c *twoPhaseCommitter) prewriteBinlog() chan error {
if !c.shouldWriteBinlog() {
return nil
}
ch := make(chan error, 1)
go func() {
bin := c.txn.us.GetOption(kv.BinlogData).(*binlog.Binlog)
bin.StartTs = int64(c.startTS)
if bin.Tp == binlog.BinlogType_Prewrite {
bin.PrewriteKey = c.keys[0]
}
err := binloginfo.WriteBinlog(bin, c.store.clusterID)
ch <- errors.Trace(err)
}()
return ch
}
func (c *twoPhaseCommitter) writeFinishBinlog(tp binlog.BinlogType, commitTS int64) {
if !c.shouldWriteBinlog() {
return
}
bin := c.txn.us.GetOption(kv.BinlogData).(*binlog.Binlog)
bin.Tp = tp
bin.CommitTs = commitTS
go func() {
err := binloginfo.WriteBinlog(bin, c.store.clusterID)
if err != nil {
log.Errorf("failed to write binlog: %v", err)
}
}()
}
func (c *twoPhaseCommitter) shouldWriteBinlog() bool {
if binloginfo.PumpClient == nil {
return false
}
_, ok := c.txn.us.GetOption(kv.BinlogData).(*binlog.Binlog)
return ok
}
// TiKV recommends each RPC packet should be less than ~1MB. We keep each packet's
// Key+Value size below 16KB.
const txnCommitBatchSize = 16 * 1024
// batchKeys is a batch of keys in the same region.
type batchKeys struct {
region RegionVerID
keys [][]byte
}
// appendBatchBySize appends keys to []batchKeys. It may split the keys to make
// sure each batch's size does not exceed the limit.
func appendBatchBySize(b []batchKeys, region RegionVerID, keys [][]byte, sizeFn func([]byte) int, limit int) []batchKeys {
var start, end int
for start = 0; start < len(keys); start = end {
var size int
for end = start; end < len(keys) && size < limit; end++ {
size += sizeFn(keys[end])
}
b = append(b, batchKeys{
region: region,
keys: keys[start:end],
})
}
return b
}
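// A small illustrative trace (hypothetical sizes): with limit = 16KB and four
// keys in one region whose sizeFn values are 10KB, 10KB, 2KB and 20KB, the
// loop yields
//
//	batch 1: keys[0:2] (10KB + 10KB)
//	batch 2: keys[2:4] (2KB + 20KB)
//
// The size check runs before each key is added, so a batch may exceed the
// limit by the last key it takes, and a single oversized key still forms its
// own valid batch rather than looping forever.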
| store/tikv/2pc.go | 0 | https://github.com/pingcap/tidb/commit/436eb2430309bbeaaa400a390cb50c549f6f5537 | [0.0036004059948027134, 0.00022969335259404033, 0.00015928508946672082, 0.00016833064728416502, 0.0004326678463257849] |