| hunk | file | file_path | label | commit_url | dependency_score |
|---|---|---|---|---|---|
| dict | string (lengths 0–11.8M) | string (lengths 2–234) | int64 (0–1) | string (lengths 74–103) | float sequence (length 5) |
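Each row below is one record of this dataset: a hunk object, a candidate file body, the candidate's path, a 0/1 label, the source commit URL, and five dependency scores. As a minimal sketch, the Go struct below could deserialize the `hunk` column; the struct and identifier names are illustrative, and only the JSON keys are taken from the rows themselves.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hunk mirrors the keys visible in the `hunk` column of the rows below.
// Field names are assumptions; the JSON tags come from the data.
type Hunk struct {
	ID               int      `json:"id"`
	CodeWindow       []string `json:"code_window"`
	Labels           []string `json:"labels"`
	AfterEdit        []string `json:"after_edit"`
	FilePath         string   `json:"file_path"`
	Type             string   `json:"type"`
	EditStartLineIdx int      `json:"edit_start_line_idx"`
}

func main() {
	// A trimmed-down record in the same shape as the rows below.
	raw := `{"id": 2, "code_window": ["import (\n"], "labels": ["keep"],
	         "after_edit": [], "file_path": "tests/e2e/e2e_test.go",
	         "type": "add", "edit_start_line_idx": 33}`
	var h Hunk
	if err := json.Unmarshal([]byte(raw), &h); err != nil {
		panic(err)
	}
	fmt.Println(h.FilePath, h.Type, h.EditStartLineIdx)
}
```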
{
"id": 2,
"code_window": [
"import (\n",
"\t\"fmt\"\n",
"\t\"os\"\n",
"\t\"os/exec\"\n",
"\t\"testing\"\n",
"\t\"time\"\n",
"\n",
"\t. \"github.com/onsi/gomega\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"path/filepath\"\n"
],
"file_path": "tests/e2e/e2e_test.go",
"type": "add",
"edit_start_line_idx": 33
} | /*
Copyright (c) 2020 Docker Inc.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package main
import (
"context"
"fmt"
"math/rand"
"os"
"os/exec"
"os/signal"
"path/filepath"
"syscall"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
// Backend registrations
_ "github.com/docker/api/azure"
_ "github.com/docker/api/example"
_ "github.com/docker/api/moby"
"github.com/docker/api/cli/cmd"
"github.com/docker/api/cli/cmd/compose"
contextcmd "github.com/docker/api/cli/cmd/context"
"github.com/docker/api/cli/cmd/run"
cliconfig "github.com/docker/api/cli/config"
cliopts "github.com/docker/api/cli/options"
apicontext "github.com/docker/api/context"
"github.com/docker/api/context/store"
)
var (
runningOwnCommand bool
)
func init() {
// initial hack to get the path of the project's bin dir
// into the env of this cli for development
path, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
fatal(errors.Wrap(err, "unable to get absolute bin path"))
}
if err := os.Setenv("PATH", fmt.Sprintf("%s:%s", os.Getenv("PATH"), path)); err != nil {
panic(err)
}
// Seed random
rand.Seed(time.Now().UnixNano())
}
func isOwnCommand(cmd *cobra.Command) bool {
if cmd == nil {
return false
}
if cmd.Name() == "context" || cmd.Name() == "serve" {
return true
}
return isOwnCommand(cmd.Parent())
}
func main() {
var opts cliopts.GlobalOpts
root := &cobra.Command{
Use: "docker",
Long: "docker for the 2020s",
SilenceErrors: true,
SilenceUsage: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
runningOwnCommand = isOwnCommand(cmd)
if !runningOwnCommand {
execMoby(cmd.Context())
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Help()
},
}
root.AddCommand(
contextcmd.Command(),
cmd.PsCommand(),
cmd.ServeCommand(),
run.Command(),
cmd.ExecCommand(),
cmd.LogsCommand(),
cmd.RmCommand(),
compose.Command(),
)
helpFunc := root.HelpFunc()
root.SetHelpFunc(func(cmd *cobra.Command, args []string) {
runningOwnCommand = isOwnCommand(cmd)
if !runningOwnCommand {
execMoby(cmd.Context())
}
helpFunc(cmd, args)
})
root.PersistentFlags().BoolVarP(&opts.Debug, "debug", "d", false, "enable debug output in the logs")
opts.AddConfigFlags(root.PersistentFlags())
opts.AddContextFlags(root.PersistentFlags())
// populate the opts with the global flags
_ = root.PersistentFlags().Parse(os.Args[1:])
if opts.Debug {
logrus.SetLevel(logrus.DebugLevel)
}
ctx, cancel := newSigContext()
defer cancel()
if opts.Config == "" {
fatal(errors.New("config path cannot be empty"))
}
configDir := opts.Config
ctx = cliconfig.WithDir(ctx, configDir)
currentContext, err := determineCurrentContext(opts.Context, configDir)
if err != nil {
fatal(errors.New("unable to determine current context"))
}
s, err := store.New(store.WithRoot(configDir))
if err != nil {
fatal(errors.Wrap(err, "unable to create context store"))
}
ctx = apicontext.WithCurrentContext(ctx, currentContext)
ctx = store.WithContextStore(ctx, s)
if err = root.ExecuteContext(ctx); err != nil {
// Context should always be handled by new CLI
if runningOwnCommand {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
execMoby(ctx)
fmt.Println(err)
os.Exit(1)
}
}
func newSigContext() (context.Context, func()) {
ctx, cancel := context.WithCancel(context.Background())
s := make(chan os.Signal)
signal.Notify(s, syscall.SIGTERM, syscall.SIGINT)
go func() {
<-s
cancel()
}()
return ctx, cancel
}
func execMoby(ctx context.Context) {
currentContext := apicontext.CurrentContext(ctx)
s := store.ContextStore(ctx)
_, err := s.Get(currentContext)
// Only run original docker command if the current context is not
// ours.
if err != nil {
cmd := exec.Command("docker-classic", os.Args[1:]...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
if exiterr, ok := err.(*exec.ExitError); ok {
os.Exit(exiterr.ExitCode())
}
os.Exit(1)
}
os.Exit(0)
}
}
func determineCurrentContext(flag string, configDir string) (string, error) {
res := flag
if res == "" {
config, err := cliconfig.LoadFile(configDir)
if err != nil {
return "", err
}
res = config.CurrentContext
}
if res == "" {
res = "default"
}
return res, nil
}
func fatal(err error) {
fmt.Fprint(os.Stderr, err)
os.Exit(1)
}
| cli/main.go | 1 | https://github.com/docker/compose/commit/66a83410dd768b25a93a8028c900b2dce10a6bac | [
0.0008961136918514967,
0.00021033895609434694,
0.00016622238035779446,
0.00017019161896314472,
0.00014762183127459139
] |
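The `labels` array lines up index-for-index with `code_window`. From the rows in this dump, a plausible reading is: `keep` copies a line unchanged, `add` copies it and splices the `after_edit` lines in immediately after, and `replace` (seen in later rows) drops the flagged run of lines in favor of `after_edit`. The sketch below implements that reading; it is an inference from the visible rows, not the dataset authors' definition.

```go
package main

import "fmt"

// applyHunk rebuilds the edited window from code_window, labels, and
// after_edit. Semantics are inferred from the rows in this dump:
// "keep" copies the line, "add" copies it and splices after_edit in
// right after, "replace" substitutes after_edit for the flagged run.
func applyHunk(window, labels, afterEdit []string) []string {
	var out []string
	replaced := false
	for i, line := range window {
		switch labels[i] {
		case "keep":
			out = append(out, line)
		case "add":
			out = append(out, line)
			out = append(out, afterEdit...)
		case "replace":
			if !replaced { // emit the replacement once per flagged run
				out = append(out, afterEdit...)
				replaced = true
			}
		}
	}
	return out
}

func main() {
	// Condensed version of the import block edited in the row above.
	window := []string{"import (\n", "\t\"os/exec\"\n", "\t\"testing\"\n", ")\n"}
	labels := []string{"keep", "add", "keep", "keep"}
	afterEdit := []string{"\t\"path/filepath\"\n"}
	for _, l := range applyHunk(window, labels, afterEdit) {
		fmt.Print(l)
	}
}
```

On the first row, this yields the import block with `path/filepath` inserted directly after the line labeled `add`, which is consistent with where that import would land in the edited test file.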
{
"id": 2,
"code_window": [
"import (\n",
"\t\"fmt\"\n",
"\t\"os\"\n",
"\t\"os/exec\"\n",
"\t\"testing\"\n",
"\t\"time\"\n",
"\n",
"\t. \"github.com/onsi/gomega\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"path/filepath\"\n"
],
"file_path": "tests/e2e/e2e_test.go",
"type": "add",
"edit_start_line_idx": 33
} | /*
Copyright (c) 2020 Docker Inc.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package errdefs
import (
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
func TestIsNotFound(t *testing.T) {
err := errors.Wrap(ErrNotFound, `object "name"`)
assert.True(t, IsNotFoundError(err))
assert.False(t, IsNotFoundError(errors.New("another error")))
}
func TestIsAlreadyExists(t *testing.T) {
err := errors.Wrap(ErrAlreadyExists, `object "name"`)
assert.True(t, IsAlreadyExistsError(err))
assert.False(t, IsAlreadyExistsError(errors.New("another error")))
}
func TestIsForbidden(t *testing.T) {
err := errors.Wrap(ErrForbidden, `object "name"`)
assert.True(t, IsForbiddenError(err))
assert.False(t, IsForbiddenError(errors.New("another error")))
}
func TestIsUnknown(t *testing.T) {
err := errors.Wrap(ErrUnknown, `object "name"`)
assert.True(t, IsUnknownError(err))
assert.False(t, IsUnknownError(errors.New("another error")))
}
| errdefs/errors_test.go | 0 | https://github.com/docker/compose/commit/66a83410dd768b25a93a8028c900b2dce10a6bac | [
0.00023074859927874058,
0.00017854428733699024,
0.00016776564007159323,
0.00017068680608645082,
0.000021356665456551127
] |
{
"id": 2,
"code_window": [
"import (\n",
"\t\"fmt\"\n",
"\t\"os\"\n",
"\t\"os/exec\"\n",
"\t\"testing\"\n",
"\t\"time\"\n",
"\n",
"\t. \"github.com/onsi/gomega\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"path/filepath\"\n"
],
"file_path": "tests/e2e/e2e_test.go",
"type": "add",
"edit_start_line_idx": 33
} | bin/
tests/node-client/node_modules/
| .dockerignore | 0 | https://github.com/docker/compose/commit/66a83410dd768b25a93a8028c900b2dce10a6bac | [
0.00017021773965097964,
0.00017021773965097964,
0.00017021773965097964,
0.00017021773965097964,
0
] |
{
"id": 2,
"code_window": [
"import (\n",
"\t\"fmt\"\n",
"\t\"os\"\n",
"\t\"os/exec\"\n",
"\t\"testing\"\n",
"\t\"time\"\n",
"\n",
"\t. \"github.com/onsi/gomega\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"path/filepath\"\n"
],
"file_path": "tests/e2e/e2e_test.go",
"type": "add",
"edit_start_line_idx": 33
} | package login
import (
"fmt"
"net"
"net/http"
"net/url"
)
const loginFailedHTML = `
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<title>Login failed</title>
</head>
<body>
<h4>Some failures occurred during the authentication</h4>
<p>You can log an issue at <a href="https://github.com/azure/azure-cli/issues">Azure CLI GitHub Repository</a> and we will assist you in resolving it.</p>
</body>
</html>
`
const successfullLoginHTML = `
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<meta http-equiv="refresh" content="10;url=https://docs.microsoft.com/cli/azure/">
<title>Login successfully</title>
</head>
<body>
<h4>You have logged into Microsoft Azure!</h4>
<p>You can close this window, or we will redirect you to the <a href="https://docs.microsoft.com/cli/azure/">Azure CLI documents</a> in 10 seconds.</p>
</body>
</html>
`
func startLoginServer(queryCh chan url.Values) (int, error) {
mux := http.NewServeMux()
mux.HandleFunc("/", queryHandler(queryCh))
listener, err := net.Listen("tcp", ":0")
if err != nil {
return 0, err
}
availablePort := listener.Addr().(*net.TCPAddr).Port
server := &http.Server{Handler: mux}
go func() {
if err := server.Serve(listener); err != nil {
queryCh <- url.Values{
"error": []string{fmt.Sprintf("error starting http server with: %v", err)},
}
}
}()
return availablePort, nil
}
func queryHandler(queryCh chan url.Values) func(w http.ResponseWriter, r *http.Request) {
queryHandler := func(w http.ResponseWriter, r *http.Request) {
_, hasCode := r.URL.Query()["code"]
if hasCode {
_, err := w.Write([]byte(successfullLoginHTML))
if err != nil {
queryCh <- url.Values{
"error": []string{err.Error()},
}
} else {
queryCh <- r.URL.Query()
}
} else {
_, err := w.Write([]byte(loginFailedHTML))
if err != nil {
queryCh <- url.Values{
"error": []string{err.Error()},
}
} else {
queryCh <- r.URL.Query()
}
}
}
return queryHandler
}
| azure/login/logingLocalServer.go | 0 | https://github.com/docker/compose/commit/66a83410dd768b25a93a8028c900b2dce10a6bac | [
0.0003346905577927828,
0.00019677936506923288,
0.00016674274229444563,
0.00017044009291566908,
0.000052211908041499555
] |
{
"id": 3,
"code_window": [
"\t\tExpect(output).To(Not(ContainSubstring(\"test-example\")))\n",
"\t\tExpect(output).To(ContainSubstring(\"default *\"))\n",
"\t})\n",
"}\n",
"\n",
"func (s *E2eSuite) TestLegacy() {\n",
"\tIt(\"should list all legacy commands\", func() {\n",
"\t\toutput := s.NewDockerCommand(\"--help\").ExecOrDie()\n",
"\t\tExpect(output).To(ContainSubstring(\"swarm\"))\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (s *E2eSuite) TestSetupError() {\n",
"\tIt(\"should display an error if cannot shell out to docker-classic\", func() {\n",
"\t\terr := os.Setenv(\"PATH\", s.BinDir)\n",
"\t\tExpect(err).To(BeNil())\n",
"\t\terr = os.Remove(filepath.Join(s.BinDir, \"docker-classic\"))\n",
"\t\tExpect(err).To(BeNil())\n",
"\t\toutput, err := s.NewDockerCommand(\"ps\").Exec()\n",
"\t\tExpect(output).To(ContainSubstring(\"docker-classic\"))\n",
"\t\tExpect(output).To(ContainSubstring(\"not found\"))\n",
"\t\tExpect(err).NotTo(BeNil())\n",
"\t})\n",
"}\n",
"\n"
],
"file_path": "tests/e2e/e2e_test.go",
"type": "add",
"edit_start_line_idx": 67
} | /*
Copyright (c) 2020 Docker Inc.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package main
import (
"context"
"fmt"
"math/rand"
"os"
"os/exec"
"os/signal"
"path/filepath"
"syscall"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
// Backend registrations
_ "github.com/docker/api/azure"
_ "github.com/docker/api/example"
_ "github.com/docker/api/moby"
"github.com/docker/api/cli/cmd"
"github.com/docker/api/cli/cmd/compose"
contextcmd "github.com/docker/api/cli/cmd/context"
"github.com/docker/api/cli/cmd/run"
cliconfig "github.com/docker/api/cli/config"
cliopts "github.com/docker/api/cli/options"
apicontext "github.com/docker/api/context"
"github.com/docker/api/context/store"
)
var (
runningOwnCommand bool
)
func init() {
// initial hack to get the path of the project's bin dir
// into the env of this cli for development
path, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
fatal(errors.Wrap(err, "unable to get absolute bin path"))
}
if err := os.Setenv("PATH", fmt.Sprintf("%s:%s", os.Getenv("PATH"), path)); err != nil {
panic(err)
}
// Seed random
rand.Seed(time.Now().UnixNano())
}
func isOwnCommand(cmd *cobra.Command) bool {
if cmd == nil {
return false
}
if cmd.Name() == "context" || cmd.Name() == "serve" {
return true
}
return isOwnCommand(cmd.Parent())
}
func main() {
var opts cliopts.GlobalOpts
root := &cobra.Command{
Use: "docker",
Long: "docker for the 2020s",
SilenceErrors: true,
SilenceUsage: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
runningOwnCommand = isOwnCommand(cmd)
if !runningOwnCommand {
execMoby(cmd.Context())
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Help()
},
}
root.AddCommand(
contextcmd.Command(),
cmd.PsCommand(),
cmd.ServeCommand(),
run.Command(),
cmd.ExecCommand(),
cmd.LogsCommand(),
cmd.RmCommand(),
compose.Command(),
)
helpFunc := root.HelpFunc()
root.SetHelpFunc(func(cmd *cobra.Command, args []string) {
runningOwnCommand = isOwnCommand(cmd)
if !runningOwnCommand {
execMoby(cmd.Context())
}
helpFunc(cmd, args)
})
root.PersistentFlags().BoolVarP(&opts.Debug, "debug", "d", false, "enable debug output in the logs")
opts.AddConfigFlags(root.PersistentFlags())
opts.AddContextFlags(root.PersistentFlags())
// populate the opts with the global flags
_ = root.PersistentFlags().Parse(os.Args[1:])
if opts.Debug {
logrus.SetLevel(logrus.DebugLevel)
}
ctx, cancel := newSigContext()
defer cancel()
if opts.Config == "" {
fatal(errors.New("config path cannot be empty"))
}
configDir := opts.Config
ctx = cliconfig.WithDir(ctx, configDir)
currentContext, err := determineCurrentContext(opts.Context, configDir)
if err != nil {
fatal(errors.New("unable to determine current context"))
}
s, err := store.New(store.WithRoot(configDir))
if err != nil {
fatal(errors.Wrap(err, "unable to create context store"))
}
ctx = apicontext.WithCurrentContext(ctx, currentContext)
ctx = store.WithContextStore(ctx, s)
if err = root.ExecuteContext(ctx); err != nil {
// Context should always be handled by new CLI
if runningOwnCommand {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
execMoby(ctx)
fmt.Println(err)
os.Exit(1)
}
}
func newSigContext() (context.Context, func()) {
ctx, cancel := context.WithCancel(context.Background())
s := make(chan os.Signal)
signal.Notify(s, syscall.SIGTERM, syscall.SIGINT)
go func() {
<-s
cancel()
}()
return ctx, cancel
}
func execMoby(ctx context.Context) {
currentContext := apicontext.CurrentContext(ctx)
s := store.ContextStore(ctx)
_, err := s.Get(currentContext)
// Only run original docker command if the current context is not
// ours.
if err != nil {
cmd := exec.Command("docker-classic", os.Args[1:]...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
if exiterr, ok := err.(*exec.ExitError); ok {
os.Exit(exiterr.ExitCode())
}
os.Exit(1)
}
os.Exit(0)
}
}
func determineCurrentContext(flag string, configDir string) (string, error) {
res := flag
if res == "" {
config, err := cliconfig.LoadFile(configDir)
if err != nil {
return "", err
}
res = config.CurrentContext
}
if res == "" {
res = "default"
}
return res, nil
}
func fatal(err error) {
fmt.Fprint(os.Stderr, err)
os.Exit(1)
}
| cli/main.go | 1 | https://github.com/docker/compose/commit/66a83410dd768b25a93a8028c900b2dce10a6bac | [
0.970974862575531,
0.07370346784591675,
0.00016393547412008047,
0.00017341577040497214,
0.21698857843875885
] |
{
"id": 3,
"code_window": [
"\t\tExpect(output).To(Not(ContainSubstring(\"test-example\")))\n",
"\t\tExpect(output).To(ContainSubstring(\"default *\"))\n",
"\t})\n",
"}\n",
"\n",
"func (s *E2eSuite) TestLegacy() {\n",
"\tIt(\"should list all legacy commands\", func() {\n",
"\t\toutput := s.NewDockerCommand(\"--help\").ExecOrDie()\n",
"\t\tExpect(output).To(ContainSubstring(\"swarm\"))\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (s *E2eSuite) TestSetupError() {\n",
"\tIt(\"should display an error if cannot shell out to docker-classic\", func() {\n",
"\t\terr := os.Setenv(\"PATH\", s.BinDir)\n",
"\t\tExpect(err).To(BeNil())\n",
"\t\terr = os.Remove(filepath.Join(s.BinDir, \"docker-classic\"))\n",
"\t\tExpect(err).To(BeNil())\n",
"\t\toutput, err := s.NewDockerCommand(\"ps\").Exec()\n",
"\t\tExpect(output).To(ContainSubstring(\"docker-classic\"))\n",
"\t\tExpect(output).To(ContainSubstring(\"not found\"))\n",
"\t\tExpect(err).NotTo(BeNil())\n",
"\t})\n",
"}\n",
"\n"
],
"file_path": "tests/e2e/e2e_test.go",
"type": "add",
"edit_start_line_idx": 67
} | name: releaser
on:
push:
tags:
- 'v*'
jobs:
upload-release:
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.14
uses: actions/setup-go@v1
with:
go-version: 1.14
id: go
- name: Checkout code into the Go module directory
uses: actions/checkout@v2
- uses: actions/cache@v1
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Build
run: make -f builder.Makefile cross
- uses: ncipollo/release-action@v1
with:
artifacts: "bin/*"
prerelease: true
token: ${{ secrets.GITHUB_TOKEN }}
| .github/workflows/release.yaml | 0 | https://github.com/docker/compose/commit/66a83410dd768b25a93a8028c900b2dce10a6bac | [
0.00017990412015933543,
0.0001786973443813622,
0.00017678389849606901,
0.0001790506939869374,
0.0000011670747426251182
] |
{
"id": 3,
"code_window": [
"\t\tExpect(output).To(Not(ContainSubstring(\"test-example\")))\n",
"\t\tExpect(output).To(ContainSubstring(\"default *\"))\n",
"\t})\n",
"}\n",
"\n",
"func (s *E2eSuite) TestLegacy() {\n",
"\tIt(\"should list all legacy commands\", func() {\n",
"\t\toutput := s.NewDockerCommand(\"--help\").ExecOrDie()\n",
"\t\tExpect(output).To(ContainSubstring(\"swarm\"))\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (s *E2eSuite) TestSetupError() {\n",
"\tIt(\"should display an error if cannot shell out to docker-classic\", func() {\n",
"\t\terr := os.Setenv(\"PATH\", s.BinDir)\n",
"\t\tExpect(err).To(BeNil())\n",
"\t\terr = os.Remove(filepath.Join(s.BinDir, \"docker-classic\"))\n",
"\t\tExpect(err).To(BeNil())\n",
"\t\toutput, err := s.NewDockerCommand(\"ps\").Exec()\n",
"\t\tExpect(output).To(ContainSubstring(\"docker-classic\"))\n",
"\t\tExpect(output).To(ContainSubstring(\"not found\"))\n",
"\t\tExpect(err).NotTo(BeNil())\n",
"\t})\n",
"}\n",
"\n"
],
"file_path": "tests/e2e/e2e_test.go",
"type": "add",
"edit_start_line_idx": 67
} | package proxy
import (
"io"
v1 "github.com/docker/api/protos/containers/v1"
)
type streamWriter struct {
stream v1.Containers_LogsServer
}
func newStreamWriter(stream v1.Containers_LogsServer) io.Writer {
return &streamWriter{
stream: stream,
}
}
func (w *streamWriter) Write(p []byte) (n int, err error) {
return len(p), w.stream.Send(&v1.LogsResponse{
Logs: p,
})
}
| server/proxy/streamwriter.go | 0 | https://github.com/docker/compose/commit/66a83410dd768b25a93a8028c900b2dce10a6bac | [
0.00017699979071039706,
0.00017356239550281316,
0.0001712469384074211,
0.0001724404573906213,
0.0000024789630970190046
] |
{
"id": 3,
"code_window": [
"\t\tExpect(output).To(Not(ContainSubstring(\"test-example\")))\n",
"\t\tExpect(output).To(ContainSubstring(\"default *\"))\n",
"\t})\n",
"}\n",
"\n",
"func (s *E2eSuite) TestLegacy() {\n",
"\tIt(\"should list all legacy commands\", func() {\n",
"\t\toutput := s.NewDockerCommand(\"--help\").ExecOrDie()\n",
"\t\tExpect(output).To(ContainSubstring(\"swarm\"))\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (s *E2eSuite) TestSetupError() {\n",
"\tIt(\"should display an error if cannot shell out to docker-classic\", func() {\n",
"\t\terr := os.Setenv(\"PATH\", s.BinDir)\n",
"\t\tExpect(err).To(BeNil())\n",
"\t\terr = os.Remove(filepath.Join(s.BinDir, \"docker-classic\"))\n",
"\t\tExpect(err).To(BeNil())\n",
"\t\toutput, err := s.NewDockerCommand(\"ps\").Exec()\n",
"\t\tExpect(output).To(ContainSubstring(\"docker-classic\"))\n",
"\t\tExpect(output).To(ContainSubstring(\"not found\"))\n",
"\t\tExpect(err).NotTo(BeNil())\n",
"\t})\n",
"}\n",
"\n"
],
"file_path": "tests/e2e/e2e_test.go",
"type": "add",
"edit_start_line_idx": 67
} | # Copyright (c) 2020 Docker Inc.
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH
# THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
PWD = $(shell pwd)
export DOCKER_BUILDKIT=1
all: cli
protos: ## Generate go code from .proto files
@docker build . \
--output type=local,dest=./protos \
--target protos
cli: ## Compile the cli
@docker build . \
--output type=local,dest=./bin \
--build-arg TARGET_OS=${GOOS} \
--build-arg TARGET_ARCH=${GOARCH} \
--target cli
e2e-local: ## Run End to end local tests
go test -v ./tests/e2e ./moby/e2e
e2e-aci: ## Run End to end ACI tests (requires azure login)
go test -v ./tests/aci-e2e
cross: ## Compile the CLI for linux, darwin and windows
@docker build . \
--output type=local,dest=./bin \
--target cross
test: ## Run unit tests
@docker build . \
--target test
cache-clear: ## Clear the builder cache
@docker builder prune --force --filter type=exec.cachemount --filter=unused-for=24h
lint: ## run linter(s)
@docker build . \
--target lint
classic-link: ## create docker-classic symlink if does not already exist
ln -s /usr/local/bin/docker-classic /Applications/Docker.app/Contents/Resources/bin/docker
help: ## Show help
@echo Please specify a build target. The choices are:
@grep -E '^[0-9a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
FORCE:
.PHONY: all protos cli e2e-local cross test cache-clear lint classic-link help
| Makefile | 0 | https://github.com/docker/compose/commit/66a83410dd768b25a93a8028c900b2dce10a6bac | [
0.00017819787899497896,
0.00017385785758960992,
0.0001701812434475869,
0.0001731626398395747,
0.000002673996050361893
] |
{
"id": 0,
"code_window": [
"\t\tgetCert = globalTLSCerts.GetCertificate\n",
"\t}\n",
"\n",
"\thttpServer := xhttp.NewServer([]string{globalMinioAddr},\n",
"\t\tcriticalErrorHandler{corsHandler(router)}, getCert)\n",
"\thttpServer.BaseContext = func(listener net.Listener) context.Context {\n",
"\t\treturn GlobalContext\n",
"\t}\n",
"\tgo func() {\n",
"\t\tglobalHTTPServerErrorCh <- httpServer.Start(GlobalContext)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlisteners := ctx.Int(\"listeners\")\n",
"\tif listeners == 0 {\n",
"\t\tlisteners = 1\n",
"\t}\n",
"\taddrs := make([]string, 0, listeners)\n",
"\tfor i := 0; i < listeners; i++ {\n",
"\t\taddrs = append(addrs, globalMinioAddr)\n",
"\t}\n",
"\n",
"\thttpServer := xhttp.NewServer(addrs, criticalErrorHandler{corsHandler(router)}, getCert)\n"
],
"file_path": "cmd/gateway-main.go",
"type": "replace",
"edit_start_line_idx": 262
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package http
import (
"context"
"fmt"
"net"
"syscall"
)
type acceptResult struct {
conn net.Conn
err error
}
// httpListener - HTTP listener capable of handling multiple server addresses.
type httpListener struct {
tcpListeners []*net.TCPListener // underlaying TCP listeners.
acceptCh chan acceptResult // channel where all TCP listeners write accepted connection.
ctx context.Context
ctxCanceler context.CancelFunc
}
// start - starts separate goroutine for each TCP listener. A valid new connection is passed to httpListener.acceptCh.
func (listener *httpListener) start() {
// Closure to send acceptResult to acceptCh.
// It returns true if the result is sent else false if returns when doneCh is closed.
send := func(result acceptResult) bool {
select {
case listener.acceptCh <- result:
// Successfully written to acceptCh
return true
case <-listener.ctx.Done():
// As stop signal is received, close accepted connection.
if result.conn != nil {
result.conn.Close()
}
return false
}
}
// Closure to handle single connection.
handleConn := func(tcpConn *net.TCPConn) {
tcpConn.SetKeepAlive(true)
send(acceptResult{tcpConn, nil})
}
// Closure to handle TCPListener until done channel is closed.
handleListener := func(tcpListener *net.TCPListener) {
for {
tcpConn, err := tcpListener.AcceptTCP()
if err != nil {
// Returns when send fails.
if !send(acceptResult{nil, err}) {
return
}
} else {
go handleConn(tcpConn)
}
}
}
// Start separate goroutine for each TCP listener to handle connection.
for _, tcpListener := range listener.tcpListeners {
go handleListener(tcpListener)
}
}
// Accept - reads from httpListener.acceptCh for one of previously accepted TCP connection and returns the same.
func (listener *httpListener) Accept() (conn net.Conn, err error) {
select {
case result, ok := <-listener.acceptCh:
if ok {
return result.conn, result.err
}
case <-listener.ctx.Done():
}
return nil, syscall.EINVAL
}
// Close - closes underneath all TCP listeners.
func (listener *httpListener) Close() (err error) {
listener.ctxCanceler()
for i := range listener.tcpListeners {
listener.tcpListeners[i].Close()
}
return nil
}
// Addr - net.Listener interface compatible method returns net.Addr. In case of multiple TCP listeners, it returns '0.0.0.0' as IP address.
func (listener *httpListener) Addr() (addr net.Addr) {
addr = listener.tcpListeners[0].Addr()
if len(listener.tcpListeners) == 1 {
return addr
}
tcpAddr := addr.(*net.TCPAddr)
if ip := net.ParseIP("0.0.0.0"); ip != nil {
tcpAddr.IP = ip
}
addr = tcpAddr
return addr
}
// Addrs - returns all address information of TCP listeners.
func (listener *httpListener) Addrs() (addrs []net.Addr) {
for i := range listener.tcpListeners {
addrs = append(addrs, listener.tcpListeners[i].Addr())
}
return addrs
}
// newHTTPListener - creates new httpListener object which is interface compatible to net.Listener.
// httpListener is capable to
// * listen to multiple addresses
// * controls incoming connections only doing HTTP protocol
func newHTTPListener(ctx context.Context, serverAddrs []string) (listener *httpListener, err error) {
var tcpListeners []*net.TCPListener
// Close all opened listeners on error
defer func() {
if err == nil {
return
}
for _, tcpListener := range tcpListeners {
// Ignore error on close.
tcpListener.Close()
}
}()
for _, serverAddr := range serverAddrs {
var l net.Listener
if l, err = listenCfg.Listen(ctx, "tcp", serverAddr); err != nil {
return nil, err
}
tcpListener, ok := l.(*net.TCPListener)
if !ok {
return nil, fmt.Errorf("unexpected listener type found %v, expected net.TCPListener", l)
}
tcpListeners = append(tcpListeners, tcpListener)
}
listener = &httpListener{
tcpListeners: tcpListeners,
acceptCh: make(chan acceptResult, len(tcpListeners)),
}
listener.ctx, listener.ctxCanceler = context.WithCancel(ctx)
listener.start()
return listener, nil
}
| internal/http/listener.go | 1 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.016657164320349693,
0.0033616661094129086,
0.00016770949878264219,
0.0025160377845168114,
0.004031762480735779
] |
{
"id": 0,
"code_window": [
"\t\tgetCert = globalTLSCerts.GetCertificate\n",
"\t}\n",
"\n",
"\thttpServer := xhttp.NewServer([]string{globalMinioAddr},\n",
"\t\tcriticalErrorHandler{corsHandler(router)}, getCert)\n",
"\thttpServer.BaseContext = func(listener net.Listener) context.Context {\n",
"\t\treturn GlobalContext\n",
"\t}\n",
"\tgo func() {\n",
"\t\tglobalHTTPServerErrorCh <- httpServer.Start(GlobalContext)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlisteners := ctx.Int(\"listeners\")\n",
"\tif listeners == 0 {\n",
"\t\tlisteners = 1\n",
"\t}\n",
"\taddrs := make([]string, 0, listeners)\n",
"\tfor i := 0; i < listeners; i++ {\n",
"\t\taddrs = append(addrs, globalMinioAddr)\n",
"\t}\n",
"\n",
"\thttpServer := xhttp.NewServer(addrs, criticalErrorHandler{corsHandler(router)}, getCert)\n"
],
"file_path": "cmd/gateway-main.go",
"type": "replace",
"edit_start_line_idx": 262
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"os"
"testing"
)
// Tests - mkdirAll()
func TestOSMkdirAll(t *testing.T) {
// create xlStorage test setup
_, path, err := newXLStorageTestSetup()
if err != nil {
t.Fatalf("Unable to create xlStorage test setup, %s", err)
}
defer os.RemoveAll(path)
if err = mkdirAll("", 0777); err != errInvalidArgument {
t.Fatal("Unexpected error", err)
}
if err = mkdirAll(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"), 0777); err != errFileNameTooLong {
t.Fatal("Unexpected error", err)
}
if err = mkdirAll(pathJoin(path, "success-vol", "success-object"), 0777); err != nil {
t.Fatal("Unexpected error", err)
}
}
// Tests - renameAll()
func TestOSRenameAll(t *testing.T) {
// create xlStorage test setup
_, path, err := newXLStorageTestSetup()
if err != nil {
t.Fatalf("Unable to create xlStorage test setup, %s", err)
}
defer os.RemoveAll(path)
if err = mkdirAll(pathJoin(path, "testvolume1"), 0777); err != nil {
t.Fatal(err)
}
if err = renameAll("", "foo"); err != errInvalidArgument {
t.Fatal(err)
}
if err = renameAll("foo", ""); err != errInvalidArgument {
t.Fatal(err)
}
if err = renameAll(pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); err != nil {
t.Fatal(err)
}
if err = renameAll(pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); err != errFileNotFound {
t.Fatal(err)
}
if err = renameAll(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"), pathJoin(path, "testvolume2")); err != errFileNameTooLong {
t.Fatal("Unexpected error", err)
}
if err = renameAll(pathJoin(path, "testvolume1"), pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); err != errFileNameTooLong {
t.Fatal("Unexpected error", err)
}
}
| cmd/os-reliable_test.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.0001783923653420061,
0.00017657136777415872,
0.00017319533799309283,
0.0001773242256604135,
0.0000018537929236117634
] |
{
"id": 0,
"code_window": [
"\t\tgetCert = globalTLSCerts.GetCertificate\n",
"\t}\n",
"\n",
"\thttpServer := xhttp.NewServer([]string{globalMinioAddr},\n",
"\t\tcriticalErrorHandler{corsHandler(router)}, getCert)\n",
"\thttpServer.BaseContext = func(listener net.Listener) context.Context {\n",
"\t\treturn GlobalContext\n",
"\t}\n",
"\tgo func() {\n",
"\t\tglobalHTTPServerErrorCh <- httpServer.Start(GlobalContext)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlisteners := ctx.Int(\"listeners\")\n",
"\tif listeners == 0 {\n",
"\t\tlisteners = 1\n",
"\t}\n",
"\taddrs := make([]string, 0, listeners)\n",
"\tfor i := 0; i < listeners; i++ {\n",
"\t\taddrs = append(addrs, globalMinioAddr)\n",
"\t}\n",
"\n",
"\thttpServer := xhttp.NewServer(addrs, criticalErrorHandler{corsHandler(router)}, getCert)\n"
],
"file_path": "cmd/gateway-main.go",
"type": "replace",
"edit_start_line_idx": 262
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/minio/madmin-go"
)
type warmBackendAzure struct {
serviceURL azblob.ServiceURL
Bucket string
Prefix string
StorageClass string
}
func (az *warmBackendAzure) getDest(object string) string {
destObj := object
if az.Prefix != "" {
destObj = fmt.Sprintf("%s/%s", az.Prefix, object)
}
return destObj
}
func (az *warmBackendAzure) tier() azblob.AccessTierType {
for _, t := range azblob.PossibleAccessTierTypeValues() {
if strings.ToLower(az.StorageClass) == strings.ToLower(string(t)) {
return t
}
}
return azblob.AccessTierType("")
}
// FIXME: add support for remote version ID in Azure remote tier and remove
// this. Currently it's a no-op.
func (az *warmBackendAzure) Put(ctx context.Context, object string, r io.Reader, length int64) (remoteVersionID, error) {
blobURL := az.serviceURL.NewContainerURL(az.Bucket).NewBlockBlobURL(az.getDest(object))
// set tier if specified -
if az.StorageClass != "" {
if _, err := blobURL.SetTier(ctx, az.tier(), azblob.LeaseAccessConditions{}); err != nil {
return "", azureToObjectError(err, az.Bucket, object)
}
}
res, err := azblob.UploadStreamToBlockBlob(ctx, r, blobURL, azblob.UploadStreamToBlockBlobOptions{})
if err != nil {
return "", azureToObjectError(err, az.Bucket, object)
}
return remoteVersionID(res.Version()), nil
}
func (az *warmBackendAzure) Get(ctx context.Context, object string, rv remoteVersionID, opts WarmBackendGetOpts) (r io.ReadCloser, err error) {
if opts.startOffset < 0 {
return nil, InvalidRange{}
}
blobURL := az.serviceURL.NewContainerURL(az.Bucket).NewBlobURL(az.getDest(object))
blob, err := blobURL.Download(ctx, opts.startOffset, opts.length, azblob.BlobAccessConditions{}, false)
if err != nil {
return nil, azureToObjectError(err, az.Bucket, object)
}
rc := blob.Body(azblob.RetryReaderOptions{})
return rc, nil
}
func (az *warmBackendAzure) Remove(ctx context.Context, object string, rv remoteVersionID) error {
blob := az.serviceURL.NewContainerURL(az.Bucket).NewBlobURL(az.getDest(object))
_, err := blob.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
return azureToObjectError(err, az.Bucket, object)
}
func (az *warmBackendAzure) InUse(ctx context.Context) (bool, error) {
containerURL := az.serviceURL.NewContainerURL(az.Bucket)
resp, err := containerURL.ListBlobsHierarchySegment(ctx, azblob.Marker{}, "/", azblob.ListBlobsSegmentOptions{
Prefix: az.Prefix,
MaxResults: int32(1),
})
if err != nil {
return false, azureToObjectError(err, az.Bucket, az.Prefix)
}
if len(resp.Segment.BlobPrefixes) > 0 || len(resp.Segment.BlobItems) > 0 {
return true, nil
}
return false, nil
}
func newWarmBackendAzure(conf madmin.TierAzure) (*warmBackendAzure, error) {
credential, err := azblob.NewSharedKeyCredential(conf.AccountName, conf.AccountKey)
if err != nil {
if _, ok := err.(base64.CorruptInputError); ok {
return nil, errors.New("invalid Azure credentials")
}
return nil, err
}
p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
u, err := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", conf.AccountName))
if err != nil {
return nil, err
}
serviceURL := azblob.NewServiceURL(*u, p)
return &warmBackendAzure{
serviceURL: serviceURL,
Bucket: conf.Bucket,
Prefix: strings.TrimSuffix(conf.Prefix, slashSeparator),
StorageClass: conf.StorageClass,
}, nil
}
// Convert azure errors to minio object layer errors.
func azureToObjectError(err error, params ...string) error {
if err == nil {
return nil
}
bucket := ""
object := ""
if len(params) >= 1 {
bucket = params[0]
}
if len(params) == 2 {
object = params[1]
}
azureErr, ok := err.(azblob.StorageError)
if !ok {
// We don't interpret non Azure errors. As azure errors will
// have StatusCode to help to convert to object errors.
return err
}
serviceCode := string(azureErr.ServiceCode())
statusCode := azureErr.Response().StatusCode
return azureCodesToObjectError(err, serviceCode, statusCode, bucket, object)
}
func azureCodesToObjectError(err error, serviceCode string, statusCode int, bucket string, object string) error {
switch serviceCode {
case "ContainerNotFound", "ContainerBeingDeleted":
err = BucketNotFound{Bucket: bucket}
case "ContainerAlreadyExists":
err = BucketExists{Bucket: bucket}
case "InvalidResourceName":
err = BucketNameInvalid{Bucket: bucket}
case "RequestBodyTooLarge":
err = PartTooBig{}
case "InvalidMetadata":
err = UnsupportedMetadata{}
case "BlobAccessTierNotSupportedForAccountType":
err = NotImplemented{}
case "OutOfRangeInput":
err = ObjectNameInvalid{
Bucket: bucket,
Object: object,
}
default:
switch statusCode {
case http.StatusNotFound:
if object != "" {
err = ObjectNotFound{
Bucket: bucket,
Object: object,
}
} else {
err = BucketNotFound{Bucket: bucket}
}
case http.StatusBadRequest:
err = BucketNameInvalid{Bucket: bucket}
}
}
return err
}
| cmd/warm-backend-azure.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.00017829288844950497,
0.00017105611914303154,
0.00015997506852727383,
0.00017219372966792434,
0.000005090038030175492
] |
{
"id": 0,
"code_window": [
"\t\tgetCert = globalTLSCerts.GetCertificate\n",
"\t}\n",
"\n",
"\thttpServer := xhttp.NewServer([]string{globalMinioAddr},\n",
"\t\tcriticalErrorHandler{corsHandler(router)}, getCert)\n",
"\thttpServer.BaseContext = func(listener net.Listener) context.Context {\n",
"\t\treturn GlobalContext\n",
"\t}\n",
"\tgo func() {\n",
"\t\tglobalHTTPServerErrorCh <- httpServer.Start(GlobalContext)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlisteners := ctx.Int(\"listeners\")\n",
"\tif listeners == 0 {\n",
"\t\tlisteners = 1\n",
"\t}\n",
"\taddrs := make([]string, 0, listeners)\n",
"\tfor i := 0; i < listeners; i++ {\n",
"\t\taddrs = append(addrs, globalMinioAddr)\n",
"\t}\n",
"\n",
"\thttpServer := xhttp.NewServer(addrs, criticalErrorHandler{corsHandler(router)}, getCert)\n"
],
"file_path": "cmd/gateway-main.go",
"type": "replace",
"edit_start_line_idx": 262
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"errors"
pathutil "path"
"runtime"
"sort"
"strings"
"sync"
"fmt"
"time"
"github.com/minio/minio/internal/dsync"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/lsync"
)
// local lock servers
var globalLockServer *localLocker
// RWLocker - locker interface to introduce GetRLock, RUnlock.
type RWLocker interface {
GetLock(ctx context.Context, timeout *dynamicTimeout) (lkCtx LockContext, timedOutErr error)
Unlock(cancel context.CancelFunc)
GetRLock(ctx context.Context, timeout *dynamicTimeout) (lkCtx LockContext, timedOutErr error)
RUnlock(cancel context.CancelFunc)
}
// LockContext lock context holds the lock backed context and canceler for the context.
type LockContext struct {
ctx context.Context
cancel context.CancelFunc
}
// Context returns lock context
func (l LockContext) Context() context.Context {
return l.ctx
}
// Cancel function calls cancel() function
func (l LockContext) Cancel() {
if l.cancel != nil {
l.cancel()
}
}
// newNSLock - return a new name space lock map.
func newNSLock(isDistErasure bool) *nsLockMap {
nsMutex := nsLockMap{
isDistErasure: isDistErasure,
}
if isDistErasure {
return &nsMutex
}
nsMutex.lockMap = make(map[string]*nsLock)
return &nsMutex
}
// nsLock - provides primitives for locking critical namespace regions.
type nsLock struct {
ref int32
*lsync.LRWMutex
}
// nsLockMap - namespace lock map, provides primitives to Lock,
// Unlock, RLock and RUnlock.
type nsLockMap struct {
// Indicates if namespace is part of a distributed setup.
isDistErasure bool
lockMap map[string]*nsLock
lockMapMutex sync.Mutex
}
// Lock the namespace resource.
func (n *nsLockMap) lock(ctx context.Context, volume string, path string, lockSource, opsID string, readLock bool, timeout time.Duration) (locked bool) {
resource := pathJoin(volume, path)
n.lockMapMutex.Lock()
nsLk, found := n.lockMap[resource]
if !found {
nsLk = &nsLock{
LRWMutex: lsync.NewLRWMutex(),
}
// Add a count to indicate that a parallel unlock doesn't clear this entry.
}
nsLk.ref++
n.lockMap[resource] = nsLk
n.lockMapMutex.Unlock()
// Locking here will block (until timeout).
if readLock {
locked = nsLk.GetRLock(ctx, opsID, lockSource, timeout)
} else {
locked = nsLk.GetLock(ctx, opsID, lockSource, timeout)
}
if !locked { // We failed to get the lock
// Decrement ref count since we failed to get the lock
n.lockMapMutex.Lock()
n.lockMap[resource].ref--
if n.lockMap[resource].ref < 0 {
logger.CriticalIf(GlobalContext, errors.New("resource reference count was lower than 0"))
}
if n.lockMap[resource].ref == 0 {
// Remove from the map if there are no more references.
delete(n.lockMap, resource)
}
n.lockMapMutex.Unlock()
}
return
}
// Unlock the namespace resource.
func (n *nsLockMap) unlock(volume string, path string, readLock bool) {
resource := pathJoin(volume, path)
n.lockMapMutex.Lock()
defer n.lockMapMutex.Unlock()
if _, found := n.lockMap[resource]; !found {
return
}
if readLock {
n.lockMap[resource].RUnlock()
} else {
n.lockMap[resource].Unlock()
}
n.lockMap[resource].ref--
if n.lockMap[resource].ref < 0 {
logger.CriticalIf(GlobalContext, errors.New("resource reference count was lower than 0"))
}
if n.lockMap[resource].ref == 0 {
// Remove from the map if there are no more references.
delete(n.lockMap, resource)
}
}
// dsync's distributed lock instance.
type distLockInstance struct {
rwMutex *dsync.DRWMutex
opsID string
}
// Lock - block until write lock is taken or timeout has occurred.
func (di *distLockInstance) GetLock(ctx context.Context, timeout *dynamicTimeout) (LockContext, error) {
lockSource := getSource(2)
start := UTCNow()
newCtx, cancel := context.WithCancel(ctx)
if !di.rwMutex.GetLock(newCtx, cancel, di.opsID, lockSource, dsync.Options{
Timeout: timeout.Timeout(),
}) {
timeout.LogFailure()
cancel()
return LockContext{ctx: ctx, cancel: func() {}}, OperationTimedOut{}
}
timeout.LogSuccess(UTCNow().Sub(start))
return LockContext{ctx: newCtx, cancel: cancel}, nil
}
// Unlock - block until write lock is released.
func (di *distLockInstance) Unlock(cancel context.CancelFunc) {
if cancel != nil {
cancel()
}
di.rwMutex.Unlock()
}
// RLock - block until read lock is taken or timeout has occurred.
func (di *distLockInstance) GetRLock(ctx context.Context, timeout *dynamicTimeout) (LockContext, error) {
lockSource := getSource(2)
start := UTCNow()
newCtx, cancel := context.WithCancel(ctx)
if !di.rwMutex.GetRLock(ctx, cancel, di.opsID, lockSource, dsync.Options{
Timeout: timeout.Timeout(),
}) {
timeout.LogFailure()
cancel()
return LockContext{ctx: ctx, cancel: func() {}}, OperationTimedOut{}
}
timeout.LogSuccess(UTCNow().Sub(start))
return LockContext{ctx: newCtx, cancel: cancel}, nil
}
// RUnlock - block until read lock is released.
func (di *distLockInstance) RUnlock(cancel context.CancelFunc) {
if cancel != nil {
cancel()
}
di.rwMutex.RUnlock()
}
// localLockInstance - frontend/top-level interface for namespace locks.
type localLockInstance struct {
ns *nsLockMap
volume string
paths []string
opsID string
}
// NewNSLock - returns a lock instance for a given volume and
// path. The returned lockInstance object encapsulates the nsLockMap,
// volume, path and operation ID.
func (n *nsLockMap) NewNSLock(lockers func() ([]dsync.NetLocker, string), volume string, paths ...string) RWLocker {
opsID := mustGetUUID()
if n.isDistErasure {
drwmutex := dsync.NewDRWMutex(&dsync.Dsync{
GetLockers: lockers,
}, pathsJoinPrefix(volume, paths...)...)
return &distLockInstance{drwmutex, opsID}
}
sort.Strings(paths)
return &localLockInstance{n, volume, paths, opsID}
}
// Lock - block until write lock is taken or timeout has occurred.
func (li *localLockInstance) GetLock(ctx context.Context, timeout *dynamicTimeout) (_ LockContext, timedOutErr error) {
lockSource := getSource(2)
start := UTCNow()
const readLock = false
success := make([]int, len(li.paths))
for i, path := range li.paths {
if !li.ns.lock(ctx, li.volume, path, lockSource, li.opsID, readLock, timeout.Timeout()) {
timeout.LogFailure()
for si, sint := range success {
if sint == 1 {
li.ns.unlock(li.volume, li.paths[si], readLock)
}
}
return LockContext{}, OperationTimedOut{}
}
success[i] = 1
}
timeout.LogSuccess(UTCNow().Sub(start))
return LockContext{ctx: ctx, cancel: func() {}}, nil
}
// Unlock - block until write lock is released.
func (li *localLockInstance) Unlock(cancel context.CancelFunc) {
if cancel != nil {
cancel()
}
const readLock = false
for _, path := range li.paths {
li.ns.unlock(li.volume, path, readLock)
}
}
// RLock - block until read lock is taken or timeout has occurred.
func (li *localLockInstance) GetRLock(ctx context.Context, timeout *dynamicTimeout) (_ LockContext, timedOutErr error) {
lockSource := getSource(2)
start := UTCNow()
const readLock = true
success := make([]int, len(li.paths))
for i, path := range li.paths {
if !li.ns.lock(ctx, li.volume, path, lockSource, li.opsID, readLock, timeout.Timeout()) {
timeout.LogFailure()
for si, sint := range success {
if sint == 1 {
li.ns.unlock(li.volume, li.paths[si], readLock)
}
}
return LockContext{}, OperationTimedOut{}
}
success[i] = 1
}
timeout.LogSuccess(UTCNow().Sub(start))
return LockContext{ctx: ctx, cancel: func() {}}, nil
}
// RUnlock - block until read lock is released.
func (li *localLockInstance) RUnlock(cancel context.CancelFunc) {
if cancel != nil {
cancel()
}
const readLock = true
for _, path := range li.paths {
li.ns.unlock(li.volume, path, readLock)
}
}
func getSource(n int) string {
var funcName string
pc, filename, lineNum, ok := runtime.Caller(n)
if ok {
filename = pathutil.Base(filename)
funcName = strings.TrimPrefix(runtime.FuncForPC(pc).Name(),
"github.com/minio/minio/cmd.")
} else {
filename = "<unknown>"
lineNum = 0
}
return fmt.Sprintf("[%s:%d:%s()]", filename, lineNum, funcName)
}
| cmd/namespace-lock.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.0014162866864353418,
0.0002101095742546022,
0.00016304812743328512,
0.0001703898306004703,
0.00021673788432963192
] |
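Across the rows above, each hunk is paired with one file it actually edits (label 1) and several unrelated files (label 0), and the positive pairs carry visibly larger `dependency_score` entries (about 0.97 for `cli/main.go` against roughly 2e-4 for the negatives). Treating the maximum entry as a relevance score is an assumption; the dump never states how the five floats combine. With that caveat, a ranking sketch:

```go
package main

import (
	"fmt"
	"sort"
)

// candidate pairs a file path with its dependency_score vector, as in
// the rows above. Using the maximum entry as the relevance score is an
// assumption, not documented behavior of this dataset.
type candidate struct {
	FilePath string
	Scores   [5]float64
}

func maxScore(c candidate) float64 {
	m := c.Scores[0]
	for _, s := range c.Scores[1:] {
		if s > m {
			m = s
		}
	}
	return m
}

func main() {
	// Scores abbreviated from two of the rows above.
	cands := []candidate{
		{"cli/main.go", [5]float64{0.9709, 0.0737, 0.0001, 0.0001, 0.2169}},
		{"Makefile", [5]float64{0.00017, 0.00017, 0.00017, 0.00017, 0}},
	}
	sort.Slice(cands, func(i, j int) bool {
		return maxScore(cands[i]) > maxScore(cands[j])
	})
	fmt.Println("best match:", cands[0].FilePath) // cli/main.go
}
```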
{
"id": 1,
"code_window": [
"\t\tName: \"address\",\n",
"\t\tValue: \":\" + GlobalMinioDefaultPort,\n",
"\t\tUsage: \"bind to a specific ADDRESS:PORT, ADDRESS can be an IP or hostname\",\n",
"\t},\n",
"\tcli.StringFlag{\n",
"\t\tName: \"console-address\",\n",
"\t\tUsage: \"bind to a specific ADDRESS:PORT for embedded Console UI, ADDRESS can be an IP or hostname\",\n",
"\t},\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcli.IntFlag{\n",
"\t\tName: \"listeners\",\n",
"\t\tValue: 1,\n",
"\t\tUsage: \"bind N number of listeners per ADDRESS:PORT\",\n",
"\t},\n"
],
"file_path": "cmd/server-main.go",
"type": "add",
"edit_start_line_idx": 57
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"fmt"
"net"
"net/url"
"os"
"os/signal"
"strings"
"syscall"
"github.com/gorilla/mux"
"github.com/minio/cli"
"github.com/minio/madmin-go"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/certs"
"github.com/minio/pkg/env"
)
var (
gatewayCmd = cli.Command{
Name: "gateway",
Usage: "start object storage gateway",
Flags: append(ServerFlags, GlobalFlags...),
HideHelpCommand: true,
}
)
// GatewayLocker implements custom NewNSLock implementation
type GatewayLocker struct {
ObjectLayer
nsMutex *nsLockMap
}
// NewNSLock - implements gateway level locker
func (l *GatewayLocker) NewNSLock(bucket string, objects ...string) RWLocker {
return l.nsMutex.NewNSLock(nil, bucket, objects...)
}
// Walk - implements common gateway level Walker, to walk on all objects recursively at a prefix
func (l *GatewayLocker) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts ObjectOptions) error {
walk := func(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo) error {
go func() {
// Make sure the results channel is ready to be read when we're done.
defer close(results)
var marker string
for {
// set maxKeys to '0' to list maximum possible objects in single call.
loi, err := l.ObjectLayer.ListObjects(ctx, bucket, prefix, marker, "", 0)
if err != nil {
logger.LogIf(ctx, err)
return
}
marker = loi.NextMarker
for _, obj := range loi.Objects {
select {
case results <- obj:
case <-ctx.Done():
return
}
}
if !loi.IsTruncated {
break
}
}
}()
return nil
}
if err := l.ObjectLayer.Walk(ctx, bucket, prefix, results, opts); err != nil {
if _, ok := err.(NotImplemented); ok {
return walk(ctx, bucket, prefix, results)
}
return err
}
return nil
}
// NewGatewayLayerWithLocker - initialize gateway with locker.
func NewGatewayLayerWithLocker(gwLayer ObjectLayer) ObjectLayer {
return &GatewayLocker{ObjectLayer: gwLayer, nsMutex: newNSLock(false)}
}
// RegisterGatewayCommand registers a new command for gateway.
func RegisterGatewayCommand(cmd cli.Command) error {
cmd.Flags = append(append(cmd.Flags, ServerFlags...), GlobalFlags...)
gatewayCmd.Subcommands = append(gatewayCmd.Subcommands, cmd)
return nil
}
// ParseGatewayEndpoint - Return endpoint.
func ParseGatewayEndpoint(arg string) (endPoint string, secure bool, err error) {
schemeSpecified := len(strings.Split(arg, "://")) > 1
if !schemeSpecified {
// Default connection will be "secure".
arg = "https://" + arg
}
u, err := url.Parse(arg)
if err != nil {
return "", false, err
}
switch u.Scheme {
case "http":
return u.Host, false, nil
case "https":
return u.Host, true, nil
default:
return "", false, fmt.Errorf("Unrecognized scheme %s", u.Scheme)
}
}
// ValidateGatewayArguments - Validate gateway arguments.
func ValidateGatewayArguments(serverAddr, endpointAddr string) error {
if err := CheckLocalServerAddr(serverAddr); err != nil {
return err
}
if endpointAddr != "" {
// Reject the endpoint if it points to the gateway handler itself.
sameTarget, err := sameLocalAddrs(endpointAddr, serverAddr)
if err != nil {
return err
}
if sameTarget {
return fmt.Errorf("endpoint points to the local gateway")
}
}
return nil
}
// StartGateway - handler for 'minio gateway <name>'.
func StartGateway(ctx *cli.Context, gw Gateway) {
signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
go handleSignals()
// This is only to uniquely identify each gateway deployment.
globalDeploymentID = env.Get("MINIO_GATEWAY_DEPLOYMENT_ID", mustGetUUID())
logger.SetDeploymentID(globalDeploymentID)
if gw == nil {
logger.FatalIf(errUnexpected, "Gateway implementation not initialized")
}
// Validate if we have access, secret set through environment.
globalGatewayName = gw.Name()
gatewayName := gw.Name()
if ctx.Args().First() == "help" {
cli.ShowCommandHelpAndExit(ctx, gatewayName, 1)
}
// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(GlobalContext)
logger.AddTarget(globalConsoleSys)
// Handle common command args.
handleCommonCmdArgs(ctx)
// Check and load TLS certificates.
var err error
globalPublicCerts, globalTLSCerts, globalIsTLS, err = getTLSConfig()
logger.FatalIf(err, "Invalid TLS certificate file")
// Check and load Root CAs.
globalRootCAs, err = certs.GetRootCAs(globalCertsCADir.Get())
logger.FatalIf(err, "Failed to read root CAs (%v)", err)
// Add the global public crts as part of global root CAs
for _, publicCrt := range globalPublicCerts {
globalRootCAs.AddCert(publicCrt)
}
// Register root CAs for remote ENVs
env.RegisterGlobalCAs(globalRootCAs)
// Initialize all help
initHelp()
// On macOS, if a process already listens on LOCALIPADDR:PORT, net.Listen() falls back
// to the IPv6 address, i.e. minio will start listening on the IPv6 address whereas another
// (non-)minio process is listening on the IPv4 address of the given port.
// To avoid this error situation we check for port availability.
logger.FatalIf(checkPortAvailability(globalMinioHost, globalMinioPort), "Unable to start the gateway")
// Handle gateway specific env
gatewayHandleEnvVars()
// Set system resources to maximum.
setMaxResources()
// Set when gateway is enabled
globalIsGateway = true
// TODO: We need to move this code with globalConfigSys.Init()
// for now keep it here such that "s3" gateway layer initializes
// itself properly when KMS is set.
// Initialize server config.
srvCfg := newServerConfig()
// Override any values from ENVs.
lookupConfigs(srvCfg, nil)
// hold the mutex lock before a new config is assigned.
globalServerConfigMu.Lock()
globalServerConfig = srvCfg
globalServerConfigMu.Unlock()
// Initialize router. `SkipClean(true)` stops gorilla/mux from
// normalizing URL path minio/minio#3256
// avoid URL path encoding minio/minio#8950
router := mux.NewRouter().SkipClean(true).UseEncodedPath()
// Enable STS router if etcd is enabled.
registerSTSRouter(router)
// Enable IAM admin APIs if etcd is enabled, if not just enable basic
// operations such as profiling, server info etc.
registerAdminRouter(router, false)
// Add healthcheck router
registerHealthCheckRouter(router)
// Add server metrics router
registerMetricsRouter(router)
// Add API router.
registerAPIRouter(router)
// Use all the middlewares
router.Use(globalHandlers...)
var getCert certs.GetCertificateFunc
if globalTLSCerts != nil {
getCert = globalTLSCerts.GetCertificate
}
httpServer := xhttp.NewServer([]string{globalMinioAddr},
criticalErrorHandler{corsHandler(router)}, getCert)
httpServer.BaseContext = func(listener net.Listener) context.Context {
return GlobalContext
}
go func() {
globalHTTPServerErrorCh <- httpServer.Start(GlobalContext)
}()
globalObjLayerMutex.Lock()
globalHTTPServer = httpServer
globalObjLayerMutex.Unlock()
newObject, err := gw.NewGatewayLayer(madmin.Credentials{
AccessKey: globalActiveCred.AccessKey,
SecretKey: globalActiveCred.SecretKey,
})
if err != nil {
logger.FatalIf(err, "Unable to initialize gateway backend")
}
newObject = NewGatewayLayerWithLocker(newObject)
// Calls all New() for all sub-systems.
newAllSubsystems()
// Once endpoints are finalized, initialize the new object api in safe mode.
globalObjLayerMutex.Lock()
globalObjectAPI = newObject
globalObjLayerMutex.Unlock()
if gatewayName == NASBackendGateway {
buckets, err := newObject.ListBuckets(GlobalContext)
if err != nil {
logger.Fatal(err, "Unable to list buckets")
}
logger.FatalIf(globalNotificationSys.Init(GlobalContext, buckets, newObject), "Unable to initialize notification system")
}
go globalIAMSys.Init(GlobalContext, newObject)
if globalCacheConfig.Enabled {
// initialize the new disk cache objects.
var cacheAPI CacheObjectLayer
cacheAPI, err = newServerCacheObjects(GlobalContext, globalCacheConfig)
logger.FatalIf(err, "Unable to initialize disk caching")
globalObjLayerMutex.Lock()
globalCacheObjectAPI = cacheAPI
globalObjLayerMutex.Unlock()
}
// Populate existing buckets to the etcd backend
if globalDNSConfig != nil {
buckets, err := newObject.ListBuckets(GlobalContext)
if err != nil {
logger.Fatal(err, "Unable to list buckets")
}
initFederatorBackend(buckets, newObject)
}
// Verify if object layer supports
// - encryption
// - compression
verifyObjectLayerFeatures("gateway "+gatewayName, newObject)
// Prints the formatted startup message once object layer is initialized.
if !globalCLIContext.Quiet && !globalInplaceUpdateDisabled {
// Check update mode.
checkUpdate(globalMinioModeGatewayPrefix + gatewayName)
}
if !globalCLIContext.Quiet {
// Print gateway startup message.
printGatewayStartupMessage(getAPIEndpoints(), gatewayName)
}
if globalBrowserEnabled {
globalConsoleSrv, err = initConsoleServer()
if err != nil {
logger.FatalIf(err, "Unable to initialize console service")
}
go func() {
logger.FatalIf(globalConsoleSrv.Serve(), "Unable to initialize console server")
}()
}
<-globalOSSignalCh
}
| cmd/gateway-main.go | 1 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.006043438799679279,
0.00046145019587129354,
0.0001627340679988265,
0.00017243242473341525,
0.0010686879977583885
] |
{
"id": 1,
"code_window": [
"\t\tName: \"address\",\n",
"\t\tValue: \":\" + GlobalMinioDefaultPort,\n",
"\t\tUsage: \"bind to a specific ADDRESS:PORT, ADDRESS can be an IP or hostname\",\n",
"\t},\n",
"\tcli.StringFlag{\n",
"\t\tName: \"console-address\",\n",
"\t\tUsage: \"bind to a specific ADDRESS:PORT for embedded Console UI, ADDRESS can be an IP or hostname\",\n",
"\t},\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcli.IntFlag{\n",
"\t\tName: \"listeners\",\n",
"\t\tValue: 1,\n",
"\t\tUsage: \"bind N number of listeners per ADDRESS:PORT\",\n",
"\t},\n"
],
"file_path": "cmd/server-main.go",
"type": "add",
"edit_start_line_idx": 57
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"strings"
"sync"
"github.com/dustin/go-humanize"
"github.com/minio/minio/internal/sync/errgroup"
)
const (
// Block size used for all internal operations version 1.
//
// TL;DR: not used anymore; xl.meta captures the right blockSize,
// so blockSizeV2 should be used for all future purposes.
// This value is kept here to calculate the max API
// requests based on RAM size for existing content.
blockSizeV1 = 10 * humanize.MiByte
// Block size used in erasure coding version 2.
blockSizeV2 = 1 * humanize.MiByte
// Buckets meta prefix.
bucketMetaPrefix = "buckets"
// ETag (hex encoded md5sum) of empty string.
emptyETag = "d41d8cd98f00b204e9800998ecf8427e"
)
// Global object layer mutex, used for safely updating object layer.
var globalObjLayerMutex sync.RWMutex
// Global object layer, only accessed by globalObjectAPI.
var globalObjectAPI ObjectLayer
// Global cacheObjects, only accessed by newCacheObjectsFn().
var globalCacheObjectAPI CacheObjectLayer
// Checks if the object is a directory, this logic uses
// if size == 0 and object ends with SlashSeparator then
// returns true.
func isObjectDir(object string, size int64) bool {
return HasSuffix(object, SlashSeparator) && size == 0
}
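// newStorageAPIWithoutHealthCheck - like newStorageAPI below, except that
// remote REST clients are created with their health-check routine disabled.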
func newStorageAPIWithoutHealthCheck(endpoint Endpoint) (storage StorageAPI, err error) {
if endpoint.IsLocal {
storage, err := newXLStorage(endpoint)
if err != nil {
return nil, err
}
return newXLStorageDiskIDCheck(storage), nil
}
return newStorageRESTClient(endpoint, false), nil
}
// Depending on the disk type network or local, initialize storage API.
func newStorageAPI(endpoint Endpoint) (storage StorageAPI, err error) {
if endpoint.IsLocal {
storage, err := newXLStorage(endpoint)
if err != nil {
return nil, err
}
return newXLStorageDiskIDCheck(storage), nil
}
return newStorageRESTClient(endpoint, true), nil
}
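// listObjectsNonSlash - handles listing when the delimiter is arbitrary (not
// SlashSeparator): it walks the tree recursively and synthesizes common
// prefixes by cutting each entry at the first delimiter occurrence past the
// requested prefix.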
func listObjectsNonSlash(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int, tpool *TreeWalkPool, listDir ListDirFunc, isLeaf IsLeafFunc, isLeafDir IsLeafDirFunc, getObjInfo func(context.Context, string, string) (ObjectInfo, error), getObjectInfoDirs ...func(context.Context, string, string) (ObjectInfo, error)) (loi ListObjectsInfo, err error) {
endWalkCh := make(chan struct{})
defer close(endWalkCh)
recursive := true
walkResultCh := startTreeWalk(ctx, bucket, prefix, "", recursive, listDir, isLeaf, isLeafDir, endWalkCh)
var objInfos []ObjectInfo
var eof bool
var prevPrefix string
for {
if len(objInfos) == maxKeys {
break
}
result, ok := <-walkResultCh
if !ok {
eof = true
break
}
var objInfo ObjectInfo
var err error
index := strings.Index(strings.TrimPrefix(result.entry, prefix), delimiter)
if index == -1 {
objInfo, err = getObjInfo(ctx, bucket, result.entry)
if err != nil {
// Ignore errFileNotFound as the object might have got
// deleted in the interim period of listing and getObjectInfo(),
// ignore quorum error as it might be an entry from an outdated disk.
if IsErrIgnored(err, []error{
errFileNotFound,
errErasureReadQuorum,
}...) {
continue
}
return loi, toObjectErr(err, bucket, prefix)
}
} else {
index = len(prefix) + index + len(delimiter)
currPrefix := result.entry[:index]
if currPrefix == prevPrefix {
continue
}
prevPrefix = currPrefix
objInfo = ObjectInfo{
Bucket: bucket,
Name: currPrefix,
IsDir: true,
}
}
if objInfo.Name <= marker {
continue
}
objInfos = append(objInfos, objInfo)
if result.end {
eof = true
break
}
}
result := ListObjectsInfo{}
for _, objInfo := range objInfos {
if objInfo.IsDir {
result.Prefixes = append(result.Prefixes, objInfo.Name)
continue
}
result.Objects = append(result.Objects, objInfo)
}
if !eof {
result.IsTruncated = true
if len(objInfos) > 0 {
result.NextMarker = objInfos[len(objInfos)-1].Name
}
}
return result, nil
}
// Walk a bucket, optionally a prefix, recursively until we have returned
// all the content to the objectInfo channel. It is the caller's
// responsibility to allocate a receive channel for ObjectInfo; upon any
// unhandled error the walker returns the error. Optionally, if
// context.Done() is received, Walk() stops the walker.
func fsWalk(ctx context.Context, obj ObjectLayer, bucket, prefix string, listDir ListDirFunc, isLeaf IsLeafFunc, isLeafDir IsLeafDirFunc, results chan<- ObjectInfo, getObjInfo func(context.Context, string, string) (ObjectInfo, error), getObjectInfoDirs ...func(context.Context, string, string) (ObjectInfo, error)) error {
if err := checkListObjsArgs(ctx, bucket, prefix, "", obj); err != nil {
// Upon error close the channel.
close(results)
return err
}
walkResultCh := startTreeWalk(ctx, bucket, prefix, "", true, listDir, isLeaf, isLeafDir, ctx.Done())
go func() {
defer close(results)
for {
walkResult, ok := <-walkResultCh
if !ok {
break
}
var objInfo ObjectInfo
var err error
if HasSuffix(walkResult.entry, SlashSeparator) {
for _, getObjectInfoDir := range getObjectInfoDirs {
objInfo, err = getObjectInfoDir(ctx, bucket, walkResult.entry)
if err == nil {
break
}
if err == errFileNotFound {
err = nil
objInfo = ObjectInfo{
Bucket: bucket,
Name: walkResult.entry,
IsDir: true,
}
}
}
} else {
objInfo, err = getObjInfo(ctx, bucket, walkResult.entry)
}
if err != nil {
continue
}
results <- objInfo
if walkResult.end {
break
}
}
}()
return nil
}
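// listObjects - lists up to maxKeys objects under bucket/prefix honoring
// marker and delimiter; object metadata is fetched concurrently and, when the
// delimiter is '/', directory entries are folded into common prefixes.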
func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, delimiter string, maxKeys int, tpool *TreeWalkPool, listDir ListDirFunc, isLeaf IsLeafFunc, isLeafDir IsLeafDirFunc, getObjInfo func(context.Context, string, string) (ObjectInfo, error), getObjectInfoDirs ...func(context.Context, string, string) (ObjectInfo, error)) (loi ListObjectsInfo, err error) {
if delimiter != SlashSeparator && delimiter != "" {
return listObjectsNonSlash(ctx, bucket, prefix, marker, delimiter, maxKeys, tpool, listDir, isLeaf, isLeafDir, getObjInfo, getObjectInfoDirs...)
}
if err := checkListObjsArgs(ctx, bucket, prefix, marker, obj); err != nil {
return loi, err
}
// Marker is set, validate the pre-condition.
if marker != "" {
// A marker that does not share the prefix is not supported; send an empty response.
if !HasPrefix(marker, prefix) {
return loi, nil
}
}
// With max keys of zero we have reached eof, return right here.
if maxKeys == 0 {
return loi, nil
}
// For delimiter and prefix as '/' we do not list anything at all
// since according to the S3 spec we stop at the 'delimiter'
// along with the prefix. On a flat namespace with 'prefix'
// as '/' we don't have any entries, since all the keys are
// of the form 'keyName/...'
if delimiter == SlashSeparator && prefix == SlashSeparator {
return loi, nil
}
// Overflowing count - reset to maxObjectList.
if maxKeys < 0 || maxKeys > maxObjectList {
maxKeys = maxObjectList
}
// Default is recursive, if delimiter is set then list non recursive.
recursive := true
if delimiter == SlashSeparator {
recursive = false
}
walkResultCh, endWalkCh := tpool.Release(listParams{bucket, recursive, marker, prefix})
if walkResultCh == nil {
endWalkCh = make(chan struct{})
walkResultCh = startTreeWalk(ctx, bucket, prefix, marker, recursive, listDir, isLeaf, isLeafDir, endWalkCh)
}
var eof bool
var nextMarker string
maxConcurrent := maxKeys / 10
if maxConcurrent == 0 {
maxConcurrent = maxKeys
}
// List until maxKeys requested.
g := errgroup.WithNErrs(maxKeys).WithConcurrency(maxConcurrent)
ctx, cancel := g.WithCancelOnError(ctx)
defer cancel()
objInfoFound := make([]*ObjectInfo, maxKeys)
var i int
for i = 0; i < maxKeys; i++ {
i := i
walkResult, ok := <-walkResultCh
if !ok {
// Closed channel.
eof = true
break
}
if HasSuffix(walkResult.entry, SlashSeparator) {
g.Go(func() error {
for _, getObjectInfoDir := range getObjectInfoDirs {
objInfo, err := getObjectInfoDir(ctx, bucket, walkResult.entry)
if err == nil {
objInfoFound[i] = &objInfo
// Done...
return nil
}
// Add a placeholder entry; a later getObjectInfoDir call may override it.
if err == errFileNotFound {
objInfoFound[i] = &ObjectInfo{
Bucket: bucket,
Name: walkResult.entry,
IsDir: true,
}
continue
}
return toObjectErr(err, bucket, prefix)
}
return nil
}, i)
} else {
g.Go(func() error {
objInfo, err := getObjInfo(ctx, bucket, walkResult.entry)
if err != nil {
// Ignore errFileNotFound as the object might have got
// deleted in the interim period of listing and getObjectInfo(),
// ignore quorum error as it might be an entry from an outdated disk.
if IsErrIgnored(err, []error{
errFileNotFound,
errErasureReadQuorum,
}...) {
return nil
}
return toObjectErr(err, bucket, prefix)
}
objInfoFound[i] = &objInfo
return nil
}, i)
}
if walkResult.end {
eof = true
break
}
}
if err := g.WaitErr(); err != nil {
return loi, err
}
// Copy found objects
objInfos := make([]ObjectInfo, 0, i+1)
for _, objInfo := range objInfoFound {
if objInfo == nil {
continue
}
objInfos = append(objInfos, *objInfo)
nextMarker = objInfo.Name
}
// Save list routine for the next marker if we haven't reached EOF.
params := listParams{bucket, recursive, nextMarker, prefix}
if !eof {
tpool.Set(params, walkResultCh, endWalkCh)
}
result := ListObjectsInfo{}
for _, objInfo := range objInfos {
if objInfo.IsDir && delimiter == SlashSeparator {
result.Prefixes = append(result.Prefixes, objInfo.Name)
continue
}
result.Objects = append(result.Objects, objInfo)
}
if !eof {
result.IsTruncated = true
if len(objInfos) > 0 {
result.NextMarker = objInfos[len(objInfos)-1].Name
}
}
// Success.
return result, nil
}
| cmd/object-api-common.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.0001773361291270703,
0.00017051358008757234,
0.00016232463531196117,
0.00017105991719290614,
0.0000031672168461227557
] |
{
"id": 1,
"code_window": [
"\t\tName: \"address\",\n",
"\t\tValue: \":\" + GlobalMinioDefaultPort,\n",
"\t\tUsage: \"bind to a specific ADDRESS:PORT, ADDRESS can be an IP or hostname\",\n",
"\t},\n",
"\tcli.StringFlag{\n",
"\t\tName: \"console-address\",\n",
"\t\tUsage: \"bind to a specific ADDRESS:PORT for embedded Console UI, ADDRESS can be an IP or hostname\",\n",
"\t},\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcli.IntFlag{\n",
"\t\tName: \"listeners\",\n",
"\t\tValue: 1,\n",
"\t\tUsage: \"bind N number of listeners per ADDRESS:PORT\",\n",
"\t},\n"
],
"file_path": "cmd/server-main.go",
"type": "add",
"edit_start_line_idx": 57
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"reflect"
"runtime"
"time"
"github.com/gorilla/mux"
"github.com/minio/minio-go/v7/pkg/set"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/rest"
"github.com/minio/pkg/env"
)
const (
bootstrapRESTVersion = "v1"
bootstrapRESTVersionPrefix = SlashSeparator + bootstrapRESTVersion
bootstrapRESTPrefix = minioReservedBucketPath + "/bootstrap"
bootstrapRESTPath = bootstrapRESTPrefix + bootstrapRESTVersionPrefix
)
const (
bootstrapRESTMethodHealth = "/health"
bootstrapRESTMethodVerify = "/verify"
)
// To abstract a node over network.
type bootstrapRESTServer struct{}
// ServerSystemConfig - captures information about server configuration.
type ServerSystemConfig struct {
MinioPlatform string
MinioEndpoints EndpointServerPools
MinioEnv map[string]string
}
// Diff - returns error on first difference found in two configs.
func (s1 ServerSystemConfig) Diff(s2 ServerSystemConfig) error {
if s1.MinioPlatform != s2.MinioPlatform {
return fmt.Errorf("Expected platform '%s', found to be running '%s'",
s1.MinioPlatform, s2.MinioPlatform)
}
if s1.MinioEndpoints.NEndpoints() != s2.MinioEndpoints.NEndpoints() {
return fmt.Errorf("Expected number of endpoints %d, seen %d", s1.MinioEndpoints.NEndpoints(),
s2.MinioEndpoints.NEndpoints())
}
for i, ep := range s1.MinioEndpoints {
if ep.SetCount != s2.MinioEndpoints[i].SetCount {
return fmt.Errorf("Expected set count %d, seen %d", ep.SetCount,
s2.MinioEndpoints[i].SetCount)
}
if ep.DrivesPerSet != s2.MinioEndpoints[i].DrivesPerSet {
return fmt.Errorf("Expected drives pet set %d, seen %d", ep.DrivesPerSet,
s2.MinioEndpoints[i].DrivesPerSet)
}
for j, endpoint := range ep.Endpoints {
if endpoint.String() != s2.MinioEndpoints[i].Endpoints[j].String() {
return fmt.Errorf("Expected endpoint %s, seen %s", endpoint,
s2.MinioEndpoints[i].Endpoints[j])
}
}
}
if !reflect.DeepEqual(s1.MinioEnv, s2.MinioEnv) {
return fmt.Errorf("Expected same MINIO_ environment variables and values")
}
return nil
}
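// skipEnvs - environment variables that may legitimately differ across nodes
// and are therefore excluded from the bootstrap configuration comparison.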
var skipEnvs = map[string]struct{}{
"MINIO_OPTS": {},
"MINIO_CERT_PASSWD": {},
}
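// getServerSystemCfg - snapshots this node's platform, endpoint layout and
// relevant MINIO_* environment values for cross-node verification.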
func getServerSystemCfg() ServerSystemConfig {
envs := env.List("MINIO_")
envValues := make(map[string]string, len(envs))
for _, envK := range envs {
// Skip certain whitelisted environment variables that may be
// configured differently on each node; update the skipEnvs map
// if there are more such environment variables.
if _, ok := skipEnvs[envK]; ok {
continue
}
envValues[envK] = env.Get(envK, "")
}
return ServerSystemConfig{
MinioPlatform: fmt.Sprintf("OS: %s | Arch: %s", runtime.GOOS, runtime.GOARCH),
MinioEndpoints: globalEndpoints,
MinioEnv: envValues,
}
}
// HealthHandler returns success if request is valid
func (b *bootstrapRESTServer) HealthHandler(w http.ResponseWriter, r *http.Request) {}
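// VerifyHandler - writes this server's system configuration so that peers can
// verify all nodes are running with a consistent setup.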
func (b *bootstrapRESTServer) VerifyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "VerifyHandler")
cfg := getServerSystemCfg()
logger.LogIf(ctx, json.NewEncoder(w).Encode(&cfg))
w.(http.Flusher).Flush()
}
// registerBootstrapRESTHandlers - register bootstrap rest router.
func registerBootstrapRESTHandlers(router *mux.Router) {
server := &bootstrapRESTServer{}
subrouter := router.PathPrefix(bootstrapRESTPrefix).Subrouter()
subrouter.Methods(http.MethodPost).Path(bootstrapRESTVersionPrefix + bootstrapRESTMethodHealth).HandlerFunc(
httpTraceHdrs(server.HealthHandler))
subrouter.Methods(http.MethodPost).Path(bootstrapRESTVersionPrefix + bootstrapRESTMethodVerify).HandlerFunc(
httpTraceHdrs(server.VerifyHandler))
}
// client to talk to bootstrap endpoints.
type bootstrapRESTClient struct {
endpoint Endpoint
restClient *rest.Client
}
// Wrapper to restClient.Call to handle network errors, in case of network error the connection is marked disconnected
// permanently. The only way to restore the connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints()
// after verifying format.json
func (client *bootstrapRESTClient) callWithContext(ctx context.Context, method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
if values == nil {
values = make(url.Values)
}
respBody, err = client.restClient.Call(ctx, method, values, body, length)
if err == nil {
return respBody, nil
}
return nil, err
}
// Stringer provides a canonicalized representation of node.
func (client *bootstrapRESTClient) String() string {
return client.endpoint.String()
}
// Verify - fetches system server config.
func (client *bootstrapRESTClient) Verify(ctx context.Context, srcCfg ServerSystemConfig) (err error) {
if newObjectLayerFn() != nil {
return nil
}
respBody, err := client.callWithContext(ctx, bootstrapRESTMethodVerify, nil, nil, -1)
if err != nil {
return
}
defer xhttp.DrainBody(respBody)
recvCfg := ServerSystemConfig{}
if err = json.NewDecoder(respBody).Decode(&recvCfg); err != nil {
return err
}
return srcCfg.Diff(recvCfg)
}
func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointServerPools) error {
srcCfg := getServerSystemCfg()
clnts := newBootstrapRESTClients(endpointServerPools)
var onlineServers int
var offlineEndpoints []string
var retries int
for onlineServers < len(clnts)/2 {
for _, clnt := range clnts {
if err := clnt.Verify(ctx, srcCfg); err != nil {
if isNetworkError(err) {
offlineEndpoints = append(offlineEndpoints, clnt.String())
continue
}
return fmt.Errorf("%s as has incorrect configuration: %w", clnt.String(), err)
}
onlineServers++
}
select {
case <-ctx.Done():
return ctx.Err()
default:
// Sleep for a while - so that we don't go into
// 100% CPU when half the endpoints are offline.
time.Sleep(100 * time.Millisecond)
retries++
// after 5 retries start logging that servers are not reachable yet
if retries >= 5 {
logger.Info(fmt.Sprintf("Waiting for atleast %d remote servers to be online for bootstrap check", len(clnts)/2))
if len(offlineEndpoints) > 0 {
logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
}
retries = 0 // reset to log again after 5 retries.
}
offlineEndpoints = nil
}
}
return nil
}
func newBootstrapRESTClients(endpointServerPools EndpointServerPools) []*bootstrapRESTClient {
seenHosts := set.NewStringSet()
var clnts []*bootstrapRESTClient
for _, ep := range endpointServerPools {
for _, endpoint := range ep.Endpoints {
if seenHosts.Contains(endpoint.Host) {
continue
}
seenHosts.Add(endpoint.Host)
// Only proceed for remote endpoints.
if !endpoint.IsLocal {
clnts = append(clnts, newBootstrapRESTClient(endpoint))
}
}
}
return clnts
}
// Returns a new bootstrap client.
func newBootstrapRESTClient(endpoint Endpoint) *bootstrapRESTClient {
serverURL := &url.URL{
Scheme: endpoint.Scheme,
Host: endpoint.Host,
Path: bootstrapRESTPath,
}
restClient := rest.NewClient(serverURL, globalInternodeTransport, newAuthToken)
restClient.HealthCheckFn = nil
return &bootstrapRESTClient{endpoint: endpoint, restClient: restClient}
}
| cmd/bootstrap-peer-server.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.0036139811854809523,
0.0005568276392295957,
0.0001659635890973732,
0.00017338016186840832,
0.0009848192567005754
] |
{
"id": 1,
"code_window": [
"\t\tName: \"address\",\n",
"\t\tValue: \":\" + GlobalMinioDefaultPort,\n",
"\t\tUsage: \"bind to a specific ADDRESS:PORT, ADDRESS can be an IP or hostname\",\n",
"\t},\n",
"\tcli.StringFlag{\n",
"\t\tName: \"console-address\",\n",
"\t\tUsage: \"bind to a specific ADDRESS:PORT for embedded Console UI, ADDRESS can be an IP or hostname\",\n",
"\t},\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcli.IntFlag{\n",
"\t\tName: \"listeners\",\n",
"\t\tValue: 1,\n",
"\t\tUsage: \"bind N number of listeners per ADDRESS:PORT\",\n",
"\t},\n"
],
"file_path": "cmd/server-main.go",
"type": "add",
"edit_start_line_idx": 57
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package lifecycle
import (
"encoding/xml"
"testing"
)
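// TestTransitionUnmarshalXML verifies that Transition and
// NoncurrentVersionTransition lifecycle rules unmarshal from XML and validate
// as expected.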
func TestTransitionUnmarshalXML(t *testing.T) {
trTests := []struct {
input string
err error
}{
{
input: `<Transition>
<Days>0</Days>
<StorageClass>S3TIER-1</StorageClass>
</Transition>`,
err: nil,
},
{
input: `<Transition>
<Days>1</Days>
<Date>2021-01-01T00:00:00Z</Date>
<StorageClass>S3TIER-1</StorageClass>
</Transition>`,
err: errTransitionInvalid,
},
{
input: `<Transition>
<Days>1</Days>
</Transition>`,
err: errXMLNotWellFormed,
},
}
for i, tc := range trTests {
var tr Transition
err := xml.Unmarshal([]byte(tc.input), &tr)
if err != nil {
t.Fatalf("%d: xml unmarshal failed with %v", i+1, err)
}
if err = tr.Validate(); err != tc.err {
t.Fatalf("%d: Invalid transition %v: err %v", i+1, tr, err)
}
}
ntrTests := []struct {
input string
err error
}{
{
input: `<NoncurrentVersionTransition>
<NoncurrentDays>0</NoncurrentDays>
<StorageClass>S3TIER-1</StorageClass>
</NoncurrentVersionTransition>`,
err: nil,
},
{
input: `<NoncurrentVersionTransition>
<Days>1</Days>
</NoncurrentVersionTransition>`,
err: errXMLNotWellFormed,
},
}
for i, tc := range ntrTests {
var ntr NoncurrentVersionTransition
err := xml.Unmarshal([]byte(tc.input), &ntr)
if err != nil {
t.Fatalf("%d: xml unmarshal failed with %v", i+1, err)
}
if err = ntr.Validate(); err != tc.err {
t.Fatalf("%d: Invalid noncurrent version transition %v: err %v", i+1, ntr, err)
}
}
}
| internal/bucket/lifecycle/transition_test.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.00017933173512574285,
0.00017384500824846327,
0.00016984745161607862,
0.00017332765855826437,
0.0000031044874049257487
] |
{
"id": 2,
"code_window": [
"\tvar getCert certs.GetCertificateFunc\n",
"\tif globalTLSCerts != nil {\n",
"\t\tgetCert = globalTLSCerts.GetCertificate\n",
"\t}\n",
"\n",
"\thttpServer := xhttp.NewServer([]string{globalMinioAddr},\n",
"\t\tcriticalErrorHandler{corsHandler(handler)}, getCert)\n",
"\thttpServer.BaseContext = func(listener net.Listener) context.Context {\n",
"\t\treturn GlobalContext\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlisteners := ctx.Int(\"listeners\")\n",
"\tif listeners == 0 {\n",
"\t\tlisteners = 1\n",
"\t}\n",
"\taddrs := make([]string, 0, listeners)\n",
"\tfor i := 0; i < listeners; i++ {\n",
"\t\taddrs = append(addrs, globalMinioAddr)\n",
"\t}\n",
"\n",
"\thttpServer := xhttp.NewServer(addrs, criticalErrorHandler{corsHandler(handler)}, getCert)\n"
],
"file_path": "cmd/server-main.go",
"type": "replace",
"edit_start_line_idx": 499
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"log"
"math/rand"
"net"
"os"
"os/signal"
"strings"
"sync"
"syscall"
"time"
"github.com/minio/cli"
"github.com/minio/madmin-go"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/bucket/bandwidth"
"github.com/minio/minio/internal/color"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/fips"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/rest"
"github.com/minio/minio/internal/sync/errgroup"
"github.com/minio/pkg/certs"
"github.com/minio/pkg/env"
)
// ServerFlags - server command specific flags
var ServerFlags = []cli.Flag{
cli.StringFlag{
Name: "address",
Value: ":" + GlobalMinioDefaultPort,
Usage: "bind to a specific ADDRESS:PORT, ADDRESS can be an IP or hostname",
},
cli.StringFlag{
Name: "console-address",
Usage: "bind to a specific ADDRESS:PORT for embedded Console UI, ADDRESS can be an IP or hostname",
},
}
var serverCmd = cli.Command{
Name: "server",
Usage: "start object storage server",
Flags: append(ServerFlags, GlobalFlags...),
Action: serverMain,
CustomHelpTemplate: `NAME:
{{.HelpName}} - {{.Usage}}
USAGE:
{{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}DIR1 [DIR2..]
{{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}DIR{1...64}
{{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}DIR{1...64} DIR{65...128}
DIR:
DIR points to a directory on a filesystem. When you want to combine
multiple drives into a single large system, pass one directory per
filesystem separated by space. You may also use a '...' convention
to abbreviate the directory arguments. Remote directories in a
distributed setup are encoded as HTTP(s) URIs.
{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
EXAMPLES:
1. Start minio server on "/home/shared" directory.
{{.Prompt}} {{.HelpName}} /home/shared
2. Start single node server with 64 local drives "/mnt/data1" to "/mnt/data64".
{{.Prompt}} {{.HelpName}} /mnt/data{1...64}
3. Start distributed minio server on a 32 node setup with 32 drives each, run the following command on all the nodes
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_USER{{.AssignmentOperator}}minio
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_PASSWORD{{.AssignmentOperator}}miniostorage
{{.Prompt}} {{.HelpName}} http://node{1...32}.example.com/mnt/export{1...32}
4. Start distributed minio server in an expanded setup, run the following command on all the nodes
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_USER{{.AssignmentOperator}}minio
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_PASSWORD{{.AssignmentOperator}}miniostorage
{{.Prompt}} {{.HelpName}} http://node{1...16}.example.com/mnt/export{1...32} \
http://node{17...64}.example.com/mnt/export{1...64}
`,
}
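// serverCmdArgs - returns the server endpoint arguments, preferring the
// MINIO_ARGS (or legacy MINIO_ENDPOINTS) environment variable over the
// command line.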
func serverCmdArgs(ctx *cli.Context) []string {
v, _, _, err := env.LookupEnv(config.EnvArgs)
if err != nil {
logger.FatalIf(err, "Unable to validate passed arguments in %s:%s",
config.EnvArgs, os.Getenv(config.EnvArgs))
}
if v == "" {
// Fall back to older environment value MINIO_ENDPOINTS
v, _, _, err = env.LookupEnv(config.EnvEndpoints)
if err != nil {
logger.FatalIf(err, "Unable to validate passed arguments in %s:%s",
config.EnvEndpoints, os.Getenv(config.EnvEndpoints))
}
}
if v == "" {
if !ctx.Args().Present() || ctx.Args().First() == "help" {
cli.ShowCommandHelpAndExit(ctx, ctx.Command.Name, 1)
}
return ctx.Args()
}
return strings.Fields(v)
}
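// serverHandleCmdArgs - validates the command line, loads TLS material and
// derives the endpoint topology before the server starts.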
func serverHandleCmdArgs(ctx *cli.Context) {
// Handle common command args.
handleCommonCmdArgs(ctx)
logger.FatalIf(CheckLocalServerAddr(globalMinioAddr), "Unable to validate passed arguments")
var err error
var setupType SetupType
// Check and load TLS certificates.
globalPublicCerts, globalTLSCerts, globalIsTLS, err = getTLSConfig()
logger.FatalIf(err, "Unable to load the TLS configuration")
// Check and load Root CAs.
globalRootCAs, err = certs.GetRootCAs(globalCertsCADir.Get())
logger.FatalIf(err, "Failed to read root CAs (%v)", err)
// Add the global public crts as part of global root CAs
for _, publicCrt := range globalPublicCerts {
globalRootCAs.AddCert(publicCrt)
}
// Register root CAs for remote ENVs
env.RegisterGlobalCAs(globalRootCAs)
globalEndpoints, setupType, err = createServerEndpoints(globalMinioAddr, serverCmdArgs(ctx)...)
logger.FatalIf(err, "Invalid command line arguments")
globalLocalNodeName = GetLocalPeer(globalEndpoints, globalMinioHost, globalMinioPort)
globalRemoteEndpoints = make(map[string]Endpoint)
for _, z := range globalEndpoints {
for _, ep := range z.Endpoints {
if ep.IsLocal {
globalRemoteEndpoints[globalLocalNodeName] = ep
} else {
globalRemoteEndpoints[ep.Host] = ep
}
}
}
// allow transport to be HTTP/1.1 for proxying.
globalProxyTransport = newCustomHTTPProxyTransport(&tls.Config{
RootCAs: globalRootCAs,
CipherSuites: fips.CipherSuitesTLS(),
CurvePreferences: fips.EllipticCurvesTLS(),
}, rest.DefaultTimeout)()
globalProxyEndpoints = GetProxyEndpoints(globalEndpoints)
globalInternodeTransport = newInternodeHTTPTransport(&tls.Config{
RootCAs: globalRootCAs,
CipherSuites: fips.CipherSuitesTLS(),
CurvePreferences: fips.EllipticCurvesTLS(),
}, rest.DefaultTimeout)()
// On macOS, if a process already listens on LOCALIPADDR:PORT, net.Listen() falls back
// to the IPv6 address, i.e. minio will start listening on the IPv6 address whereas another
// (non-)minio process is listening on the IPv4 address of the given port.
// To avoid this error situation we check for port availability.
logger.FatalIf(checkPortAvailability(globalMinioHost, globalMinioPort), "Unable to start the server")
globalIsErasure = (setupType == ErasureSetupType)
globalIsDistErasure = (setupType == DistErasureSetupType)
if globalIsDistErasure {
globalIsErasure = true
}
}
func serverHandleEnvVars() {
// Handle common environment variables.
handleCommonEnvVars()
}
var globalHealStateLK sync.RWMutex
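// newAllSubsystems - allocates fresh instances of all global sub-systems;
// systems that carry state across test runs are reset instead of re-created.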
func newAllSubsystems() {
if globalIsErasure {
globalHealStateLK.Lock()
// New global heal state
globalAllHealState = newHealState(true)
globalBackgroundHealState = newHealState(false)
globalHealStateLK.Unlock()
}
// Create new notification system and initialize notification targets
globalNotificationSys = NewNotificationSys(globalEndpoints)
// Create new bucket metadata system.
if globalBucketMetadataSys == nil {
globalBucketMetadataSys = NewBucketMetadataSys()
} else {
// Reinitialize safely when testing.
globalBucketMetadataSys.Reset()
}
// Create the bucket bandwidth monitor
globalBucketMonitor = bandwidth.NewMonitor(GlobalContext, totalNodeCount())
// Create a new config system.
globalConfigSys = NewConfigSys()
// Create new IAM system.
globalIAMSys = NewIAMSys()
// Create new policy system.
globalPolicySys = NewPolicySys()
// Create new lifecycle system.
globalLifecycleSys = NewLifecycleSys()
// Create new bucket encryption subsystem
globalBucketSSEConfigSys = NewBucketSSEConfigSys()
// Create new bucket object lock subsystem
globalBucketObjectLockSys = NewBucketObjectLockSys()
// Create new bucket quota subsystem
globalBucketQuotaSys = NewBucketQuotaSys()
// Create new bucket versioning subsystem
if globalBucketVersioningSys == nil {
globalBucketVersioningSys = NewBucketVersioningSys()
} else {
globalBucketVersioningSys.Reset()
}
// Create new bucket replication subsystem
globalBucketTargetSys = NewBucketTargetSys()
// Create new ILM tier configuration subsystem
globalTierConfigMgr = NewTierConfigMgr()
}
func configRetriableErrors(err error) bool {
// Initializing sub-systems needs a retry mechanism for
// the following reasons:
// - Read quorum is lost just after the initialization
// of the object layer.
// - Write quorum not met when upgrading configuration
// version is needed, migration is needed etc.
rquorum := InsufficientReadQuorum{}
wquorum := InsufficientWriteQuorum{}
// One of these retriable errors shall be retried.
return errors.Is(err, errDiskNotFound) ||
errors.Is(err, errConfigNotFound) ||
errors.Is(err, context.DeadlineExceeded) ||
errors.Is(err, errErasureWriteQuorum) ||
errors.Is(err, errErasureReadQuorum) ||
errors.Is(err, io.ErrUnexpectedEOF) ||
errors.As(err, &rquorum) ||
errors.As(err, &wquorum) ||
isErrBucketNotFound(err) ||
errors.Is(err, os.ErrDeadlineExceeded)
}
func initServer(ctx context.Context, newObject ObjectLayer) error {
// Once the config is fully loaded, initialize the new object layer.
setObjectLayer(newObject)
// Hold the lock for the entire migration so that only one
// server migrates the entire config at a given time; this big
// transaction lock ensures this appropriately. The same applies
// to rotation of encrypted content.
txnLk := newObject.NewNSLock(minioMetaBucket, minioConfigPrefix+"/transaction.lock")
// **** WARNING ****
// Migrating to encrypted backend should happen before initialization of any
// sub-systems, make sure that we do not move the above codeblock elsewhere.
r := rand.New(rand.NewSource(time.Now().UnixNano()))
lockTimeout := newDynamicTimeout(5*time.Second, 3*time.Second)
for {
select {
case <-ctx.Done():
// Retry was canceled successfully.
return fmt.Errorf("Initializing sub-systems stopped gracefully %w", ctx.Err())
default:
}
// let one of the server acquire the lock, if not let them timeout.
// which shall be retried again by this loop.
lkctx, err := txnLk.GetLock(ctx, lockTimeout)
if err != nil {
logger.Info("Waiting for all MinIO sub-systems to be initialized.. trying to acquire lock")
time.Sleep(time.Duration(r.Float64() * float64(5*time.Second)))
continue
}
// These messages only meant primarily for distributed setup, so only log during distributed setup.
if globalIsDistErasure {
logger.Info("Waiting for all MinIO sub-systems to be initialized.. lock acquired")
}
// Migrate all backend configs to encrypted backend configs, optionally
// handles rotating keys for encryption, if there is any retriable failure
// that shall be retried if there is an error.
if err = handleEncryptedConfigBackend(newObject); err == nil {
// Upon success migrating the config, initialize all sub-systems
// if all sub-systems initialized successfully return right away
if err = initAllSubsystems(ctx, newObject); err == nil {
txnLk.Unlock(lkctx.Cancel)
// All successful return.
if globalIsDistErasure {
// These messages only meant primarily for distributed setup, so only log during distributed setup.
logger.Info("All MinIO sub-systems initialized successfully")
}
return nil
}
}
// Unlock the transaction lock and allow other nodes to acquire the lock if possible.
txnLk.Unlock(lkctx.Cancel)
if configRetriableErrors(err) {
logger.Info("Waiting for all MinIO sub-systems to be initialized.. possible cause (%v)", err)
time.Sleep(time.Duration(r.Float64() * float64(5*time.Second)))
continue
}
// Any other unhandled return right here.
return fmt.Errorf("Unable to initialize sub-systems: %w", err)
}
}
func initAllSubsystems(ctx context.Context, newObject ObjectLayer) (err error) {
// %w is used by all error returns here to make sure
// we wrap the underlying error, make sure when you
// are modifying this code that you do so, if and when
// you want to add extra context to your error. This
// ensures top level retry works accordingly.
// List buckets to heal; the list is re-used for loading configs.
buckets, err := newObject.ListBuckets(ctx)
if err != nil {
return fmt.Errorf("Unable to list buckets to heal: %w", err)
}
if globalIsErasure {
if len(buckets) > 0 {
if len(buckets) == 1 {
logger.Info(fmt.Sprintf("Verifying if %d bucket is consistent across drives...", len(buckets)))
} else {
logger.Info(fmt.Sprintf("Verifying if %d buckets are consistent across drives...", len(buckets)))
}
}
// Limit to no more than 50 concurrent buckets.
g := errgroup.WithNErrs(len(buckets)).WithConcurrency(50)
ctx, cancel := g.WithCancelOnError(ctx)
defer cancel()
for index := range buckets {
index := index
g.Go(func() error {
_, berr := newObject.HealBucket(ctx, buckets[index].Name, madmin.HealOpts{Recreate: true})
return berr
}, index)
}
if err := g.WaitErr(); err != nil {
return fmt.Errorf("Unable to list buckets to heal: %w", err)
}
}
// Initialize config system.
if err = globalConfigSys.Init(newObject); err != nil {
if configRetriableErrors(err) {
return fmt.Errorf("Unable to initialize config system: %w", err)
}
// Any other config errors we simply print a message and proceed forward.
logger.LogIf(ctx, fmt.Errorf("Unable to initialize config, some features may be missing %w", err))
}
// Populate existing buckets to the etcd backend
if globalDNSConfig != nil {
// Background this operation.
go initFederatorBackend(buckets, newObject)
}
// Initialize bucket metadata sub-system.
globalBucketMetadataSys.Init(ctx, buckets, newObject)
// Initialize bucket notification sub-system.
globalNotificationSys.Init(ctx, buckets, newObject)
// Initialize site replication manager.
globalSiteReplicationSys.Init(ctx, newObject)
if globalIsErasure {
// Initialize transition tier configuration manager
if err = globalTierConfigMgr.Init(ctx, newObject); err != nil {
return err
}
}
return nil
}
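// nullWriter discards everything written to it; used below to silence the
// standard library HTTP server's internal error logging.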
type nullWriter struct{}
func (lw nullWriter) Write(b []byte) (int, error) {
return len(b), nil
}
// serverMain handler called for 'minio server' command.
func serverMain(ctx *cli.Context) {
signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
go handleSignals()
setDefaultProfilerRates()
// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(GlobalContext)
logger.AddTarget(globalConsoleSys)
// Perform any self-tests
bitrotSelfTest()
erasureSelfTest()
compressSelfTest()
// Handle all server command args.
serverHandleCmdArgs(ctx)
// Handle all server environment vars.
serverHandleEnvVars()
// Set node name, only set for distributed setup.
globalConsoleSys.SetNodeName(globalLocalNodeName)
// Initialize all help
initHelp()
// Initialize all sub-systems
newAllSubsystems()
// Is distributed setup, error out if no certificates are found for HTTPS endpoints.
if globalIsDistErasure {
if globalEndpoints.HTTPS() && !globalIsTLS {
logger.Fatal(config.ErrNoCertsAndHTTPSEndpoints(nil), "Unable to start the server")
}
if !globalEndpoints.HTTPS() && globalIsTLS {
logger.Fatal(config.ErrCertsAndHTTPEndpoints(nil), "Unable to start the server")
}
}
if !globalCLIContext.Quiet && !globalInplaceUpdateDisabled {
// Check for new updates from dl.min.io.
checkUpdate(getMinioMode())
}
if !globalActiveCred.IsValid() && globalIsDistErasure {
globalActiveCred = auth.DefaultCredentials
}
// Set system resources to maximum.
setMaxResources()
// Configure server.
handler, err := configureServerHandler(globalEndpoints)
if err != nil {
logger.Fatal(config.ErrUnexpectedError(err), "Unable to configure one of server's RPC services")
}
var getCert certs.GetCertificateFunc
if globalTLSCerts != nil {
getCert = globalTLSCerts.GetCertificate
}
httpServer := xhttp.NewServer([]string{globalMinioAddr},
criticalErrorHandler{corsHandler(handler)}, getCert)
httpServer.BaseContext = func(listener net.Listener) context.Context {
return GlobalContext
}
// Turn-off random logging by Go internally
httpServer.ErrorLog = log.New(&nullWriter{}, "", 0)
go func() {
globalHTTPServerErrorCh <- httpServer.Start(GlobalContext)
}()
setHTTPServer(httpServer)
if globalIsDistErasure && globalEndpoints.FirstLocal() {
for {
// Additionally in distributed setup, validate the setup and configuration.
err := verifyServerSystemConfig(GlobalContext, globalEndpoints)
if err == nil || errors.Is(err, context.Canceled) {
break
}
logger.LogIf(GlobalContext, err, "Unable to initialize distributed setup, retrying.. after 5 seconds")
select {
case <-GlobalContext.Done():
return
case <-time.After(500 * time.Millisecond):
}
}
}
newObject, err := newObjectLayer(GlobalContext, globalEndpoints)
if err != nil {
logFatalErrs(err, Endpoint{}, true)
}
logger.SetDeploymentID(globalDeploymentID)
// Enable background operations for erasure coding
if globalIsErasure {
initAutoHeal(GlobalContext, newObject)
initHealMRF(GlobalContext, newObject)
}
initBackgroundExpiry(GlobalContext, newObject)
if err = initServer(GlobalContext, newObject); err != nil {
var cerr config.Err
// For any config error, we don't need to drop into safe-mode;
// instead it's a user error and should be fixed by the user.
if errors.As(err, &cerr) {
logger.FatalIf(err, "Unable to initialize the server")
}
// If context was canceled
if errors.Is(err, context.Canceled) {
logger.FatalIf(err, "Server startup canceled upon user request")
}
logger.LogIf(GlobalContext, err)
}
// Initialize users credentials and policies in background right after config has initialized.
go globalIAMSys.Init(GlobalContext, newObject)
initDataScanner(GlobalContext, newObject)
if globalIsErasure { // to be done after config init
initBackgroundReplication(GlobalContext, newObject)
initBackgroundTransition(GlobalContext, newObject)
globalTierJournal, err = initTierDeletionJournal(GlobalContext)
if err != nil {
logger.FatalIf(err, "Unable to initialize remote tier pending deletes journal")
}
}
if globalCacheConfig.Enabled {
// initialize the new disk cache objects.
var cacheAPI CacheObjectLayer
cacheAPI, err = newServerCacheObjects(GlobalContext, globalCacheConfig)
logger.FatalIf(err, "Unable to initialize disk caching")
setCacheObjectLayer(cacheAPI)
}
// Prints the formatted startup message, if err is not nil then it prints additional information as well.
printStartupMessage(getAPIEndpoints(), err)
if globalActiveCred.Equal(auth.DefaultCredentials) {
msg := fmt.Sprintf("WARNING: Detected default credentials '%s', we recommend that you change these values with 'MINIO_ROOT_USER' and 'MINIO_ROOT_PASSWORD' environment variables", globalActiveCred)
logStartupMessage(color.RedBold(msg))
}
if !globalCLIContext.StrictS3Compat {
logStartupMessage(color.RedBold("WARNING: Strict AWS S3 compatible incoming PUT, POST content payload validation is turned off, caution is advised do not use in production"))
}
if globalBrowserEnabled {
globalConsoleSrv, err = initConsoleServer()
if err != nil {
logger.FatalIf(err, "Unable to initialize console service")
}
go func() {
logger.FatalIf(globalConsoleSrv.Serve(), "Unable to initialize console server")
}()
}
if serverDebugLog {
logger.Info("== DEBUG Mode enabled ==")
logger.Info("Currently set environment settings:")
for _, v := range os.Environ() {
logger.Info(v)
}
logger.Info("======")
}
<-globalOSSignalCh
}
// Initialize object layer with the supplied disks, objectLayer is nil upon any error.
func newObjectLayer(ctx context.Context, endpointServerPools EndpointServerPools) (newObject ObjectLayer, err error) {
// For FS only, directly use the disk.
if endpointServerPools.NEndpoints() == 1 {
// Initialize new FS object layer.
return NewFSObjectLayer(endpointServerPools[0].Endpoints[0].Path)
}
return newErasureServerPools(ctx, endpointServerPools)
}
| cmd/server-main.go | 1 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.99776291847229,
0.047848891466856,
0.0001627747406018898,
0.00017466921417508274,
0.21216540038585663
] |
{
"id": 2,
"code_window": [
"\tvar getCert certs.GetCertificateFunc\n",
"\tif globalTLSCerts != nil {\n",
"\t\tgetCert = globalTLSCerts.GetCertificate\n",
"\t}\n",
"\n",
"\thttpServer := xhttp.NewServer([]string{globalMinioAddr},\n",
"\t\tcriticalErrorHandler{corsHandler(handler)}, getCert)\n",
"\thttpServer.BaseContext = func(listener net.Listener) context.Context {\n",
"\t\treturn GlobalContext\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlisteners := ctx.Int(\"listeners\")\n",
"\tif listeners == 0 {\n",
"\t\tlisteners = 1\n",
"\t}\n",
"\taddrs := make([]string, 0, listeners)\n",
"\tfor i := 0; i < listeners; i++ {\n",
"\t\taddrs = append(addrs, globalMinioAddr)\n",
"\t}\n",
"\n",
"\thttpServer := xhttp.NewServer(addrs, criticalErrorHandler{corsHandler(handler)}, getCert)\n"
],
"file_path": "cmd/server-main.go",
"type": "replace",
"edit_start_line_idx": 499
} | *Federation feature is deprecated and should be avoided for future deployments*
# Federation Quickstart Guide [](https://slack.min.io)
This document explains how to configure MinIO with `Bucket lookup from DNS` style federation.
## Get started
### 1. Prerequisites
Install MinIO - [MinIO Quickstart Guide](https://docs.min.io/docs/minio-quickstart-guide).
### 2. Run MinIO in federated mode
Bucket lookup from DNS federation requires two dependencies:
- etcd (for bucket DNS service records)
- CoreDNS (for DNS management based on populated bucket DNS service records, optional)
## Architecture

### Environment variables
#### MINIO_ETCD_ENDPOINTS
This is a comma-separated list of etcd servers that you want to use as the MinIO federation back-end. This should
be the same across the federated deployment, i.e. all the MinIO instances within a federated deployment should use the same
etcd back-end.
#### MINIO_DOMAIN
This is the top level domain name used for the federated setup. This domain name should ideally resolve to a load-balancer
running in front of all the federated MinIO instances. The domain name is used to create sub-domain entries in etcd. For
example, if the domain is set to `domain.com`, the buckets `bucket1` and `bucket2` will be accessible as `bucket1.domain.com`
and `bucket2.domain.com`.
#### MINIO_PUBLIC_IPS
This is a comma-separated list of IP addresses to which buckets created on this MinIO instance will resolve. For example,
a bucket `bucket1` created on the current MinIO instance will be accessible as `bucket1.domain.com`, and the DNS entry for
`bucket1.domain.com` will point to the IP addresses set in `MINIO_PUBLIC_IPS`.
*Note*
- This field is mandatory for standalone and erasure code MinIO server deployments, to enable federated mode.
- This field is optional for distributed deployments. If you don't set this field in a federated setup, the IP addresses of
the hosts passed at MinIO server startup are used for the DNS entries.
### Run Multiple Clusters
> cluster1
```sh
export MINIO_ETCD_ENDPOINTS="http://remote-etcd1:2379,http://remote-etcd2:4001"
export MINIO_DOMAIN=domain.com
export MINIO_PUBLIC_IPS=44.35.2.1,44.35.2.2,44.35.2.3,44.35.2.4
minio server http://rack{1...4}.host{1...4}.domain.com/mnt/export{1...32}
```
> cluster2
```sh
export MINIO_ETCD_ENDPOINTS="http://remote-etcd1:2379,http://remote-etcd2:4001"
export MINIO_DOMAIN=domain.com
export MINIO_PUBLIC_IPS=44.35.1.1,44.35.1.2,44.35.1.3,44.35.1.4
minio server http://rack{5...8}.host{5...8}.domain.com/mnt/export{1...32}
```
In this configuration you can see `MINIO_ETCD_ENDPOINTS` points to the etcd backend which manages MinIO's
`config.json` and bucket DNS SRV records. `MINIO_DOMAIN` indicates the domain suffix for the bucket which
will be used to resolve buckets through DNS. For example, if you have a bucket such as `mybucket`, the
client can now use `mybucket.domain.com` to directly resolve itself to the right cluster. `MINIO_PUBLIC_IPS`
points to the public IP addresses where each cluster is accessible; this is unique for each cluster.
NOTE: `mybucket` only exists on one cluster, either `cluster1` or `cluster2`; which one is random and
is decided by how `domain.com` gets resolved. If there is round-robin DNS on `domain.com`, then
it is randomized which cluster provisions the bucket.
### 3. Upgrading to `etcdv3` API
Users running MinIO federation from release `RELEASE.2018-06-09T03-43-35Z` to `RELEASE.2018-07-10T01-42-11Z` should migrate the existing bucket data on the etcd server to the `etcdv3` API, and update the CoreDNS version to `1.2.0`, before updating their MinIO server to the latest version.
Here is some background on why this is needed - MinIO server releases `RELEASE.2018-06-09T03-43-35Z` to `RELEASE.2018-07-10T01-42-11Z` used the etcdv2 API to store bucket data on the etcd server. This was because `etcdv3` support was not yet available in CoreDNS. So even if MinIO had used the `etcdv3` API to store bucket data, CoreDNS would not have been able to read and serve it as DNS records.
Now that CoreDNS [supports etcdv3](https://coredns.io/2018/07/11/coredns-1.2.0-release/), the MinIO server uses the `etcdv3` API to store bucket data on the etcd server. As the `etcdv2` and `etcdv3` APIs are not compatible, data stored using the `etcdv2` API is not visible to the `etcdv3` API. So bucket data stored by previous MinIO versions will not be visible to the current MinIO version until a migration is done.
The CoreOS team has documented the steps required to migrate existing data from `etcdv2` to `etcdv3` in [this blog post](https://coreos.com/blog/migrating-applications-etcd-v3.html). Please refer to the post and migrate etcd data to the `etcdv3` API.
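As a minimal sketch (assuming etcd is stopped and uses the default data directory; the exact path and flags depend on your deployment, so treat the post above as authoritative), the offline migration looks like:
```sh
# Rewrites the v2 keyspace into the v3 store in place; run only while etcd is stopped.
ETCDCTL_API=3 etcdctl migrate --data-dir=/var/lib/etcd
```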
### 4. Test your setup
To test this setup, access the MinIO server via browser or [`mc`](https://docs.min.io/docs/minio-client-quickstart-guide). You’ll see the uploaded files are accessible from all the MinIO endpoints.
# Explore Further
- [Use `mc` with MinIO Server](https://docs.min.io/docs/minio-client-quickstart-guide)
- [Use `aws-cli` with MinIO Server](https://docs.min.io/docs/aws-cli-with-minio)
- [Use `s3cmd` with MinIO Server](https://docs.min.io/docs/s3cmd-with-minio)
- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/docs/golang-client-quickstart-guide)
- [The MinIO documentation website](https://docs.min.io)
| docs/federation/lookup/README.md | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.00034674463677220047,
0.0001843088393798098,
0.00016105271060951054,
0.00016502224025316536,
0.00005431448153103702
] |
{
"id": 2,
"code_window": [
"\tvar getCert certs.GetCertificateFunc\n",
"\tif globalTLSCerts != nil {\n",
"\t\tgetCert = globalTLSCerts.GetCertificate\n",
"\t}\n",
"\n",
"\thttpServer := xhttp.NewServer([]string{globalMinioAddr},\n",
"\t\tcriticalErrorHandler{corsHandler(handler)}, getCert)\n",
"\thttpServer.BaseContext = func(listener net.Listener) context.Context {\n",
"\t\treturn GlobalContext\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlisteners := ctx.Int(\"listeners\")\n",
"\tif listeners == 0 {\n",
"\t\tlisteners = 1\n",
"\t}\n",
"\taddrs := make([]string, 0, listeners)\n",
"\tfor i := 0; i < listeners; i++ {\n",
"\t\taddrs = append(addrs, globalMinioAddr)\n",
"\t}\n",
"\n",
"\thttpServer := xhttp.NewServer(addrs, criticalErrorHandler{corsHandler(handler)}, getCert)\n"
],
"file_path": "cmd/server-main.go",
"type": "replace",
"edit_start_line_idx": 499
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"testing"
"github.com/minio/minio/internal/bucket/replication"
)
var replicatedInfosTests = []struct {
name string
tgtInfos []replicatedTargetInfo
expectedCompletedSize int64
expectedReplicationStatusInternal string
expectedReplicationStatus replication.StatusType
expectedOpType replication.Type
expectedAction replicationAction
}{
{ //1. empty tgtInfos slice
name: "no replicated targets",
tgtInfos: []replicatedTargetInfo{},
expectedCompletedSize: 0,
expectedReplicationStatusInternal: "",
expectedReplicationStatus: replication.StatusType(""),
expectedOpType: replication.UnsetReplicationType,
expectedAction: replicateNone,
},
{ //2. replication completed to single target
name: "replication completed to single target",
tgtInfos: []replicatedTargetInfo{
{
Arn: "arn1",
Size: 249,
PrevReplicationStatus: replication.Pending,
ReplicationStatus: replication.Completed,
OpType: replication.ObjectReplicationType,
ReplicationAction: replicateAll,
},
},
expectedCompletedSize: 249,
expectedReplicationStatusInternal: "arn1=COMPLETED;",
expectedReplicationStatus: replication.Completed,
expectedOpType: replication.ObjectReplicationType,
expectedAction: replicateAll,
},
{ //3. replication completed to single target; failed to another
name:                              "replication completed to single target; failed to another",
tgtInfos: []replicatedTargetInfo{
{
Arn: "arn1",
Size: 249,
PrevReplicationStatus: replication.Pending,
ReplicationStatus: replication.Completed,
OpType: replication.ObjectReplicationType,
ReplicationAction: replicateAll,
},
{
Arn: "arn2",
Size: 249,
PrevReplicationStatus: replication.Pending,
ReplicationStatus: replication.Failed,
OpType: replication.ObjectReplicationType,
ReplicationAction: replicateAll,
}},
expectedCompletedSize: 249,
expectedReplicationStatusInternal: "arn1=COMPLETED;arn2=FAILED;",
expectedReplicationStatus: replication.Failed,
expectedOpType: replication.ObjectReplicationType,
expectedAction: replicateAll,
},
{ //4. replication pending on one target; failed to another
name:                              "replication pending on one target; failed to another",
tgtInfos: []replicatedTargetInfo{
{
Arn: "arn1",
Size: 249,
PrevReplicationStatus: replication.Pending,
ReplicationStatus: replication.Pending,
OpType: replication.ObjectReplicationType,
ReplicationAction: replicateAll,
},
{
Arn: "arn2",
Size: 249,
PrevReplicationStatus: replication.Pending,
ReplicationStatus: replication.Failed,
OpType: replication.ObjectReplicationType,
ReplicationAction: replicateAll,
}},
expectedCompletedSize: 0,
expectedReplicationStatusInternal: "arn1=PENDING;arn2=FAILED;",
expectedReplicationStatus: replication.Failed,
expectedOpType: replication.ObjectReplicationType,
expectedAction: replicateAll,
},
}
func TestReplicatedInfos(t *testing.T) {
for i, test := range replicatedInfosTests {
rinfos := replicatedInfos{
Targets: test.tgtInfos,
}
if actualSize := rinfos.CompletedSize(); actualSize != test.expectedCompletedSize {
t.Errorf("Test%d (%s): Size got %d , want %d", i+1, test.name, actualSize, test.expectedCompletedSize)
}
if repStatusStr := rinfos.ReplicationStatusInternal(); repStatusStr != test.expectedReplicationStatusInternal {
t.Errorf("Test%d (%s): Internal replication status got %s , want %s", i+1, test.name, repStatusStr, test.expectedReplicationStatusInternal)
}
if repStatus := rinfos.ReplicationStatus(); repStatus != test.expectedReplicationStatus {
t.Errorf("Test%d (%s): ReplicationStatus got %s , want %s", i+1, test.name, repStatus, test.expectedReplicationStatus)
}
if action := rinfos.Action(); action != test.expectedAction {
t.Errorf("Test%d (%s): Action got %s , want %s", i+1, test.name, action, test.expectedAction)
}
}
}
var parseReplicationDecisionTest = []struct {
name string
dsc string
expDsc ReplicateDecision
expErr error
}{
{ //1.
name: "empty string",
dsc: "",
expDsc: ReplicateDecision{
targetsMap: map[string]replicateTargetDecision{},
},
expErr: nil,
},
{ //2.
name: "replicate decision for one target",
dsc: "arn:minio:replication::id:bucket=true;false;arn:minio:replication::id:bucket;id",
expErr: nil,
expDsc: ReplicateDecision{
targetsMap: map[string]replicateTargetDecision{
"arn:minio:replication::id:bucket": newReplicateTargetDecision("arn:minio:replication::id:bucket", true, false),
},
},
},
{ //3.
name: "replicate decision for multiple targets",
dsc: "arn:minio:replication::id:bucket=true;false;arn:minio:replication::id:bucket;id,arn:minio:replication::id2:bucket=false;true;arn:minio:replication::id2:bucket;id2",
expErr: nil,
expDsc: ReplicateDecision{
targetsMap: map[string]replicateTargetDecision{
"arn:minio:replication::id:bucket": newReplicateTargetDecision("arn:minio:replication::id:bucket", true, false),
"arn:minio:replication::id2:bucket": newReplicateTargetDecision("arn:minio:replication::id2:bucket", false, true),
},
},
},
{ //4.
name: "invalid format replicate decision for one target",
dsc: "arn:minio:replication::id:bucket:true;false;arn:minio:replication::id:bucket;id",
expErr: errInvalidReplicateDecisionFormat,
expDsc: ReplicateDecision{
targetsMap: map[string]replicateTargetDecision{
"arn:minio:replication::id:bucket": newReplicateTargetDecision("arn:minio:replication::id:bucket", true, false),
},
},
},
}
func TestParseReplicateDecision(t *testing.T) {
for i, test := range parseReplicationDecisionTest {
//dsc, err := parseReplicateDecision(test.dsc)
dsc, err := parseReplicateDecision(test.expDsc.String())
if err != nil {
if test.expErr != err {
t.Errorf("Test%d (%s): Expected parse error got %t , want %t", i+1, test.name, err, test.expErr)
}
continue
}
if len(dsc.targetsMap) != len(test.expDsc.targetsMap) {
t.Errorf("Test%d (%s): Invalid number of entries in targetsMap got %d , want %d", i+1, test.name, len(dsc.targetsMap), len(test.expDsc.targetsMap))
}
for arn, tdsc := range dsc.targetsMap {
expDsc, ok := test.expDsc.targetsMap[arn]
if !ok || expDsc != tdsc {
t.Errorf("Test%d (%s): Invalid target replicate decision: got %+v, want %+v", i+1, test.name, tdsc, expDsc)
}
}
}
}
var replicationStateTest = []struct {
name string
rs ReplicationState
arn string
expStatus replication.StatusType
}{
{ //1. no replication status header
name: "no replicated targets",
rs: ReplicationState{},
expStatus: replication.StatusType(""),
},
{ //2. replication status for one target
name: "replication status for one target",
rs: ReplicationState{ReplicationStatusInternal: "arn1=PENDING;", Targets: map[string]replication.StatusType{"arn1": "PENDING"}},
expStatus: replication.Pending,
},
{ //3. replication status for one target - incorrect format
name:      "replication status for one target - incorrect format",
rs: ReplicationState{ReplicationStatusInternal: "arn1=PENDING"},
expStatus: replication.StatusType(""),
},
{ //4. replication status for 3 targets, one of them failed
name: "replication status for 3 targets - one failed",
rs: ReplicationState{
ReplicationStatusInternal: "arn1=COMPLETED;arn2=COMPLETED;arn3=FAILED;",
Targets: map[string]replication.StatusType{"arn1": "COMPLETED", "arn2": "COMPLETED", "arn3": "FAILED"},
},
expStatus: replication.Failed,
},
{ //5. replication status for replica version
name: "replication status for replica version",
rs: ReplicationState{ReplicationStatusInternal: string(replication.Replica)},
expStatus: replication.Replica,
},
}
func TestCompositeReplicationStatus(t *testing.T) {
for i, test := range replicationStateTest {
if rstatus := test.rs.CompositeReplicationStatus(); rstatus != test.expStatus {
t.Errorf("Test%d (%s): Overall replication status got %s , want %s", i+1, test.name, rstatus, test.expStatus)
}
}
}
| cmd/bucket-replication-utils_test.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.00018159167666453868,
0.00017579560517333448,
0.00016515518655069172,
0.00017628329806029797,
0.0000037136480841581943
] |
{
"id": 2,
"code_window": [
"\tvar getCert certs.GetCertificateFunc\n",
"\tif globalTLSCerts != nil {\n",
"\t\tgetCert = globalTLSCerts.GetCertificate\n",
"\t}\n",
"\n",
"\thttpServer := xhttp.NewServer([]string{globalMinioAddr},\n",
"\t\tcriticalErrorHandler{corsHandler(handler)}, getCert)\n",
"\thttpServer.BaseContext = func(listener net.Listener) context.Context {\n",
"\t\treturn GlobalContext\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlisteners := ctx.Int(\"listeners\")\n",
"\tif listeners == 0 {\n",
"\t\tlisteners = 1\n",
"\t}\n",
"\taddrs := make([]string, 0, listeners)\n",
"\tfor i := 0; i < listeners; i++ {\n",
"\t\taddrs = append(addrs, globalMinioAddr)\n",
"\t}\n",
"\n",
"\thttpServer := xhttp.NewServer(addrs, criticalErrorHandler{corsHandler(handler)}, getCert)\n"
],
"file_path": "cmd/server-main.go",
"type": "replace",
"edit_start_line_idx": 499
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"time"
)
// BucketTargetUsageInfo - bucket target usage info provides
// - replicated size for all objects sent to this target
// - replica size for all objects received from this target
// - replication pending size for all objects pending replication to this target
// - replication failed size for all objects failed replication to this target
// - replica pending count
// - replica failed count
type BucketTargetUsageInfo struct {
ReplicationPendingSize uint64 `json:"objectsPendingReplicationTotalSize"`
ReplicationFailedSize uint64 `json:"objectsFailedReplicationTotalSize"`
ReplicatedSize uint64 `json:"objectsReplicatedTotalSize"`
ReplicaSize uint64 `json:"objectReplicaTotalSize"`
ReplicationPendingCount uint64 `json:"objectsPendingReplicationCount"`
ReplicationFailedCount uint64 `json:"objectsFailedReplicationCount"`
}
// BucketUsageInfo - bucket usage info provides
// - total size of the bucket
// - total objects in a bucket
// - object size histogram per bucket
type BucketUsageInfo struct {
Size uint64 `json:"size"`
// Following five fields suffixed with V1 are here for backward compatibility
// Total Size for objects that have not yet been replicated
ReplicationPendingSizeV1 uint64 `json:"objectsPendingReplicationTotalSize"`
// Total size for objects that have witnessed one or more failures and will be retried
ReplicationFailedSizeV1 uint64 `json:"objectsFailedReplicationTotalSize"`
// Total size for objects that have been replicated to destination
ReplicatedSizeV1 uint64 `json:"objectsReplicatedTotalSize"`
// Total number of objects pending replication
ReplicationPendingCountV1 uint64 `json:"objectsPendingReplicationCount"`
// Total number of objects that failed replication
ReplicationFailedCountV1 uint64 `json:"objectsFailedReplicationCount"`
ObjectsCount uint64 `json:"objectsCount"`
ObjectSizesHistogram map[string]uint64 `json:"objectsSizesHistogram"`
ReplicaSize uint64 `json:"objectReplicaTotalSize"`
ReplicationInfo map[string]BucketTargetUsageInfo `json:"objectsReplicationInfo"`
}
// DataUsageInfo represents data usage stats of the underlying Object API
type DataUsageInfo struct {
// LastUpdate is the timestamp of when the data usage info was last updated.
// This does not indicate a full scan.
LastUpdate time.Time `json:"lastUpdate"`
// Objects total count across all buckets
ObjectsTotalCount uint64 `json:"objectsCount"`
// Objects total size across all buckets
ObjectsTotalSize uint64 `json:"objectsTotalSize"`
ReplicationInfo map[string]BucketTargetUsageInfo `json:"objectsReplicationInfo"`
// Total number of buckets in this cluster
BucketsCount uint64 `json:"bucketsCount"`
// Buckets usage info provides following information across all buckets
// - total size of the bucket
// - total objects in a bucket
// - object size histogram per bucket
BucketsUsage map[string]BucketUsageInfo `json:"bucketsUsageInfo"`
// Deprecated kept here for backward compatibility reasons.
BucketSizes map[string]uint64 `json:"bucketsSizes"`
}
| cmd/data-usage-utils.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.0004532044695224613,
0.00021424356964416802,
0.0001663382863625884,
0.00017334238509647548,
0.0000907442081370391
] |
{
"id": 3,
"code_window": [
")\n",
"\n",
"type acceptResult struct {\n",
"\tconn net.Conn\n",
"\terr error\n",
"}\n",
"\n",
"// httpListener - HTTP listener capable of handling multiple server addresses.\n",
"type httpListener struct {\n",
"\ttcpListeners []*net.TCPListener // underlaying TCP listeners.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlidx int\n"
],
"file_path": "internal/http/listener.go",
"type": "add",
"edit_start_line_idx": 29
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"log"
"math/rand"
"net"
"os"
"os/signal"
"strings"
"sync"
"syscall"
"time"
"github.com/minio/cli"
"github.com/minio/madmin-go"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/bucket/bandwidth"
"github.com/minio/minio/internal/color"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/fips"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/rest"
"github.com/minio/minio/internal/sync/errgroup"
"github.com/minio/pkg/certs"
"github.com/minio/pkg/env"
)
// ServerFlags - server command specific flags
var ServerFlags = []cli.Flag{
cli.StringFlag{
Name: "address",
Value: ":" + GlobalMinioDefaultPort,
Usage: "bind to a specific ADDRESS:PORT, ADDRESS can be an IP or hostname",
},
cli.StringFlag{
Name: "console-address",
Usage: "bind to a specific ADDRESS:PORT for embedded Console UI, ADDRESS can be an IP or hostname",
},
}
var serverCmd = cli.Command{
Name: "server",
Usage: "start object storage server",
Flags: append(ServerFlags, GlobalFlags...),
Action: serverMain,
CustomHelpTemplate: `NAME:
{{.HelpName}} - {{.Usage}}
USAGE:
{{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}DIR1 [DIR2..]
{{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}DIR{1...64}
{{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}DIR{1...64} DIR{65...128}
DIR:
DIR points to a directory on a filesystem. When you want to combine
multiple drives into a single large system, pass one directory per
filesystem separated by space. You may also use a '...' convention
to abbreviate the directory arguments. Remote directories in a
distributed setup are encoded as HTTP(s) URIs.
{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
EXAMPLES:
1. Start minio server on "/home/shared" directory.
{{.Prompt}} {{.HelpName}} /home/shared
2. Start single node server with 64 local drives "/mnt/data1" to "/mnt/data64".
{{.Prompt}} {{.HelpName}} /mnt/data{1...64}
3. Start distributed minio server on a 32-node setup with 32 drives each, run the following command on all the nodes
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_USER{{.AssignmentOperator}}minio
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_PASSWORD{{.AssignmentOperator}}miniostorage
{{.Prompt}} {{.HelpName}} http://node{1...32}.example.com/mnt/export{1...32}
4. Start distributed minio server in an expanded setup, run the following command on all the nodes
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_USER{{.AssignmentOperator}}minio
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_PASSWORD{{.AssignmentOperator}}miniostorage
{{.Prompt}} {{.HelpName}} http://node{1...16}.example.com/mnt/export{1...32} \
http://node{17...64}.example.com/mnt/export{1...64}
`,
}
func serverCmdArgs(ctx *cli.Context) []string {
v, _, _, err := env.LookupEnv(config.EnvArgs)
if err != nil {
logger.FatalIf(err, "Unable to validate passed arguments in %s:%s",
config.EnvArgs, os.Getenv(config.EnvArgs))
}
if v == "" {
// Fall back to older environment value MINIO_ENDPOINTS
v, _, _, err = env.LookupEnv(config.EnvEndpoints)
if err != nil {
logger.FatalIf(err, "Unable to validate passed arguments in %s:%s",
config.EnvEndpoints, os.Getenv(config.EnvEndpoints))
}
}
if v == "" {
if !ctx.Args().Present() || ctx.Args().First() == "help" {
cli.ShowCommandHelpAndExit(ctx, ctx.Command.Name, 1)
}
return ctx.Args()
}
return strings.Fields(v)
}
func serverHandleCmdArgs(ctx *cli.Context) {
// Handle common command args.
handleCommonCmdArgs(ctx)
logger.FatalIf(CheckLocalServerAddr(globalMinioAddr), "Unable to validate passed arguments")
var err error
var setupType SetupType
// Check and load TLS certificates.
globalPublicCerts, globalTLSCerts, globalIsTLS, err = getTLSConfig()
logger.FatalIf(err, "Unable to load the TLS configuration")
// Check and load Root CAs.
globalRootCAs, err = certs.GetRootCAs(globalCertsCADir.Get())
logger.FatalIf(err, "Failed to read root CAs (%v)", err)
// Add the global public crts as part of global root CAs
for _, publicCrt := range globalPublicCerts {
globalRootCAs.AddCert(publicCrt)
}
// Register root CAs for remote ENVs
env.RegisterGlobalCAs(globalRootCAs)
globalEndpoints, setupType, err = createServerEndpoints(globalMinioAddr, serverCmdArgs(ctx)...)
logger.FatalIf(err, "Invalid command line arguments")
globalLocalNodeName = GetLocalPeer(globalEndpoints, globalMinioHost, globalMinioPort)
globalRemoteEndpoints = make(map[string]Endpoint)
for _, z := range globalEndpoints {
for _, ep := range z.Endpoints {
if ep.IsLocal {
globalRemoteEndpoints[globalLocalNodeName] = ep
} else {
globalRemoteEndpoints[ep.Host] = ep
}
}
}
// allow transport to be HTTP/1.1 for proxying.
globalProxyTransport = newCustomHTTPProxyTransport(&tls.Config{
RootCAs: globalRootCAs,
CipherSuites: fips.CipherSuitesTLS(),
CurvePreferences: fips.EllipticCurvesTLS(),
}, rest.DefaultTimeout)()
globalProxyEndpoints = GetProxyEndpoints(globalEndpoints)
globalInternodeTransport = newInternodeHTTPTransport(&tls.Config{
RootCAs: globalRootCAs,
CipherSuites: fips.CipherSuitesTLS(),
CurvePreferences: fips.EllipticCurvesTLS(),
}, rest.DefaultTimeout)()
// On macOS, if a process already listens on LOCALIPADDR:PORT, net.Listen() falls back
// to the IPv6 address, i.e. minio will start listening on the IPv6 address whereas another
// (non-)minio process is listening on the IPv4 address of the given port.
// To avoid this error situation we check for port availability.
logger.FatalIf(checkPortAvailability(globalMinioHost, globalMinioPort), "Unable to start the server")
globalIsErasure = (setupType == ErasureSetupType)
globalIsDistErasure = (setupType == DistErasureSetupType)
if globalIsDistErasure {
globalIsErasure = true
}
}
func serverHandleEnvVars() {
// Handle common environment variables.
handleCommonEnvVars()
}
var globalHealStateLK sync.RWMutex
func newAllSubsystems() {
if globalIsErasure {
globalHealStateLK.Lock()
// New global heal state
globalAllHealState = newHealState(true)
globalBackgroundHealState = newHealState(false)
globalHealStateLK.Unlock()
}
// Create new notification system and initialize notification targets
globalNotificationSys = NewNotificationSys(globalEndpoints)
// Create new bucket metadata system.
if globalBucketMetadataSys == nil {
globalBucketMetadataSys = NewBucketMetadataSys()
} else {
// Reinitialize safely when testing.
globalBucketMetadataSys.Reset()
}
// Create the bucket bandwidth monitor
globalBucketMonitor = bandwidth.NewMonitor(GlobalContext, totalNodeCount())
// Create a new config system.
globalConfigSys = NewConfigSys()
// Create new IAM system.
globalIAMSys = NewIAMSys()
// Create new policy system.
globalPolicySys = NewPolicySys()
// Create new lifecycle system.
globalLifecycleSys = NewLifecycleSys()
// Create new bucket encryption subsystem
globalBucketSSEConfigSys = NewBucketSSEConfigSys()
// Create new bucket object lock subsystem
globalBucketObjectLockSys = NewBucketObjectLockSys()
// Create new bucket quota subsystem
globalBucketQuotaSys = NewBucketQuotaSys()
// Create new bucket versioning subsystem
if globalBucketVersioningSys == nil {
globalBucketVersioningSys = NewBucketVersioningSys()
} else {
globalBucketVersioningSys.Reset()
}
// Create new bucket replication subsystem
globalBucketTargetSys = NewBucketTargetSys()
// Create new ILM tier configuration subsystem
globalTierConfigMgr = NewTierConfigMgr()
}
func configRetriableErrors(err error) bool {
// Initializing sub-systems needs a retry mechanism for
// the following reasons:
// - Read quorum is lost just after the initialization
// of the object layer.
// - Write quorum not met when upgrading configuration
// version is needed, migration is needed etc.
rquorum := InsufficientReadQuorum{}
wquorum := InsufficientWriteQuorum{}
// One of these retriable errors shall be retried.
return errors.Is(err, errDiskNotFound) ||
errors.Is(err, errConfigNotFound) ||
errors.Is(err, context.DeadlineExceeded) ||
errors.Is(err, errErasureWriteQuorum) ||
errors.Is(err, errErasureReadQuorum) ||
errors.Is(err, io.ErrUnexpectedEOF) ||
errors.As(err, &rquorum) ||
errors.As(err, &wquorum) ||
isErrBucketNotFound(err) ||
errors.Is(err, os.ErrDeadlineExceeded)
}
func initServer(ctx context.Context, newObject ObjectLayer) error {
// Once the config is fully loaded, initialize the new object layer.
setObjectLayer(newObject)
// Make sure to hold the lock for the entire migration so that
// only one server migrates the entire config at a given time;
// this big transaction lock ensures this appropriately. This is
// also true for rotation of encrypted content.
txnLk := newObject.NewNSLock(minioMetaBucket, minioConfigPrefix+"/transaction.lock")
// **** WARNING ****
// Migrating to encrypted backend should happen before initialization of any
// sub-systems, make sure that we do not move the above codeblock elsewhere.
r := rand.New(rand.NewSource(time.Now().UnixNano()))
lockTimeout := newDynamicTimeout(5*time.Second, 3*time.Second)
for {
select {
case <-ctx.Done():
// Retry was canceled successfully.
return fmt.Errorf("Initializing sub-systems stopped gracefully %w", ctx.Err())
default:
}
// let one of the server acquire the lock, if not let them timeout.
// which shall be retried again by this loop.
lkctx, err := txnLk.GetLock(ctx, lockTimeout)
if err != nil {
logger.Info("Waiting for all MinIO sub-systems to be initialized.. trying to acquire lock")
time.Sleep(time.Duration(r.Float64() * float64(5*time.Second)))
continue
}
// These messages only meant primarily for distributed setup, so only log during distributed setup.
if globalIsDistErasure {
logger.Info("Waiting for all MinIO sub-systems to be initialized.. lock acquired")
}
// Migrate all backend configs to encrypted backend configs and optionally
// handle rotating keys for encryption; any retriable failure
// shall be retried by this loop.
if err = handleEncryptedConfigBackend(newObject); err == nil {
// Upon success migrating the config, initialize all sub-systems
// if all sub-systems initialized successfully return right away
if err = initAllSubsystems(ctx, newObject); err == nil {
txnLk.Unlock(lkctx.Cancel)
// All successful return.
if globalIsDistErasure {
// These messages only meant primarily for distributed setup, so only log during distributed setup.
logger.Info("All MinIO sub-systems initialized successfully")
}
return nil
}
}
// Unlock the transaction lock and allow other nodes to acquire the lock if possible.
txnLk.Unlock(lkctx.Cancel)
if configRetriableErrors(err) {
logger.Info("Waiting for all MinIO sub-systems to be initialized.. possible cause (%v)", err)
time.Sleep(time.Duration(r.Float64() * float64(5*time.Second)))
continue
}
// Any other unhandled return right here.
return fmt.Errorf("Unable to initialize sub-systems: %w", err)
}
}
func initAllSubsystems(ctx context.Context, newObject ObjectLayer) (err error) {
// %w is used by all error returns here to make sure
// we wrap the underlying error, make sure when you
// are modifying this code that you do so, if and when
// you want to add extra context to your error. This
// ensures top level retry works accordingly.
// List buckets to heal, and be re-used for loading configs.
buckets, err := newObject.ListBuckets(ctx)
if err != nil {
return fmt.Errorf("Unable to list buckets to heal: %w", err)
}
if globalIsErasure {
if len(buckets) > 0 {
if len(buckets) == 1 {
logger.Info(fmt.Sprintf("Verifying if %d bucket is consistent across drives...", len(buckets)))
} else {
logger.Info(fmt.Sprintf("Verifying if %d buckets are consistent across drives...", len(buckets)))
}
}
// Limit to no more than 50 concurrent buckets.
g := errgroup.WithNErrs(len(buckets)).WithConcurrency(50)
ctx, cancel := g.WithCancelOnError(ctx)
defer cancel()
for index := range buckets {
index := index
g.Go(func() error {
_, berr := newObject.HealBucket(ctx, buckets[index].Name, madmin.HealOpts{Recreate: true})
return berr
}, index)
}
if err := g.WaitErr(); err != nil {
return fmt.Errorf("Unable to list buckets to heal: %w", err)
}
}
// Initialize config system.
if err = globalConfigSys.Init(newObject); err != nil {
if configRetriableErrors(err) {
return fmt.Errorf("Unable to initialize config system: %w", err)
}
// Any other config errors we simply print a message and proceed forward.
logger.LogIf(ctx, fmt.Errorf("Unable to initialize config, some features may be missing %w", err))
}
// Populate existing buckets to the etcd backend
if globalDNSConfig != nil {
// Background this operation.
go initFederatorBackend(buckets, newObject)
}
// Initialize bucket metadata sub-system.
globalBucketMetadataSys.Init(ctx, buckets, newObject)
// Initialize bucket notification sub-system.
globalNotificationSys.Init(ctx, buckets, newObject)
// Initialize site replication manager.
globalSiteReplicationSys.Init(ctx, newObject)
if globalIsErasure {
// Initialize transition tier configuration manager
if err = globalTierConfigMgr.Init(ctx, newObject); err != nil {
return err
}
}
return nil
}
type nullWriter struct{}
func (lw nullWriter) Write(b []byte) (int, error) {
return len(b), nil
}
// serverMain handler called for 'minio server' command.
func serverMain(ctx *cli.Context) {
signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
go handleSignals()
setDefaultProfilerRates()
// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(GlobalContext)
logger.AddTarget(globalConsoleSys)
// Perform any self-tests
bitrotSelfTest()
erasureSelfTest()
compressSelfTest()
// Handle all server command args.
serverHandleCmdArgs(ctx)
// Handle all server environment vars.
serverHandleEnvVars()
// Set node name, only set for distributed setup.
globalConsoleSys.SetNodeName(globalLocalNodeName)
// Initialize all help
initHelp()
// Initialize all sub-systems
newAllSubsystems()
// In a distributed setup, error out if no certificates are found for HTTPS endpoints.
if globalIsDistErasure {
if globalEndpoints.HTTPS() && !globalIsTLS {
logger.Fatal(config.ErrNoCertsAndHTTPSEndpoints(nil), "Unable to start the server")
}
if !globalEndpoints.HTTPS() && globalIsTLS {
logger.Fatal(config.ErrCertsAndHTTPEndpoints(nil), "Unable to start the server")
}
}
if !globalCLIContext.Quiet && !globalInplaceUpdateDisabled {
// Check for new updates from dl.min.io.
checkUpdate(getMinioMode())
}
if !globalActiveCred.IsValid() && globalIsDistErasure {
globalActiveCred = auth.DefaultCredentials
}
// Set system resources to maximum.
setMaxResources()
// Configure server.
handler, err := configureServerHandler(globalEndpoints)
if err != nil {
logger.Fatal(config.ErrUnexpectedError(err), "Unable to configure one of server's RPC services")
}
var getCert certs.GetCertificateFunc
if globalTLSCerts != nil {
getCert = globalTLSCerts.GetCertificate
}
httpServer := xhttp.NewServer([]string{globalMinioAddr},
criticalErrorHandler{corsHandler(handler)}, getCert)
httpServer.BaseContext = func(listener net.Listener) context.Context {
return GlobalContext
}
// Turn-off random logging by Go internally
httpServer.ErrorLog = log.New(&nullWriter{}, "", 0)
go func() {
globalHTTPServerErrorCh <- httpServer.Start(GlobalContext)
}()
setHTTPServer(httpServer)
if globalIsDistErasure && globalEndpoints.FirstLocal() {
for {
// Additionally in distributed setup, validate the setup and configuration.
err := verifyServerSystemConfig(GlobalContext, globalEndpoints)
if err == nil || errors.Is(err, context.Canceled) {
break
}
logger.LogIf(GlobalContext, err, "Unable to initialize distributed setup, retrying.. after 5 seconds")
select {
case <-GlobalContext.Done():
return
case <-time.After(500 * time.Millisecond):
}
}
}
newObject, err := newObjectLayer(GlobalContext, globalEndpoints)
if err != nil {
logFatalErrs(err, Endpoint{}, true)
}
logger.SetDeploymentID(globalDeploymentID)
// Enable background operations for erasure coding
if globalIsErasure {
initAutoHeal(GlobalContext, newObject)
initHealMRF(GlobalContext, newObject)
}
initBackgroundExpiry(GlobalContext, newObject)
if err = initServer(GlobalContext, newObject); err != nil {
var cerr config.Err
// For any config error, we don't need to drop into safe-mode
// instead its a user error and should be fixed by user.
if errors.As(err, &cerr) {
logger.FatalIf(err, "Unable to initialize the server")
}
// If context was canceled
if errors.Is(err, context.Canceled) {
logger.FatalIf(err, "Server startup canceled upon user request")
}
logger.LogIf(GlobalContext, err)
}
// Initialize users credentials and policies in background right after config has initialized.
go globalIAMSys.Init(GlobalContext, newObject)
initDataScanner(GlobalContext, newObject)
if globalIsErasure { // to be done after config init
initBackgroundReplication(GlobalContext, newObject)
initBackgroundTransition(GlobalContext, newObject)
globalTierJournal, err = initTierDeletionJournal(GlobalContext)
if err != nil {
logger.FatalIf(err, "Unable to initialize remote tier pending deletes journal")
}
}
if globalCacheConfig.Enabled {
// initialize the new disk cache objects.
var cacheAPI CacheObjectLayer
cacheAPI, err = newServerCacheObjects(GlobalContext, globalCacheConfig)
logger.FatalIf(err, "Unable to initialize disk caching")
setCacheObjectLayer(cacheAPI)
}
// Prints the formatted startup message, if err is not nil then it prints additional information as well.
printStartupMessage(getAPIEndpoints(), err)
if globalActiveCred.Equal(auth.DefaultCredentials) {
msg := fmt.Sprintf("WARNING: Detected default credentials '%s', we recommend that you change these values with 'MINIO_ROOT_USER' and 'MINIO_ROOT_PASSWORD' environment variables", globalActiveCred)
logStartupMessage(color.RedBold(msg))
}
if !globalCLIContext.StrictS3Compat {
logStartupMessage(color.RedBold("WARNING: Strict AWS S3 compatible incoming PUT, POST content payload validation is turned off, caution is advised do not use in production"))
}
if globalBrowserEnabled {
globalConsoleSrv, err = initConsoleServer()
if err != nil {
logger.FatalIf(err, "Unable to initialize console service")
}
go func() {
logger.FatalIf(globalConsoleSrv.Serve(), "Unable to initialize console server")
}()
}
if serverDebugLog {
logger.Info("== DEBUG Mode enabled ==")
logger.Info("Currently set environment settings:")
for _, v := range os.Environ() {
logger.Info(v)
}
logger.Info("======")
}
<-globalOSSignalCh
}
// Initialize object layer with the supplied disks, objectLayer is nil upon any error.
func newObjectLayer(ctx context.Context, endpointServerPools EndpointServerPools) (newObject ObjectLayer, err error) {
// For FS only, directly use the disk.
if endpointServerPools.NEndpoints() == 1 {
// Initialize new FS object layer.
return NewFSObjectLayer(endpointServerPools[0].Endpoints[0].Path)
}
return newErasureServerPools(ctx, endpointServerPools)
}
| cmd/server-main.go | 1 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.995711088180542,
0.016045838594436646,
0.00016127385606523603,
0.00016952288569882512,
0.12441813200712204
] |
{
"id": 3,
"code_window": [
")\n",
"\n",
"type acceptResult struct {\n",
"\tconn net.Conn\n",
"\terr error\n",
"}\n",
"\n",
"// httpListener - HTTP listener capable of handling multiple server addresses.\n",
"type httpListener struct {\n",
"\ttcpListeners []*net.TCPListener // underlaying TCP listeners.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlidx int\n"
],
"file_path": "internal/http/listener.go",
"type": "add",
"edit_start_line_idx": 29
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package etcd
import (
"reflect"
"testing"
)
// TestParseEndpoints - tests parseEndpoints function with valid and invalid inputs.
func TestParseEndpoints(t *testing.T) {
testCases := []struct {
s string
endpoints []string
secure bool
success bool
}{
// Invalid inputs
{"https://localhost:2379,http://localhost:2380", nil, false, false},
{",,,", nil, false, false},
{"", nil, false, false},
{"ftp://localhost:2379", nil, false, false},
{"http://localhost:2379000", nil, false, false},
// Valid inputs
{"https://localhost:2379,https://localhost:2380", []string{
"https://localhost:2379", "https://localhost:2380"},
true, true},
{"http://localhost:2379", []string{"http://localhost:2379"}, false, true},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.s, func(t *testing.T) {
endpoints, secure, err := parseEndpoints(testCase.s)
if err != nil && testCase.success {
t.Errorf("expected to succeed but failed with %s", err)
}
if !testCase.success && err == nil {
t.Error("expected failure but succeeded instead")
}
if testCase.success {
if !reflect.DeepEqual(endpoints, testCase.endpoints) {
t.Errorf("expected %s, got %s", testCase.endpoints, endpoints)
}
if secure != testCase.secure {
t.Errorf("expected %t, got %t", testCase.secure, secure)
}
}
})
}
}
| internal/config/etcd/etcd_test.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.00017928147281054407,
0.00017297769954893738,
0.00016539882926736027,
0.0001738250139169395,
0.000004776697096531279
] |
{
"id": 3,
"code_window": [
")\n",
"\n",
"type acceptResult struct {\n",
"\tconn net.Conn\n",
"\terr error\n",
"}\n",
"\n",
"// httpListener - HTTP listener capable of handling multiple server addresses.\n",
"type httpListener struct {\n",
"\ttcpListeners []*net.TCPListener // underlaying TCP listeners.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlidx int\n"
],
"file_path": "internal/http/listener.go",
"type": "add",
"edit_start_line_idx": 29
} | ## MinIO Server Limits Per Tenant
For the best deployment experience, MinIO recommends the operating systems RHEL/CentOS 8.x or later, or Ubuntu 18.04 LTS or later. These operating systems package the latest 'xfsprogs' that support large-scale deployments.
### Erasure Code (Multiple Drives / Servers)
| Item | Specification |
|:----------------------------------------------------------------|:--------------|
| Maximum number of servers per cluster | no-limit |
| Maximum number of federated clusters | no-limit |
| Minimum number of servers | 02 |
| Minimum number of drives per server when server count is 1 | 04 |
| Minimum number of drives per server when server count is 2 or 3 | 02 |
| Minimum number of drives per server when server count is 4 | 01 |
| Maximum number of drives per server | no-limit |
| Read quorum | N/2 |
| Write quorum | N/2+1 |
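For instance, the quorum arithmetic above works out as follows for a hypothetical 16-drive erasure set (a sketch of the formulas in the table, not MinIO's internal implementation):

```go
package main

import "fmt"

func main() {
	n := 16                // drives in the erasure set (hypothetical)
	readQuorum := n / 2    // N/2   = 8 drives must respond for reads
	writeQuorum := n/2 + 1 // N/2+1 = 9 drives must acknowledge writes
	fmt.Println("read quorum:", readQuorum, "write quorum:", writeQuorum)
}
```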
### Limits of S3 API
| Item | Specification |
|:--------------------------------------------------------------------------------|:----------------------------------------------|
| Maximum number of buckets | no-limit |
| Maximum number of objects per bucket | no-limit |
| Maximum object size | 5 TiB |
| Minimum object size | 0 B |
| Maximum object size per PUT operation | 5 TiB |
| Maximum number of parts per upload | 10,000 |
| Part size range | 5 MiB to 5 GiB. Last part can be 0 B to 5 GiB |
| Maximum number of parts returned per list parts request | 10000 |
| Maximum number of objects returned per list objects request | 4500 |
| Maximum number of multipart uploads returned per list multipart uploads request | 1000 |
| Maximum length for bucket names | 63 |
| Maximum length for object names | 1024 |
| Maximum length for '/' separated object name segment | 255 |
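To see how these limits interact, a quick back-of-the-envelope check (values taken straight from the table above) gives the smallest part size that still fits a maximum-size 5 TiB object into 10,000 parts - comfortably within the 5 MiB to 5 GiB part size range:

```go
package main

import "fmt"

func main() {
	const maxObjectSize = 5 << 40 // 5 TiB in bytes
	const maxParts = 10000
	minPartSize := maxObjectSize / maxParts // ≈ 550 MB, i.e. ~524 MiB per part
	fmt.Printf("each part must be at least ~%.0f MiB\n", float64(minPartSize)/(1<<20))
}
```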
### List of Amazon S3 API's not supported on MinIO
We found the following APIs to be redundant or less useful outside of AWS S3. If you have a different view on any of the APIs we missed, please open a [GitHub issue](https://github.com/minio/minio/issues).
#### List of Amazon S3 Bucket API's not supported on MinIO
- BucketACL (Use [bucket policies](https://docs.min.io/docs/minio-client-complete-guide#policy) instead)
- BucketCORS (CORS enabled by default on all buckets for all HTTP verbs)
- BucketWebsite (Use [`caddy`](https://github.com/caddyserver/caddy) or [`nginx`](https://www.nginx.com/resources/wiki/))
- BucketAnalytics, BucketMetrics, BucketLogging (Use [bucket notification](https://docs.min.io/docs/minio-client-complete-guide#events) APIs)
- BucketRequestPayment
#### List of Amazon S3 Object API's not supported on MinIO
- ObjectACL (Use [bucket policies](https://docs.min.io/docs/minio-client-complete-guide#policy) instead)
- ObjectTorrent
### Object name restrictions on MinIO
- Object names that contain characters `^*|\/&";` are unsupported on the Windows platform or any other file systems that do not support filenames with special characters. **This list is non-exhaustive; it depends on the operating system and filesystem in use - please consult your operating system vendor**. MinIO recommends using Linux-based deployments for production workloads.
- Objects must not have conflicting objects as parents; applications relying on this behavior should change it and use proper unique keys. For example, conflicting key patterns such as the following are not supported.
```
PUT <bucketname>/a/b/1.txt
PUT <bucketname>/a/b
```
```
PUT <bucketname>/a/b
PUT <bucketname>/a/b/1.txt
```
| docs/minio-limits.md | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.0001762273459462449,
0.00016777074779383838,
0.00016355013940483332,
0.00016561185475438833,
0.000004333441211201716
] |
{
"id": 3,
"code_window": [
")\n",
"\n",
"type acceptResult struct {\n",
"\tconn net.Conn\n",
"\terr error\n",
"}\n",
"\n",
"// httpListener - HTTP listener capable of handling multiple server addresses.\n",
"type httpListener struct {\n",
"\ttcpListeners []*net.TCPListener // underlaying TCP listeners.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlidx int\n"
],
"file_path": "internal/http/listener.go",
"type": "add",
"edit_start_line_idx": 29
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"compress/gzip"
"net"
"net/http"
"github.com/gorilla/mux"
"github.com/klauspost/compress/gzhttp"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/wildcard"
"github.com/rs/cors"
)
func newHTTPServerFn() *xhttp.Server {
globalObjLayerMutex.RLock()
defer globalObjLayerMutex.RUnlock()
return globalHTTPServer
}
func setHTTPServer(h *xhttp.Server) {
globalObjLayerMutex.Lock()
globalHTTPServer = h
globalObjLayerMutex.Unlock()
}
func newObjectLayerFn() ObjectLayer {
globalObjLayerMutex.RLock()
defer globalObjLayerMutex.RUnlock()
return globalObjectAPI
}
func newCachedObjectLayerFn() CacheObjectLayer {
globalObjLayerMutex.RLock()
defer globalObjLayerMutex.RUnlock()
return globalCacheObjectAPI
}
func setCacheObjectLayer(c CacheObjectLayer) {
globalObjLayerMutex.Lock()
globalCacheObjectAPI = c
globalObjLayerMutex.Unlock()
}
func setObjectLayer(o ObjectLayer) {
globalObjLayerMutex.Lock()
globalObjectAPI = o
globalObjLayerMutex.Unlock()
}
// objectAPIHandler implements and provides http handlers for S3 API.
type objectAPIHandlers struct {
ObjectAPI func() ObjectLayer
CacheAPI func() CacheObjectLayer
}
// getHost tries its best to return the request host.
// According to section 14.23 of RFC 2616 the Host header
// can include the port number if the default value of 80 is not used.
func getHost(r *http.Request) string {
if r.URL.IsAbs() {
return r.URL.Host
}
return r.Host
}
func notImplementedHandler(w http.ResponseWriter, r *http.Request) {
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
}
type rejectedAPI struct {
api string
methods []string
queries []string
path string
}
var rejectedObjAPIs = []rejectedAPI{
{
api: "torrent",
methods: []string{http.MethodPut, http.MethodDelete, http.MethodGet},
queries: []string{"torrent", ""},
path: "/{object:.+}",
},
{
api: "acl",
methods: []string{http.MethodDelete},
queries: []string{"acl", ""},
path: "/{object:.+}",
},
}
var rejectedBucketAPIs = []rejectedAPI{
{
api: "inventory",
methods: []string{http.MethodGet, http.MethodPut, http.MethodDelete},
queries: []string{"inventory", ""},
},
{
api: "cors",
methods: []string{http.MethodPut, http.MethodDelete},
queries: []string{"cors", ""},
},
{
api: "metrics",
methods: []string{http.MethodGet, http.MethodPut, http.MethodDelete},
queries: []string{"metrics", ""},
},
{
api: "website",
methods: []string{http.MethodPut},
queries: []string{"website", ""},
},
{
api: "logging",
methods: []string{http.MethodPut, http.MethodDelete},
queries: []string{"logging", ""},
},
{
api: "accelerate",
methods: []string{http.MethodPut, http.MethodDelete},
queries: []string{"accelerate", ""},
},
{
api: "requestPayment",
methods: []string{http.MethodPut, http.MethodDelete},
queries: []string{"requestPayment", ""},
},
{
api: "acl",
methods: []string{http.MethodDelete, http.MethodPut, http.MethodHead},
queries: []string{"acl", ""},
},
{
api: "publicAccessBlock",
methods: []string{http.MethodDelete, http.MethodPut, http.MethodGet},
queries: []string{"publicAccessBlock", ""},
},
{
api: "ownershipControls",
methods: []string{http.MethodDelete, http.MethodPut, http.MethodGet},
queries: []string{"ownershipControls", ""},
},
{
api: "intelligent-tiering",
methods: []string{http.MethodDelete, http.MethodPut, http.MethodGet},
queries: []string{"intelligent-tiering", ""},
},
{
api: "analytics",
methods: []string{http.MethodDelete, http.MethodPut, http.MethodGet},
queries: []string{"analytics", ""},
},
}
// registerAPIRouter - registers S3 compatible APIs.
func registerAPIRouter(router *mux.Router) {
// Initialize API.
api := objectAPIHandlers{
ObjectAPI: newObjectLayerFn,
CacheAPI: newCachedObjectLayerFn,
}
// API Router
apiRouter := router.PathPrefix(SlashSeparator).Subrouter()
var routers []*mux.Router
for _, domainName := range globalDomainNames {
if IsKubernetes() {
routers = append(routers, apiRouter.MatcherFunc(func(r *http.Request, match *mux.RouteMatch) bool {
host, _, err := net.SplitHostPort(getHost(r))
if err != nil {
host = r.Host
}
// Make sure to skip matching `minio.<domain>`; this is
// specifically meant for operator/k8s deployment.
// The reason we need to skip this is for a special
// usecase where we need to make sure that
// minio.<namespace>.svc.<cluster_domain> is ignored
// by the bucketDNS style to ensure that path style
// is available and honored at this domain.
//
// All other `<bucket>.<namespace>.svc.<cluster_domain>`
// makes sure that buckets are routed through this matcher
// to match for `<bucket>`
return host != minioReservedBucket+"."+domainName
}).Host("{bucket:.+}."+domainName).Subrouter())
} else {
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
}
}
routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())
gz, err := gzhttp.NewWrapper(gzhttp.MinSize(1000), gzhttp.CompressionLevel(gzip.BestSpeed))
if err != nil {
// Static params, so this is very unlikely.
logger.Fatal(err, "Unable to initialize server")
}
for _, router := range routers {
// Register all rejected object APIs
for _, r := range rejectedObjAPIs {
t := router.Methods(r.methods...).
HandlerFunc(collectAPIStats(r.api, httpTraceAll(notImplementedHandler))).
Queries(r.queries...)
t.Path(r.path)
}
// Object operations
// HeadObject
router.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
collectAPIStats("headobject", maxClients(gz(httpTraceAll(api.HeadObjectHandler)))))
// CopyObjectPart
router.Methods(http.MethodPut).Path("/{object:.+}").
HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
HandlerFunc(collectAPIStats("copyobjectpart", maxClients(gz(httpTraceAll(api.CopyObjectPartHandler))))).
Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// PutObjectPart
router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjectpart", maxClients(gz(httpTraceHdrs(api.PutObjectPartHandler))))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// ListObjectParts
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("listobjectparts", maxClients(gz(httpTraceAll(api.ListObjectPartsHandler))))).Queries("uploadId", "{uploadId:.*}")
// CompleteMultipartUpload
router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("completemultipartupload", maxClients(gz(httpTraceAll(api.CompleteMultipartUploadHandler))))).Queries("uploadId", "{uploadId:.*}")
// NewMultipartUpload
router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("newmultipartupload", maxClients(gz(httpTraceAll(api.NewMultipartUploadHandler))))).Queries("uploads", "")
// AbortMultipartUpload
router.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
collectAPIStats("abortmultipartupload", maxClients(gz(httpTraceAll(api.AbortMultipartUploadHandler))))).Queries("uploadId", "{uploadId:.*}")
// GetObjectACL - this is a dummy call.
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobjectacl", maxClients(gz(httpTraceHdrs(api.GetObjectACLHandler))))).Queries("acl", "")
// PutObjectACL - this is a dummy call.
router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjectacl", maxClients(gz(httpTraceHdrs(api.PutObjectACLHandler))))).Queries("acl", "")
// GetObjectTagging
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobjecttagging", maxClients(gz(httpTraceHdrs(api.GetObjectTaggingHandler))))).Queries("tagging", "")
// PutObjectTagging
router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjecttagging", maxClients(gz(httpTraceHdrs(api.PutObjectTaggingHandler))))).Queries("tagging", "")
// DeleteObjectTagging
router.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
collectAPIStats("deleteobjecttagging", maxClients(gz(httpTraceHdrs(api.DeleteObjectTaggingHandler))))).Queries("tagging", "")
// SelectObjectContent
router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("selectobjectcontent", maxClients(gz(httpTraceHdrs(api.SelectObjectContentHandler))))).Queries("select", "").Queries("select-type", "2")
// GetObjectRetention
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobjectretention", maxClients(gz(httpTraceAll(api.GetObjectRetentionHandler))))).Queries("retention", "")
// GetObjectLegalHold
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobjectlegalhold", maxClients(gz(httpTraceAll(api.GetObjectLegalHoldHandler))))).Queries("legal-hold", "")
// GetObject - note gzip compression is *not* added due to Range requests.
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobject", maxClients(httpTraceHdrs(api.GetObjectHandler))))
// CopyObject
router.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(
collectAPIStats("copyobject", maxClients(gz(httpTraceAll(api.CopyObjectHandler)))))
// PutObjectRetention
router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjectretention", maxClients(gz(httpTraceAll(api.PutObjectRetentionHandler))))).Queries("retention", "")
// PutObjectLegalHold
router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjectlegalhold", maxClients(gz(httpTraceAll(api.PutObjectLegalHoldHandler))))).Queries("legal-hold", "")
// PutObject with auto-extract support for zip
router.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzSnowballExtract, "true").HandlerFunc(
collectAPIStats("putobject", maxClients(gz(httpTraceHdrs(api.PutObjectExtractHandler)))))
// PutObject
router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobject", maxClients(gz(httpTraceHdrs(api.PutObjectHandler)))))
// DeleteObject
router.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
collectAPIStats("deleteobject", maxClients(gz(httpTraceAll(api.DeleteObjectHandler)))))
// PostRestoreObject
router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("restoreobject", maxClients(gz(httpTraceAll(api.PostRestoreObjectHandler))))).Queries("restore", "")
/// Bucket operations
// GetBucketLocation
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketlocation", maxClients(gz(httpTraceAll(api.GetBucketLocationHandler))))).Queries("location", "")
// GetBucketPolicy
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketpolicy", maxClients(gz(httpTraceAll(api.GetBucketPolicyHandler))))).Queries("policy", "")
// GetBucketLifecycle
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketlifecycle", maxClients(gz(httpTraceAll(api.GetBucketLifecycleHandler))))).Queries("lifecycle", "")
// GetBucketEncryption
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketencryption", maxClients(gz(httpTraceAll(api.GetBucketEncryptionHandler))))).Queries("encryption", "")
// GetBucketObjectLockConfig
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketobjectlockconfiguration", maxClients(gz(httpTraceAll(api.GetBucketObjectLockConfigHandler))))).Queries("object-lock", "")
// GetBucketReplicationConfig
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketreplicationconfiguration", maxClients(gz(httpTraceAll(api.GetBucketReplicationConfigHandler))))).Queries("replication", "")
// GetBucketVersioning
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketversioning", maxClients(gz(httpTraceAll(api.GetBucketVersioningHandler))))).Queries("versioning", "")
// GetBucketNotification
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketnotification", maxClients(gz(httpTraceAll(api.GetBucketNotificationHandler))))).Queries("notification", "")
// ListenNotification
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listennotification", maxClients(gz(httpTraceAll(api.ListenNotificationHandler))))).Queries("events", "{events:.*}")
// Dummy Bucket Calls
// GetBucketACL -- this is a dummy call.
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketacl", maxClients(gz(httpTraceAll(api.GetBucketACLHandler))))).Queries("acl", "")
// PutBucketACL -- this is a dummy call.
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketacl", maxClients(gz(httpTraceAll(api.PutBucketACLHandler))))).Queries("acl", "")
// GetBucketCors - this is a dummy call.
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketcors", maxClients(gz(httpTraceAll(api.GetBucketCorsHandler))))).Queries("cors", "")
// GetBucketWebsiteHandler - this is a dummy call.
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketwebsite", maxClients(gz(httpTraceAll(api.GetBucketWebsiteHandler))))).Queries("website", "")
// GetBucketAccelerateHandler - this is a dummy call.
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketaccelerate", maxClients(gz(httpTraceAll(api.GetBucketAccelerateHandler))))).Queries("accelerate", "")
// GetBucketRequestPaymentHandler - this is a dummy call.
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketrequestpayment", maxClients(gz(httpTraceAll(api.GetBucketRequestPaymentHandler))))).Queries("requestPayment", "")
// GetBucketLoggingHandler - this is a dummy call.
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketlogging", maxClients(gz(httpTraceAll(api.GetBucketLoggingHandler))))).Queries("logging", "")
// GetBucketTaggingHandler
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbuckettagging", maxClients(gz(httpTraceAll(api.GetBucketTaggingHandler))))).Queries("tagging", "")
// DeleteBucketWebsiteHandler
router.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketwebsite", maxClients(gz(httpTraceAll(api.DeleteBucketWebsiteHandler))))).Queries("website", "")
// DeleteBucketTaggingHandler
router.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebuckettagging", maxClients(gz(httpTraceAll(api.DeleteBucketTaggingHandler))))).Queries("tagging", "")
// ListMultipartUploads
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listmultipartuploads", maxClients(gz(httpTraceAll(api.ListMultipartUploadsHandler))))).Queries("uploads", "")
// ListObjectsV2M
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectsv2M", maxClients(gz(httpTraceAll(api.ListObjectsV2MHandler))))).Queries("list-type", "2", "metadata", "true")
// ListObjectsV2
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectsv2", maxClients(gz(httpTraceAll(api.ListObjectsV2Handler))))).Queries("list-type", "2")
// ListObjectVersions
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectversions", maxClients(gz(httpTraceAll(api.ListObjectVersionsHandler))))).Queries("versions", "")
// GetBucketPolicyStatus
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getpolicystatus", maxClients(gz(httpTraceAll(api.GetBucketPolicyStatusHandler))))).Queries("policyStatus", "")
// PutBucketLifecycle
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketlifecycle", maxClients(gz(httpTraceAll(api.PutBucketLifecycleHandler))))).Queries("lifecycle", "")
// PutBucketReplicationConfig
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketreplicationconfiguration", maxClients(gz(httpTraceAll(api.PutBucketReplicationConfigHandler))))).Queries("replication", "")
// PutBucketEncryption
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketencryption", maxClients(gz(httpTraceAll(api.PutBucketEncryptionHandler))))).Queries("encryption", "")
// PutBucketPolicy
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketpolicy", maxClients(gz(httpTraceAll(api.PutBucketPolicyHandler))))).Queries("policy", "")
// PutBucketObjectLockConfig
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketobjectlockconfig", maxClients(gz(httpTraceAll(api.PutBucketObjectLockConfigHandler))))).Queries("object-lock", "")
// PutBucketTaggingHandler
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbuckettagging", maxClients(gz(httpTraceAll(api.PutBucketTaggingHandler))))).Queries("tagging", "")
// PutBucketVersioning
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketversioning", maxClients(gz(httpTraceAll(api.PutBucketVersioningHandler))))).Queries("versioning", "")
// PutBucketNotification
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketnotification", maxClients(gz(httpTraceAll(api.PutBucketNotificationHandler))))).Queries("notification", "")
// ResetBucketReplicationState - MinIO extension API
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("resetbucketreplicationstate", maxClients(gz(httpTraceAll(api.ResetBucketReplicationStateHandler))))).Queries("replication-reset", "")
// PutBucket
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucket", maxClients(gz(httpTraceAll(api.PutBucketHandler)))))
// HeadBucket
router.Methods(http.MethodHead).HandlerFunc(
collectAPIStats("headbucket", maxClients(gz(httpTraceAll(api.HeadBucketHandler)))))
// PostPolicy
router.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(
collectAPIStats("postpolicybucket", maxClients(gz(httpTraceHdrs(api.PostPolicyBucketHandler)))))
// DeleteMultipleObjects
router.Methods(http.MethodPost).HandlerFunc(
collectAPIStats("deletemultipleobjects", maxClients(gz(httpTraceAll(api.DeleteMultipleObjectsHandler))))).Queries("delete", "")
// DeleteBucketPolicy
router.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketpolicy", maxClients(gz(httpTraceAll(api.DeleteBucketPolicyHandler))))).Queries("policy", "")
// DeleteBucketReplication
router.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketreplicationconfiguration", maxClients(gz(httpTraceAll(api.DeleteBucketReplicationConfigHandler))))).Queries("replication", "")
// DeleteBucketLifecycle
router.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketlifecycle", maxClients(gz(httpTraceAll(api.DeleteBucketLifecycleHandler))))).Queries("lifecycle", "")
// DeleteBucketEncryption
router.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketencryption", maxClients(gz(httpTraceAll(api.DeleteBucketEncryptionHandler))))).Queries("encryption", "")
// DeleteBucket
router.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucket", maxClients(gz(httpTraceAll(api.DeleteBucketHandler)))))
// MinIO extension API for replication.
//
// GetBucketReplicationMetrics
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketreplicationmetrics", maxClients(gz(httpTraceAll(api.GetBucketReplicationMetricsHandler))))).Queries("replication-metrics", "")
// Register rejected bucket APIs
for _, r := range rejectedBucketAPIs {
router.Methods(r.methods...).
HandlerFunc(collectAPIStats(r.api, httpTraceAll(notImplementedHandler))).
Queries(r.queries...)
}
// S3 ListObjectsV1 (Legacy)
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectsv1", maxClients(gz(httpTraceAll(api.ListObjectsV1Handler)))))
}
/// Root operation
// ListenNotification
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
collectAPIStats("listennotification", maxClients(gz(httpTraceAll(api.ListenNotificationHandler))))).Queries("events", "{events:.*}")
// ListBuckets
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
collectAPIStats("listbuckets", maxClients(gz(httpTraceAll(api.ListBucketsHandler)))))
// S3 browser with signature v4 adds '//' for ListBuckets request, so rather
// than failing with UnknownAPIRequest we simply handle it for now.
apiRouter.Methods(http.MethodGet).Path(SlashSeparator + SlashSeparator).HandlerFunc(
collectAPIStats("listbuckets", maxClients(gz(httpTraceAll(api.ListBucketsHandler)))))
// If none of the routes match add default error handler routes
apiRouter.NotFoundHandler = collectAPIStats("notfound", httpTraceAll(errorResponseHandler))
apiRouter.MethodNotAllowedHandler = collectAPIStats("methodnotallowed", httpTraceAll(methodNotAllowedHandler("S3")))
}
// corsHandler handler for CORS (Cross Origin Resource Sharing)
func corsHandler(handler http.Handler) http.Handler {
commonS3Headers := []string{
xhttp.Date,
xhttp.ETag,
xhttp.ServerInfo,
xhttp.Connection,
xhttp.AcceptRanges,
xhttp.ContentRange,
xhttp.ContentEncoding,
xhttp.ContentLength,
xhttp.ContentType,
xhttp.ContentDisposition,
xhttp.LastModified,
xhttp.ContentLanguage,
xhttp.CacheControl,
xhttp.RetryAfter,
xhttp.AmzBucketRegion,
xhttp.Expires,
"X-Amz*",
"x-amz*",
"*",
}
return cors.New(cors.Options{
AllowOriginFunc: func(origin string) bool {
for _, allowedOrigin := range globalAPIConfig.getCorsAllowOrigins() {
if wildcard.MatchSimple(allowedOrigin, origin) {
return true
}
}
return false
},
AllowedMethods: []string{
http.MethodGet,
http.MethodPut,
http.MethodHead,
http.MethodPost,
http.MethodDelete,
http.MethodOptions,
http.MethodPatch,
},
AllowedHeaders: commonS3Headers,
ExposedHeaders: commonS3Headers,
AllowCredentials: true,
}).Handler(handler)
}
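
// exampleCORSWiring is an illustrative sketch, not part of the original file:
// it shows how corsHandler is meant to wrap the top-level router so that
// browser preflight (OPTIONS) requests are answered before S3 routing runs.
// Origins are checked with simple '*' wildcards against the configured
// allow-list; the sample allow-list entry below is an assumption.
func exampleCORSWiring(router http.Handler) http.Handler {
	// e.g. an allow-list entry of "https://*.example.com" admits
	// https://app.example.com but rejects https://example.org.
	return corsHandler(router)
}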
| cmd/api-router.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.0003212766314391047,
0.00017708422092255205,
0.00016551745648030192,
0.00017039719386957586,
0.000028236374419066124
] |
{
"id": 4,
"code_window": [
"\t\tselect {\n",
"\t\tcase listener.acceptCh <- result:\n",
"\t\t\t// Successfully written to acceptCh\n",
"\t\t\treturn true\n",
"\t\tcase <-listener.ctx.Done():\n",
"\t\t\t// As stop signal is received, close accepted connection.\n",
"\t\t\tif result.conn != nil {\n",
"\t\t\t\tresult.conn.Close()\n",
"\t\t\t}\n",
"\t\t\treturn false\n",
"\t\t}\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "internal/http/listener.go",
"type": "replace",
"edit_start_line_idx": 49
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"fmt"
"net"
"net/url"
"os"
"os/signal"
"strings"
"syscall"
"github.com/gorilla/mux"
"github.com/minio/cli"
"github.com/minio/madmin-go"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/certs"
"github.com/minio/pkg/env"
)
var (
gatewayCmd = cli.Command{
Name: "gateway",
Usage: "start object storage gateway",
Flags: append(ServerFlags, GlobalFlags...),
HideHelpCommand: true,
}
)
// GatewayLocker implements custom NewNSLock implementation
type GatewayLocker struct {
ObjectLayer
nsMutex *nsLockMap
}
// NewNSLock - implements gateway level locker
func (l *GatewayLocker) NewNSLock(bucket string, objects ...string) RWLocker {
return l.nsMutex.NewNSLock(nil, bucket, objects...)
}
// Walk - implements a common gateway-level Walker that walks all objects recursively under a prefix
func (l *GatewayLocker) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts ObjectOptions) error {
walk := func(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo) error {
go func() {
// Make sure the results channel is closed when we're done.
defer close(results)
var marker string
for {
// Set maxKeys to '0' to list the maximum possible number of objects in a single call.
loi, err := l.ObjectLayer.ListObjects(ctx, bucket, prefix, marker, "", 0)
if err != nil {
logger.LogIf(ctx, err)
return
}
marker = loi.NextMarker
for _, obj := range loi.Objects {
select {
case results <- obj:
case <-ctx.Done():
return
}
}
if !loi.IsTruncated {
break
}
}
}()
return nil
}
if err := l.ObjectLayer.Walk(ctx, bucket, prefix, results, opts); err != nil {
if _, ok := err.(NotImplemented); ok {
return walk(ctx, bucket, prefix, results)
}
return err
}
return nil
}
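
// exampleWalk is an illustrative sketch, not part of the original file: it
// shows the intended consumption pattern for Walk. The walker closes the
// results channel when listing completes, so ranging over it terminates.
func exampleWalk(ctx context.Context, l *GatewayLocker, bucket string) error {
	results := make(chan ObjectInfo)
	if err := l.Walk(ctx, bucket, "", results, ObjectOptions{}); err != nil {
		return err
	}
	for obj := range results {
		_ = obj // process each listed object here
	}
	return nil
}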
// NewGatewayLayerWithLocker - initialize gateway with locker.
func NewGatewayLayerWithLocker(gwLayer ObjectLayer) ObjectLayer {
return &GatewayLocker{ObjectLayer: gwLayer, nsMutex: newNSLock(false)}
}
// RegisterGatewayCommand registers a new command for gateway.
func RegisterGatewayCommand(cmd cli.Command) error {
cmd.Flags = append(append(cmd.Flags, ServerFlags...), GlobalFlags...)
gatewayCmd.Subcommands = append(gatewayCmd.Subcommands, cmd)
return nil
}
// ParseGatewayEndpoint - Return endpoint.
func ParseGatewayEndpoint(arg string) (endPoint string, secure bool, err error) {
schemeSpecified := len(strings.Split(arg, "://")) > 1
if !schemeSpecified {
// Default connection will be "secure".
arg = "https://" + arg
}
u, err := url.Parse(arg)
if err != nil {
return "", false, err
}
switch u.Scheme {
case "http":
return u.Host, false, nil
case "https":
return u.Host, true, nil
default:
return "", false, fmt.Errorf("Unrecognized scheme %s", u.Scheme)
}
}
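
// exampleParseEndpoint is an illustrative sketch, not part of the original
// file: with no scheme specified the endpoint defaults to HTTPS, so the
// returned secure flag is true.
func exampleParseEndpoint() {
	host, secure, err := ParseGatewayEndpoint("play.min.io:9000")
	_ = err             // nil for a well-formed endpoint
	fmt.Println(host)   // "play.min.io:9000"
	fmt.Println(secure) // true
}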
// ValidateGatewayArguments - Validate gateway arguments.
func ValidateGatewayArguments(serverAddr, endpointAddr string) error {
if err := CheckLocalServerAddr(serverAddr); err != nil {
return err
}
if endpointAddr != "" {
// Reject the endpoint if it points to the gateway handler itself.
sameTarget, err := sameLocalAddrs(endpointAddr, serverAddr)
if err != nil {
return err
}
if sameTarget {
return fmt.Errorf("endpoint points to the local gateway")
}
}
return nil
}
// StartGateway - handler for 'minio gateway <name>'.
func StartGateway(ctx *cli.Context, gw Gateway) {
signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
go handleSignals()
// This is only to uniquely identify each gateway deployment.
globalDeploymentID = env.Get("MINIO_GATEWAY_DEPLOYMENT_ID", mustGetUUID())
logger.SetDeploymentID(globalDeploymentID)
if gw == nil {
logger.FatalIf(errUnexpected, "Gateway implementation not initialized")
}
// Validate if we have access and secret keys set through the environment.
globalGatewayName = gw.Name()
gatewayName := gw.Name()
if ctx.Args().First() == "help" {
cli.ShowCommandHelpAndExit(ctx, gatewayName, 1)
}
// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(GlobalContext)
logger.AddTarget(globalConsoleSys)
// Handle common command args.
handleCommonCmdArgs(ctx)
// Check and load TLS certificates.
var err error
globalPublicCerts, globalTLSCerts, globalIsTLS, err = getTLSConfig()
logger.FatalIf(err, "Invalid TLS certificate file")
// Check and load Root CAs.
globalRootCAs, err = certs.GetRootCAs(globalCertsCADir.Get())
logger.FatalIf(err, "Failed to read root CAs (%v)", err)
// Add the global public crts as part of global root CAs
for _, publicCrt := range globalPublicCerts {
globalRootCAs.AddCert(publicCrt)
}
// Register root CAs for remote ENVs
env.RegisterGlobalCAs(globalRootCAs)
// Initialize all help
initHelp()
// On macOS, if a process already listens on LOCALIPADDR:PORT, net.Listen() falls back
// to the IPv6 address, i.e. minio will start listening on the IPv6 address whereas another
// (non-)minio process is listening on IPv4 of the given port.
// To avoid this error situation we check for port availability.
logger.FatalIf(checkPortAvailability(globalMinioHost, globalMinioPort), "Unable to start the gateway")
// Handle gateway specific env
gatewayHandleEnvVars()
// Set system resources to maximum.
setMaxResources()
// Set when gateway is enabled
globalIsGateway = true
// TODO: We need to move this code with globalConfigSys.Init()
// for now keep it here so that the "s3" gateway layer initializes
// itself properly when KMS is set.
// Initialize server config.
srvCfg := newServerConfig()
// Override any values from ENVs.
lookupConfigs(srvCfg, nil)
// hold the mutex lock before a new config is assigned.
globalServerConfigMu.Lock()
globalServerConfig = srvCfg
globalServerConfigMu.Unlock()
// Initialize router. `SkipClean(true)` stops gorilla/mux from
// normalizing URL path minio/minio#3256
// avoid URL path encoding minio/minio#8950
router := mux.NewRouter().SkipClean(true).UseEncodedPath()
// Enable STS router if etcd is enabled.
registerSTSRouter(router)
// Enable IAM admin APIs if etcd is enabled, if not just enable basic
// operations such as profiling, server info etc.
registerAdminRouter(router, false)
// Add healthcheck router
registerHealthCheckRouter(router)
// Add server metrics router
registerMetricsRouter(router)
// Add API router.
registerAPIRouter(router)
// Use all the middlewares
router.Use(globalHandlers...)
var getCert certs.GetCertificateFunc
if globalTLSCerts != nil {
getCert = globalTLSCerts.GetCertificate
}
httpServer := xhttp.NewServer([]string{globalMinioAddr},
criticalErrorHandler{corsHandler(router)}, getCert)
httpServer.BaseContext = func(listener net.Listener) context.Context {
return GlobalContext
}
go func() {
globalHTTPServerErrorCh <- httpServer.Start(GlobalContext)
}()
globalObjLayerMutex.Lock()
globalHTTPServer = httpServer
globalObjLayerMutex.Unlock()
newObject, err := gw.NewGatewayLayer(madmin.Credentials{
AccessKey: globalActiveCred.AccessKey,
SecretKey: globalActiveCred.SecretKey,
})
if err != nil {
logger.FatalIf(err, "Unable to initialize gateway backend")
}
newObject = NewGatewayLayerWithLocker(newObject)
// Calls all New() for all sub-systems.
newAllSubsystems()
// Once endpoints are finalized, initialize the new object api in safe mode.
globalObjLayerMutex.Lock()
globalObjectAPI = newObject
globalObjLayerMutex.Unlock()
if gatewayName == NASBackendGateway {
buckets, err := newObject.ListBuckets(GlobalContext)
if err != nil {
logger.Fatal(err, "Unable to list buckets")
}
logger.FatalIf(globalNotificationSys.Init(GlobalContext, buckets, newObject), "Unable to initialize notification system")
}
go globalIAMSys.Init(GlobalContext, newObject)
if globalCacheConfig.Enabled {
// initialize the new disk cache objects.
var cacheAPI CacheObjectLayer
cacheAPI, err = newServerCacheObjects(GlobalContext, globalCacheConfig)
logger.FatalIf(err, "Unable to initialize disk caching")
globalObjLayerMutex.Lock()
globalCacheObjectAPI = cacheAPI
globalObjLayerMutex.Unlock()
}
// Populate existing buckets to the etcd backend
if globalDNSConfig != nil {
buckets, err := newObject.ListBuckets(GlobalContext)
if err != nil {
logger.Fatal(err, "Unable to list buckets")
}
initFederatorBackend(buckets, newObject)
}
// Verify if object layer supports
// - encryption
// - compression
verifyObjectLayerFeatures("gateway "+gatewayName, newObject)
// Prints the formatted startup message once object layer is initialized.
if !globalCLIContext.Quiet && !globalInplaceUpdateDisabled {
// Check update mode.
checkUpdate(globalMinioModeGatewayPrefix + gatewayName)
}
if !globalCLIContext.Quiet {
// Print gateway startup message.
printGatewayStartupMessage(getAPIEndpoints(), gatewayName)
}
if globalBrowserEnabled {
globalConsoleSrv, err = initConsoleServer()
if err != nil {
logger.FatalIf(err, "Unable to initialize console service")
}
go func() {
logger.FatalIf(globalConsoleSrv.Serve(), "Unable to initialize console server")
}()
}
<-globalOSSignalCh
}
| cmd/gateway-main.go | 1 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.0007186017464846373,
0.00020640117872972041,
0.0001590556203154847,
0.00017032717005349696,
0.0001170873292721808
] |
{
"id": 4,
"code_window": [
"\t\tselect {\n",
"\t\tcase listener.acceptCh <- result:\n",
"\t\t\t// Successfully written to acceptCh\n",
"\t\t\treturn true\n",
"\t\tcase <-listener.ctx.Done():\n",
"\t\t\t// As stop signal is received, close accepted connection.\n",
"\t\t\tif result.conn != nil {\n",
"\t\t\t\tresult.conn.Close()\n",
"\t\t\t}\n",
"\t\t\treturn false\n",
"\t\t}\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "internal/http/listener.go",
"type": "replace",
"edit_start_line_idx": 49
} | //go:build !linux && !netbsd && !freebsd && !darwin
// +build !linux,!netbsd,!freebsd,!darwin
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package disk
import (
"os"
)
// OpenBSD, Windows, and illumos do not support O_DIRECT.
// On Windows there is no documentation on disabling O_DIRECT.
// For these systems we do not attempt to build the 'directio' dependency since
// the O_DIRECT symbol may not be exposed resulting in a failed build.
//
//
// reasons. Note that ZFS is effectively the default filesystem on illumos
// systems.
//
// One benefit of using DirectIO on Linux is that the page cache will not be
// polluted with single-access data. The ZFS read cache (ARC) is scan-resistant
// so there is no risk of polluting the entire cache with data accessed once.
// Another goal of DirectIO is to minimize the mutation of data by the kernel
// before issuing IO to underlying devices. ZFS users often enable features like
// compression and checksumming which currently necessitates mutating data in
// the kernel.
//
// DirectIO semantics for a filesystem like ZFS would be quite different than
// the semantics on filesystems like XFS, and these semantics are not
// implemented at this time.
// For more information on why typical DirectIO semantics do not apply to ZFS
// see this ZFS-on-Linux commit message:
// https://github.com/openzfs/zfs/commit/a584ef26053065f486d46a7335bea222cb03eeea
// OpenFileDirectIO wrapper around os.OpenFile nothing special
func OpenFileDirectIO(filePath string, flag int, perm os.FileMode) (*os.File, error) {
return os.OpenFile(filePath, flag, perm)
}
// DisableDirectIO is a no-op
func DisableDirectIO(f *os.File) error {
return nil
}
// AlignedBlock simply returns an unaligned buffer
// for systems that do not support DirectIO.
func AlignedBlock(BlockSize int) []byte {
return make([]byte, BlockSize)
}
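
// exampleOpen is an illustrative sketch, not part of the original file: call
// sites stay identical across platforms, and on the systems covered by this
// build tag the DirectIO wrappers silently degrade to plain os.OpenFile and
// unaligned buffers.
func exampleOpen(path string) (*os.File, []byte, error) {
	f, err := OpenFileDirectIO(path, os.O_RDONLY, 0o644)
	if err != nil {
		return nil, nil, err
	}
	// 4096 is an assumed block size, for illustration only.
	return f, AlignedBlock(4096), nil
}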
| internal/disk/directio_unsupported.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.00017619271238800138,
0.00016758769925218076,
0.00016136275371536613,
0.0001654716907069087,
0.000005690802026947495
] |
{
"id": 4,
"code_window": [
"\t\tselect {\n",
"\t\tcase listener.acceptCh <- result:\n",
"\t\t\t// Successfully written to acceptCh\n",
"\t\t\treturn true\n",
"\t\tcase <-listener.ctx.Done():\n",
"\t\t\t// As stop signal is received, close accepted connection.\n",
"\t\t\tif result.conn != nil {\n",
"\t\t\t\tresult.conn.Close()\n",
"\t\t\t}\n",
"\t\t\treturn false\n",
"\t\t}\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "internal/http/listener.go",
"type": "replace",
"edit_start_line_idx": 49
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"encoding/xml"
"net/http"
"net/http/httptest"
"testing"
"github.com/minio/minio/internal/auth"
)
// Test S3 Bucket lifecycle APIs with wrong credentials
func TestBucketLifecycleWrongCredentials(t *testing.T) {
ExecObjectLayerAPITest(t, testBucketLifecycleHandlersWrongCredentials, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"})
}
// Test for authentication
func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
// test cases with sample input and expected output.
testCases := []struct {
method string
bucketName string
accessKey string
secretKey string
// Sent body
body []byte
// Expected response
expectedRespStatus int
lifecycleResponse []byte
errorResponse APIErrorResponse
shouldPass bool
}{
// GET empty credentials
{
method: http.MethodGet, bucketName: bucketName,
accessKey: "",
secretKey: "",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "AccessDenied",
Message: "Access Denied.",
},
shouldPass: false,
},
// GET wrong credentials
{
method: http.MethodGet, bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidAccessKeyId",
Message: "The Access Key Id you provided does not exist in our records.",
},
shouldPass: false,
},
// PUT empty credentials
{
method: http.MethodPut,
bucketName: bucketName,
accessKey: "",
secretKey: "",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "AccessDenied",
Message: "Access Denied.",
},
shouldPass: false,
},
// PUT wrong credentials
{
method: http.MethodPut,
bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidAccessKeyId",
Message: "The Access Key Id you provided does not exist in our records.",
},
shouldPass: false,
},
// DELETE empty credentials
{
method: http.MethodDelete,
bucketName: bucketName,
accessKey: "",
secretKey: "",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "AccessDenied",
Message: "Access Denied.",
},
shouldPass: false,
},
// DELETE wrong credentials
{
method: http.MethodDelete,
bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidAccessKeyId",
Message: "The Access Key Id you provided does not exist in our records.",
},
shouldPass: false,
},
}
testBucketLifecycle(obj, instanceType, bucketName, apiRouter, t, testCases)
}
// Test S3 Bucket lifecycle APIs
func TestBucketLifecycle(t *testing.T) {
ExecObjectLayerAPITest(t, testBucketLifecycleHandlers, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"})
}
// Simple tests of bucket lifecycle: PUT, GET, DELETE.
// Tests are related and the order is important.
func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
creds auth.Credentials, t *testing.T) {
// test cases with sample input and expected output.
testCases := []struct {
method string
bucketName string
accessKey string
secretKey string
// Sent body
body []byte
// Expected response
expectedRespStatus int
lifecycleResponse []byte
errorResponse APIErrorResponse
shouldPass bool
}{
// Test case - 1.
// Filter contains more than (Prefix,Tag,And) rule
{
method: http.MethodPut,
bucketName: bucketName,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
body: []byte(`<LifecycleConfiguration><Rule><ID>id</ID><Filter><Prefix>logs/</Prefix><Tag><Key>Key1</Key><Value>Value1</Value></Tag></Filter><Status>Enabled</Status><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`),
expectedRespStatus: http.StatusBadRequest,
lifecycleResponse: []byte(``),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidRequest",
Message: "Filter must have exactly one of Prefix, Tag, or And specified",
},
shouldPass: false,
},
// Date is in the wrong format
{
method: http.MethodPut,
bucketName: bucketName,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
body: []byte(`<LifecycleConfiguration><Rule><ID>id</ID><Filter><Prefix>logs/</Prefix><Tag><Key>Key1</Key><Value>Value1</Value></Tag></Filter><Status>Enabled</Status><Expiration><Date>365</Date></Expiration></Rule></LifecycleConfiguration>`),
expectedRespStatus: http.StatusBadRequest,
lifecycleResponse: []byte(``),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidRequest",
Message: "Date must be provided in ISO 8601 format",
},
shouldPass: false,
},
{
method: http.MethodPut,
bucketName: bucketName,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
body: []byte(`<?xml version="1.0" encoding="UTF-8"?><LifecycleConfiguration><Rule><ID>id</ID><Filter><Prefix>logs/</Prefix></Filter><Status>Enabled</Status><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`),
expectedRespStatus: http.StatusOK,
lifecycleResponse: []byte(``),
errorResponse: APIErrorResponse{},
shouldPass: true,
},
{
method: http.MethodGet,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
bucketName: bucketName,
body: []byte(``),
expectedRespStatus: http.StatusOK,
lifecycleResponse: []byte(`<LifecycleConfiguration><Rule><ID>id</ID><Status>Enabled</Status><Filter><Prefix>logs/</Prefix></Filter><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`),
errorResponse: APIErrorResponse{},
shouldPass: true,
},
{
method: http.MethodDelete,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
bucketName: bucketName,
body: []byte(``),
expectedRespStatus: http.StatusNoContent,
lifecycleResponse: []byte(``),
errorResponse: APIErrorResponse{},
shouldPass: true,
},
{
method: http.MethodGet,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
bucketName: bucketName,
body: []byte(``),
expectedRespStatus: http.StatusNotFound,
lifecycleResponse: []byte(``),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "NoSuchLifecycleConfiguration",
Message: "The lifecycle configuration does not exist",
},
shouldPass: false,
},
}
testBucketLifecycle(obj, instanceType, bucketName, apiRouter, t, testCases)
}
// testBucketLifecycle is a generic testing of lifecycle requests
func testBucketLifecycle(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
t *testing.T, testCases []struct {
method string
bucketName string
accessKey string
secretKey string
body []byte
expectedRespStatus int
lifecycleResponse []byte
errorResponse APIErrorResponse
shouldPass bool
}) {
for i, testCase := range testCases {
// Initialize httptest Recorder; it records any mutations to the response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request
req, err := newTestSignedRequestV4(testCase.method, getBucketLifecycleURL("", testCase.bucketName),
int64(len(testCase.body)), bytes.NewReader(testCase.body), testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for GetBucketLocationHandler: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
if testCase.shouldPass && !bytes.Equal(testCase.lifecycleResponse, rec.Body.Bytes()) {
t.Errorf("Test %d: %s: Expected the response to be `%s`, but instead found `%s`", i+1, instanceType, string(testCase.lifecycleResponse), rec.Body.String())
}
errorResponse := APIErrorResponse{}
err = xml.Unmarshal(rec.Body.Bytes(), &errorResponse)
if err != nil && !testCase.shouldPass {
t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, rec.Body.String())
}
if errorResponse.Resource != testCase.errorResponse.Resource {
t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource)
}
if errorResponse.Message != testCase.errorResponse.Message {
t.Errorf("Test %d: %s: Expected the error message to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Message, errorResponse.Message)
}
if errorResponse.Code != testCase.errorResponse.Code {
t.Errorf("Test %d: %s: Expected the error code to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Code, errorResponse.Code)
}
}
}
| cmd/bucket-lifecycle-handlers_test.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.00017772412684280425,
0.000171749503351748,
0.0001644792646402493,
0.00017162472067866474,
0.0000031001504794403445
] |
{
"id": 4,
"code_window": [
"\t\tselect {\n",
"\t\tcase listener.acceptCh <- result:\n",
"\t\t\t// Successfully written to acceptCh\n",
"\t\t\treturn true\n",
"\t\tcase <-listener.ctx.Done():\n",
"\t\t\t// As stop signal is received, close accepted connection.\n",
"\t\t\tif result.conn != nil {\n",
"\t\t\t\tresult.conn.Close()\n",
"\t\t\t}\n",
"\t\t\treturn false\n",
"\t\t}\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "internal/http/listener.go",
"type": "replace",
"edit_start_line_idx": 49
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"testing"
"github.com/minio/minio/internal/bucket/replication"
)
var replicatedInfosTests = []struct {
name string
tgtInfos []replicatedTargetInfo
expectedCompletedSize int64
expectedReplicationStatusInternal string
expectedReplicationStatus replication.StatusType
expectedOpType replication.Type
expectedAction replicationAction
}{
{ //1. empty tgtInfos slice
name: "no replicated targets",
tgtInfos: []replicatedTargetInfo{},
expectedCompletedSize: 0,
expectedReplicationStatusInternal: "",
expectedReplicationStatus: replication.StatusType(""),
expectedOpType: replication.UnsetReplicationType,
expectedAction: replicateNone,
},
{ //2. replication completed to single target
name: "replication completed to single target",
tgtInfos: []replicatedTargetInfo{
{
Arn: "arn1",
Size: 249,
PrevReplicationStatus: replication.Pending,
ReplicationStatus: replication.Completed,
OpType: replication.ObjectReplicationType,
ReplicationAction: replicateAll,
},
},
expectedCompletedSize: 249,
expectedReplicationStatusInternal: "arn1=COMPLETED;",
expectedReplicationStatus: replication.Completed,
expectedOpType: replication.ObjectReplicationType,
expectedAction: replicateAll,
},
{ //3. replication completed to single target; failed to another
name: "replication completed to single target",
tgtInfos: []replicatedTargetInfo{
{
Arn: "arn1",
Size: 249,
PrevReplicationStatus: replication.Pending,
ReplicationStatus: replication.Completed,
OpType: replication.ObjectReplicationType,
ReplicationAction: replicateAll,
},
{
Arn: "arn2",
Size: 249,
PrevReplicationStatus: replication.Pending,
ReplicationStatus: replication.Failed,
OpType: replication.ObjectReplicationType,
ReplicationAction: replicateAll,
}},
expectedCompletedSize: 249,
expectedReplicationStatusInternal: "arn1=COMPLETED;arn2=FAILED;",
expectedReplicationStatus: replication.Failed,
expectedOpType: replication.ObjectReplicationType,
expectedAction: replicateAll,
},
{ //4. replication pending on one target; failed to another
name: "replication completed to single target",
tgtInfos: []replicatedTargetInfo{
{
Arn: "arn1",
Size: 249,
PrevReplicationStatus: replication.Pending,
ReplicationStatus: replication.Pending,
OpType: replication.ObjectReplicationType,
ReplicationAction: replicateAll,
},
{
Arn: "arn2",
Size: 249,
PrevReplicationStatus: replication.Pending,
ReplicationStatus: replication.Failed,
OpType: replication.ObjectReplicationType,
ReplicationAction: replicateAll,
}},
expectedCompletedSize: 0,
expectedReplicationStatusInternal: "arn1=PENDING;arn2=FAILED;",
expectedReplicationStatus: replication.Failed,
expectedOpType: replication.ObjectReplicationType,
expectedAction: replicateAll,
},
}
func TestReplicatedInfos(t *testing.T) {
for i, test := range replicatedInfosTests {
rinfos := replicatedInfos{
Targets: test.tgtInfos,
}
if actualSize := rinfos.CompletedSize(); actualSize != test.expectedCompletedSize {
t.Errorf("Test%d (%s): Size got %d , want %d", i+1, test.name, actualSize, test.expectedCompletedSize)
}
if repStatusStr := rinfos.ReplicationStatusInternal(); repStatusStr != test.expectedReplicationStatusInternal {
t.Errorf("Test%d (%s): Internal replication status got %s , want %s", i+1, test.name, repStatusStr, test.expectedReplicationStatusInternal)
}
if repStatus := rinfos.ReplicationStatus(); repStatus != test.expectedReplicationStatus {
t.Errorf("Test%d (%s): ReplicationStatus got %s , want %s", i+1, test.name, repStatus, test.expectedReplicationStatus)
}
if action := rinfos.Action(); action != test.expectedAction {
t.Errorf("Test%d (%s): Action got %s , want %s", i+1, test.name, action, test.expectedAction)
}
}
}
var parseReplicationDecisionTest = []struct {
name string
dsc string
expDsc ReplicateDecision
expErr error
}{
{ //1.
name: "empty string",
dsc: "",
expDsc: ReplicateDecision{
targetsMap: map[string]replicateTargetDecision{},
},
expErr: nil,
},
{ //2.
name: "replicate decision for one target",
dsc: "arn:minio:replication::id:bucket=true;false;arn:minio:replication::id:bucket;id",
expErr: nil,
expDsc: ReplicateDecision{
targetsMap: map[string]replicateTargetDecision{
"arn:minio:replication::id:bucket": newReplicateTargetDecision("arn:minio:replication::id:bucket", true, false),
},
},
},
{ //3.
name: "replicate decision for multiple targets",
dsc: "arn:minio:replication::id:bucket=true;false;arn:minio:replication::id:bucket;id,arn:minio:replication::id2:bucket=false;true;arn:minio:replication::id2:bucket;id2",
expErr: nil,
expDsc: ReplicateDecision{
targetsMap: map[string]replicateTargetDecision{
"arn:minio:replication::id:bucket": newReplicateTargetDecision("arn:minio:replication::id:bucket", true, false),
"arn:minio:replication::id2:bucket": newReplicateTargetDecision("arn:minio:replication::id2:bucket", false, true),
},
},
},
{ //4.
name: "invalid format replicate decision for one target",
dsc: "arn:minio:replication::id:bucket:true;false;arn:minio:replication::id:bucket;id",
expErr: errInvalidReplicateDecisionFormat,
expDsc: ReplicateDecision{
targetsMap: map[string]replicateTargetDecision{
"arn:minio:replication::id:bucket": newReplicateTargetDecision("arn:minio:replication::id:bucket", true, false),
},
},
},
}
func TestParseReplicateDecision(t *testing.T) {
for i, test := range parseReplicationDecisionTest {
//dsc, err := parseReplicateDecision(test.dsc)
dsc, err := parseReplicateDecision(test.expDsc.String())
if err != nil {
if test.expErr != err {
t.Errorf("Test%d (%s): Expected parse error got %t , want %t", i+1, test.name, err, test.expErr)
}
continue
}
if len(dsc.targetsMap) != len(test.expDsc.targetsMap) {
t.Errorf("Test%d (%s): Invalid number of entries in targetsMap got %d , want %d", i+1, test.name, len(dsc.targetsMap), len(test.expDsc.targetsMap))
}
for arn, tdsc := range dsc.targetsMap {
expDsc, ok := test.expDsc.targetsMap[arn]
if !ok || expDsc != tdsc {
t.Errorf("Test%d (%s): Invalid target replicate decision: got %+v, want %+v", i+1, test.name, tdsc, expDsc)
}
}
}
}
var replicationStateTest = []struct {
name string
rs ReplicationState
arn string
expStatus replication.StatusType
}{
{ //1. no replication status header
name: "no replicated targets",
rs: ReplicationState{},
expStatus: replication.StatusType(""),
},
{ //2. replication status for one target
name: "replication status for one target",
rs: ReplicationState{ReplicationStatusInternal: "arn1=PENDING;", Targets: map[string]replication.StatusType{"arn1": "PENDING"}},
expStatus: replication.Pending,
},
{ //3. replication status for one target - incorrect format
name: "replication status for one target",
rs: ReplicationState{ReplicationStatusInternal: "arn1=PENDING"},
expStatus: replication.StatusType(""),
},
{ //4. replication status for 3 targets, one of them failed
name: "replication status for 3 targets - one failed",
rs: ReplicationState{
ReplicationStatusInternal: "arn1=COMPLETED;arn2=COMPLETED;arn3=FAILED;",
Targets: map[string]replication.StatusType{"arn1": "COMPLETED", "arn2": "COMPLETED", "arn3": "FAILED"},
},
expStatus: replication.Failed,
},
{ //5. replication status for replica version
name: "replication status for replica version",
rs: ReplicationState{ReplicationStatusInternal: string(replication.Replica)},
expStatus: replication.Replica,
},
}
func TestCompositeReplicationStatus(t *testing.T) {
for i, test := range replicationStateTest {
if rstatus := test.rs.CompositeReplicationStatus(); rstatus != test.expStatus {
t.Errorf("Test%d (%s): Overall replication status got %s , want %s", i+1, test.name, rstatus, test.expStatus)
}
}
}
| cmd/bucket-replication-utils_test.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.00018023634038399905,
0.0001736230042297393,
0.00016359053552150726,
0.00017464904522057623,
0.0000042078454498550855
] |
{
"id": 5,
"code_window": [
"\t\t}\n",
"\t}\n",
"\n",
"\t// Closure to handle single connection.\n",
"\thandleConn := func(tcpConn *net.TCPConn) {\n",
"\t\ttcpConn.SetKeepAlive(true)\n",
"\t\tsend(acceptResult{tcpConn, nil})\n",
"\t}\n",
"\n",
"\t// Closure to handle TCPListener until done channel is closed.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [],
"file_path": "internal/http/listener.go",
"type": "replace",
"edit_start_line_idx": 57
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"log"
"math/rand"
"net"
"os"
"os/signal"
"strings"
"sync"
"syscall"
"time"
"github.com/minio/cli"
"github.com/minio/madmin-go"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/bucket/bandwidth"
"github.com/minio/minio/internal/color"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/fips"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/rest"
"github.com/minio/minio/internal/sync/errgroup"
"github.com/minio/pkg/certs"
"github.com/minio/pkg/env"
)
// ServerFlags - server command specific flags
var ServerFlags = []cli.Flag{
cli.StringFlag{
Name: "address",
Value: ":" + GlobalMinioDefaultPort,
Usage: "bind to a specific ADDRESS:PORT, ADDRESS can be an IP or hostname",
},
cli.StringFlag{
Name: "console-address",
Usage: "bind to a specific ADDRESS:PORT for embedded Console UI, ADDRESS can be an IP or hostname",
},
}
var serverCmd = cli.Command{
Name: "server",
Usage: "start object storage server",
Flags: append(ServerFlags, GlobalFlags...),
Action: serverMain,
CustomHelpTemplate: `NAME:
{{.HelpName}} - {{.Usage}}
USAGE:
{{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}DIR1 [DIR2..]
{{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}DIR{1...64}
{{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}DIR{1...64} DIR{65...128}
DIR:
DIR points to a directory on a filesystem. When you want to combine
multiple drives into a single large system, pass one directory per
filesystem separated by space. You may also use a '...' convention
to abbreviate the directory arguments. Remote directories in a
distributed setup are encoded as HTTP(s) URIs.
{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
EXAMPLES:
1. Start minio server on "/home/shared" directory.
{{.Prompt}} {{.HelpName}} /home/shared
2. Start single node server with 64 local drives "/mnt/data1" to "/mnt/data64".
{{.Prompt}} {{.HelpName}} /mnt/data{1...64}
3. Start distributed minio server on a 32 node setup with 32 drives each, run the following command on all the nodes
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_USER{{.AssignmentOperator}}minio
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_PASSWORD{{.AssignmentOperator}}miniostorage
{{.Prompt}} {{.HelpName}} http://node{1...32}.example.com/mnt/export{1...32}
4. Start distributed minio server in an expanded setup, run the following command on all the nodes
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_USER{{.AssignmentOperator}}minio
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_PASSWORD{{.AssignmentOperator}}miniostorage
{{.Prompt}} {{.HelpName}} http://node{1...16}.example.com/mnt/export{1...32} \
http://node{17...64}.example.com/mnt/export{1...64}
`,
}
func serverCmdArgs(ctx *cli.Context) []string {
v, _, _, err := env.LookupEnv(config.EnvArgs)
if err != nil {
logger.FatalIf(err, "Unable to validate passed arguments in %s:%s",
config.EnvArgs, os.Getenv(config.EnvArgs))
}
if v == "" {
// Fall back to older environment value MINIO_ENDPOINTS
v, _, _, err = env.LookupEnv(config.EnvEndpoints)
if err != nil {
logger.FatalIf(err, "Unable to validate passed arguments in %s:%s",
config.EnvEndpoints, os.Getenv(config.EnvEndpoints))
}
}
if v == "" {
if !ctx.Args().Present() || ctx.Args().First() == "help" {
cli.ShowCommandHelpAndExit(ctx, ctx.Command.Name, 1)
}
return ctx.Args()
}
return strings.Fields(v)
}
func serverHandleCmdArgs(ctx *cli.Context) {
// Handle common command args.
handleCommonCmdArgs(ctx)
logger.FatalIf(CheckLocalServerAddr(globalMinioAddr), "Unable to validate passed arguments")
var err error
var setupType SetupType
// Check and load TLS certificates.
globalPublicCerts, globalTLSCerts, globalIsTLS, err = getTLSConfig()
logger.FatalIf(err, "Unable to load the TLS configuration")
// Check and load Root CAs.
globalRootCAs, err = certs.GetRootCAs(globalCertsCADir.Get())
logger.FatalIf(err, "Failed to read root CAs (%v)", err)
// Add the global public crts as part of global root CAs
for _, publicCrt := range globalPublicCerts {
globalRootCAs.AddCert(publicCrt)
}
// Register root CAs for remote ENVs
env.RegisterGlobalCAs(globalRootCAs)
globalEndpoints, setupType, err = createServerEndpoints(globalMinioAddr, serverCmdArgs(ctx)...)
logger.FatalIf(err, "Invalid command line arguments")
globalLocalNodeName = GetLocalPeer(globalEndpoints, globalMinioHost, globalMinioPort)
globalRemoteEndpoints = make(map[string]Endpoint)
for _, z := range globalEndpoints {
for _, ep := range z.Endpoints {
if ep.IsLocal {
globalRemoteEndpoints[globalLocalNodeName] = ep
} else {
globalRemoteEndpoints[ep.Host] = ep
}
}
}
// allow transport to be HTTP/1.1 for proxying.
globalProxyTransport = newCustomHTTPProxyTransport(&tls.Config{
RootCAs: globalRootCAs,
CipherSuites: fips.CipherSuitesTLS(),
CurvePreferences: fips.EllipticCurvesTLS(),
}, rest.DefaultTimeout)()
globalProxyEndpoints = GetProxyEndpoints(globalEndpoints)
globalInternodeTransport = newInternodeHTTPTransport(&tls.Config{
RootCAs: globalRootCAs,
CipherSuites: fips.CipherSuitesTLS(),
CurvePreferences: fips.EllipticCurvesTLS(),
}, rest.DefaultTimeout)()
// On macOS, if a process already listens on LOCALIPADDR:PORT, net.Listen() falls back
// to the IPv6 address, i.e. minio will start listening on the IPv6 address whereas another
// (non-)minio process is listening on IPv4 of the given port.
// To avoid this error situation we check for port availability.
logger.FatalIf(checkPortAvailability(globalMinioHost, globalMinioPort), "Unable to start the server")
globalIsErasure = (setupType == ErasureSetupType)
globalIsDistErasure = (setupType == DistErasureSetupType)
if globalIsDistErasure {
globalIsErasure = true
}
}
func serverHandleEnvVars() {
// Handle common environment variables.
handleCommonEnvVars()
}
var globalHealStateLK sync.RWMutex
func newAllSubsystems() {
if globalIsErasure {
globalHealStateLK.Lock()
// New global heal state
globalAllHealState = newHealState(true)
globalBackgroundHealState = newHealState(false)
globalHealStateLK.Unlock()
}
// Create new notification system and initialize notification targets
globalNotificationSys = NewNotificationSys(globalEndpoints)
// Create new bucket metadata system.
if globalBucketMetadataSys == nil {
globalBucketMetadataSys = NewBucketMetadataSys()
} else {
// Reinitialize safely when testing.
globalBucketMetadataSys.Reset()
}
// Create the bucket bandwidth monitor
globalBucketMonitor = bandwidth.NewMonitor(GlobalContext, totalNodeCount())
// Create a new config system.
globalConfigSys = NewConfigSys()
// Create new IAM system.
globalIAMSys = NewIAMSys()
// Create new policy system.
globalPolicySys = NewPolicySys()
// Create new lifecycle system.
globalLifecycleSys = NewLifecycleSys()
// Create new bucket encryption subsystem
globalBucketSSEConfigSys = NewBucketSSEConfigSys()
// Create new bucket object lock subsystem
globalBucketObjectLockSys = NewBucketObjectLockSys()
// Create new bucket quota subsystem
globalBucketQuotaSys = NewBucketQuotaSys()
// Create new bucket versioning subsystem
if globalBucketVersioningSys == nil {
globalBucketVersioningSys = NewBucketVersioningSys()
} else {
globalBucketVersioningSys.Reset()
}
// Create new bucket replication subsystem
globalBucketTargetSys = NewBucketTargetSys()
// Create new ILM tier configuration subsystem
globalTierConfigMgr = NewTierConfigMgr()
}
func configRetriableErrors(err error) bool {
// Initializing sub-systems needs a retry mechanism for
// the following reasons:
// - Read quorum is lost just after the initialization
// of the object layer.
// - Write quorum not met when upgrading configuration
// version is needed, migration is needed etc.
rquorum := InsufficientReadQuorum{}
wquorum := InsufficientWriteQuorum{}
// One of these retriable errors shall be retried.
return errors.Is(err, errDiskNotFound) ||
errors.Is(err, errConfigNotFound) ||
errors.Is(err, context.DeadlineExceeded) ||
errors.Is(err, errErasureWriteQuorum) ||
errors.Is(err, errErasureReadQuorum) ||
errors.Is(err, io.ErrUnexpectedEOF) ||
errors.As(err, &rquorum) ||
errors.As(err, &wquorum) ||
isErrBucketNotFound(err) ||
errors.Is(err, os.ErrDeadlineExceeded)
}
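
// retrySubsystemInit is an illustrative sketch, not part of the original file:
// it shows the pattern configRetriableErrors is designed for — retry the
// operation while the failure is one of the transient quorum/config errors
// above, and give up on anything else. The one-second pause is an assumption.
func retrySubsystemInit(ctx context.Context, op func() error) error {
	for {
		err := op()
		if err == nil || !configRetriableErrors(err) {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Second):
			// transient failure, loop and try again
		}
	}
}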
func initServer(ctx context.Context, newObject ObjectLayer) error {
// Once the config is fully loaded, initialize the new object layer.
setObjectLayer(newObject)
// Hold the lock for the entire migration so that only one server
// migrates the entire config at a given time; this big transaction
// lock ensures that. The same applies to rotation of encrypted
// content.
txnLk := newObject.NewNSLock(minioMetaBucket, minioConfigPrefix+"/transaction.lock")
// **** WARNING ****
// Migrating to encrypted backend should happen before initialization of any
// sub-systems, make sure that we do not move the above codeblock elsewhere.
r := rand.New(rand.NewSource(time.Now().UnixNano()))
lockTimeout := newDynamicTimeout(5*time.Second, 3*time.Second)
for {
select {
case <-ctx.Done():
// Retry was canceled successfully.
return fmt.Errorf("Initializing sub-systems stopped gracefully %w", ctx.Err())
default:
}
// Let one of the servers acquire the lock; on timeout, sleep for a
// random duration (up to 5s of jitter below) and retry via this loop.
lkctx, err := txnLk.GetLock(ctx, lockTimeout)
if err != nil {
logger.Info("Waiting for all MinIO sub-systems to be initialized.. trying to acquire lock")
time.Sleep(time.Duration(r.Float64() * float64(5*time.Second)))
continue
}
// These messages are meant primarily for a distributed setup, so only log them there.
if globalIsDistErasure {
logger.Info("Waiting for all MinIO sub-systems to be initialized.. lock acquired")
}
// Migrate all backend configs to encrypted backend configs, optionally
// handles rotating keys for encryption, if there is any retriable failure
// that shall be retried if there is an error.
if err = handleEncryptedConfigBackend(newObject); err == nil {
// Upon success migrating the config, initialize all sub-systems
// if all sub-systems initialized successfully return right away
if err = initAllSubsystems(ctx, newObject); err == nil {
txnLk.Unlock(lkctx.Cancel)
// All successful return.
if globalIsDistErasure {
// These messages are meant primarily for a distributed setup, so only log them there.
logger.Info("All MinIO sub-systems initialized successfully")
}
return nil
}
}
// Unlock the transaction lock and allow other nodes to acquire the lock if possible.
txnLk.Unlock(lkctx.Cancel)
if configRetriableErrors(err) {
logger.Info("Waiting for all MinIO sub-systems to be initialized.. possible cause (%v)", err)
time.Sleep(time.Duration(r.Float64() * float64(5*time.Second)))
continue
}
// Any other unhandled return right here.
return fmt.Errorf("Unable to initialize sub-systems: %w", err)
}
}
func initAllSubsystems(ctx context.Context, newObject ObjectLayer) (err error) {
// %w is used by all error returns here to make sure
// we wrap the underlying error, make sure when you
// are modifying this code that you do so, if and when
// you want to add extra context to your error. This
// ensures top level retry works accordingly.
// List buckets to heal, and be re-used for loading configs.
buckets, err := newObject.ListBuckets(ctx)
if err != nil {
return fmt.Errorf("Unable to list buckets to heal: %w", err)
}
if globalIsErasure {
if len(buckets) > 0 {
if len(buckets) == 1 {
logger.Info(fmt.Sprintf("Verifying if %d bucket is consistent across drives...", len(buckets)))
} else {
logger.Info(fmt.Sprintf("Verifying if %d buckets are consistent across drives...", len(buckets)))
}
}
// Limit to no more than 50 concurrent buckets.
g := errgroup.WithNErrs(len(buckets)).WithConcurrency(50)
ctx, cancel := g.WithCancelOnError(ctx)
defer cancel()
for index := range buckets {
index := index
g.Go(func() error {
_, berr := newObject.HealBucket(ctx, buckets[index].Name, madmin.HealOpts{Recreate: true})
return berr
}, index)
}
if err := g.WaitErr(); err != nil {
return fmt.Errorf("Unable to list buckets to heal: %w", err)
}
}
// Initialize config system.
if err = globalConfigSys.Init(newObject); err != nil {
if configRetriableErrors(err) {
return fmt.Errorf("Unable to initialize config system: %w", err)
}
// Any other config errors we simply print a message and proceed forward.
logger.LogIf(ctx, fmt.Errorf("Unable to initialize config, some features may be missing %w", err))
}
// Populate existing buckets to the etcd backend
if globalDNSConfig != nil {
// Background this operation.
go initFederatorBackend(buckets, newObject)
}
// Initialize bucket metadata sub-system.
globalBucketMetadataSys.Init(ctx, buckets, newObject)
// Initialize bucket notification sub-system.
globalNotificationSys.Init(ctx, buckets, newObject)
// Initialize site replication manager.
globalSiteReplicationSys.Init(ctx, newObject)
if globalIsErasure {
// Initialize transition tier configuration manager
if err = globalTierConfigMgr.Init(ctx, newObject); err != nil {
return err
}
}
return nil
}
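
// exampleWrapCheck is an illustrative sketch, not part of the original file:
// wrapping with %w (as done throughout initAllSubsystems) is what lets the
// retry loop in initServer detect the root cause via errors.Is/errors.As.
func exampleWrapCheck() bool {
	err := fmt.Errorf("Unable to list buckets to heal: %w", context.DeadlineExceeded)
	return errors.Is(err, context.DeadlineExceeded) // true
}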
type nullWriter struct{}
func (lw nullWriter) Write(b []byte) (int, error) {
return len(b), nil
}
// serverMain handler called for 'minio server' command.
func serverMain(ctx *cli.Context) {
signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
go handleSignals()
setDefaultProfilerRates()
// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(GlobalContext)
logger.AddTarget(globalConsoleSys)
// Perform any self-tests
bitrotSelfTest()
erasureSelfTest()
compressSelfTest()
// Handle all server command args.
serverHandleCmdArgs(ctx)
// Handle all server environment vars.
serverHandleEnvVars()
// Set node name, only set for distributed setup.
globalConsoleSys.SetNodeName(globalLocalNodeName)
// Initialize all help
initHelp()
// Initialize all sub-systems
newAllSubsystems()
// In a distributed setup, error out if no certificates are found for HTTPS endpoints.
if globalIsDistErasure {
if globalEndpoints.HTTPS() && !globalIsTLS {
logger.Fatal(config.ErrNoCertsAndHTTPSEndpoints(nil), "Unable to start the server")
}
if !globalEndpoints.HTTPS() && globalIsTLS {
logger.Fatal(config.ErrCertsAndHTTPEndpoints(nil), "Unable to start the server")
}
}
if !globalCLIContext.Quiet && !globalInplaceUpdateDisabled {
// Check for new updates from dl.min.io.
checkUpdate(getMinioMode())
}
if !globalActiveCred.IsValid() && globalIsDistErasure {
globalActiveCred = auth.DefaultCredentials
}
// Set system resources to maximum.
setMaxResources()
// Configure server.
handler, err := configureServerHandler(globalEndpoints)
if err != nil {
logger.Fatal(config.ErrUnexpectedError(err), "Unable to configure one of server's RPC services")
}
var getCert certs.GetCertificateFunc
if globalTLSCerts != nil {
getCert = globalTLSCerts.GetCertificate
}
httpServer := xhttp.NewServer([]string{globalMinioAddr},
criticalErrorHandler{corsHandler(handler)}, getCert)
httpServer.BaseContext = func(listener net.Listener) context.Context {
return GlobalContext
}
// Turn off the random error logging done internally by Go's HTTP server
httpServer.ErrorLog = log.New(&nullWriter{}, "", 0)
go func() {
globalHTTPServerErrorCh <- httpServer.Start(GlobalContext)
}()
setHTTPServer(httpServer)
if globalIsDistErasure && globalEndpoints.FirstLocal() {
for {
// Additionally in distributed setup, validate the setup and configuration.
err := verifyServerSystemConfig(GlobalContext, globalEndpoints)
if err == nil || errors.Is(err, context.Canceled) {
break
}
logger.LogIf(GlobalContext, err, "Unable to initialize distributed setup, retrying.. after 5 seconds")
select {
case <-GlobalContext.Done():
return
case <-time.After(500 * time.Millisecond):
}
}
}
newObject, err := newObjectLayer(GlobalContext, globalEndpoints)
if err != nil {
logFatalErrs(err, Endpoint{}, true)
}
logger.SetDeploymentID(globalDeploymentID)
// Enable background operations for erasure coding
if globalIsErasure {
initAutoHeal(GlobalContext, newObject)
initHealMRF(GlobalContext, newObject)
}
initBackgroundExpiry(GlobalContext, newObject)
if err = initServer(GlobalContext, newObject); err != nil {
var cerr config.Err
// For any config error, we don't need to drop into safe-mode
// instead it's a user error and should be fixed by the user.
if errors.As(err, &cerr) {
logger.FatalIf(err, "Unable to initialize the server")
}
// If context was canceled
if errors.Is(err, context.Canceled) {
logger.FatalIf(err, "Server startup canceled upon user request")
}
logger.LogIf(GlobalContext, err)
}
// Initialize users credentials and policies in background right after config has initialized.
go globalIAMSys.Init(GlobalContext, newObject)
initDataScanner(GlobalContext, newObject)
if globalIsErasure { // to be done after config init
initBackgroundReplication(GlobalContext, newObject)
initBackgroundTransition(GlobalContext, newObject)
globalTierJournal, err = initTierDeletionJournal(GlobalContext)
if err != nil {
logger.FatalIf(err, "Unable to initialize remote tier pending deletes journal")
}
}
if globalCacheConfig.Enabled {
// initialize the new disk cache objects.
var cacheAPI CacheObjectLayer
cacheAPI, err = newServerCacheObjects(GlobalContext, globalCacheConfig)
logger.FatalIf(err, "Unable to initialize disk caching")
setCacheObjectLayer(cacheAPI)
}
// Prints the formatted startup message; if err is not nil it also prints additional information.
printStartupMessage(getAPIEndpoints(), err)
if globalActiveCred.Equal(auth.DefaultCredentials) {
msg := fmt.Sprintf("WARNING: Detected default credentials '%s', we recommend that you change these values with 'MINIO_ROOT_USER' and 'MINIO_ROOT_PASSWORD' environment variables", globalActiveCred)
logStartupMessage(color.RedBold(msg))
}
if !globalCLIContext.StrictS3Compat {
logStartupMessage(color.RedBold("WARNING: Strict AWS S3 compatible incoming PUT, POST content payload validation is turned off, caution is advised do not use in production"))
}
if globalBrowserEnabled {
globalConsoleSrv, err = initConsoleServer()
if err != nil {
logger.FatalIf(err, "Unable to initialize console service")
}
go func() {
logger.FatalIf(globalConsoleSrv.Serve(), "Unable to initialize console server")
}()
}
if serverDebugLog {
logger.Info("== DEBUG Mode enabled ==")
logger.Info("Currently set environment settings:")
for _, v := range os.Environ() {
logger.Info(v)
}
logger.Info("======")
}
<-globalOSSignalCh
}
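// Editorial sketch, not part of the original source: serverMain's shutdown flow
// is "register signals, run subsystems in goroutines, block on the signal
// channel". A minimal standalone analogue of that final blocking step, with a
// hypothetical function name:
func waitForShutdownExample() {
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, os.Interrupt, syscall.SIGTERM)
	<-quit // block here, just as serverMain blocks on globalOSSignalCh
}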
// Initialize object layer with the supplied disks, objectLayer is nil upon any error.
func newObjectLayer(ctx context.Context, endpointServerPools EndpointServerPools) (newObject ObjectLayer, err error) {
// For FS only, directly use the disk.
if endpointServerPools.NEndpoints() == 1 {
// Initialize new FS object layer.
return NewFSObjectLayer(endpointServerPools[0].Endpoints[0].Path)
}
return newErasureServerPools(ctx, endpointServerPools)
}
| cmd/server-main.go | 1 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.00032606502645649016,
0.00017435793415643275,
0.00016295317618642002,
0.00017088906315620989,
0.000020704212147393264
] |
{
"id": 5,
"code_window": [
"\t\t}\n",
"\t}\n",
"\n",
"\t// Closure to handle single connection.\n",
"\thandleConn := func(tcpConn *net.TCPConn) {\n",
"\t\ttcpConn.SetKeepAlive(true)\n",
"\t\tsend(acceptResult{tcpConn, nil})\n",
"\t}\n",
"\n",
"\t// Closure to handle TCPListener until done channel is closed.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [],
"file_path": "internal/http/listener.go",
"type": "replace",
"edit_start_line_idx": 57
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"encoding/xml"
"io/ioutil"
"net/http"
"net/http/httptest"
"strconv"
"testing"
"github.com/minio/minio/internal/auth"
)
// Wrapper for calling RemoveBucket HTTP handler tests for both Erasure multiple disks and single node setup.
func TestRemoveBucketHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testRemoveBucketHandler, []string{"RemoveBucket"})
}
func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
_, err := obj.PutObject(GlobalContext, bucketName, "test-object", mustGetPutObjReader(t, bytes.NewReader([]byte{}), int64(0), "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"), ObjectOptions{})
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Error uploading object: <ERROR> %v", err)
}
// initialize httptest Recorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for DELETE bucket.
req, err := newTestSignedRequestV4(http.MethodDelete, getBucketLocationURL("", bucketName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil {
t.Fatalf("Test %s: Failed to create HTTP request for RemoveBucketHandler: <ERROR> %v", instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
switch rec.Code {
case http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent:
t.Fatalf("Test %v: expected failure, but succeeded with %v", instanceType, rec.Code)
}
// Verify response of the V2 signed HTTP request.
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for DELETE bucket.
reqV2, err := newTestSignedRequestV2(http.MethodDelete, getBucketLocationURL("", bucketName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil {
t.Fatalf("Test %s: Failed to create HTTP request for RemoveBucketHandler: <ERROR> %v", instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(recV2, reqV2)
switch recV2.Code {
case http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent:
t.Fatalf("Test %v: expected failure, but succeeded with %v", instanceType, recV2.Code)
}
}
// Wrapper for calling GetBucketPolicy HTTP handler tests for both Erasure multiple disks and single node setup.
func TestGetBucketLocationHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testGetBucketLocationHandler, []string{"GetBucketLocation"})
}
func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
// test cases with sample input and expected output.
testCases := []struct {
bucketName string
accessKey string
secretKey string
// expected Response.
expectedRespStatus int
locationResponse []byte
errorResponse APIErrorResponse
shouldPass bool
}{
// Test case - 1.
// Tests for authenticated request and proper response.
{
bucketName: bucketName,
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusOK,
locationResponse: []byte(`<?xml version="1.0" encoding="UTF-8"?>
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/"></LocationConstraint>`),
errorResponse: APIErrorResponse{},
shouldPass: true,
},
// Test case - 2.
// Tests for signature mismatch error.
{
bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
locationResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidAccessKeyId",
Message: "The Access Key Id you provided does not exist in our records.",
},
shouldPass: false,
},
}
for i, testCase := range testCases {
// initialize httptest Recorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for Get bucket location.
req, err := newTestSignedRequestV4(http.MethodGet, getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for GetBucketLocationHandler: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
if !bytes.Equal(testCase.locationResponse, rec.Body.Bytes()) && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected the response to be `%s`, but instead found `%s`", i+1, instanceType, string(testCase.locationResponse), rec.Body.String())
}
errorResponse := APIErrorResponse{}
err = xml.Unmarshal(rec.Body.Bytes(), &errorResponse)
if err != nil && !testCase.shouldPass {
t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, rec.Body.String())
}
if errorResponse.Resource != testCase.errorResponse.Resource {
t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource)
}
if errorResponse.Message != testCase.errorResponse.Message {
t.Errorf("Test %d: %s: Expected the error message to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Message, errorResponse.Message)
}
if errorResponse.Code != testCase.errorResponse.Code {
t.Errorf("Test %d: %s: Expected the error code to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Code, errorResponse.Code)
}
// Verify response of the V2 signed HTTP request.
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for GET bucket location endpoint.
reqV2, err := newTestSignedRequestV2(http.MethodGet, getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for GetBucketLocationHandler: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(recV2, reqV2)
if recV2.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code)
}
errorResponse = APIErrorResponse{}
err = xml.Unmarshal(recV2.Body.Bytes(), &errorResponse)
if err != nil && !testCase.shouldPass {
t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, recV2.Body.String())
}
if errorResponse.Resource != testCase.errorResponse.Resource {
t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource)
}
if errorResponse.Message != testCase.errorResponse.Message {
t.Errorf("Test %d: %s: Expected the error message to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Message, errorResponse.Message)
}
if errorResponse.Code != testCase.errorResponse.Code {
t.Errorf("Test %d: %s: Expected the error code to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Code, errorResponse.Code)
}
}
// Test for Anonymous/unsigned http request.
// GetBucketLocationHandler doesn't support bucket policies, so setting them shouldn't make any difference.
anonReq, err := newTestRequest(http.MethodGet, getBucketLocationURL("", bucketName), 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request.", instanceType)
}
// ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
// sets the bucket policy using the policy statement generated from `getReadOnlyBucketStatement` so that the
// unsigned request goes through and it's validated again.
ExecObjectLayerAPIAnonTest(t, obj, "TestGetBucketLocationHandler", bucketName, "", instanceType, apiRouter, anonReq, getAnonReadOnlyBucketPolicy(bucketName))
// HTTP request for testing when `objectLayer` is set to `nil`.
// There is no need to use an existing bucket and valid input for creating the request
// since the `objectLayer==nil` check is performed before any other checks inside the handlers.
// The only aim is to generate an HTTP request in a way that the relevant/registered endpoint is invoked.
nilBucket := "dummy-bucket"
nilReq, err := newTestRequest(http.MethodGet, getBucketLocationURL("", nilBucket), 0, nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}
// Executes the object layer set to `nil` test.
// `ExecObjectLayerAPINilTest` manages the operation.
ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
}
// Wrapper for calling HeadBucket HTTP handler tests for both Erasure multiple disks and single node setup.
func TestHeadBucketHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testHeadBucketHandler, []string{"HeadBucket"})
}
func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
// test cases with sample input and expected output.
testCases := []struct {
bucketName string
accessKey string
secretKey string
// expected Response.
expectedRespStatus int
}{
// Test case - 1.
// Bucket exists.
{
bucketName: bucketName,
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusOK,
},
// Test case - 2.
// Non-existent bucket name.
{
bucketName: "2333",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusNotFound,
},
// Test case - 3.
// Testing for signature mismatch error.
// setting invalid access and secret keys.
{
bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
},
}
for i, testCase := range testCases {
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for HEAD bucket.
req, err := newTestSignedRequestV4(http.MethodHead, getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for HeadBucketHandler: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
// Verify response of the V2 signed HTTP request.
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for HEAD bucket endpoint.
reqV2, err := newTestSignedRequestV2(http.MethodHead, getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for HeadBucketHandler: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(recV2, reqV2)
if recV2.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code)
}
}
// Test for Anonymous/unsigned http request.
anonReq, err := newTestRequest(http.MethodHead, getHEADBucketURL("", bucketName), 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
instanceType, bucketName, err)
}
// ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
// sets the bucket policy using the policy statement generated from `getReadOnlyBucketStatement` so that the
// unsigned request goes through and it's validated again.
ExecObjectLayerAPIAnonTest(t, obj, "TestHeadBucketHandler", bucketName, "", instanceType, apiRouter, anonReq, getAnonReadOnlyBucketPolicy(bucketName))
// HTTP request for testing when `objectLayer` is set to `nil`.
// There is no need to use an existing bucket and valid input for creating the request
// since the `objectLayer==nil` check is performed before any other checks inside the handlers.
// The only aim is to generate an HTTP request in a way that the relevant/registered endpoint is invoked.
nilBucket := "dummy-bucket"
nilReq, err := newTestRequest(http.MethodHead, getHEADBucketURL("", nilBucket), 0, nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}
// execute the object layer set to `nil` test.
// `ExecObjectLayerAPINilTest` manages the operation.
ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
}
// Wrapper for calling TestListMultipartUploadsHandler tests for both Erasure multiple disks and single node setup.
func TestListMultipartUploadsHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testListMultipartUploadsHandler, []string{"ListMultipartUploads"})
}
// testListMultipartUploadsHandler - Tests validate listing of multipart uploads.
func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
// Collection of non-exhaustive ListMultipartUploads test cases, valid errors
// and success responses.
testCases := []struct {
// Inputs to ListMultipartUploads.
bucket string
prefix string
keyMarker string
uploadIDMarker string
delimiter string
maxUploads string
accessKey string
secretKey string
expectedRespStatus int
shouldPass bool
}{
// Test case - 1.
// Setting invalid bucket name.
{
bucket: ".test",
prefix: "",
keyMarker: "",
uploadIDMarker: "",
delimiter: "",
maxUploads: "0",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusNotFound,
shouldPass: false,
},
// Test case - 2.
// Setting a non-existent bucket.
{
bucket: "volatile-bucket-1",
prefix: "",
keyMarker: "",
uploadIDMarker: "",
delimiter: "",
maxUploads: "0",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusNotFound,
shouldPass: false,
},
// Test case -3.
// Delimiter unsupported, but response is empty.
{
bucket: bucketName,
prefix: "",
keyMarker: "",
uploadIDMarker: "",
delimiter: "-",
maxUploads: "0",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusOK,
shouldPass: true,
},
// Test case - 4.
// Setting Invalid prefix and marker combination.
{
bucket: bucketName,
prefix: "asia",
keyMarker: "europe-object",
uploadIDMarker: "",
delimiter: "",
maxUploads: "0",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusNotImplemented,
shouldPass: false,
},
// Test case - 5.
// Invalid upload id and marker combination.
{
bucket: bucketName,
prefix: "asia",
keyMarker: "asia/europe/",
uploadIDMarker: "abc",
delimiter: "",
maxUploads: "0",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusNotImplemented,
shouldPass: false,
},
// Test case - 6.
// Setting a negative value to max-uploads parameter, should result in http.StatusBadRequest.
{
bucket: bucketName,
prefix: "",
keyMarker: "",
uploadIDMarker: "",
delimiter: "",
maxUploads: "-1",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusBadRequest,
shouldPass: false,
},
// Test case - 7.
// Case with right set of parameters,
// should result in success 200 OK.
{
bucket: bucketName,
prefix: "",
keyMarker: "",
uploadIDMarker: "",
delimiter: SlashSeparator,
maxUploads: "100",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusOK,
shouldPass: true,
},
// Test case - 8.
// Good case without delimiter.
{
bucket: bucketName,
prefix: "",
keyMarker: "",
uploadIDMarker: "",
delimiter: "",
maxUploads: "100",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusOK,
shouldPass: true,
},
// Test case - 9.
// Setting Invalid AccessKey and SecretKey to induce and verify Signature Mismatch error.
{
bucket: bucketName,
prefix: "",
keyMarker: "",
uploadIDMarker: "",
delimiter: "",
maxUploads: "100",
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
shouldPass: true,
},
}
for i, testCase := range testCases {
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for List multipart uploads endpoint.
u := getListMultipartUploadsURLWithParams("", testCase.bucket, testCase.prefix, testCase.keyMarker, testCase.uploadIDMarker, testCase.delimiter, testCase.maxUploads)
req, gerr := newTestSignedRequestV4(http.MethodGet, u, 0, nil, testCase.accessKey, testCase.secretKey, nil)
if gerr != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", i+1, instanceType, gerr)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
// Verify response of the V2 signed HTTP request.
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for list multipart uploads endpoint.
// verify response for V2 signed HTTP request.
reqV2, err := newTestSignedRequestV2(http.MethodGet, u, 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(recV2, reqV2)
if recV2.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code)
}
}
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for List multipart uploads endpoint.
u := getListMultipartUploadsURLWithParams("", bucketName, "", "", "", "", "")
req, err := newTestSignedRequestV4(http.MethodGet, u, 0, nil, "", "", nil) // Generate an anonymous request.
if err != nil {
t.Fatalf("Test %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != http.StatusForbidden {
t.Errorf("Test %s: Expected the response status to be `http.StatusForbidden`, but instead found `%d`", instanceType, rec.Code)
}
url := getListMultipartUploadsURLWithParams("", testCases[6].bucket, testCases[6].prefix, testCases[6].keyMarker,
testCases[6].uploadIDMarker, testCases[6].delimiter, testCases[6].maxUploads)
// Test for Anonymous/unsigned http request.
anonReq, err := newTestRequest(http.MethodGet, url, 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
instanceType, bucketName, err)
}
// ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
// sets the bucket policy using the policy statement generated from `getWriteOnlyBucketStatement` so that the
// unsigned request goes through and it's validated again.
ExecObjectLayerAPIAnonTest(t, obj, "TestListMultipartUploadsHandler", bucketName, "", instanceType, apiRouter, anonReq, getAnonWriteOnlyBucketPolicy(bucketName))
// HTTP request for testing when `objectLayer` is set to `nil`.
// There is no need to use an existing bucket and valid input for creating the request
// since the `objectLayer==nil` check is performed before any other checks inside the handlers.
// The only aim is to generate an HTTP request in a way that the relevant/registered endpoint is invoked.
nilBucket := "dummy-bucket"
url = getListMultipartUploadsURLWithParams("", nilBucket, "dummy-prefix", testCases[6].keyMarker,
testCases[6].uploadIDMarker, testCases[6].delimiter, testCases[6].maxUploads)
nilReq, err := newTestRequest(http.MethodGet, url, 0, nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}
// execute the object layer set to `nil` test.
// `ExecObjectLayerAPINilTest` manages the operation.
ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
}
// Wrapper for calling TestListBucketsHandler tests for both Erasure multiple disks and single node setup.
func TestListBucketsHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testListBucketsHandler, []string{"ListBuckets"})
}
// testListBucketsHandler - Tests validate listing of buckets.
func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
testCases := []struct {
bucketName string
accessKey string
secretKey string
expectedRespStatus int
}{
// Test case - 1.
// Validate a good case request succeeds.
{
bucketName: bucketName,
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusOK,
},
// Test case - 2.
// Test case with invalid accessKey to produce and validate Signature MisMatch error.
{
bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
},
}
for i, testCase := range testCases {
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
req, lerr := newTestSignedRequestV4(http.MethodGet, getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if lerr != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for ListBucketsHandler: <ERROR> %v", i+1, instanceType, lerr)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
// Verify response of the V2 signed HTTP request.
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for list buckets endpoint.
// verify response for V2 signed HTTP request.
reqV2, err := newTestSignedRequestV2(http.MethodGet, getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for ListBucketsHandler: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(recV2, reqV2)
if recV2.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code)
}
}
// Test for Anonymous/unsigned http request.
// ListBucketsHandler doesn't support bucket policies, setting the policies shouldn't make a difference.
anonReq, err := newTestRequest(http.MethodGet, getListBucketURL(""), 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request.", instanceType)
}
// ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
// sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the
// unsigned request goes through and it's validated again.
ExecObjectLayerAPIAnonTest(t, obj, "ListBucketsHandler", "", "", instanceType, apiRouter, anonReq, getAnonWriteOnlyBucketPolicy("*"))
// HTTP request for testing when `objectLayer` is set to `nil`.
// There is no need to use an existing bucket and valid input for creating the request
// since the `objectLayer==nil` check is performed before any other checks inside the handlers.
// The only aim is to generate an HTTP request in a way that the relevant/registered endpoint is invoked.
nilReq, err := newTestRequest(http.MethodGet, getListBucketURL(""), 0, nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}
// execute the object layer set to `nil` test.
// `ExecObjectLayerAPINilTest` manages the operation.
ExecObjectLayerAPINilTest(t, "", "", instanceType, apiRouter, nilReq)
}
// Wrapper for calling DeleteMultipleObjects HTTP handler tests for both Erasure multiple disks and single node setup.
func TestAPIDeleteMultipleObjectsHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testAPIDeleteMultipleObjectsHandler, []string{"DeleteMultipleObjects"})
}
func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
var err error
contentBytes := []byte("hello")
sha256sum := ""
var objectNames []string
for i := 0; i < 10; i++ {
objectName := "test-object-" + strconv.Itoa(i)
// uploading the object.
_, err = obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)
}
// object used for the test.
objectNames = append(objectNames, objectName)
}
getObjectToDeleteList := func(objectNames []string) (objectList []ObjectToDelete) {
for _, objectName := range objectNames {
objectList = append(objectList, ObjectToDelete{
ObjectName: objectName,
})
}
return objectList
}
getDeleteErrorList := func(objects []ObjectToDelete) (deleteErrorList []DeleteError) {
for _, obj := range objects {
deleteErrorList = append(deleteErrorList, DeleteError{
Code: errorCodes[ErrAccessDenied].Code,
Message: errorCodes[ErrAccessDenied].Description,
Key: obj.ObjectName,
})
}
return deleteErrorList
}
requestList := []DeleteObjectsRequest{
{Quiet: false, Objects: getObjectToDeleteList(objectNames[:5])},
{Quiet: true, Objects: getObjectToDeleteList(objectNames[5:])},
}
// generate multi objects delete response.
successRequest0 := encodeResponse(requestList[0])
deletedObjects := make([]DeletedObject, len(requestList[0].Objects))
for i := range requestList[0].Objects {
deletedObjects[i] = DeletedObject{
ObjectName: requestList[0].Objects[i].ObjectName,
}
}
successResponse0 := generateMultiDeleteResponse(requestList[0].Quiet, deletedObjects, nil)
encodedSuccessResponse0 := encodeResponse(successResponse0)
successRequest1 := encodeResponse(requestList[1])
deletedObjects = make([]DeletedObject, len(requestList[1].Objects))
for i := range requestList[1].Objects {
deletedObjects[i] = DeletedObject{
ObjectName: requestList[1].Objects[i].ObjectName,
}
}
successResponse1 := generateMultiDeleteResponse(requestList[1].Quiet, deletedObjects, nil)
encodedSuccessResponse1 := encodeResponse(successResponse1)
// generate multi objects delete response for errors.
// errorRequest := encodeResponse(requestList[1])
errorResponse := generateMultiDeleteResponse(requestList[1].Quiet, deletedObjects, nil)
encodedErrorResponse := encodeResponse(errorResponse)
anonRequest := encodeResponse(requestList[0])
anonResponse := generateMultiDeleteResponse(requestList[0].Quiet, nil, getDeleteErrorList(requestList[0].Objects))
encodedAnonResponse := encodeResponse(anonResponse)
testCases := []struct {
bucket string
objects []byte
accessKey string
secretKey string
expectedContent []byte
expectedRespStatus int
}{
// Test case - 1.
// Delete objects with invalid access key.
{
bucket: bucketName,
objects: successRequest0,
accessKey: "Invalid-AccessID",
secretKey: credentials.SecretKey,
expectedContent: nil,
expectedRespStatus: http.StatusForbidden,
},
// Test case - 2.
// Delete valid objects with quiet flag off.
{
bucket: bucketName,
objects: successRequest0,
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedContent: encodedSuccessResponse0,
expectedRespStatus: http.StatusOK,
},
// Test case - 3.
// Delete valid objects with quiet flag on.
{
bucket: bucketName,
objects: successRequest1,
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedContent: encodedSuccessResponse1,
expectedRespStatus: http.StatusOK,
},
// Test case - 4.
// Delete previously deleted objects.
{
bucket: bucketName,
objects: successRequest1,
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedContent: encodedErrorResponse,
expectedRespStatus: http.StatusOK,
},
// Test case - 5.
// Anonymous user access denied response
// Currently anonymous users cannot delete multiple objects in MinIO server
{
bucket: bucketName,
objects: anonRequest,
accessKey: "",
secretKey: "",
expectedContent: encodedAnonResponse,
expectedRespStatus: http.StatusOK,
},
}
for i, testCase := range testCases {
var req *http.Request
var actualContent []byte
// Generate a signed or anonymous request based on the testCase
if testCase.accessKey != "" {
req, err = newTestSignedRequestV4(http.MethodPost, getDeleteMultipleObjectsURL("", bucketName),
int64(len(testCase.objects)), bytes.NewReader(testCase.objects), testCase.accessKey, testCase.secretKey, nil)
} else {
req, err = newTestRequest(http.MethodPost, getDeleteMultipleObjectsURL("", bucketName),
int64(len(testCase.objects)), bytes.NewReader(testCase.objects))
}
if err != nil {
t.Fatalf("Failed to create HTTP request for DeleteMultipleObjects: <ERROR> %v", err)
}
rec := httptest.NewRecorder()
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call ServeHTTP to execute the registered handler.
apiRouter.ServeHTTP(rec, req)
// Assert the response code with the expected status.
if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: MinIO %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
// read the response body.
actualContent, err = ioutil.ReadAll(rec.Body)
if err != nil {
t.Fatalf("Test %d : MinIO %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
}
// Verify whether the response content matches the expected value.
if testCase.expectedContent != nil && !bytes.Equal(testCase.expectedContent, actualContent) {
t.Log(string(testCase.expectedContent), string(actualContent))
t.Errorf("Test %d : MinIO %s: Object content differs from expected value.", i+1, instanceType)
}
}
// HTTP request to test the case of `objectLayer` being set to `nil`.
// There is no need to use an existing bucket or valid input for creating the request,
// since the `objectLayer==nil` check is performed before any other checks inside the handlers.
// The only aim is to generate an HTTP request in a way that the relevant/registered endpoint is invoked.
nilBucket := "dummy-bucket"
nilObject := ""
nilReq, err := newTestSignedRequestV4(http.MethodPost, getDeleteMultipleObjectsURL("", nilBucket), 0, nil, "", "", nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}
// execute the object layer set to `nil` test.
// `ExecObjectLayerAPINilTest` manages the operation.
ExecObjectLayerAPINilTest(t, nilBucket, nilObject, instanceType, apiRouter, nilReq)
}
| cmd/bucket-handlers_test.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.0001781260216375813,
0.0001705063332337886,
0.0001625670411158353,
0.00017089575703721493,
0.000003397555701667443
] |
{
"id": 5,
"code_window": [
"\t\t}\n",
"\t}\n",
"\n",
"\t// Closure to handle single connection.\n",
"\thandleConn := func(tcpConn *net.TCPConn) {\n",
"\t\ttcpConn.SetKeepAlive(true)\n",
"\t\tsend(acceptResult{tcpConn, nil})\n",
"\t}\n",
"\n",
"\t// Closure to handle TCPListener until done channel is closed.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [],
"file_path": "internal/http/listener.go",
"type": "replace",
"edit_start_line_idx": 57
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"reflect"
"sort"
"testing"
)
func Test_metaCacheEntries_sort(t *testing.T) {
entries := loadMetacacheSampleEntries(t)
o := entries.entries()
if !o.isSorted() {
t.Fatal("Expected sorted objects")
}
// Swap first and last
o[0], o[len(o)-1] = o[len(o)-1], o[0]
if o.isSorted() {
t.Fatal("Expected unsorted objects")
}
sorted := o.sort()
if !o.isSorted() {
t.Fatal("Expected sorted o objects")
}
if !sorted.entries().isSorted() {
t.Fatal("Expected sorted wrapped objects")
}
want := loadMetacacheSampleNames
for i, got := range o {
if got.name != want[i] {
t.Errorf("entry %d, want %q, got %q", i, want[i], got.name)
}
}
}
func Test_metaCacheEntries_forwardTo(t *testing.T) {
org := loadMetacacheSampleEntries(t)
entries := org
want := []string{"src/compress/zlib/reader_test.go", "src/compress/zlib/writer.go", "src/compress/zlib/writer_test.go"}
entries.forwardTo("src/compress/zlib/reader_test.go")
got := entries.entries().names()
if !reflect.DeepEqual(got, want) {
t.Errorf("got unexpected result: %#v", got)
}
// Try with prefix
entries = org
entries.forwardTo("src/compress/zlib/reader_t")
got = entries.entries().names()
if !reflect.DeepEqual(got, want) {
t.Errorf("got unexpected result: %#v", got)
}
}
func Test_metaCacheEntries_merge(t *testing.T) {
org := loadMetacacheSampleEntries(t)
a, b := org.shallowClone(), org.shallowClone()
be := b.entries()
for i := range be {
// Modify b so it isn't deduplicated.
be[i].metadata = []byte("something-else")
}
// Merge b into a
a.merge(b, -1)
want := append(loadMetacacheSampleNames, loadMetacacheSampleNames...)
sort.Strings(want)
got := a.entries().names()
if len(got) != len(want) {
t.Errorf("unexpected count, want %v, got %v", len(want), len(got))
}
for i, name := range got {
if want[i] != name {
t.Errorf("unexpected name, want %q, got %q", want[i], name)
}
}
}
func Test_metaCacheEntries_filterObjects(t *testing.T) {
data := loadMetacacheSampleEntries(t)
data.filterObjectsOnly()
got := data.entries().names()
want := []string{"src/compress/bzip2/bit_reader.go", "src/compress/bzip2/bzip2.go", "src/compress/bzip2/bzip2_test.go", "src/compress/bzip2/huffman.go", "src/compress/bzip2/move_to_front.go", "src/compress/bzip2/testdata/Isaac.Newton-Opticks.txt.bz2", "src/compress/bzip2/testdata/e.txt.bz2", "src/compress/bzip2/testdata/fail-issue5747.bz2", "src/compress/bzip2/testdata/pass-random1.bin", "src/compress/bzip2/testdata/pass-random1.bz2", "src/compress/bzip2/testdata/pass-random2.bin", "src/compress/bzip2/testdata/pass-random2.bz2", "src/compress/bzip2/testdata/pass-sawtooth.bz2", "src/compress/bzip2/testdata/random.data.bz2", "src/compress/flate/deflate.go", "src/compress/flate/deflate_test.go", "src/compress/flate/deflatefast.go", "src/compress/flate/dict_decoder.go", "src/compress/flate/dict_decoder_test.go", "src/compress/flate/example_test.go", "src/compress/flate/flate_test.go", "src/compress/flate/huffman_bit_writer.go", "src/compress/flate/huffman_bit_writer_test.go", "src/compress/flate/huffman_code.go", "src/compress/flate/inflate.go", "src/compress/flate/inflate_test.go", "src/compress/flate/reader_test.go", "src/compress/flate/testdata/huffman-null-max.dyn.expect", "src/compress/flate/testdata/huffman-null-max.dyn.expect-noinput", "src/compress/flate/testdata/huffman-null-max.golden", "src/compress/flate/testdata/huffman-null-max.in", "src/compress/flate/testdata/huffman-null-max.wb.expect", "src/compress/flate/testdata/huffman-null-max.wb.expect-noinput", "src/compress/flate/testdata/huffman-pi.dyn.expect", "src/compress/flate/testdata/huffman-pi.dyn.expect-noinput", "src/compress/flate/testdata/huffman-pi.golden", "src/compress/flate/testdata/huffman-pi.in", "src/compress/flate/testdata/huffman-pi.wb.expect", "src/compress/flate/testdata/huffman-pi.wb.expect-noinput", "src/compress/flate/testdata/huffman-rand-1k.dyn.expect", "src/compress/flate/testdata/huffman-rand-1k.dyn.expect-noinput", "src/compress/flate/testdata/huffman-rand-1k.golden", "src/compress/flate/testdata/huffman-rand-1k.in", "src/compress/flate/testdata/huffman-rand-1k.wb.expect", "src/compress/flate/testdata/huffman-rand-1k.wb.expect-noinput", "src/compress/flate/testdata/huffman-rand-limit.dyn.expect", "src/compress/flate/testdata/huffman-rand-limit.dyn.expect-noinput", "src/compress/flate/testdata/huffman-rand-limit.golden", "src/compress/flate/testdata/huffman-rand-limit.in", "src/compress/flate/testdata/huffman-rand-limit.wb.expect", "src/compress/flate/testdata/huffman-rand-limit.wb.expect-noinput", "src/compress/flate/testdata/huffman-rand-max.golden", "src/compress/flate/testdata/huffman-rand-max.in", "src/compress/flate/testdata/huffman-shifts.dyn.expect", "src/compress/flate/testdata/huffman-shifts.dyn.expect-noinput", "src/compress/flate/testdata/huffman-shifts.golden", "src/compress/flate/testdata/huffman-shifts.in", "src/compress/flate/testdata/huffman-shifts.wb.expect", "src/compress/flate/testdata/huffman-shifts.wb.expect-noinput", "src/compress/flate/testdata/huffman-text-shift.dyn.expect", "src/compress/flate/testdata/huffman-text-shift.dyn.expect-noinput", "src/compress/flate/testdata/huffman-text-shift.golden", "src/compress/flate/testdata/huffman-text-shift.in", "src/compress/flate/testdata/huffman-text-shift.wb.expect", "src/compress/flate/testdata/huffman-text-shift.wb.expect-noinput", "src/compress/flate/testdata/huffman-text.dyn.expect", "src/compress/flate/testdata/huffman-text.dyn.expect-noinput", "src/compress/flate/testdata/huffman-text.golden", 
"src/compress/flate/testdata/huffman-text.in", "src/compress/flate/testdata/huffman-text.wb.expect", "src/compress/flate/testdata/huffman-text.wb.expect-noinput", "src/compress/flate/testdata/huffman-zero.dyn.expect", "src/compress/flate/testdata/huffman-zero.dyn.expect-noinput", "src/compress/flate/testdata/huffman-zero.golden", "src/compress/flate/testdata/huffman-zero.in", "src/compress/flate/testdata/huffman-zero.wb.expect", "src/compress/flate/testdata/huffman-zero.wb.expect-noinput", "src/compress/flate/testdata/null-long-match.dyn.expect-noinput", "src/compress/flate/testdata/null-long-match.wb.expect-noinput", "src/compress/flate/token.go", "src/compress/flate/writer_test.go", "src/compress/gzip/example_test.go", "src/compress/gzip/gunzip.go", "src/compress/gzip/gunzip_test.go", "src/compress/gzip/gzip.go", "src/compress/gzip/gzip_test.go", "src/compress/gzip/issue14937_test.go", "src/compress/gzip/testdata/issue6550.gz.base64", "src/compress/lzw/reader.go", "src/compress/lzw/reader_test.go", "src/compress/lzw/writer.go", "src/compress/lzw/writer_test.go", "src/compress/testdata/e.txt", "src/compress/testdata/gettysburg.txt", "src/compress/testdata/pi.txt", "src/compress/zlib/example_test.go", "src/compress/zlib/reader.go", "src/compress/zlib/reader_test.go", "src/compress/zlib/writer.go", "src/compress/zlib/writer_test.go"}
if !reflect.DeepEqual(want, got) {
t.Errorf("got unexpected result: %#v", got)
}
}
func Test_metaCacheEntries_filterPrefixes(t *testing.T) {
data := loadMetacacheSampleEntries(t)
data.filterPrefixesOnly()
got := data.entries().names()
want := []string{"src/compress/bzip2/", "src/compress/bzip2/testdata/", "src/compress/flate/", "src/compress/flate/testdata/", "src/compress/gzip/", "src/compress/gzip/testdata/", "src/compress/lzw/", "src/compress/testdata/", "src/compress/zlib/"}
if !reflect.DeepEqual(want, got) {
t.Errorf("got unexpected result: %#v", got)
}
}
func Test_metaCacheEntries_filterRecursive(t *testing.T) {
data := loadMetacacheSampleEntries(t)
data.filterRecursiveEntries("src/compress/bzip2/", slashSeparator)
got := data.entries().names()
want := []string{"src/compress/bzip2/", "src/compress/bzip2/bit_reader.go", "src/compress/bzip2/bzip2.go", "src/compress/bzip2/bzip2_test.go", "src/compress/bzip2/huffman.go", "src/compress/bzip2/move_to_front.go"}
if !reflect.DeepEqual(want, got) {
t.Errorf("got unexpected result: %#v", got)
}
}
func Test_metaCacheEntries_filterRecursiveRoot(t *testing.T) {
data := loadMetacacheSampleEntries(t)
data.filterRecursiveEntries("", slashSeparator)
got := data.entries().names()
want := []string{}
if !reflect.DeepEqual(want, got) {
t.Errorf("got unexpected result: %#v", got)
}
}
func Test_metaCacheEntries_filterRecursiveRootSep(t *testing.T) {
data := loadMetacacheSampleEntries(t)
// This will remove anything with "bzip2/" in the path since it is the separator
data.filterRecursiveEntries("", "bzip2/")
got := data.entries().names()
want := []string{"src/compress/flate/", "src/compress/flate/deflate.go", "src/compress/flate/deflate_test.go", "src/compress/flate/deflatefast.go", "src/compress/flate/dict_decoder.go", "src/compress/flate/dict_decoder_test.go", "src/compress/flate/example_test.go", "src/compress/flate/flate_test.go", "src/compress/flate/huffman_bit_writer.go", "src/compress/flate/huffman_bit_writer_test.go", "src/compress/flate/huffman_code.go", "src/compress/flate/inflate.go", "src/compress/flate/inflate_test.go", "src/compress/flate/reader_test.go", "src/compress/flate/testdata/", "src/compress/flate/testdata/huffman-null-max.dyn.expect", "src/compress/flate/testdata/huffman-null-max.dyn.expect-noinput", "src/compress/flate/testdata/huffman-null-max.golden", "src/compress/flate/testdata/huffman-null-max.in", "src/compress/flate/testdata/huffman-null-max.wb.expect", "src/compress/flate/testdata/huffman-null-max.wb.expect-noinput", "src/compress/flate/testdata/huffman-pi.dyn.expect", "src/compress/flate/testdata/huffman-pi.dyn.expect-noinput", "src/compress/flate/testdata/huffman-pi.golden", "src/compress/flate/testdata/huffman-pi.in", "src/compress/flate/testdata/huffman-pi.wb.expect", "src/compress/flate/testdata/huffman-pi.wb.expect-noinput", "src/compress/flate/testdata/huffman-rand-1k.dyn.expect", "src/compress/flate/testdata/huffman-rand-1k.dyn.expect-noinput", "src/compress/flate/testdata/huffman-rand-1k.golden", "src/compress/flate/testdata/huffman-rand-1k.in", "src/compress/flate/testdata/huffman-rand-1k.wb.expect", "src/compress/flate/testdata/huffman-rand-1k.wb.expect-noinput", "src/compress/flate/testdata/huffman-rand-limit.dyn.expect", "src/compress/flate/testdata/huffman-rand-limit.dyn.expect-noinput", "src/compress/flate/testdata/huffman-rand-limit.golden", "src/compress/flate/testdata/huffman-rand-limit.in", "src/compress/flate/testdata/huffman-rand-limit.wb.expect", "src/compress/flate/testdata/huffman-rand-limit.wb.expect-noinput", "src/compress/flate/testdata/huffman-rand-max.golden", "src/compress/flate/testdata/huffman-rand-max.in", "src/compress/flate/testdata/huffman-shifts.dyn.expect", "src/compress/flate/testdata/huffman-shifts.dyn.expect-noinput", "src/compress/flate/testdata/huffman-shifts.golden", "src/compress/flate/testdata/huffman-shifts.in", "src/compress/flate/testdata/huffman-shifts.wb.expect", "src/compress/flate/testdata/huffman-shifts.wb.expect-noinput", "src/compress/flate/testdata/huffman-text-shift.dyn.expect", "src/compress/flate/testdata/huffman-text-shift.dyn.expect-noinput", "src/compress/flate/testdata/huffman-text-shift.golden", "src/compress/flate/testdata/huffman-text-shift.in", "src/compress/flate/testdata/huffman-text-shift.wb.expect", "src/compress/flate/testdata/huffman-text-shift.wb.expect-noinput", "src/compress/flate/testdata/huffman-text.dyn.expect", "src/compress/flate/testdata/huffman-text.dyn.expect-noinput", "src/compress/flate/testdata/huffman-text.golden", "src/compress/flate/testdata/huffman-text.in", "src/compress/flate/testdata/huffman-text.wb.expect", "src/compress/flate/testdata/huffman-text.wb.expect-noinput", "src/compress/flate/testdata/huffman-zero.dyn.expect", "src/compress/flate/testdata/huffman-zero.dyn.expect-noinput", "src/compress/flate/testdata/huffman-zero.golden", "src/compress/flate/testdata/huffman-zero.in", "src/compress/flate/testdata/huffman-zero.wb.expect", "src/compress/flate/testdata/huffman-zero.wb.expect-noinput", "src/compress/flate/testdata/null-long-match.dyn.expect-noinput", 
"src/compress/flate/testdata/null-long-match.wb.expect-noinput", "src/compress/flate/token.go", "src/compress/flate/writer_test.go", "src/compress/gzip/", "src/compress/gzip/example_test.go", "src/compress/gzip/gunzip.go", "src/compress/gzip/gunzip_test.go", "src/compress/gzip/gzip.go", "src/compress/gzip/gzip_test.go", "src/compress/gzip/issue14937_test.go", "src/compress/gzip/testdata/", "src/compress/gzip/testdata/issue6550.gz.base64", "src/compress/lzw/", "src/compress/lzw/reader.go", "src/compress/lzw/reader_test.go", "src/compress/lzw/writer.go", "src/compress/lzw/writer_test.go", "src/compress/testdata/", "src/compress/testdata/e.txt", "src/compress/testdata/gettysburg.txt", "src/compress/testdata/pi.txt", "src/compress/zlib/", "src/compress/zlib/example_test.go", "src/compress/zlib/reader.go", "src/compress/zlib/reader_test.go", "src/compress/zlib/writer.go", "src/compress/zlib/writer_test.go"}
if !reflect.DeepEqual(want, got) {
t.Errorf("got unexpected result: %#v", got)
}
}
func Test_metaCacheEntries_filterPrefix(t *testing.T) {
data := loadMetacacheSampleEntries(t)
data.filterPrefix("src/compress/bzip2/")
got := data.entries().names()
want := []string{"src/compress/bzip2/", "src/compress/bzip2/bit_reader.go", "src/compress/bzip2/bzip2.go", "src/compress/bzip2/bzip2_test.go", "src/compress/bzip2/huffman.go", "src/compress/bzip2/move_to_front.go", "src/compress/bzip2/testdata/", "src/compress/bzip2/testdata/Isaac.Newton-Opticks.txt.bz2", "src/compress/bzip2/testdata/e.txt.bz2", "src/compress/bzip2/testdata/fail-issue5747.bz2", "src/compress/bzip2/testdata/pass-random1.bin", "src/compress/bzip2/testdata/pass-random1.bz2", "src/compress/bzip2/testdata/pass-random2.bin", "src/compress/bzip2/testdata/pass-random2.bz2", "src/compress/bzip2/testdata/pass-sawtooth.bz2", "src/compress/bzip2/testdata/random.data.bz2"}
if !reflect.DeepEqual(want, got) {
t.Errorf("got unexpected result: %#v", got)
}
}
func Test_metaCacheEntry_isInDir(t *testing.T) {
tests := []struct {
testName string
entry string
dir string
sep string
want bool
}{
{
testName: "basic-file",
entry: "src/file",
dir: "src/",
sep: slashSeparator,
want: true,
},
{
testName: "basic-dir",
entry: "src/dir/",
dir: "src/",
sep: slashSeparator,
want: true,
},
{
testName: "deeper-file",
entry: "src/dir/somewhere.ext",
dir: "src/",
sep: slashSeparator,
want: false,
},
{
testName: "deeper-dir",
entry: "src/dir/somewhere/",
dir: "src/",
sep: slashSeparator,
want: false,
},
{
testName: "root-dir",
entry: "doc/",
dir: "",
sep: slashSeparator,
want: true,
},
{
testName: "root-file",
entry: "word.doc",
dir: "",
sep: slashSeparator,
want: true,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
e := metaCacheEntry{
name: tt.entry,
}
if got := e.isInDir(tt.dir, tt.sep); got != tt.want {
t.Errorf("isInDir() = %v, want %v", got, tt.want)
}
})
}
}
| cmd/metacache-entries_test.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.0001781260216375813,
0.00017213298997376114,
0.00016474153380841017,
0.00017321054474450648,
0.000003710206783580361
] |
{
"id": 5,
"code_window": [
"\t\t}\n",
"\t}\n",
"\n",
"\t// Closure to handle single connection.\n",
"\thandleConn := func(tcpConn *net.TCPConn) {\n",
"\t\ttcpConn.SetKeepAlive(true)\n",
"\t\tsend(acceptResult{tcpConn, nil})\n",
"\t}\n",
"\n",
"\t// Closure to handle TCPListener until done channel is closed.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [],
"file_path": "internal/http/listener.go",
"type": "replace",
"edit_start_line_idx": 57
} | //go:build windows
// +build windows
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package disk
// SameDisk reports whether disk1 and disk2 describe the same disk.
func SameDisk(disk1, disk2 string) (bool, error) {
return false, nil
}
| internal/disk/disk_windows.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.00017789365665521473,
0.00017361128993798047,
0.00016520070494152606,
0.00017773955187294632,
0.000005947524641669588
] |
{
"id": 6,
"code_window": [
"\t// Closure to handle TCPListener until done channel is closed.\n",
"\thandleListener := func(tcpListener *net.TCPListener) {\n",
"\t\tfor {\n",
"\t\t\ttcpConn, err := tcpListener.AcceptTCP()\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\thandleListener := func(idx int, tcpListener *net.TCPListener) {\n"
],
"file_path": "internal/http/listener.go",
"type": "replace",
"edit_start_line_idx": 64
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package http
import (
"context"
"fmt"
"net"
"syscall"
)
type acceptResult struct {
conn net.Conn
err error
}
// httpListener - HTTP listener capable of handling multiple server addresses.
type httpListener struct {
tcpListeners []*net.TCPListener // underlying TCP listeners.
acceptCh chan acceptResult // channel where all TCP listeners write accepted connection.
ctx context.Context
ctxCanceler context.CancelFunc
}
// start - starts separate goroutine for each TCP listener. A valid new connection is passed to httpListener.acceptCh.
func (listener *httpListener) start() {
// Closure to send acceptResult to acceptCh.
// It returns true if the result is sent, else false when the listener context is canceled.
send := func(result acceptResult) bool {
select {
case listener.acceptCh <- result:
// Successfully written to acceptCh
return true
case <-listener.ctx.Done():
// As stop signal is received, close accepted connection.
if result.conn != nil {
result.conn.Close()
}
return false
}
}
// Closure to handle single connection.
handleConn := func(tcpConn *net.TCPConn) {
tcpConn.SetKeepAlive(true)
send(acceptResult{tcpConn, nil})
}
// Closure to handle TCPListener until done channel is closed.
handleListener := func(tcpListener *net.TCPListener) {
for {
tcpConn, err := tcpListener.AcceptTCP()
if err != nil {
// Returns when send fails.
if !send(acceptResult{nil, err}) {
return
}
} else {
go handleConn(tcpConn)
}
}
}
// Start separate goroutine for each TCP listener to handle connection.
for _, tcpListener := range listener.tcpListeners {
go handleListener(tcpListener)
}
}
// Accept - reads one of the previously accepted TCP connections from httpListener.acceptCh and returns it.
func (listener *httpListener) Accept() (conn net.Conn, err error) {
select {
case result, ok := <-listener.acceptCh:
if ok {
return result.conn, result.err
}
case <-listener.ctx.Done():
}
return nil, syscall.EINVAL
}
// Close - closes all underlying TCP listeners.
func (listener *httpListener) Close() (err error) {
listener.ctxCanceler()
for i := range listener.tcpListeners {
listener.tcpListeners[i].Close()
}
return nil
}
// Addr - net.Listener interface compatible method returns net.Addr. In case of multiple TCP listeners, it returns '0.0.0.0' as IP address.
func (listener *httpListener) Addr() (addr net.Addr) {
addr = listener.tcpListeners[0].Addr()
if len(listener.tcpListeners) == 1 {
return addr
}
tcpAddr := addr.(*net.TCPAddr)
if ip := net.ParseIP("0.0.0.0"); ip != nil {
tcpAddr.IP = ip
}
addr = tcpAddr
return addr
}
// Addrs - returns all address information of TCP listeners.
func (listener *httpListener) Addrs() (addrs []net.Addr) {
for i := range listener.tcpListeners {
addrs = append(addrs, listener.tcpListeners[i].Addr())
}
return addrs
}
// newHTTPListener - creates a new httpListener object which is interface compatible with net.Listener.
// httpListener is capable of
// * listening on multiple addresses
// * restricting incoming connections to the HTTP protocol only
func newHTTPListener(ctx context.Context, serverAddrs []string) (listener *httpListener, err error) {
var tcpListeners []*net.TCPListener
// Close all opened listeners on error
defer func() {
if err == nil {
return
}
for _, tcpListener := range tcpListeners {
// Ignore error on close.
tcpListener.Close()
}
}()
for _, serverAddr := range serverAddrs {
var l net.Listener
if l, err = listenCfg.Listen(ctx, "tcp", serverAddr); err != nil {
return nil, err
}
tcpListener, ok := l.(*net.TCPListener)
if !ok {
return nil, fmt.Errorf("unexpected listener type found %v, expected net.TCPListener", l)
}
tcpListeners = append(tcpListeners, tcpListener)
}
listener = &httpListener{
tcpListeners: tcpListeners,
acceptCh: make(chan acceptResult, len(tcpListeners)),
}
listener.ctx, listener.ctxCanceler = context.WithCancel(ctx)
listener.start()
return listener, nil
}
| internal/http/listener.go | 1 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.9986080527305603,
0.3538426160812378,
0.00017518040840514004,
0.03450639173388481,
0.4534014165401459
] |
{
"id": 6,
"code_window": [
"\t// Closure to handle TCPListener until done channel is closed.\n",
"\thandleListener := func(tcpListener *net.TCPListener) {\n",
"\t\tfor {\n",
"\t\t\ttcpConn, err := tcpListener.AcceptTCP()\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\thandleListener := func(idx int, tcpListener *net.TCPListener) {\n"
],
"file_path": "internal/http/listener.go",
"type": "replace",
"edit_start_line_idx": 64
} | *Federation feature is deprecated and should be avoided for future deployments*
# Federation Quickstart Guide [](https://slack.min.io)
This document explains how to configure MinIO with `Bucket lookup from DNS` style federation.
## Get started
### 1. Prerequisites
Install MinIO - [MinIO Quickstart Guide](https://docs.min.io/docs/minio-quickstart-guide).
### 2. Run MinIO in federated mode
Bucket lookup from DNS federation requires two dependencies
- etcd (for bucket DNS service records)
- CoreDNS (for DNS management based on populated bucket DNS service records, optional)
## Architecture

### Environment variables
#### MINIO_ETCD_ENDPOINTS
This is a comma-separated list of etcd servers that you want to use as the MinIO federation back-end. This should
be the same across the federated deployment, i.e. all the MinIO instances within a federated deployment should use the same
etcd back-end.
#### MINIO_DOMAIN
This is the top-level domain name used for the federated setup. This domain name should ideally resolve to a load-balancer
running in front of all the federated MinIO instances. The domain name is used to create subdomain entries in etcd. For
example, if the domain is set to `domain.com`, the buckets `bucket1` and `bucket2` will be accessible as `bucket1.domain.com`
and `bucket2.domain.com`.
#### MINIO_PUBLIC_IPS
This is a comma-separated list of IP addresses to which buckets created on this MinIO instance will resolve. For example,
a bucket `bucket1` created on the current MinIO instance will be accessible as `bucket1.domain.com`, and the DNS entry for
`bucket1.domain.com` will point to the IP addresses set in `MINIO_PUBLIC_IPS`.
*Note*
- This field is mandatory for standalone and erasure code MinIO server deployments, to enable federated mode.
- This field is optional for distributed deployments. If you don't set this field in a federated setup, MinIO uses the IP addresses of
the hosts passed at server startup for the DNS entries.
### Run Multiple Clusters
> cluster1
```sh
export MINIO_ETCD_ENDPOINTS="http://remote-etcd1:2379,http://remote-etcd2:4001"
export MINIO_DOMAIN=domain.com
export MINIO_PUBLIC_IPS=44.35.2.1,44.35.2.2,44.35.2.3,44.35.2.4
minio server http://rack{1...4}.host{1...4}.domain.com/mnt/export{1...32}
```
> cluster2
```sh
export MINIO_ETCD_ENDPOINTS="http://remote-etcd1:2379,http://remote-etcd2:4001"
export MINIO_DOMAIN=domain.com
export MINIO_PUBLIC_IPS=44.35.1.1,44.35.1.2,44.35.1.3,44.35.1.4
minio server http://rack{5...8}.host{5...8}.domain.com/mnt/export{1...32}
```
In this configuration you can see `MINIO_ETCD_ENDPOINTS` points to the etcd backend which manages MinIO's
`config.json` and bucket DNS SRV records. `MINIO_DOMAIN` indicates the domain suffix for the bucket which
will be used to resolve the bucket through DNS. For example if you have a bucket such as `mybucket`, the
client can now use `mybucket.domain.com` to resolve directly to the right cluster. `MINIO_PUBLIC_IPS`
points to the public IP addresses where each cluster is accessible; this is unique for each cluster.
NOTE: `mybucket` only exists on one cluster, either `cluster1` or `cluster2`. Which one is random and
is decided by how `domain.com` gets resolved; if there is a round-robin DNS on `domain.com`, then
it is randomized which cluster provisions the bucket.
### 3. Upgrading to `etcdv3` API
Users running MinIO federation from release `RELEASE.2018-06-09T03-43-35Z` to `RELEASE.2018-07-10T01-42-11Z` should migrate the existing bucket data on the etcd server to the `etcdv3` API, and update the CoreDNS version to `1.2.0`, before updating their MinIO server to the latest version.
Here is some background on why this is needed - MinIO server releases `RELEASE.2018-06-09T03-43-35Z` to `RELEASE.2018-07-10T01-42-11Z` used the etcdv2 API to store bucket data on the etcd server. This was because `etcdv3` support was not available in the CoreDNS server. So, even if MinIO had used the `etcdv3` API to store bucket data, CoreDNS wouldn't have been able to read and serve it as DNS records.
Now that CoreDNS [supports etcdv3](https://coredns.io/2018/07/11/coredns-1.2.0-release/), the MinIO server uses the `etcdv3` API to store bucket data on the etcd server. As the `etcdv2` and `etcdv3` APIs are not compatible, data stored using the `etcdv2` API is not visible to the `etcdv3` API. So, bucket data stored by the previous MinIO version will not be visible to the current MinIO version until a migration is done.
The CoreOS team has documented the steps required to migrate existing data from `etcdv2` to `etcdv3` in [this blog post](https://coreos.com/blog/migrating-applications-etcd-v3.html). Please refer to the post and migrate the etcd data to the `etcdv3` API.
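As a hedged illustration only (the linked post remains the authoritative reference), an offline migration of a single member might look like the sketch below. It assumes an etcd v3.x `etcdctl` that still ships the since-removed `migrate` subcommand; the data directory and service name are placeholders.

```sh
# Stop the etcd member before running the offline migration (unit name is illustrative).
systemctl stop etcd

# Rewrite the v2 keys in the data directory into the v3 store (path is illustrative).
ETCDCTL_API=3 etcdctl migrate --data-dir=/var/lib/etcd

# Restart the member and confirm the keys are visible over the v3 API.
systemctl start etcd
ETCDCTL_API=3 etcdctl get --prefix ""
```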
### 4. Test your setup
To test this setup, access the MinIO server via browser or [`mc`](https://docs.min.io/docs/minio-client-quickstart-guide). You’ll see the uploaded files are accessible from all the MinIO endpoints.
# Explore Further
- [Use `mc` with MinIO Server](https://docs.min.io/docs/minio-client-quickstart-guide)
- [Use `aws-cli` with MinIO Server](https://docs.min.io/docs/aws-cli-with-minio)
- [Use `s3cmd` with MinIO Server](https://docs.min.io/docs/s3cmd-with-minio)
- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/docs/golang-client-quickstart-guide)
- [The MinIO documentation website](https://docs.min.io)
| docs/federation/lookup/README.md | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.0001969229051610455,
0.00016961786604952067,
0.00016283171135000885,
0.00016614460037089884,
0.000009469971701037139
] |
{
"id": 6,
"code_window": [
"\t// Closure to handle TCPListener until done channel is closed.\n",
"\thandleListener := func(tcpListener *net.TCPListener) {\n",
"\t\tfor {\n",
"\t\t\ttcpConn, err := tcpListener.AcceptTCP()\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\thandleListener := func(idx int, tcpListener *net.TCPListener) {\n"
],
"file_path": "internal/http/listener.go",
"type": "replace",
"edit_start_line_idx": 64
} | # Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
nationality, personal appearance, race, religion, or sexual identity and
orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior, in compliance with the
licensing terms applying to the Project developments.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful. However, these actions shall respect the
licensing terms of the Project Developments that will always supersede such
Code of Conduct.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at [email protected]. The project team
will review and investigate all complaints, and will respond in a way that it deems
appropriate to the circumstances. The project team is obligated to maintain
confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at [http://contributor-covenant.org/version/1/4][version]
This version includes a clarification to ensure that the code of conduct is in
compliance with the free software licensing terms of the project.
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
| code_of_conduct.md | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.000173931461176835,
0.00017176718392875046,
0.00016643172421026975,
0.00017232555546797812,
0.0000021154219211894087
] |
{
"id": 6,
"code_window": [
"\t// Closure to handle TCPListener until done channel is closed.\n",
"\thandleListener := func(tcpListener *net.TCPListener) {\n",
"\t\tfor {\n",
"\t\t\ttcpConn, err := tcpListener.AcceptTCP()\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\thandleListener := func(idx int, tcpListener *net.TCPListener) {\n"
],
"file_path": "internal/http/listener.go",
"type": "replace",
"edit_start_line_idx": 64
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"os"
"runtime"
"syscall"
"testing"
)
func TestSysErrors(t *testing.T) {
pathErr := &os.PathError{Err: syscall.ENAMETOOLONG}
ok := isSysErrTooLong(pathErr)
if !ok {
t.Fatalf("Unexpected error expecting %s", syscall.ENAMETOOLONG)
}
pathErr = &os.PathError{Err: syscall.ENOTDIR}
ok = isSysErrNotDir(pathErr)
if !ok {
t.Fatalf("Unexpected error expecting %s", syscall.ENOTDIR)
}
if runtime.GOOS != globalWindowsOSName {
pathErr = &os.PathError{Err: syscall.ENOTEMPTY}
ok = isSysErrNotEmpty(pathErr)
if !ok {
t.Fatalf("Unexpected error expecting %s", syscall.ENOTEMPTY)
}
} else {
pathErr = &os.PathError{Err: syscall.Errno(0x91)}
ok = isSysErrNotEmpty(pathErr)
if !ok {
t.Fatal("Unexpected error expecting 0x91")
}
}
if runtime.GOOS == globalWindowsOSName {
pathErr = &os.PathError{Err: syscall.Errno(0x03)}
ok = isSysErrPathNotFound(pathErr)
if !ok {
t.Fatal("Unexpected error expecting 0x03")
}
}
}
| cmd/xl-storage-errors_test.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.0001785432978067547,
0.0001720887521514669,
0.0001679469714872539,
0.00017117007519118488,
0.00000364453853762825
] |
{
"id": 7,
"code_window": [
"\t\tfor {\n",
"\t\t\ttcpConn, err := tcpListener.AcceptTCP()\n",
"\t\t\tif err != nil {\n",
"\t\t\t\t// Returns when send fails.\n",
"\t\t\t\tif !send(acceptResult{nil, err}) {\n",
"\t\t\t\t\treturn\n",
"\t\t\t\t}\n",
"\t\t\t} else {\n",
"\t\t\t\tgo handleConn(tcpConn)\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"\t\t\tif tcpConn != nil {\n",
"\t\t\t\ttcpConn.SetKeepAlive(true)\n"
],
"file_path": "internal/http/listener.go",
"type": "replace",
"edit_start_line_idx": 67
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package http
import (
"context"
"fmt"
"net"
"syscall"
)
type acceptResult struct {
conn net.Conn
err error
}
// httpListener - HTTP listener capable of handling multiple server addresses.
type httpListener struct {
tcpListeners []*net.TCPListener // underlying TCP listeners.
acceptCh chan acceptResult // channel where all TCP listeners write accepted connection.
ctx context.Context
ctxCanceler context.CancelFunc
}
// start - starts separate goroutine for each TCP listener. A valid new connection is passed to httpListener.acceptCh.
func (listener *httpListener) start() {
// Closure to send acceptResult to acceptCh.
// It returns true if the result is sent, else false when the listener context is canceled.
send := func(result acceptResult) bool {
select {
case listener.acceptCh <- result:
// Successfully written to acceptCh
return true
case <-listener.ctx.Done():
// As stop signal is received, close accepted connection.
if result.conn != nil {
result.conn.Close()
}
return false
}
}
// Closure to handle single connection.
handleConn := func(tcpConn *net.TCPConn) {
tcpConn.SetKeepAlive(true)
send(acceptResult{tcpConn, nil})
}
// Closure to handle TCPListener until done channel is closed.
handleListener := func(tcpListener *net.TCPListener) {
for {
tcpConn, err := tcpListener.AcceptTCP()
if err != nil {
// Returns when send fails.
if !send(acceptResult{nil, err}) {
return
}
} else {
go handleConn(tcpConn)
}
}
}
// Start separate goroutine for each TCP listener to handle connection.
for _, tcpListener := range listener.tcpListeners {
go handleListener(tcpListener)
}
}
// Accept - reads one of the previously accepted TCP connections from httpListener.acceptCh and returns it.
func (listener *httpListener) Accept() (conn net.Conn, err error) {
select {
case result, ok := <-listener.acceptCh:
if ok {
return result.conn, result.err
}
case <-listener.ctx.Done():
}
return nil, syscall.EINVAL
}
// Close - closes all underlying TCP listeners.
func (listener *httpListener) Close() (err error) {
listener.ctxCanceler()
for i := range listener.tcpListeners {
listener.tcpListeners[i].Close()
}
return nil
}
// Addr - net.Listener interface compatible method returns net.Addr. In case of multiple TCP listeners, it returns '0.0.0.0' as IP address.
func (listener *httpListener) Addr() (addr net.Addr) {
addr = listener.tcpListeners[0].Addr()
if len(listener.tcpListeners) == 1 {
return addr
}
tcpAddr := addr.(*net.TCPAddr)
if ip := net.ParseIP("0.0.0.0"); ip != nil {
tcpAddr.IP = ip
}
addr = tcpAddr
return addr
}
// Addrs - returns all address information of TCP listeners.
func (listener *httpListener) Addrs() (addrs []net.Addr) {
for i := range listener.tcpListeners {
addrs = append(addrs, listener.tcpListeners[i].Addr())
}
return addrs
}
// newHTTPListener - creates a new httpListener object which is interface compatible with net.Listener.
// httpListener is capable of
// * listening on multiple addresses
// * restricting incoming connections to the HTTP protocol only
func newHTTPListener(ctx context.Context, serverAddrs []string) (listener *httpListener, err error) {
var tcpListeners []*net.TCPListener
// Close all opened listeners on error
defer func() {
if err == nil {
return
}
for _, tcpListener := range tcpListeners {
// Ignore error on close.
tcpListener.Close()
}
}()
for _, serverAddr := range serverAddrs {
var l net.Listener
if l, err = listenCfg.Listen(ctx, "tcp", serverAddr); err != nil {
return nil, err
}
tcpListener, ok := l.(*net.TCPListener)
if !ok {
return nil, fmt.Errorf("unexpected listener type found %v, expected net.TCPListener", l)
}
tcpListeners = append(tcpListeners, tcpListener)
}
listener = &httpListener{
tcpListeners: tcpListeners,
acceptCh: make(chan acceptResult, len(tcpListeners)),
}
listener.ctx, listener.ctxCanceler = context.WithCancel(ctx)
listener.start()
return listener, nil
}
| internal/http/listener.go | 1 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.9970846772193909,
0.05926777794957161,
0.00016403351037297398,
0.0024695005267858505,
0.22749054431915283
] |
{
"id": 7,
"code_window": [
"\t\tfor {\n",
"\t\t\ttcpConn, err := tcpListener.AcceptTCP()\n",
"\t\t\tif err != nil {\n",
"\t\t\t\t// Returns when send fails.\n",
"\t\t\t\tif !send(acceptResult{nil, err}) {\n",
"\t\t\t\t\treturn\n",
"\t\t\t\t}\n",
"\t\t\t} else {\n",
"\t\t\t\tgo handleConn(tcpConn)\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"\t\t\tif tcpConn != nil {\n",
"\t\t\t\ttcpConn.SetKeepAlive(true)\n"
],
"file_path": "internal/http/listener.go",
"type": "replace",
"edit_start_line_idx": 67
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package target
import (
"database/sql"
"testing"
)
// TestMySQLRegistration checks if the mysql sql driver
// is registered and fails otherwise.
func TestMySQLRegistration(t *testing.T) {
var found bool
for _, drv := range sql.Drivers() {
if drv == "mysql" {
found = true
break
}
}
if !found {
t.Fatal("mysql driver not registered")
}
}
| internal/event/target/mysql_test.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.00017747671518009156,
0.00017312128329649568,
0.00016637967200949788,
0.00017431438027415425,
0.0000041668627090984955
] |
{
"id": 7,
"code_window": [
"\t\tfor {\n",
"\t\t\ttcpConn, err := tcpListener.AcceptTCP()\n",
"\t\t\tif err != nil {\n",
"\t\t\t\t// Returns when send fails.\n",
"\t\t\t\tif !send(acceptResult{nil, err}) {\n",
"\t\t\t\t\treturn\n",
"\t\t\t\t}\n",
"\t\t\t} else {\n",
"\t\t\t\tgo handleConn(tcpConn)\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"\t\t\tif tcpConn != nil {\n",
"\t\t\t\ttcpConn.SetKeepAlive(true)\n"
],
"file_path": "internal/http/listener.go",
"type": "replace",
"edit_start_line_idx": 67
} | # MinIO Docker Quickstart Guide [](https://slack.min.io) [](https://hub.docker.com/r/minio/minio/)
## Prerequisites
Docker installed on your machine. Download the relevant installer from [here](https://www.docker.com/community-edition#/download).
## Run Standalone MinIO on Docker.
MinIO needs a persistent volume to store configuration and application data. However, for testing purposes, you can launch MinIO by simply passing a directory (`/data` in the example below). This directory gets created in the container filesystem at the time of container start, but all the data is lost after the container exits.
```sh
docker run \
-p 9000:9000 \
-p 9001:9001 \
-e "MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE" \
-e "MINIO_ROOT_PASSWORD=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
quay.io/minio/minio server /data --console-address ":9001"
```
To create a MinIO container with persistent storage, you need to map local persistent directories from the host OS to the virtual config `~/.minio` and export `/data` directories. To do this, run the commands below
#### GNU/Linux and macOS
```sh
docker run \
-p 9000:9000 \
-p 9001:9001 \
--name minio1 \
-v /mnt/data:/data \
-e "MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE" \
-e "MINIO_ROOT_PASSWORD=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
quay.io/minio/minio server /data --console-address ":9001"
```
#### Windows
```sh
docker run \
-p 9000:9000 \
-p 9001:9001 \
--name minio1 \
-v D:\data:/data \
-e "MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE" \
-e "MINIO_ROOT_PASSWORD=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
quay.io/minio/minio server /data --console-address ":9001"
```
## Run Distributed MinIO on Docker
Distributed MinIO can be deployed via [Docker Compose](https://docs.min.io/docs/deploy-minio-on-docker-compose). This means Docker Compose lets you quickly get started with Distributed MinIO on your computer - ideal for development, testing, and staging environments. We recommend a Kubernetes-based deployment for production; see https://github.com/minio/operator.
## MinIO Docker Tips
### MinIO Custom Access and Secret Keys
To override MinIO's auto-generated keys, you may pass secret and access keys explicitly as environment variables. MinIO server also allows regular strings as access and secret keys.
#### GNU/Linux and macOS
```sh
docker run \
-p 9000:9000 \
-p 9001:9001 \
--name minio1 \
-e "MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE" \
-e "MINIO_ROOT_PASSWORD=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
-v /mnt/data:/data \
quay.io/minio/minio server /data --console-address ":9001"
```
#### Windows
```powershell
docker run \
-p 9000:9000 \
-p 9001:9001 \
--name minio1 \
-e "MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE" \
-e "MINIO_ROOT_PASSWORD=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
-v D:\data:/data \
quay.io/minio/minio server /data --console-address ":9001"
```
### Run MinIO Docker as a regular user
Docker provides standardized mechanisms to run docker containers as non-root users.
#### GNU/Linux and macOS
On Linux and macOS you can use `--user` to run the container as regular user.
> NOTE: make sure the user passed to `--user` has write permission to *${HOME}/data* prior to using `--user`.
```sh
mkdir -p ${HOME}/data
docker run \
-p 9000:9000 \
-p 9001:9001 \
--user $(id -u):$(id -g) \
--name minio1 \
-e "MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE" \
-e "MINIO_ROOT_PASSWORD=wJalrXUtnFEMIK7MDENGbPxRfiCYEXAMPLEKEY" \
-v ${HOME}/data:/data \
quay.io/minio/minio server /data
```
#### Windows
On Windows you would need to use [Docker integrated windows authentication](https://success.docker.com/article/modernizing-traditional-dot-net-applications#integratedwindowsauthentication) and [Create a container with Active Directory Support](https://blogs.msdn.microsoft.com/containerstuff/2017/01/30/create-a-container-with-active-directory-support/)
> NOTE: make sure your AD/Windows user has write permissions to *D:\data* prior to using `credentialspec=`.
```powershell
docker run \
-p 9000:9000 \
-p 9001:9001 \
--name minio1 \
--security-opt "credentialspec=file://myuser.json"
-e "MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE" \
-e "MINIO_ROOT_PASSWORD=wJalrXUtnFEMIK7MDENGbPxRfiCYEXAMPLEKEY" \
-v D:\data:/data \
quay.io/minio/minio server /data
```
### MinIO Custom Access and Secret Keys using Docker secrets
To override MinIO's auto-generated keys, you may pass secret and access keys explicitly by creating access and secret keys as [Docker secrets](https://docs.docker.com/engine/swarm/secrets/). MinIO server also allows regular strings as access and secret keys.
```
echo "AKIAIOSFODNN7EXAMPLE" | docker secret create access_key -
echo "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" | docker secret create secret_key -
```
Create a MinIO service using `docker service` to read from Docker secrets.
```
docker service create --name="minio-service" --secret="access_key" --secret="secret_key" quay.io/minio/minio server /data
```
Read more about `docker service` [here](https://docs.docker.com/engine/swarm/how-swarm-mode-works/services/)
#### MinIO Custom Access and Secret Key files
To use other secret names, follow the instructions above and replace `access_key` and `secret_key` with your custom names (e.g. `my_secret_key`, `my_custom_key`). Run your service with
```
docker service create --name="minio-service" \
--secret="my_access_key" \
--secret="my_secret_key" \
--env="MINIO_ROOT_USER_FILE=my_access_key" \
--env="MINIO_ROOT_PASSWORD_FILE=my_secret_key" \
quay.io/minio/minio server /data
```
`MINIO_ROOT_USER_FILE` and `MINIO_ROOT_PASSWORD_FILE` also support custom absolute paths, in case Docker secrets are mounted to custom locations or other tools are used to mount secrets into the container. For example, HashiCorp Vault injects secrets to `/vault/secrets`. With the custom names above, set the environment variables to
```
MINIO_ROOT_USER_FILE=/vault/secrets/my_access_key
MINIO_ROOT_PASSWORD_FILE=/vault/secrets/my_secret_key
```
### Retrieving Container ID
To use Docker commands on a specific container, you need to know the `Container ID` for that container. To get the `Container ID`, run
```sh
docker ps -a
```
The `-a` flag makes sure you get all the containers (Created, Running, Exited). Then identify the `Container ID` from the output.
### Starting and Stopping Containers
To start a stopped container, you can use the [`docker start`](https://docs.docker.com/engine/reference/commandline/start/) command.
```sh
docker start <container_id>
```
To stop a running container, you can use the [`docker stop`](https://docs.docker.com/engine/reference/commandline/stop/) command.
```sh
docker stop <container_id>
```
### MinIO container logs
To access MinIO logs, you can use the [`docker logs`](https://docs.docker.com/engine/reference/commandline/logs/) command.
```sh
docker logs <container_id>
```
### Monitor MinIO Docker Container
To monitor the resources used by MinIO container, you can use the [`docker stats`](https://docs.docker.com/engine/reference/commandline/stats/) command.
```sh
docker stats <container_id>
```
## Explore Further
* [Deploy MinIO on Docker Compose](https://docs.min.io/docs/deploy-minio-on-docker-compose)
* [Distributed MinIO Quickstart Guide](https://docs.min.io/docs/distributed-minio-quickstart-guide)
* [MinIO Erasure Code QuickStart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide)
| docs/docker/README.md | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.00017100886907428503,
0.00016609682643320411,
0.00016145463450811803,
0.00016639490786474198,
0.0000025979115889640525
] |
{
"id": 7,
"code_window": [
"\t\tfor {\n",
"\t\t\ttcpConn, err := tcpListener.AcceptTCP()\n",
"\t\t\tif err != nil {\n",
"\t\t\t\t// Returns when send fails.\n",
"\t\t\t\tif !send(acceptResult{nil, err}) {\n",
"\t\t\t\t\treturn\n",
"\t\t\t\t}\n",
"\t\t\t} else {\n",
"\t\t\t\tgo handleConn(tcpConn)\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"\t\t\tif tcpConn != nil {\n",
"\t\t\t\ttcpConn.SetKeepAlive(true)\n"
],
"file_path": "internal/http/listener.go",
"type": "replace",
"edit_start_line_idx": 67
} | # AssumeRoleWithWebIdentity [](https://slack.min.io)
## Introduction
Calling AssumeRoleWithWebIdentity does not require the use of MinIO default credentials. Therefore, you can distribute an application (for example, on mobile devices) that requests temporary security credentials without including MinIO default credentials in the application. Instead, the identity of the caller is validated by using a JWT id_token from the web identity provider. The temporary security credentials returned by this API consist of an access key, a secret key, and a security token. Applications can use these temporary security credentials to sign calls to MinIO API operations.
By default, the temporary security credentials created by AssumeRoleWithWebIdentity last for one hour. However, use the optional DurationSeconds parameter to specify the duration of the credentials. This value can range from 900 seconds (15 minutes) up to the maximum session duration of 365 days.
## API Request Parameters
### WebIdentityToken
The OAuth 2.0 id_token that is provided by the web identity provider. The application must obtain this token by authenticating the user who is using your application with a web identity provider before the application makes an AssumeRoleWithWebIdentity call.
| Params | Value |
| :-- | :-- |
| *Type* | *String* |
| *Length Constraints* | *Minimum length of 4. Maximum length of 2048.* |
| *Required* | *Yes* |
### WebIdentityAccessToken (MinIO Extension)
There are situations when the identity provider does not provide user claims in the `id_token` and they instead need to be retrieved from the UserInfo endpoint; this extension is only useful in that scenario. This is rare, so use it accordingly depending on your identity provider implementation (an illustrative request follows the table below). `access_token` is available as part of the OIDC authentication flow, similar to `id_token`.
| Params | Value |
| :-- | :-- |
| *Type* | *String* |
| *Required* | *No* |
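For illustration only, a request that supplies the access token alongside the id_token follows the same query format as the sample request further below; both token values here are elided placeholders:

```
http://minio.cluster:9000?Action=AssumeRoleWithWebIdentity&DurationSeconds=3600&WebIdentityToken=<id_token>&WebIdentityAccessToken=<access_token>&Version=2011-06-15
```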
### Version
Indicates STS API version information; the only supported value is '2011-06-15'. This value is borrowed from the AWS STS API documentation for compatibility reasons.
| Params | Value |
| :-- | :-- |
| *Type* | *String* |
| *Required* | *Yes* |
### DurationSeconds
The duration, in seconds. The value can range from 900 seconds (15 minutes) up to 365 days. If the value is higher than this setting, the operation fails. By default, the value is set to 3600 seconds. If no *DurationSeconds* is specified, the expiry is obtained from the *WebIdentityToken*.
| Params | Value |
| :-- | :-- |
| *Type* | *Integer* |
| *Valid Range* | *Minimum value of 900. Maximum value of 31536000.* |
| *Required* | *No* |
### Policy
An IAM policy in JSON format that you want to use as an inline session policy. This parameter is optional. Passing policies to this operation returns new temporary credentials. The resulting session's permissions are the intersection of the canned policy and the policy set here. You cannot use this policy to grant more permissions than those allowed by the canned policy being assumed.
| Params | Value |
| :-- | :-- |
| *Type* | *String* |
| *Valid Range* | *Minimum length of 1. Maximum length of 2048.* |
| *Required* | *No* |
### Response Elements
XML response for this API is similar to [AWS STS AssumeRoleWithWebIdentity](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html#API_AssumeRoleWithWebIdentity_ResponseElements)
### Errors
XML error response for this API is similar to [AWS STS AssumeRoleWithWebIdentity](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html#API_AssumeRoleWithWebIdentity_Errors)
## Sample `POST` Request
```
http://minio.cluster:9000?Action=AssumeRoleWithWebIdentity&DurationSeconds=3600&WebIdentityToken=eyJ4NXQiOiJOVEF4Wm1NeE5ETXlaRGczTVRVMVpHTTBNekV6T0RKaFpXSTRORE5sWkRVMU9HRmtOakZpTVEiLCJraWQiOiJOVEF4Wm1NeE5ETXlaRGczTVRVMVpHTTBNekV6T0RKaFpXSTRORE5sWkRVMU9HRmtOakZpTVEiLCJhbGciOiJSUzI1NiJ9.eyJhdWQiOiJQb0VnWFA2dVZPNDVJc0VOUm5nRFhqNUF1NVlhIiwiYXpwIjoiUG9FZ1hQNnVWTzQ1SXNFTlJuZ0RYajVBdTVZYSIsImlzcyI6Imh0dHBzOlwvXC9sb2NhbGhvc3Q6OTQ0M1wvb2F1dGgyXC90b2tlbiIsImV4cCI6MTU0MTgwOTU4MiwiaWF0IjoxNTQxODA1OTgyLCJqdGkiOiI2Y2YyMGIwZS1lNGZmLTQzZmQtYTdiYS1kYTc3YTE3YzM2MzYifQ.Jm29jPliRvrK6Os34nSK3rhzIYLFjE__zdVGNng3uGKXGKzP3We_i6NPnhA0szJXMOKglXzUF1UgSz8MctbaxFS8XDusQPVe4LkB_45hwBm6TmBxzui911nt-1RbBLN_jZIlvl2lPrbTUH5hSn9kEkph6seWanTNQpz9tNEoVa6R_OX3kpJqxe8tLQUWw453A1JTwFNhdHa6-f1K8_Q_eEZ_4gOYINQ9t_fhTibdbkXZkJQFLop-Jwoybi9s4nwQU_dATocgcufq5eCeNItQeleT-23lGxIz0X7CiJrJynYLdd-ER0F77SumqEb5iCxhxuf4H7dovwd1kAmyKzLxpw&Version=2011-06-15
```
## Sample Response
```
<?xml version="1.0" encoding="UTF-8"?>
<AssumeRoleWithWebIdentityResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
<AssumeRoleWithWebIdentityResult>
<AssumedRoleUser>
<Arn/>
<AssumeRoleId/>
</AssumedRoleUser>
<Credentials>
<AccessKeyId>Y4RJU1RNFGK48LGO9I2S</AccessKeyId>
<SecretAccessKey>sYLRKS1Z7hSjluf6gEbb9066hnx315wHTiACPAjg</SecretAccessKey>
<Expiration>2019-08-08T20:26:12Z</Expiration>
<SessionToken>eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJhY2Nlc3NLZXkiOiJZNFJKVTFSTkZHSzQ4TEdPOUkyUyIsImF1ZCI6IlBvRWdYUDZ1Vk80NUlzRU5SbmdEWGo1QXU1WWEiLCJhenAiOiJQb0VnWFA2dVZPNDVJc0VOUm5nRFhqNUF1NVlhIiwiZXhwIjoxNTQxODExMDcxLCJpYXQiOjE1NDE4MDc0NzEsImlzcyI6Imh0dHBzOi8vbG9jYWxob3N0Ojk0NDMvb2F1dGgyL3Rva2VuIiwianRpIjoiYTBiMjc2MjktZWUxYS00M2JmLTg3MzktZjMzNzRhNGNkYmMwIn0.ewHqKVFTaP-j_kgZrcOEKroNUjk10GEp8bqQjxBbYVovV0nHO985VnRESFbcT6XMDDKHZiWqN2vi_ETX_u3Q-w</SessionToken>
</Credentials>
</AssumeRoleWithWebIdentityResult>
<ResponseMetadata/>
</AssumeRoleWithWebIdentityResponse>
```
## Using WebIdentity API
```
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MINIO_IDENTITY_OPENID_CONFIG_URL=https://accounts.google.com/.well-known/openid-configuration
export MINIO_IDENTITY_OPENID_CLIENT_ID="843351d4-1080-11ea-aa20-271ecba3924a"
# Optional: Allow to specify the requested OpenID scopes (OpenID only requires the `openid` scope)
#export MINIO_IDENTITY_OPENID_SCOPES="openid,profile,email"
minio server /mnt/export
```
or using `mc`
```
mc admin config get myminio identity_openid
identity_openid config_url=https://accounts.google.com/.well-known/openid-configuration client_id=843351d4-1080-11ea-aa20-271ecba3924a
```
Testing with an example
> Visit [Google Developer Console](https://console.cloud.google.com) under Project, APIs, Credentials to get your OAuth2 client credentials. Add `http://localhost:8080/oauth2/callback` as a valid OAuth2 Redirect URL.
```
$ go run web-identity.go -cid 204367807228-ok7601k6gj1pgge7m09h7d79co8p35xx.apps.googleusercontent.com -csec XsT_PgPdT1nO9DD45rMLJw7G
2018/12/26 17:49:36 listening on http://localhost:8080/
```
> NOTE: for a reasonable test outcome, make sure the assumed user has at least permission/policy to list all buckets. That policy would look like below:
```
{
"version": "2012-10-17",
"statement": [
{
"effect": "Allow",
"action": [
"s3:ListAllMyBuckets"
],
"resource": [
"arn:aws:s3:::*"
]
}
]
}
```
## Authorization Flow
- Visit http://localhost:8080; logging in will direct the user to the Google OAuth2 Auth URL to obtain a permission grant.
- The redirection URI (callback handler) receives the OAuth2 callback, verifies the state parameter, and obtains a Token.
- Using the token, the callback handler further talks to the Google OAuth2 Token URL to obtain a JWT id_token.
- Once obtained, the JWT id_token is sent to the STS endpoint (i.e. MinIO) to retrieve temporary credentials (see the sketch after this list).
- Temporary credentials are displayed in the browser upon successful retrieval.
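The last two steps can also be driven programmatically. Below is a minimal sketch using the minio-go v7 SDK's `credentials.NewSTSWebIdentity` helper; the endpoint and the token placeholder are illustrative, and in a real application the id_token would come from the callback handler described above.

```
package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Illustrative: in practice the id_token comes from the OAuth2 callback handler.
	getWebIDToken := func() (*credentials.WebIdentityToken, error) {
		return &credentials.WebIdentityToken{Token: "<id_token>"}, nil
	}

	// Exchange the id_token for temporary credentials at the MinIO STS endpoint.
	sts, err := credentials.NewSTSWebIdentity("https://minio.cluster:9000", getWebIDToken)
	if err != nil {
		log.Fatalln(err)
	}

	// Use the temporary credentials with the regular S3 client.
	client, err := minio.New("minio.cluster:9000", &minio.Options{Creds: sts, Secure: true})
	if err != nil {
		log.Fatalln(err)
	}

	// List buckets to verify the credentials work (requires s3:ListAllMyBuckets).
	buckets, err := client.ListBuckets(context.Background())
	if err != nil {
		log.Fatalln(err)
	}
	for _, bucket := range buckets {
		log.Println(bucket.Name)
	}
}
```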
## Using MinIO Console
To support WebIdentity based login for MinIO Console, set openid configuration and restart MinIO
```
mc admin config set myminio identity_openid config_url="<CONFIG_URL>" client_id="<client_identifier>"
```
```
mc admin service restart myminio
```
Sample URLs for Keycloak are
`config_url` - `http://localhost:8080/auth/realms/demo/.well-known/openid-configuration`
The JWT token returned by the Identity Provider should include a custom claim for the policy; this is required to create an STS user in MinIO. The name of the custom claim could be either `policy` or `<NAMESPACE_PREFIX>policy`. If there is no namespace then `claim_prefix` can be ignored. For example, if the custom claim name is `https://min.io/policy` then `claim_prefix` should be set as `https://min.io/`.
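For illustration, a decoded id_token payload carrying such a claim might look like the following; the issuer, audience, and expiry are hypothetical values, and `readwrite` stands in for whichever policy exists on your MinIO deployment:

```
{
  "iss": "http://localhost:8080/auth/realms/demo",
  "aud": "<client_identifier>",
  "exp": 1609459200,
  "https://min.io/policy": "readwrite"
}
```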
- Open MinIO Console and click `Login with SSO`
- The user will be redirected to the Identity Provider login page
- Upon successful login on the Identity Provider page the user will be automatically logged into MinIO Console.
## Explore Further
- [MinIO Admin Complete Guide](https://docs.min.io/docs/minio-admin-complete-guide.html)
- [The MinIO documentation website](https://docs.min.io)
| docs/sts/web-identity.md | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.0004929471760988235,
0.00018890714272856712,
0.00016240096010733396,
0.0001673947845119983,
0.00007866391388233751
] |
{
"id": 8,
"code_window": [
"\t\t\t}\n",
"\t\t}\n",
"\t}\n",
"\n",
"\t// Start separate goroutine for each TCP listener to handle connection.\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tsend(acceptResult{tcpConn, err, idx})\n"
],
"file_path": "internal/http/listener.go",
"type": "add",
"edit_start_line_idx": 75
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package http
import (
"context"
"fmt"
"net"
"syscall"
)
type acceptResult struct {
conn net.Conn
err error
}
// httpListener - HTTP listener capable of handling multiple server addresses.
type httpListener struct {
tcpListeners []*net.TCPListener // underlying TCP listeners.
acceptCh chan acceptResult // channel where all TCP listeners write accepted connection.
ctx context.Context
ctxCanceler context.CancelFunc
}
// start - starts separate goroutine for each TCP listener. A valid new connection is passed to httpListener.acceptCh.
func (listener *httpListener) start() {
// Closure to send acceptResult to acceptCh.
// It returns true if the result is sent, else false when the listener context is canceled.
send := func(result acceptResult) bool {
select {
case listener.acceptCh <- result:
// Successfully written to acceptCh
return true
case <-listener.ctx.Done():
// As stop signal is received, close accepted connection.
if result.conn != nil {
result.conn.Close()
}
return false
}
}
// Closure to handle single connection.
handleConn := func(tcpConn *net.TCPConn) {
tcpConn.SetKeepAlive(true)
send(acceptResult{tcpConn, nil})
}
// Closure to handle TCPListener until done channel is closed.
handleListener := func(tcpListener *net.TCPListener) {
for {
tcpConn, err := tcpListener.AcceptTCP()
if err != nil {
// Returns when send fails.
if !send(acceptResult{nil, err}) {
return
}
} else {
go handleConn(tcpConn)
}
}
}
// Start separate goroutine for each TCP listener to handle connection.
for _, tcpListener := range listener.tcpListeners {
go handleListener(tcpListener)
}
}
// Accept - reads one of the previously accepted TCP connections from httpListener.acceptCh and returns it.
func (listener *httpListener) Accept() (conn net.Conn, err error) {
select {
case result, ok := <-listener.acceptCh:
if ok {
return result.conn, result.err
}
case <-listener.ctx.Done():
}
return nil, syscall.EINVAL
}
// Close - closes all underlying TCP listeners.
func (listener *httpListener) Close() (err error) {
listener.ctxCanceler()
for i := range listener.tcpListeners {
listener.tcpListeners[i].Close()
}
return nil
}
// Addr - net.Listener interface compatible method returns net.Addr. In case of multiple TCP listeners, it returns '0.0.0.0' as IP address.
func (listener *httpListener) Addr() (addr net.Addr) {
addr = listener.tcpListeners[0].Addr()
if len(listener.tcpListeners) == 1 {
return addr
}
tcpAddr := addr.(*net.TCPAddr)
if ip := net.ParseIP("0.0.0.0"); ip != nil {
tcpAddr.IP = ip
}
addr = tcpAddr
return addr
}
// Addrs - returns all address information of TCP listeners.
func (listener *httpListener) Addrs() (addrs []net.Addr) {
for i := range listener.tcpListeners {
addrs = append(addrs, listener.tcpListeners[i].Addr())
}
return addrs
}
// newHTTPListener - creates a new httpListener object which is interface compatible with net.Listener.
// httpListener is capable of
// * listening on multiple addresses
// * restricting incoming connections to the HTTP protocol only
func newHTTPListener(ctx context.Context, serverAddrs []string) (listener *httpListener, err error) {
var tcpListeners []*net.TCPListener
// Close all opened listeners on error
defer func() {
if err == nil {
return
}
for _, tcpListener := range tcpListeners {
// Ignore error on close.
tcpListener.Close()
}
}()
for _, serverAddr := range serverAddrs {
var l net.Listener
if l, err = listenCfg.Listen(ctx, "tcp", serverAddr); err != nil {
return nil, err
}
tcpListener, ok := l.(*net.TCPListener)
if !ok {
return nil, fmt.Errorf("unexpected listener type found %v, expected net.TCPListener", l)
}
tcpListeners = append(tcpListeners, tcpListener)
}
listener = &httpListener{
tcpListeners: tcpListeners,
acceptCh: make(chan acceptResult, len(tcpListeners)),
}
listener.ctx, listener.ctxCanceler = context.WithCancel(ctx)
listener.start()
return listener, nil
}
| internal/http/listener.go | 1 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.1249525398015976,
0.013566692359745502,
0.00016782690363470465,
0.0054842703975737095,
0.02793903648853302
] |
{
"id": 8,
"code_window": [
"\t\t\t}\n",
"\t\t}\n",
"\t}\n",
"\n",
"\t// Start separate goroutine for each TCP listener to handle connection.\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tsend(acceptResult{tcpConn, err, idx})\n"
],
"file_path": "internal/http/listener.go",
"type": "add",
"edit_start_line_idx": 75
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"errors"
"net/http"
"github.com/minio/kes"
"github.com/minio/madmin-go"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/config"
iampolicy "github.com/minio/pkg/iam/policy"
)
func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Request, action iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return nil, auth.Credentials{}
}
// Validate request signature.
cred, adminAPIErr := checkAdminRequestAuth(ctx, r, action, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return nil, cred
}
return objectAPI, cred
}
// AdminError - is a generic error for all admin APIs.
type AdminError struct {
Code string
Message string
StatusCode int
}
func (ae AdminError) Error() string {
return ae.Message
}
func toAdminAPIErr(ctx context.Context, err error) APIError {
if err == nil {
return noError
}
var apiErr APIError
switch e := err.(type) {
case iampolicy.Error:
apiErr = APIError{
Code: "XMinioMalformedIAMPolicy",
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case config.Error:
apiErr = APIError{
Code: "XMinioConfigError",
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case AdminError:
apiErr = APIError{
Code: e.Code,
Description: e.Message,
HTTPStatusCode: e.StatusCode,
}
default:
switch {
case errors.Is(err, errConfigNotFound):
apiErr = APIError{
Code: "XMinioConfigError",
Description: err.Error(),
HTTPStatusCode: http.StatusNotFound,
}
case errors.Is(err, errIAMActionNotAllowed):
apiErr = APIError{
Code: "XMinioIAMActionNotAllowed",
Description: err.Error(),
HTTPStatusCode: http.StatusForbidden,
}
case errors.Is(err, errIAMNotInitialized):
apiErr = APIError{
Code: "XMinioIAMNotInitialized",
Description: err.Error(),
HTTPStatusCode: http.StatusServiceUnavailable,
}
case errors.Is(err, kes.ErrKeyExists):
apiErr = APIError{
Code: "XMinioKMSKeyExists",
Description: err.Error(),
HTTPStatusCode: http.StatusConflict,
}
// Tier admin API errors
case errors.Is(err, madmin.ErrTierNameEmpty):
apiErr = APIError{
Code: "XMinioAdminTierNameEmpty",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, madmin.ErrTierInvalidConfig):
apiErr = APIError{
Code: "XMinioAdminTierInvalidConfig",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, madmin.ErrTierInvalidConfigVersion):
apiErr = APIError{
Code: "XMinioAdminTierInvalidConfigVersion",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, madmin.ErrTierTypeUnsupported):
apiErr = APIError{
Code: "XMinioAdminTierTypeUnsupported",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errTierBackendInUse):
apiErr = APIError{
Code: "XMinioAdminTierBackendInUse",
Description: err.Error(),
HTTPStatusCode: http.StatusConflict,
}
case errors.Is(err, errTierInsufficientCreds):
apiErr = APIError{
Code: "XMinioAdminTierInsufficientCreds",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errIsTierPermError(err):
apiErr = APIError{
Code: "XMinioAdminTierInsufficientPermissions",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
default:
apiErr = errorCodes.ToAPIErrWithErr(toAdminAPIErrCode(ctx, err), err)
}
}
return apiErr
}
// toAdminAPIErrCode - converts errErasureWriteQuorum error to admin API
// specific error.
func toAdminAPIErrCode(ctx context.Context, err error) APIErrorCode {
switch err {
case errErasureWriteQuorum:
return ErrAdminConfigNoQuorum
default:
return toAPIErrorCode(ctx, err)
}
}
| cmd/admin-handler-utils.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.0024198107421398163,
0.0003535746072884649,
0.00016775507538113743,
0.00018002596334554255,
0.0005199236911721528
] |
{
"id": 8,
"code_window": [
"\t\t\t}\n",
"\t\t}\n",
"\t}\n",
"\n",
"\t// Start separate goroutine for each TCP listener to handle connection.\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tsend(acceptResult{tcpConn, err, idx})\n"
],
"file_path": "internal/http/listener.go",
"type": "add",
"edit_start_line_idx": 75
} | //go:build windows
// +build windows
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package lock
import (
"fmt"
"os"
"path/filepath"
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var (
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
procLockFileEx = modkernel32.NewProc("LockFileEx")
)
const (
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
lockFileExclusiveLock = 2
lockFileFailImmediately = 1
// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
errLockViolation syscall.Errno = 0x21
)
// lockedOpenFile is an internal function.
func lockedOpenFile(path string, flag int, perm os.FileMode, lockType uint32) (*LockedFile, error) {
f, err := Open(path, flag, perm)
if err != nil {
return nil, err
}
if err = lockFile(syscall.Handle(f.Fd()), lockType); err != nil {
f.Close()
return nil, err
}
st, err := os.Stat(path)
if err != nil {
f.Close()
return nil, err
}
if st.IsDir() {
f.Close()
return nil, &os.PathError{
Op: "open",
Path: path,
Err: syscall.EISDIR,
}
}
return &LockedFile{File: f}, nil
}
// TryLockedOpenFile - tries a new write lock. Functionally
// it is similar to LockedOpenFile with syscall.LOCK_EX
// mode, but along with syscall.LOCK_NB such that the function
// doesn't wait forever and instead returns if it cannot
// acquire a write lock.
func TryLockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
var lockType uint32 = lockFileFailImmediately | lockFileExclusiveLock
switch flag {
case syscall.O_RDONLY:
// https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/nf-fileapi-lockfileex
//lint:ignore SA4016 Reasons
lockType = lockFileFailImmediately | 0 // Set this to enable shared lock and fail immediately.
}
return lockedOpenFile(path, flag, perm, lockType)
}
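// Illustrative usage sketch (hypothetical call site, not part of the original
// source), showing the non-blocking acquisition pattern this function enables:
//
//	lf, err := TryLockedOpenFile(`C:\minio\format.json`, os.O_RDWR, 0o644)
//	if err != nil {
//		// Another process holds the lock; back off instead of blocking.
//		return err
//	}
//	defer lf.Close()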
// LockedOpenFile - initializes a new lock and protects
// the file from concurrent access.
func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
var lockType uint32 = lockFileExclusiveLock
switch flag {
case syscall.O_RDONLY:
// https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/nf-fileapi-lockfileex
lockType = 0 // Set this to enable shared lock.
}
return lockedOpenFile(path, flag, perm, lockType)
}
// fixLongPath returns the extended-length (\\?\-prefixed) form of
// path when needed, in order to avoid the default 260 character file
// path limit imposed by Windows. If path is not easily converted to
// the extended-length form (for example, if path is a relative path
// or contains .. elements), or is short enough, fixLongPath returns
// path unmodified.
//
// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
func fixLongPath(path string) string {
// Do nothing (and don't allocate) if the path is "short".
// Empirically (at least on the Windows Server 2013 builder),
// the kernel is arbitrarily okay with < 248 bytes. That
// matches what the docs above say:
// "When using an API to create a directory, the specified
// path cannot be so long that you cannot append an 8.3 file
// name (that is, the directory name cannot exceed MAX_PATH
// minus 12)." Since MAX_PATH is 260, 260 - 12 = 248.
//
// The MSDN docs appear to say that a normal path that is 248 bytes long
// will work; empirically the path must be less than 248 bytes long.
if len(path) < 248 {
// Don't fix. (This is how Go 1.7 and earlier worked,
// not automatically generating the \\?\ form)
return path
}
// The extended form begins with \\?\, as in
// \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt.
// The extended form disables evaluation of . and .. path
// elements and disables the interpretation of / as equivalent
// to \. The conversion here rewrites / to \ and elides
// . elements as well as trailing or duplicate separators. For
// simplicity it avoids the conversion entirely for relative
// paths or paths containing .. elements. For now,
// \\server\share paths are not converted to
// \\?\UNC\server\share paths because the rules for doing so
// are less well-specified.
if len(path) >= 2 && path[:2] == `\\` {
// Don't canonicalize UNC paths.
return path
}
if !filepath.IsAbs(path) {
// Relative path
return path
}
const prefix = `\\?`
pathbuf := make([]byte, len(prefix)+len(path)+len(`\`))
copy(pathbuf, prefix)
n := len(path)
r, w := 0, len(prefix)
for r < n {
switch {
case os.IsPathSeparator(path[r]):
// empty block
r++
case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])):
// /./
r++
case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):
// /../ is currently unhandled
return path
default:
pathbuf[w] = '\\'
w++
for ; r < n && !os.IsPathSeparator(path[r]); r++ {
pathbuf[w] = path[r]
w++
}
}
}
// A drive's root directory needs a trailing \
if w == len(`\\?\c:`) {
pathbuf[w] = '\\'
w++
}
return string(pathbuf[:w])
}
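// Illustrative behavior (hypothetical paths, not part of the original source):
//
//	fixLongPath(`C:\short\file.txt`) // returned unmodified (< 248 bytes)
//	fixLongPath(`\\server\share\x`)  // returned unmodified (UNC path)
//	fixLongPath(`C:\` + longName)    // rewritten to `\\?\C:\` + longName when >= 248 bytes
//
// where longName stands for any path remainder that pushes the total length
// past the 248-byte threshold.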
// Open - the perm param is ignored; on Windows, file perms/NT ACLs
// are not octet combinations. Providing access to NT
// ACLs is out of scope here.
func Open(path string, flag int, perm os.FileMode) (*os.File, error) {
if path == "" {
return nil, syscall.ERROR_FILE_NOT_FOUND
}
pathp, err := syscall.UTF16PtrFromString(fixLongPath(path))
if err != nil {
return nil, err
}
var access uint32
switch flag {
case syscall.O_RDONLY:
access = syscall.GENERIC_READ
case syscall.O_WRONLY:
access = syscall.GENERIC_WRITE
case syscall.O_RDWR:
fallthrough
case syscall.O_RDWR | syscall.O_CREAT:
fallthrough
case syscall.O_WRONLY | syscall.O_CREAT:
access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
case syscall.O_WRONLY | syscall.O_CREAT | syscall.O_APPEND:
access = syscall.FILE_APPEND_DATA
default:
return nil, fmt.Errorf("Unsupported flag (%d)", flag)
}
var createflag uint32
switch {
case flag&syscall.O_CREAT == syscall.O_CREAT:
createflag = syscall.OPEN_ALWAYS
default:
createflag = syscall.OPEN_EXISTING
}
shareflag := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE)
accessAttr := uint32(syscall.FILE_ATTRIBUTE_NORMAL | 0x80000000)
fd, err := syscall.CreateFile(pathp, access, shareflag, nil, createflag, accessAttr, 0)
if err != nil {
return nil, err
}
return os.NewFile(uintptr(fd), path), nil
}
func lockFile(fd syscall.Handle, flags uint32) error {
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
if fd == syscall.InvalidHandle {
return nil
}
err := lockFileEx(fd, flags, 1, 0, &syscall.Overlapped{})
if err == nil {
return nil
} else if err.Error() == "The process cannot access the file because another process has locked a portion of the file." {
return ErrAlreadyLocked
} else if err != errLockViolation {
return err
}
return nil
}
func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
var reserved = uint32(0)
r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags),
uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
| internal/lock/lock_windows.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.0003972222039010376,
0.00020308318198658526,
0.00016705386224202812,
0.00018413644284009933,
0.000049958409363171086
] |
{
"id": 8,
"code_window": [
"\t\t\t}\n",
"\t\t}\n",
"\t}\n",
"\n",
"\t// Start separate goroutine for each TCP listener to handle connection.\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tsend(acceptResult{tcpConn, err, idx})\n"
],
"file_path": "internal/http/listener.go",
"type": "add",
"edit_start_line_idx": 75
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"fmt"
"os"
"path"
)
// Wrapper function for os.RemoveAll, which calls reliableRemoveAll;
// this is to ensure that if there is a racy parent directory
// create in between, we can simply retry the operation.
func removeAll(dirPath string) (err error) {
if dirPath == "" {
return errInvalidArgument
}
if err = checkPathLength(dirPath); err != nil {
return err
}
if err = reliableRemoveAll(dirPath); err != nil {
switch {
case isSysErrNotDir(err):
// File path cannot be verified since one of
// the parents is a file.
return errFileAccessDenied
case isSysErrPathNotFound(err):
// This is a special case should be handled only for
// windows, because windows API does not return "not a
// directory" error message. Handle this specifically
// here.
return errFileAccessDenied
}
}
return err
}
// Reliably retries os.RemoveAll if for some reason os.RemoveAll returns
// syscall.ENOTEMPTY (the directory still has children).
func reliableRemoveAll(dirPath string) (err error) {
i := 0
for {
// Removes all the directories and files.
if err = RemoveAll(dirPath); err != nil {
// Retry only for the first retryable error.
if isSysErrNotEmpty(err) && i == 0 {
i++
continue
}
}
break
}
return err
}
// Wrapper function for os.MkdirAll, which calls reliableMkdirAll;
// this is to ensure that if there is a racy parent directory
// delete in between, we can simply retry the operation.
func mkdirAll(dirPath string, mode os.FileMode) (err error) {
if dirPath == "" {
return errInvalidArgument
}
if err = checkPathLength(dirPath); err != nil {
return err
}
if err = reliableMkdirAll(dirPath, mode); err != nil {
// File path cannot be verified since one of the parents is a file.
if isSysErrNotDir(err) {
return errFileAccessDenied
} else if isSysErrPathNotFound(err) {
// This is a special case should be handled only for
// windows, because windows API does not return "not a
// directory" error message. Handle this specifically here.
return errFileAccessDenied
}
return osErrToFileErr(err)
}
return nil
}
// Reliably retries os.MkdirAll if for some reason os.MkdirAll returns
// syscall.ENOENT (parent does not exist).
func reliableMkdirAll(dirPath string, mode os.FileMode) (err error) {
i := 0
for {
// Creates all the parent directories with mode 0777; mkdir honors the system umask.
if err = MkdirAll(dirPath, mode); err != nil {
// Retry only for the first retryable error.
if osIsNotExist(err) && i == 0 {
i++
continue
}
}
break
}
return err
}
// Wrapper function for os.Rename, which calls reliableMkdirAll
// and reliableRename. This is to ensure that if there is a
// racy parent directory delete in between, we can simply retry
// the operation.
func renameAll(srcFilePath, dstFilePath string) (err error) {
if srcFilePath == "" || dstFilePath == "" {
return errInvalidArgument
}
if err = checkPathLength(srcFilePath); err != nil {
return err
}
if err = checkPathLength(dstFilePath); err != nil {
return err
}
if err = reliableRename(srcFilePath, dstFilePath); err != nil {
switch {
case isSysErrNotDir(err) && !osIsNotExist(err):
// Windows can have both isSysErrNotDir(err) and osIsNotExist(err) returning
// true if the source file path contains a nonexistent directory. In that case,
// we want to return errFileNotFound instead, which will be honored in subsequent
// switch cases
return errFileAccessDenied
case isSysErrPathNotFound(err):
// This is a special case should be handled only for
// windows, because windows API does not return "not a
// directory" error message. Handle this specifically here.
return errFileAccessDenied
case isSysErrCrossDevice(err):
return fmt.Errorf("%w (%s)->(%s)", errCrossDeviceLink, srcFilePath, dstFilePath)
case osIsNotExist(err):
return errFileNotFound
case osIsExist(err):
// This is returned only when destination is a directory and we
// are attempting a rename from file to directory.
return errIsNotRegular
default:
return err
}
}
return nil
}
// Reliably retries os.Rename if for some reason os.Rename returns
// syscall.ENOENT (parent does not exist).
func reliableRename(srcFilePath, dstFilePath string) (err error) {
if err = reliableMkdirAll(path.Dir(dstFilePath), 0777); err != nil {
return err
}
i := 0
for {
// After a successful parent directory create, attempt the rename.
if err = Rename(srcFilePath, dstFilePath); err != nil {
// Retry only for the first retryable error.
if osIsNotExist(err) && i == 0 {
i++
continue
}
}
break
}
return err
}
| cmd/os-reliable.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.0002870767784770578,
0.00019930233247578144,
0.00016775507538113743,
0.00018338982772547752,
0.00003565123552107252
] |
{
"id": 9,
"code_window": [
"\t\t}\n",
"\t}\n",
"\n",
"\t// Start separate goroutine for each TCP listener to handle connection.\n",
"\tfor _, tcpListener := range listener.tcpListeners {\n",
"\t\tgo handleListener(tcpListener)\n",
"\t}\n",
"}\n",
"\n",
"// Accept - reads from httpListener.acceptCh for one of previously accepted TCP connection and returns the same.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfor idx, tcpListener := range listener.tcpListeners {\n",
"\t\tgo handleListener(idx, tcpListener)\n"
],
"file_path": "internal/http/listener.go",
"type": "replace",
"edit_start_line_idx": 79
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package http
import (
"context"
"fmt"
"net"
"syscall"
)
type acceptResult struct {
conn net.Conn
err error
}
// httpListener - HTTP listener capable of handling multiple server addresses.
type httpListener struct {
tcpListeners []*net.TCPListener // underlying TCP listeners.
acceptCh chan acceptResult // channel where all TCP listeners write accepted connection.
ctx context.Context
ctxCanceler context.CancelFunc
}
// start - starts separate goroutine for each TCP listener. A valid new connection is passed to httpListener.acceptCh.
func (listener *httpListener) start() {
// Closure to send acceptResult to acceptCh.
// It returns true if the result is sent, else false when the listener context is done.
send := func(result acceptResult) bool {
select {
case listener.acceptCh <- result:
// Successfully written to acceptCh
return true
case <-listener.ctx.Done():
// As stop signal is received, close accepted connection.
if result.conn != nil {
result.conn.Close()
}
return false
}
}
// Closure to handle single connection.
handleConn := func(tcpConn *net.TCPConn) {
tcpConn.SetKeepAlive(true)
send(acceptResult{tcpConn, nil})
}
// Closure to handle a TCPListener until the listener context is canceled.
handleListener := func(tcpListener *net.TCPListener) {
for {
tcpConn, err := tcpListener.AcceptTCP()
if err != nil {
// Returns when send fails.
if !send(acceptResult{nil, err}) {
return
}
} else {
go handleConn(tcpConn)
}
}
}
// Start separate goroutine for each TCP listener to handle connection.
for _, tcpListener := range listener.tcpListeners {
go handleListener(tcpListener)
}
}
// Accept - reads from httpListener.acceptCh for one of previously accepted TCP connection and returns the same.
func (listener *httpListener) Accept() (conn net.Conn, err error) {
select {
case result, ok := <-listener.acceptCh:
if ok {
return result.conn, result.err
}
case <-listener.ctx.Done():
}
return nil, syscall.EINVAL
}
// Close - closes underneath all TCP listeners.
func (listener *httpListener) Close() (err error) {
listener.ctxCanceler()
for i := range listener.tcpListeners {
listener.tcpListeners[i].Close()
}
return nil
}
// Addr - net.Listener interface compatible method returns net.Addr. In case of multiple TCP listeners, it returns '0.0.0.0' as IP address.
func (listener *httpListener) Addr() (addr net.Addr) {
addr = listener.tcpListeners[0].Addr()
if len(listener.tcpListeners) == 1 {
return addr
}
tcpAddr := addr.(*net.TCPAddr)
if ip := net.ParseIP("0.0.0.0"); ip != nil {
tcpAddr.IP = ip
}
addr = tcpAddr
return addr
}
// Addrs - returns all address information of TCP listeners.
func (listener *httpListener) Addrs() (addrs []net.Addr) {
for i := range listener.tcpListeners {
addrs = append(addrs, listener.tcpListeners[i].Addr())
}
return addrs
}
// newHTTPListener - creates a new httpListener object which is interface-compatible with net.Listener.
// httpListener is capable of
// * listening on multiple addresses
// * accepting incoming connections speaking only the HTTP protocol
func newHTTPListener(ctx context.Context, serverAddrs []string) (listener *httpListener, err error) {
var tcpListeners []*net.TCPListener
// Close all opened listeners on error
defer func() {
if err == nil {
return
}
for _, tcpListener := range tcpListeners {
// Ignore error on close.
tcpListener.Close()
}
}()
for _, serverAddr := range serverAddrs {
var l net.Listener
if l, err = listenCfg.Listen(ctx, "tcp", serverAddr); err != nil {
return nil, err
}
tcpListener, ok := l.(*net.TCPListener)
if !ok {
return nil, fmt.Errorf("unexpected listener type found %v, expected net.TCPListener", l)
}
tcpListeners = append(tcpListeners, tcpListener)
}
listener = &httpListener{
tcpListeners: tcpListeners,
acceptCh: make(chan acceptResult, len(tcpListeners)),
}
listener.ctx, listener.ctxCanceler = context.WithCancel(ctx)
listener.start()
return listener, nil
}
| internal/http/listener.go | 1 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.9971418976783752,
0.21812067925930023,
0.0001746610359987244,
0.012570705264806747,
0.3854805827140808
] |
{
"id": 9,
"code_window": [
"\t\t}\n",
"\t}\n",
"\n",
"\t// Start separate goroutine for each TCP listener to handle connection.\n",
"\tfor _, tcpListener := range listener.tcpListeners {\n",
"\t\tgo handleListener(tcpListener)\n",
"\t}\n",
"}\n",
"\n",
"// Accept - reads from httpListener.acceptCh for one of previously accepted TCP connection and returns the same.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfor idx, tcpListener := range listener.tcpListeners {\n",
"\t\tgo handleListener(idx, tcpListener)\n"
],
"file_path": "internal/http/listener.go",
"type": "replace",
"edit_start_line_idx": 79
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"encoding/hex"
"errors"
"fmt"
"io"
"math/rand"
"net"
"net/http"
"path"
"runtime"
"strconv"
"strings"
"sync"
"time"
"unicode/utf8"
"github.com/google/uuid"
"github.com/klauspost/compress/s2"
"github.com/klauspost/readahead"
"github.com/minio/minio-go/v7/pkg/s3utils"
"github.com/minio/minio/internal/config/compress"
"github.com/minio/minio/internal/config/dns"
"github.com/minio/minio/internal/config/storageclass"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/trie"
"github.com/minio/pkg/wildcard"
)
const (
// MinIO meta bucket.
minioMetaBucket = ".minio.sys"
// Multipart meta prefix.
mpartMetaPrefix = "multipart"
// MinIO Multipart meta prefix.
minioMetaMultipartBucket = minioMetaBucket + SlashSeparator + mpartMetaPrefix
// MinIO tmp meta prefix.
minioMetaTmpBucket = minioMetaBucket + "/tmp"
// MinIO tmp meta prefix for deleted objects.
minioMetaTmpDeletedBucket = minioMetaTmpBucket + "/.trash"
minioMetaSpeedTestBucket = minioMetaBucket + "/speedtest"
minioMetaSpeedTestBucketPrefix = "objects/"
// DNS separator (period), used for bucket name validation.
dnsDelimiter = "."
// Read-ahead is enabled on compressed files bigger than this.
compReadAheadSize = 100 << 20
// Read this many buffers ahead.
compReadAheadBuffers = 5
// Size of each buffer.
compReadAheadBufSize = 1 << 20
)
// isMinioMetaBucketName returns true if the given bucket is a MinIO internal
// bucket and false otherwise.
func isMinioMetaBucketName(bucket string) bool {
return strings.HasPrefix(bucket, minioMetaBucket)
}
// IsValidBucketName verifies that a bucket name is in accordance with
// Amazon's requirements (i.e. DNS naming conventions). It must be 3-63
// characters long, and it must be a sequence of one or more labels
// separated by periods. Each label can contain lowercase ascii
// letters, decimal digits and hyphens, but must not begin or end with
// a hyphen. See:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
func IsValidBucketName(bucket string) bool {
// Special case when bucket is equal to one of the meta buckets.
if isMinioMetaBucketName(bucket) {
return true
}
if len(bucket) < 3 || len(bucket) > 63 {
return false
}
// Split on dot and check each piece conforms to rules.
allNumbers := true
pieces := strings.Split(bucket, dnsDelimiter)
for _, piece := range pieces {
if len(piece) == 0 || piece[0] == '-' ||
piece[len(piece)-1] == '-' {
// Current piece has 0-length or starts or
// ends with a hyphen.
return false
}
// Now only need to check if each piece is a valid
// 'label' in AWS terminology and if the bucket looks
// like an IP address.
isNotNumber := false
for i := 0; i < len(piece); i++ {
switch {
case (piece[i] >= 'a' && piece[i] <= 'z' ||
piece[i] == '-'):
// Found a non-digit character, so
// this piece is not a number.
isNotNumber = true
case piece[i] >= '0' && piece[i] <= '9':
// Nothing to do.
default:
// Found invalid character.
return false
}
}
allNumbers = allNumbers && !isNotNumber
}
// Does the bucket name look like an IP address?
return !(len(pieces) == 4 && allNumbers)
}
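// Examples (illustrative only, not part of the original source): "my-bucket"
// and "my.bucket.v2" are valid; "bu" (too short), "-bucket" (leading hyphen),
// "my_bucket" (underscore) and "192.168.5.4" (looks like an IP address) are
// all rejected.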
// IsValidObjectName verifies an object name in accordance with Amazon's
// requirements. It cannot exceed 1024 characters and must be a valid UTF8
// string.
//
// See:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
//
// You should avoid the following characters in a key name because of
// significant special handling for consistency across all
// applications.
//
// Rejects strings with following characters.
//
// - Backslash ("\")
//
// Additionally, MinIO does not support object names with a trailing SlashSeparator.
func IsValidObjectName(object string) bool {
if len(object) == 0 {
return false
}
if HasSuffix(object, SlashSeparator) {
return false
}
return IsValidObjectPrefix(object)
}
// IsValidObjectPrefix verifies whether the prefix is a valid object name.
// It's valid to have an empty prefix.
func IsValidObjectPrefix(object string) bool {
if hasBadPathComponent(object) {
return false
}
if !utf8.ValidString(object) {
return false
}
if strings.Contains(object, `//`) {
return false
}
return true
}
// checkObjectNameForLengthAndSlash - checks the validity of the object name length and whether it has a slash prefix
func checkObjectNameForLengthAndSlash(bucket, object string) error {
// Check for the length of object name
if len(object) > 1024 {
return ObjectNameTooLong{
Bucket: bucket,
Object: object,
}
}
// Check for slash as prefix in object name
if HasPrefix(object, SlashSeparator) {
return ObjectNamePrefixAsSlash{
Bucket: bucket,
Object: object,
}
}
if runtime.GOOS == globalWindowsOSName {
// Explicitly disallowed characters on windows.
// Avoids most problematic names.
if strings.ContainsAny(object, `:*?"|<>`) {
return ObjectNameInvalid{
Bucket: bucket,
Object: object,
}
}
}
return nil
}
// SlashSeparator - slash separator.
const SlashSeparator = "/"
// retainSlash - retains slash from a path.
func retainSlash(s string) string {
if s == "" {
return s
}
return strings.TrimSuffix(s, SlashSeparator) + SlashSeparator
}
// pathsJoinPrefix - like pathJoin, retains the trailing SlashSeparator
// for all elements and prepends each of them with 'prefix'.
func pathsJoinPrefix(prefix string, elem ...string) (paths []string) {
paths = make([]string, len(elem))
for i, e := range elem {
paths[i] = pathJoin(prefix, e)
}
return paths
}
// pathJoin - like path.Join() but retains trailing SlashSeparator of the last element
func pathJoin(elem ...string) string {
trailingSlash := ""
if len(elem) > 0 {
if HasSuffix(elem[len(elem)-1], SlashSeparator) {
trailingSlash = SlashSeparator
}
}
return path.Join(elem...) + trailingSlash
}
// mustGetUUID - get a random UUID.
func mustGetUUID() string {
u, err := uuid.NewRandom()
if err != nil {
logger.CriticalIf(GlobalContext, err)
}
return u.String()
}
// Create an S3-compatible MD5sum for a complete multipart transaction.
func getCompleteMultipartMD5(parts []CompletePart) string {
var finalMD5Bytes []byte
for _, part := range parts {
md5Bytes, err := hex.DecodeString(canonicalizeETag(part.ETag))
if err != nil {
finalMD5Bytes = append(finalMD5Bytes, []byte(part.ETag)...)
} else {
finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
}
}
s3MD5 := fmt.Sprintf("%s-%d", getMD5Hash(finalMD5Bytes), len(parts))
return s3MD5
}
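// Worked example (hypothetical ETags, not from the original source): for two
// parts with ETags e1 and e2, the result is
//
//	hex(md5(unhex(e1) || unhex(e2))) + "-2"
//
// i.e. the md5 of the concatenated raw part digests, suffixed with the part
// count, matching the familiar "<hex>-N" multipart ETag shape.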
// Clean unwanted fields from metadata
func cleanMetadata(metadata map[string]string) map[string]string {
// Remove STANDARD StorageClass
metadata = removeStandardStorageClass(metadata)
// Clean meta etag keys 'md5Sum', 'etag', "expires", "x-amz-tagging".
return cleanMetadataKeys(metadata, "md5Sum", "etag", "expires", xhttp.AmzObjectTagging, "last-modified", VersionPurgeStatusKey)
}
// Filter X-Amz-Storage-Class field only if it is set to STANDARD.
// This is done since AWS S3 doesn't return STANDARD Storage class as response header.
func removeStandardStorageClass(metadata map[string]string) map[string]string {
if metadata[xhttp.AmzStorageClass] == storageclass.STANDARD {
delete(metadata, xhttp.AmzStorageClass)
}
return metadata
}
// cleanMetadataKeys takes keyNames to be filtered
// and returns a new map with all the entries with keyNames removed.
func cleanMetadataKeys(metadata map[string]string, keyNames ...string) map[string]string {
var newMeta = make(map[string]string, len(metadata))
for k, v := range metadata {
if contains(keyNames, k) {
continue
}
newMeta[k] = v
}
return newMeta
}
// Extracts etag value from the metadata.
func extractETag(metadata map[string]string) string {
etag, ok := metadata["etag"]
if !ok {
// md5Sum tag is kept for backward compatibility.
etag = metadata["md5Sum"]
}
// Success.
return etag
}
// HasPrefix - Prefix matcher string matches prefix in a platform specific way.
// For example on windows since its case insensitive we are supposed
// to do case insensitive checks.
func HasPrefix(s string, prefix string) bool {
if runtime.GOOS == globalWindowsOSName {
return strings.HasPrefix(strings.ToLower(s), strings.ToLower(prefix))
}
return strings.HasPrefix(s, prefix)
}
// HasSuffix - reports whether s ends with suffix, in a platform-specific way.
// For example on Windows, since it is case-insensitive, we are supposed
// to do case insensitive checks.
func HasSuffix(s string, suffix string) bool {
if runtime.GOOS == globalWindowsOSName {
return strings.HasSuffix(strings.ToLower(s), strings.ToLower(suffix))
}
return strings.HasSuffix(s, suffix)
}
// Validates if two strings are equal.
func isStringEqual(s1 string, s2 string) bool {
if runtime.GOOS == globalWindowsOSName {
return strings.EqualFold(s1, s2)
}
return s1 == s2
}
// Ignores all reserved bucket names or invalid bucket names.
func isReservedOrInvalidBucket(bucketEntry string, strict bool) bool {
if bucketEntry == "" {
return true
}
bucketEntry = strings.TrimSuffix(bucketEntry, SlashSeparator)
if strict {
if err := s3utils.CheckValidBucketNameStrict(bucketEntry); err != nil {
return true
}
} else {
if err := s3utils.CheckValidBucketName(bucketEntry); err != nil {
return true
}
}
return isMinioMetaBucket(bucketEntry) || isMinioReservedBucket(bucketEntry)
}
// Returns true if input bucket is a reserved minio meta bucket '.minio.sys'.
func isMinioMetaBucket(bucketName string) bool {
return bucketName == minioMetaBucket
}
// Returns true if input bucket is a reserved minio bucket 'minio'.
func isMinioReservedBucket(bucketName string) bool {
return bucketName == minioReservedBucket
}
// returns a slice of hosts by reading a slice of DNS records
func getHostsSlice(records []dns.SrvRecord) []string {
hosts := make([]string, len(records))
for i, r := range records {
hosts[i] = net.JoinHostPort(r.Host, string(r.Port))
}
return hosts
}
// returns an online host (and corresponding port) from a slice of DNS records
func getHostFromSrv(records []dns.SrvRecord) (host string) {
hosts := getHostsSlice(records)
rng := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
var d net.Dialer
var retry int
for retry < len(hosts) {
ctx, cancel := context.WithTimeout(GlobalContext, 300*time.Millisecond)
host = hosts[rng.Intn(len(hosts))]
conn, err := d.DialContext(ctx, "tcp", host)
cancel()
if err != nil {
retry++
continue
}
conn.Close()
break
}
return host
}
// IsCompressed returns true if the object is marked as compressed.
func (o ObjectInfo) IsCompressed() bool {
_, ok := o.UserDefined[ReservedMetadataPrefix+"compression"]
return ok
}
// IsCompressedOK returns whether the object is compressed and can be decompressed.
func (o ObjectInfo) IsCompressedOK() (bool, error) {
scheme, ok := o.UserDefined[ReservedMetadataPrefix+"compression"]
if !ok {
return false, nil
}
switch scheme {
case compressionAlgorithmV1, compressionAlgorithmV2:
return true, nil
}
return true, fmt.Errorf("unknown compression scheme: %s", scheme)
}
// GetActualETag - returns the actual etag of the stored object
// decrypts SSE objects.
func (o ObjectInfo) GetActualETag(h http.Header) string {
if _, ok := crypto.IsEncrypted(o.UserDefined); !ok {
return o.ETag
}
return getDecryptedETag(h, o, false)
}
// GetActualSize - returns the actual size of the stored object
func (o ObjectInfo) GetActualSize() (int64, error) {
if o.IsCompressed() {
sizeStr, ok := o.UserDefined[ReservedMetadataPrefix+"actual-size"]
if !ok {
return -1, errInvalidDecompressedSize
}
size, err := strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
return -1, errInvalidDecompressedSize
}
return size, nil
}
if _, ok := crypto.IsEncrypted(o.UserDefined); ok {
return o.DecryptedSize()
}
return o.Size, nil
}
// Compression is disabled for encrypted requests.
// Using compression and encryption together opens room for side-channel attacks.
// Non-compressible objects are eliminated by extension/content-type.
func isCompressible(header http.Header, object string) bool {
globalCompressConfigMu.Lock()
cfg := globalCompressConfig
globalCompressConfigMu.Unlock()
_, ok := crypto.IsRequested(header)
if !cfg.Enabled || (ok && !cfg.AllowEncrypted) || excludeForCompression(header, object, cfg) {
return false
}
return true
}
// Eliminate the non-compressible objects.
func excludeForCompression(header http.Header, object string, cfg compress.Config) bool {
objStr := object
contentType := header.Get(xhttp.ContentType)
if !cfg.Enabled {
return true
}
// We strictly disable compression for standard extensions/content-types (`compressed`).
if hasStringSuffixInSlice(objStr, standardExcludeCompressExtensions) || hasPattern(standardExcludeCompressContentTypes, contentType) {
return true
}
// Filter compression includes.
exclude := len(cfg.Extensions) > 0 || len(cfg.MimeTypes) > 0
if len(cfg.Extensions) > 0 && hasStringSuffixInSlice(objStr, cfg.Extensions) {
exclude = false
}
if len(cfg.MimeTypes) > 0 && hasPattern(cfg.MimeTypes, contentType) {
exclude = false
}
return exclude
}
// Utility which returns if a string is present in the list.
// Comparison is case insensitive.
func hasStringSuffixInSlice(str string, list []string) bool {
str = strings.ToLower(str)
for _, v := range list {
if strings.HasSuffix(str, strings.ToLower(v)) {
return true
}
}
return false
}
// Returns true if any of the given wildcard patterns match the matchStr.
func hasPattern(patterns []string, matchStr string) bool {
for _, pattern := range patterns {
if ok := wildcard.MatchSimple(pattern, matchStr); ok {
return true
}
}
return false
}
// Returns the part file name which matches the partNumber and etag.
func getPartFile(entriesTrie *trie.Trie, partNumber int, etag string) (partFile string) {
for _, match := range entriesTrie.PrefixMatch(fmt.Sprintf("%.5d.%s.", partNumber, etag)) {
partFile = match
break
}
return partFile
}
func partNumberToRangeSpec(oi ObjectInfo, partNumber int) *HTTPRangeSpec {
if oi.Size == 0 || len(oi.Parts) == 0 {
return nil
}
var start int64
var end = int64(-1)
for i := 0; i < len(oi.Parts) && i < partNumber; i++ {
start = end + 1
end = start + oi.Parts[i].ActualSize - 1
}
return &HTTPRangeSpec{Start: start, End: end}
}
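// Worked example (hypothetical part sizes, not from the original source):
// with parts of ActualSize 10, 20 and 30, partNumber=2 walks the first two
// parts, leaving start=10 and end=29, i.e. the byte range of the second part
// within the decompressed object.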
// Returns the compressed offset which should be skipped.
// If encrypted, offsets are adjusted for encrypted block headers/trailers.
// Since decompression happens after decryption, the encryption overhead is only added to compressedOffset.
func getCompressedOffsets(objectInfo ObjectInfo, offset int64) (compressedOffset int64, partSkip int64, firstPart int) {
var skipLength int64
var cumulativeActualSize int64
var firstPartIdx int
if len(objectInfo.Parts) > 0 {
for i, part := range objectInfo.Parts {
cumulativeActualSize += part.ActualSize
if cumulativeActualSize <= offset {
compressedOffset += part.Size
} else {
firstPartIdx = i
skipLength = cumulativeActualSize - part.ActualSize
break
}
}
}
return compressedOffset, offset - skipLength, firstPartIdx
}
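// Worked example (hypothetical sizes, not from the original source): with
// three parts of ActualSize 10, 10, 10 and compressed Size 4, 5, 6, an offset
// of 25 skips the first two parts entirely (compressedOffset = 4+5 = 9) and
// returns partSkip = 25-20 = 5 bytes into the decompressed third part, with
// firstPart = 2.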
// GetObjectReader is a type that wraps a reader with a lock to
// provide a ReadCloser interface that unlocks on Close()
type GetObjectReader struct {
io.Reader
ObjInfo ObjectInfo
cleanUpFns []func()
opts ObjectOptions
once sync.Once
}
// WithCleanupFuncs sets additional cleanup functions to be called when closing
// the GetObjectReader.
func (g *GetObjectReader) WithCleanupFuncs(fns ...func()) *GetObjectReader {
g.cleanUpFns = append(g.cleanUpFns, fns...)
return g
}
// NewGetObjectReaderFromReader sets up a GetObjectReader with a given
// reader. This ignores any object properties.
func NewGetObjectReaderFromReader(r io.Reader, oi ObjectInfo, opts ObjectOptions, cleanupFns ...func()) (*GetObjectReader, error) {
if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
// Call the cleanup funcs
for i := len(cleanupFns) - 1; i >= 0; i-- {
cleanupFns[i]()
}
return nil, PreConditionFailed{}
}
return &GetObjectReader{
ObjInfo: oi,
Reader: r,
cleanUpFns: cleanupFns,
opts: opts,
}, nil
}
// ObjReaderFn is a function type that takes a reader and returns
// GetObjectReader and an error. Request headers are passed to provide
// encryption parameters. cleanupFns allow cleanup funcs to be
// registered for calling after usage of the reader.
type ObjReaderFn func(inputReader io.Reader, h http.Header, cleanupFns ...func()) (r *GetObjectReader, err error)
// NewGetObjectReader creates a new GetObjectReader. The cleanUpFns
// are called on Close() in reverse (LIFO) order as passed to ObjReaderFn(). NOTE: It is
// assumed that clean up functions do not panic (otherwise, they may
// not all run!).
func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) (
fn ObjReaderFn, off, length int64, err error) {
if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
return nil, 0, 0, PreConditionFailed{}
}
if rs == nil && opts.PartNumber > 0 {
rs = partNumberToRangeSpec(oi, opts.PartNumber)
}
_, isEncrypted := crypto.IsEncrypted(oi.UserDefined)
isCompressed, err := oi.IsCompressedOK()
if err != nil {
return nil, 0, 0, err
}
// if object is encrypted and it is a restore request, fetch content without decrypting.
if opts.Transition.RestoreRequest != nil {
isEncrypted = false
isCompressed = false
}
// Calculate range to read (different for encrypted/compressed objects)
switch {
case isCompressed:
var firstPart int
if opts.PartNumber > 0 {
// firstPart is an index to Parts slice,
// make sure that PartNumber uses the
// index value properly.
firstPart = opts.PartNumber - 1
}
// If compressed, we start from the beginning of the part.
// Read the decompressed size from the meta.json.
actualSize, err := oi.GetActualSize()
if err != nil {
return nil, 0, 0, err
}
off, length = int64(0), oi.Size
decOff, decLength := int64(0), actualSize
if rs != nil {
off, length, err = rs.GetOffsetLength(actualSize)
if err != nil {
return nil, 0, 0, err
}
// In case of range based queries on multiparts, the offset and length are reduced.
off, decOff, firstPart = getCompressedOffsets(oi, off)
decLength = length
length = oi.Size - off
// For negative length we read everything.
if decLength < 0 {
decLength = actualSize - decOff
}
// Reply back invalid range if the input offset and length fall out of range.
if decOff > actualSize || decOff+decLength > actualSize {
return nil, 0, 0, errInvalidRange
}
}
fn = func(inputReader io.Reader, h http.Header, cFns ...func()) (r *GetObjectReader, err error) {
if isEncrypted {
copySource := h.Get(xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm) != ""
// Attach decrypter on inputReader
inputReader, err = DecryptBlocksRequestR(inputReader, h, 0, firstPart, oi, copySource)
if err != nil {
// Call the cleanup funcs
for i := len(cFns) - 1; i >= 0; i-- {
cFns[i]()
}
return nil, err
}
oi.Size = decLength
}
// Decompression reader.
s2Reader := s2.NewReader(inputReader)
// Apply the skipLen and limit on the decompressed stream.
if decOff > 0 {
if err = s2Reader.Skip(decOff); err != nil {
// Call the cleanup funcs
for i := len(cFns) - 1; i >= 0; i-- {
cFns[i]()
}
return nil, err
}
}
decReader := io.LimitReader(s2Reader, decLength)
if decLength > compReadAheadSize {
rah, err := readahead.NewReaderSize(decReader, compReadAheadBuffers, compReadAheadBufSize)
if err == nil {
decReader = rah
cFns = append([]func(){func() {
rah.Close()
}}, cFns...)
}
}
oi.Size = decLength
// Assemble the GetObjectReader
r = &GetObjectReader{
ObjInfo: oi,
Reader: decReader,
cleanUpFns: cFns,
opts: opts,
}
return r, nil
}
case isEncrypted:
var seqNumber uint32
var partStart int
var skipLen int64
off, length, skipLen, seqNumber, partStart, err = oi.GetDecryptedRange(rs)
if err != nil {
return nil, 0, 0, err
}
var decSize int64
decSize, err = oi.DecryptedSize()
if err != nil {
return nil, 0, 0, err
}
var decRangeLength int64
decRangeLength, err = rs.GetLength(decSize)
if err != nil {
return nil, 0, 0, err
}
// We define a closure that performs decryption given
// a reader that returns the desired range of
// encrypted bytes. The header parameter is used to
// provide encryption parameters.
fn = func(inputReader io.Reader, h http.Header, cFns ...func()) (r *GetObjectReader, err error) {
copySource := h.Get(xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm) != ""
// Attach decrypter on inputReader
var decReader io.Reader
decReader, err = DecryptBlocksRequestR(inputReader, h, seqNumber, partStart, oi, copySource)
if err != nil {
// Call the cleanup funcs
for i := len(cFns) - 1; i >= 0; i-- {
cFns[i]()
}
return nil, err
}
oi.ETag = getDecryptedETag(h, oi, false)
// Apply the skipLen and limit on the
// decrypted stream
decReader = io.LimitReader(ioutil.NewSkipReader(decReader, skipLen), decRangeLength)
// Assemble the GetObjectReader
r = &GetObjectReader{
ObjInfo: oi,
Reader: decReader,
cleanUpFns: cFns,
opts: opts,
}
return r, nil
}
default:
off, length, err = rs.GetOffsetLength(oi.Size)
if err != nil {
return nil, 0, 0, err
}
fn = func(inputReader io.Reader, _ http.Header, cFns ...func()) (r *GetObjectReader, err error) {
r = &GetObjectReader{
ObjInfo: oi,
Reader: inputReader,
cleanUpFns: cFns,
opts: opts,
}
return r, nil
}
}
return fn, off, length, nil
}
// Close - calls the cleanup actions in reverse order
func (g *GetObjectReader) Close() error {
if g == nil {
return nil
}
// sync.Once is used here to ensure that Close() is
// idempotent.
g.once.Do(func() {
for i := len(g.cleanUpFns) - 1; i >= 0; i-- {
g.cleanUpFns[i]()
}
})
return nil
}
// SealMD5CurrFn seals the md5sum with the object encryption key and returns the
// sealed md5sum
type SealMD5CurrFn func([]byte) []byte
// PutObjReader is a type that wraps sio.EncryptReader and
// underlying hash.Reader in a struct
type PutObjReader struct {
*hash.Reader // actual data stream
rawReader *hash.Reader // original data stream
sealMD5Fn SealMD5CurrFn
}
// Size returns the absolute number of bytes the Reader
// will return during reading. It returns -1 for unlimited
// data.
func (p *PutObjReader) Size() int64 {
return p.Reader.Size()
}
// MD5CurrentHexString returns the current MD5Sum or encrypted MD5Sum
// as a hex encoded string
func (p *PutObjReader) MD5CurrentHexString() string {
md5sumCurr := p.rawReader.MD5Current()
var appendHyphen bool
// md5sumCurr is not empty in two scenarios:
// - server is running in strict compatibility mode
// - client set Content-Md5 during PUT operation
if len(md5sumCurr) == 0 {
// md5sumCurr is only empty when we are running
// in non-compatibility mode.
md5sumCurr = make([]byte, 16)
rand.Read(md5sumCurr)
appendHyphen = true
}
if p.sealMD5Fn != nil {
md5sumCurr = p.sealMD5Fn(md5sumCurr)
}
if appendHyphen {
// Make sure to return an ETag string of up to 32 characters, since for SSE
// requests the ETag might be longer, and the code decrypting the
// ETag ignores ETags in multipart form, i.e. <hex>-N
return hex.EncodeToString(md5sumCurr)[:32] + "-1"
}
return hex.EncodeToString(md5sumCurr)
}
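// Illustration (hypothetical values, not from the original source): with a
// client-supplied Content-Md5 this returns a plain 32-hex-digit ETag such as
// "9e107d9d372bb6826bd81d3542a419d6"; without one it returns a random
// 32-hex-digit string suffixed with "-1", so that ETag-decrypting code treats
// it like a multipart ETag and leaves it alone.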
// WithEncryption sets up encrypted reader and the sealing for content md5sum
// using objEncKey. Unsealed md5sum is computed from the rawReader setup when
// NewPutObjReader was called. It returns an error if called on an uninitialized
// PutObjReader.
func (p *PutObjReader) WithEncryption(encReader *hash.Reader, objEncKey *crypto.ObjectKey) (*PutObjReader, error) {
if p.Reader == nil {
return nil, errors.New("put-object reader uninitialized")
}
p.Reader = encReader
p.sealMD5Fn = sealETagFn(*objEncKey)
return p, nil
}
// NewPutObjReader returns a new PutObjReader. It uses given hash.Reader's
// MD5Current method to construct md5sum when requested downstream.
func NewPutObjReader(rawReader *hash.Reader) *PutObjReader {
return &PutObjReader{Reader: rawReader, rawReader: rawReader}
}
func sealETag(encKey crypto.ObjectKey, md5CurrSum []byte) []byte {
var emptyKey [32]byte
if bytes.Equal(encKey[:], emptyKey[:]) {
return md5CurrSum
}
return encKey.SealETag(md5CurrSum)
}
func sealETagFn(key crypto.ObjectKey) SealMD5CurrFn {
fn := func(md5sumcurr []byte) []byte {
return sealETag(key, md5sumcurr)
}
return fn
}
// CleanMinioInternalMetadataKeys removes X-Amz-Meta- prefix from minio internal
// encryption metadata that was sent by minio gateway
func CleanMinioInternalMetadataKeys(metadata map[string]string) map[string]string {
var newMeta = make(map[string]string, len(metadata))
for k, v := range metadata {
if strings.HasPrefix(k, "X-Amz-Meta-X-Minio-Internal-") {
newMeta[strings.TrimPrefix(k, "X-Amz-Meta-")] = v
} else {
newMeta[k] = v
}
}
return newMeta
}
// compressOpts are the options for writing compressed data.
var compressOpts []s2.WriterOption
func init() {
if runtime.GOARCH == "amd64" {
// On amd64 we have assembly and can use stronger compression.
compressOpts = append(compressOpts, s2.WriterBetterCompression())
}
}
// newS2CompressReader will read data from r, compress it and return the compressed data as a Reader.
// Use Close to ensure resources are released on incomplete streams.
//
// Providing the input size 'on' is always recommended so that this function
// works properly, because we do not wish to create an object if the
// client closed the stream prematurely.
func newS2CompressReader(r io.Reader, on int64) io.ReadCloser {
pr, pw := io.Pipe()
// Copy input to compressor
go func() {
comp := s2.NewWriter(pw, compressOpts...)
cn, err := io.Copy(comp, r)
if err != nil {
comp.Close()
pw.CloseWithError(err)
return
}
if on > 0 && on != cn {
// if the client didn't send all the data,
// verify that here.
comp.Close()
pw.CloseWithError(IncompleteBody{})
return
}
// Close the stream.
pw.CloseWithError(comp.Close())
}()
return pr
}
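// Minimal usage sketch (hypothetical call site, not part of the original
// source):
//
//	rc := newS2CompressReader(bytes.NewReader(payload), int64(len(payload)))
//	defer rc.Close() // releases the pipe even if reading stops early
//	compressed, err := io.ReadAll(rc)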
// compressSelfTest performs a self-test to ensure that the compression
// algorithm completes a roundtrip. If any algorithm
// produces an incorrect checksum it fails with a hard error.
//
// compressSelfTest tries to catch any issue in the compression implementation
// early instead of silently corrupting data.
func compressSelfTest() {
// 4 MB block.
// Approx runtime ~30ms
data := make([]byte, 4<<20)
rng := rand.New(rand.NewSource(0))
for i := range data {
// Generate compressible stream...
data[i] = byte(rng.Int63() & 3)
}
failOnErr := func(err error) {
if err != nil {
logger.Fatal(errSelfTestFailure, "compress: error on self-test: %v", err)
}
}
const skip = 2<<20 + 511
r := newS2CompressReader(bytes.NewBuffer(data), int64(len(data)))
b, err := io.ReadAll(r)
failOnErr(err)
failOnErr(r.Close())
// Decompression reader.
s2Reader := s2.NewReader(bytes.NewBuffer(b))
// Apply the skipLen on the decompressed stream.
failOnErr(s2Reader.Skip(skip))
got, err := io.ReadAll(s2Reader)
failOnErr(err)
if !bytes.Equal(got, data[skip:]) {
logger.Fatal(errSelfTestFailure, "compress: self-test roundtrip mismatch.")
}
}
// getDiskInfos returns the disk information for the provided disks.
// If a disk is nil or returns an error, the corresponding result will be nil as well.
func getDiskInfos(ctx context.Context, disks []StorageAPI) []*DiskInfo {
res := make([]*DiskInfo, len(disks))
for i, disk := range disks {
if disk == nil {
continue
}
if di, err := disk.DiskInfo(ctx); err == nil {
res[i] = &di
}
}
return res
}
// hasSpaceFor returns whether the disks in `di` have space for an object of a given size.
func hasSpaceFor(di []*DiskInfo, size int64) bool {
// We multiply the size by 2 to account for erasure coding.
size *= 2
if size < 0 {
// If no size, assume diskAssumeUnknownSize.
size = diskAssumeUnknownSize
}
var available uint64
var total uint64
var nDisks int
for _, disk := range di {
if disk == nil || disk.Total == 0 || (disk.FreeInodes < diskMinInodes && disk.UsedInodes > 0) {
// Disk offline, no inodes or something else is wrong.
continue
}
nDisks++
total += disk.Total
available += disk.Total - disk.Used
}
if nDisks == 0 {
return false
}
// Check we have enough on each disk, ignoring diskFillFraction.
perDisk := size / int64(nDisks)
for _, disk := range di {
if disk == nil || disk.Total == 0 || (disk.FreeInodes < diskMinInodes && disk.UsedInodes > 0) {
continue
}
if int64(disk.Free) <= perDisk {
return false
}
}
// Make sure we can fit "size" onto the disk without getting above the diskFillFraction
if available < uint64(size) {
return false
}
// How much will be left after adding the file.
available -= uint64(size)
// wantLeft is how much space must at least be left.
wantLeft := uint64(float64(total) * (1.0 - diskFillFraction))
return available > wantLeft
}
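// Worked example (hypothetical numbers, not from the original source): a
// 100 MiB PUT against 4 online disks is accounted as size = 200 MiB (the 2x
// erasure-coding allowance above), i.e. 50 MiB per disk; every disk must have
// more than 50 MiB free, and after subtracting the 200 MiB the pooled free
// space must still exceed (1 - diskFillFraction) of the pooled total capacity.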
| cmd/object-api-utils.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.0005848450236953795,
0.00019124186655972153,
0.00016439567843917757,
0.00017164548626169562,
0.0000622150328126736
] |
{
"id": 9,
"code_window": [
"\t\t}\n",
"\t}\n",
"\n",
"\t// Start separate goroutine for each TCP listener to handle connection.\n",
"\tfor _, tcpListener := range listener.tcpListeners {\n",
"\t\tgo handleListener(tcpListener)\n",
"\t}\n",
"}\n",
"\n",
"// Accept - reads from httpListener.acceptCh for one of previously accepted TCP connection and returns the same.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfor idx, tcpListener := range listener.tcpListeners {\n",
"\t\tgo handleListener(idx, tcpListener)\n"
],
"file_path": "internal/http/listener.go",
"type": "replace",
"edit_start_line_idx": 79
} | //go:build linux
// +build linux
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package disk
import (
"os"
"syscall"
"golang.org/x/sys/unix"
)
// Fdatasync - fdatasync() is similar to fsync(), but does not flush modified metadata
// unless that metadata is needed in order to allow a subsequent data retrieval
// to be correctly handled. For example, changes to st_atime or st_mtime
// (respectively, time of last access and time of last modification; see inode(7))
// do not require flushing because they are not necessary for a subsequent data
// read to be handled correctly. On the other hand, a change to the file size
// (st_size, as made by say ftruncate(2)), would require a metadata flush.
//
// The aim of fdatasync() is to reduce disk activity for applications that
// do not require all metadata to be synchronized with the disk.
func Fdatasync(f *os.File) error {
return syscall.Fdatasync(int(f.Fd()))
}
// FadviseDontNeed invalidates page-cache
func FadviseDontNeed(f *os.File) error {
return unix.Fadvise(int(f.Fd()), 0, 0, unix.FADV_DONTNEED)
}
| internal/disk/fdatasync_linux.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.0001771350362105295,
0.00017058746016118675,
0.00016571010928601027,
0.00016847900405991822,
0.0000049349669097864535
] |
{
"id": 9,
"code_window": [
"\t\t}\n",
"\t}\n",
"\n",
"\t// Start separate goroutine for each TCP listener to handle connection.\n",
"\tfor _, tcpListener := range listener.tcpListeners {\n",
"\t\tgo handleListener(tcpListener)\n",
"\t}\n",
"}\n",
"\n",
"// Accept - reads from httpListener.acceptCh for one of previously accepted TCP connection and returns the same.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfor idx, tcpListener := range listener.tcpListeners {\n",
"\t\tgo handleListener(idx, tcpListener)\n"
],
"file_path": "internal/http/listener.go",
"type": "replace",
"edit_start_line_idx": 79
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"encoding/xml"
"io"
"net/http"
"github.com/gorilla/mux"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/bucket/policy"
)
// Data types used for returning dummy access control
// policy XML. These types shouldn't be used elsewhere;
// they are only defined for use in this file.
type grantee struct {
XMLNS string `xml:"xmlns:xsi,attr"`
XMLXSI string `xml:"xsi:type,attr"`
Type string `xml:"Type"`
ID string `xml:"ID,omitempty"`
DisplayName string `xml:"DisplayName,omitempty"`
URI string `xml:"URI,omitempty"`
}
type grant struct {
Grantee grantee `xml:"Grantee"`
Permission string `xml:"Permission"`
}
type accessControlPolicy struct {
XMLName xml.Name `xml:"AccessControlPolicy"`
Owner Owner `xml:"Owner"`
AccessControlList struct {
Grants []grant `xml:"Grant"`
} `xml:"AccessControlList"`
}
// PutBucketACLHandler - PUT Bucket ACL
// -----------------
// This operation uses the ACL subresource
// to set the ACL for a bucket. This is a dummy call that
// only responds with success if the ACL is private.
func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketACL")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
// Allow putBucketACL if policy action is set, since this is a dummy call
// we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Before proceeding validate if bucket exists.
_, err := objAPI.GetBucketInfo(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
aclHeader := r.Header.Get(xhttp.AmzACL)
if aclHeader == "" {
acl := &accessControlPolicy{}
if err = xmlDecoder(r.Body, acl, r.ContentLength); err != nil {
if err == io.EOF {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingSecurityHeader),
r.URL)
return
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if len(acl.AccessControlList.Grants) == 0 {
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL)
return
}
if acl.AccessControlList.Grants[0].Permission != "FULL_CONTROL" {
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL)
return
}
}
if aclHeader != "" && aclHeader != "private" {
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL)
return
}
w.(http.Flusher).Flush()
}
// GetBucketACLHandler - GET Bucket ACL
// -----------------
// This operation uses the ACL
// subresource to return the ACL of a specified bucket.
func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketACL")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
// Allow getBucketACL if policy action is set, since this is a dummy call
// we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Before proceeding validate if bucket exists.
_, err := objAPI.GetBucketInfo(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
acl := &accessControlPolicy{}
acl.AccessControlList.Grants = append(acl.AccessControlList.Grants, grant{
Grantee: grantee{
XMLNS: "http://www.w3.org/2001/XMLSchema-instance",
XMLXSI: "CanonicalUser",
Type: "CanonicalUser",
},
Permission: "FULL_CONTROL",
})
if err := xml.NewEncoder(w).Encode(acl); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
w.(http.Flusher).Flush()
}
// PutObjectACLHandler - PUT Object ACL
// -----------------
// This operation uses the ACL subresource
// to set the ACL for an object. This is a dummy call that
// only responds with success if the ACL is private.
func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutObjectACL")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
// Allow putObjectACL if policy action is set, since this is a dummy call
// we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Before proceeding validate if object exists.
_, err = objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
aclHeader := r.Header.Get(xhttp.AmzACL)
if aclHeader == "" {
acl := &accessControlPolicy{}
if err = xmlDecoder(r.Body, acl, r.ContentLength); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if len(acl.AccessControlList.Grants) == 0 {
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL)
return
}
if acl.AccessControlList.Grants[0].Permission != "FULL_CONTROL" {
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL)
return
}
}
if aclHeader != "" && aclHeader != "private" {
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL)
return
}
w.(http.Flusher).Flush()
}
// GetObjectACLHandler - GET Object ACL
// -----------------
// This operation uses the ACL
// subresource to return the ACL of a specified object.
func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetObjectACL")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
	// Allow getObjectACL if the policy action is set; since this is a dummy
	// call, we simply re-purpose the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
	// Before proceeding, validate that the object exists.
_, err = objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
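	// Same canned private-ACL response as GetBucketACLHandler above.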
acl := &accessControlPolicy{}
acl.AccessControlList.Grants = append(acl.AccessControlList.Grants, grant{
Grantee: grantee{
XMLNS: "http://www.w3.org/2001/XMLSchema-instance",
XMLXSI: "CanonicalUser",
Type: "CanonicalUser",
},
Permission: "FULL_CONTROL",
})
if err := xml.NewEncoder(w).Encode(acl); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
w.(http.Flusher).Flush()
}
| cmd/acl-handlers.go | 0 | https://github.com/minio/minio/commit/acc9645249a3ee61cedff832d003421015cc724d | [
0.022445928305387497,
0.0009944423800334334,
0.0001636490342207253,
0.00017288925300817937,
0.0040571847930550575
] |
{
"id": 0,
"code_window": [
"query T noticetrace\n",
"WITH cte AS (EXPORT INTO CSV 'nodelocal://1/export1/' FROM SELECT * FROM t) SELECT filename FROM cte;\n",
"----\n",
"NOTICE: EXPORT is not the recommended way to move data out of CockroachDB and may be deprecated in the future. Please consider exporting data with changefeeds instead: https://www.cockroachlabs.com/docs/stable/export-data-with-changefeeds\n"
],
"labels": [
"keep",
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"# Regression test for #115290. Correctly handle the case where the Export's\n",
"# input expression has NOT NULL columns that are not part of the presentation of\n",
"# the expression.\n",
"statement ok\n",
"CREATE TABLE t115290 (\n",
" id INT PRIMARY KEY,\n",
" a INT NOT NULL,\n",
" b INT\n",
");\n",
"\n",
"statement ok\n",
"EXPORT INTO PARQUET 'nodelocal://1/export1/' FROM SELECT b FROM t115290 ORDER BY a;"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/export",
"type": "add",
"edit_start_line_idx": 15
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package execbuilder
import (
"bytes"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/cat"
"github.com/cockroachdb/cockroach/pkg/sql/opt/exec"
"github.com/cockroachdb/cockroach/pkg/sql/opt/exec/explain"
"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
"github.com/cockroachdb/cockroach/pkg/sql/opt/xform"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/util/treeprinter"
"github.com/cockroachdb/redact"
)
func (b *Builder) buildCreateTable(
ct *memo.CreateTableExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
schema := b.mem.Metadata().Schema(ct.Schema)
if !ct.Syntax.As() {
root, err := b.factory.ConstructCreateTable(schema, ct.Syntax)
return execPlan{root: root}, colOrdMap{}, err
}
// Construct AS input to CREATE TABLE.
input, inputCols, err := b.buildRelational(ct.Input)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
// Impose ordering and naming on input columns, so that they match the
// order and names of the table columns into which values will be
// inserted.
input, _, err = b.applyPresentation(input, inputCols, ct.InputCols)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
root, err := b.factory.ConstructCreateTableAs(input.root, schema, ct.Syntax)
return execPlan{root: root}, colOrdMap{}, err
}
func (b *Builder) buildCreateView(
cv *memo.CreateViewExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
md := b.mem.Metadata()
schema := md.Schema(cv.Schema)
cols := make(colinfo.ResultColumns, len(cv.Columns))
for i := range cols {
cols[i].Name = cv.Columns[i].Alias
cols[i].Typ = md.ColumnMeta(cv.Columns[i].ID).Type
}
root, err := b.factory.ConstructCreateView(
cv.Syntax,
schema,
cv.ViewQuery,
cols,
cv.Deps,
cv.TypeDeps,
)
return execPlan{root: root}, colOrdMap{}, err
}
func (b *Builder) buildCreateFunction(
cf *memo.CreateFunctionExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
md := b.mem.Metadata()
schema := md.Schema(cf.Schema)
root, err := b.factory.ConstructCreateFunction(
schema,
cf.Syntax,
cf.Deps,
cf.TypeDeps,
)
return execPlan{root: root}, colOrdMap{}, err
}
func (b *Builder) buildExplainOpt(
explain *memo.ExplainExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
fmtFlags := memo.ExprFmtHideAll
switch {
case explain.Options.Flags[tree.ExplainFlagVerbose]:
fmtFlags = memo.ExprFmtHideQualifications | memo.ExprFmtHideScalars |
memo.ExprFmtHideTypes | memo.ExprFmtHideNotNull | memo.ExprFmtHideNotVisibleIndexInfo |
memo.ExprFmtHideFastPathChecks
case explain.Options.Flags[tree.ExplainFlagTypes]:
fmtFlags = memo.ExprFmtHideQualifications | memo.ExprFmtHideNotVisibleIndexInfo
}
redactValues := explain.Options.Flags[tree.ExplainFlagRedact]
// Format the plan here and pass it through to the exec factory.
// If catalog option was passed, show catalog object details for all tables.
var planText bytes.Buffer
if explain.Options.Flags[tree.ExplainFlagCatalog] {
for _, t := range b.mem.Metadata().AllTables() {
tp := treeprinter.New()
cat.FormatTable(b.ctx, b.catalog, t.Table, tp, redactValues)
catStr := tp.String()
if redactValues {
catStr = string(redact.RedactableString(catStr).Redact())
}
planText.WriteString(catStr)
}
// TODO(radu): add views, sequences
}
// If MEMO option was passed, show the memo.
if explain.Options.Flags[tree.ExplainFlagMemo] {
memoStr := b.optimizer.FormatMemo(xform.FmtPretty, redactValues)
if redactValues {
memoStr = string(redact.RedactableString(memoStr).Redact())
}
planText.WriteString(memoStr)
}
f := memo.MakeExprFmtCtx(b.ctx, fmtFlags, redactValues, b.mem, b.catalog)
f.FormatExpr(explain.Input)
planStr := f.Buffer.String()
if redactValues {
planStr = string(redact.RedactableString(planStr).Redact())
}
planText.WriteString(planStr)
// If we're going to display the environment, there's a bunch of queries we
// need to run to get that information, and we can't run them from here, so
// tell the exec factory what information it needs to fetch.
var envOpts exec.ExplainEnvData
if explain.Options.Flags[tree.ExplainFlagEnv] {
var err error
envOpts, err = b.getEnvData()
if err != nil {
return execPlan{}, colOrdMap{}, err
}
}
var ep execPlan
ep.root, err = b.factory.ConstructExplainOpt(planText.String(), envOpts)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
return ep, b.outputColsFromList(explain.ColList), nil
}
func (b *Builder) buildExplain(
explainExpr *memo.ExplainExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
if explainExpr.Options.Mode == tree.ExplainOpt {
return b.buildExplainOpt(explainExpr)
}
var ep execPlan
ep.root, err = b.factory.ConstructExplain(
&explainExpr.Options,
explainExpr.StmtType,
func(f exec.Factory) (exec.Plan, error) {
// Create a separate builder for the explain query. buildRelational
// annotates nodes with extra information when the factory is an
// exec.ExplainFactory so it must be the outer factory and the gist
// factory must be the inner factory.
gf := explain.NewPlanGistFactory(f)
ef := explain.NewFactory(gf, b.semaCtx, b.evalCtx)
explainBld := New(
b.ctx, ef, b.optimizer, b.mem, b.catalog, explainExpr.Input,
b.semaCtx, b.evalCtx, b.initialAllowAutoCommit, b.IsANSIDML,
)
explainBld.disableTelemetry = true
plan, err := explainBld.Build()
if err != nil {
return nil, err
}
explainPlan := plan.(*explain.Plan)
explainPlan.Gist = gf.PlanGist()
return plan, nil
},
)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
return ep, b.outputColsFromList(explainExpr.ColList), nil
}
func (b *Builder) buildShowTrace(
show *memo.ShowTraceForSessionExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
var ep execPlan
ep.root, err = b.factory.ConstructShowTrace(show.TraceType, show.Compact)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
return ep, b.outputColsFromList(show.ColList), nil
}
func (b *Builder) buildAlterTableSplit(
split *memo.AlterTableSplitExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
input, _, err := b.buildRelational(split.Input)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
scalarCtx := buildScalarCtx{}
expiration, err := b.buildScalar(&scalarCtx, split.Expiration)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
table := b.mem.Metadata().Table(split.Table)
var ep execPlan
ep.root, err = b.factory.ConstructAlterTableSplit(
table.Index(split.Index),
input.root,
expiration,
)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
return ep, b.outputColsFromList(split.Columns), nil
}
func (b *Builder) buildAlterTableUnsplit(
unsplit *memo.AlterTableUnsplitExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
input, _, err := b.buildRelational(unsplit.Input)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
table := b.mem.Metadata().Table(unsplit.Table)
var ep execPlan
ep.root, err = b.factory.ConstructAlterTableUnsplit(
table.Index(unsplit.Index),
input.root,
)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
return ep, b.outputColsFromList(unsplit.Columns), nil
}
func (b *Builder) buildAlterTableUnsplitAll(
unsplitAll *memo.AlterTableUnsplitAllExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
table := b.mem.Metadata().Table(unsplitAll.Table)
var ep execPlan
ep.root, err = b.factory.ConstructAlterTableUnsplitAll(table.Index(unsplitAll.Index))
if err != nil {
return execPlan{}, colOrdMap{}, err
}
return ep, b.outputColsFromList(unsplitAll.Columns), nil
}
func (b *Builder) buildAlterTableRelocate(
relocate *memo.AlterTableRelocateExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
input, _, err := b.buildRelational(relocate.Input)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
table := b.mem.Metadata().Table(relocate.Table)
var ep execPlan
ep.root, err = b.factory.ConstructAlterTableRelocate(
table.Index(relocate.Index),
input.root,
relocate.SubjectReplicas,
)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
return ep, b.outputColsFromList(relocate.Columns), nil
}
func (b *Builder) buildAlterRangeRelocate(
relocate *memo.AlterRangeRelocateExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
input, _, err := b.buildRelational(relocate.Input)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
scalarCtx := buildScalarCtx{}
toStoreID, err := b.buildScalar(&scalarCtx, relocate.ToStoreID)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
fromStoreID, err := b.buildScalar(&scalarCtx, relocate.FromStoreID)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
var ep execPlan
ep.root, err = b.factory.ConstructAlterRangeRelocate(
input.root,
relocate.SubjectReplicas,
toStoreID,
fromStoreID,
)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
return ep, b.outputColsFromList(relocate.Columns), nil
}
func (b *Builder) buildControlJobs(
ctl *memo.ControlJobsExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
input, _, err := b.buildRelational(ctl.Input)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
scalarCtx := buildScalarCtx{}
reason, err := b.buildScalar(&scalarCtx, ctl.Reason)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
var ep execPlan
ep.root, err = b.factory.ConstructControlJobs(
ctl.Command,
input.root,
reason,
)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
// ControlJobs returns no columns.
return ep, colOrdMap{}, nil
}
func (b *Builder) buildControlSchedules(
ctl *memo.ControlSchedulesExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
input, _, err := b.buildRelational(ctl.Input)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
var ep execPlan
ep.root, err = b.factory.ConstructControlSchedules(
ctl.Command,
input.root,
)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
// ControlSchedules returns no columns.
return ep, colOrdMap{}, nil
}
func (b *Builder) buildShowCompletions(
ctl *memo.ShowCompletionsExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
var ep execPlan
ep.root, err = b.factory.ConstructShowCompletions(
ctl.Command,
)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
return ep, b.outputColsFromList(ctl.Columns), nil
}
func (b *Builder) buildCancelQueries(
cancel *memo.CancelQueriesExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
input, _, err := b.buildRelational(cancel.Input)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
var ep execPlan
ep.root, err = b.factory.ConstructCancelQueries(input.root, cancel.IfExists)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
if !b.disableTelemetry {
telemetry.Inc(sqltelemetry.CancelQueriesUseCounter)
}
// CancelQueries returns no columns.
return ep, colOrdMap{}, nil
}
func (b *Builder) buildCancelSessions(
cancel *memo.CancelSessionsExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
input, _, err := b.buildRelational(cancel.Input)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
node, err := b.factory.ConstructCancelSessions(input.root, cancel.IfExists)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
if !b.disableTelemetry {
telemetry.Inc(sqltelemetry.CancelSessionsUseCounter)
}
// CancelSessions returns no columns.
return execPlan{root: node}, colOrdMap{}, nil
}
func (b *Builder) buildCreateStatistics(
c *memo.CreateStatisticsExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
node, err := b.factory.ConstructCreateStatistics(c.Syntax)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
// CreateStatistics returns no columns.
return execPlan{root: node}, colOrdMap{}, nil
}
func (b *Builder) buildExport(
export *memo.ExportExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
input, inputCols, err := b.buildRelational(export.Input)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
scalarCtx := buildScalarCtx{}
fileName, err := b.buildScalar(&scalarCtx, export.FileName)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
opts := make([]exec.KVOption, len(export.Options))
for i, o := range export.Options {
opts[i].Key = o.Key
var err error
opts[i].Value, err = b.buildScalar(&scalarCtx, o.Value)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
}
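	// Map the input's NOT NULL columns to node column ordinals; the export
	// node uses this, e.g., to mark columns as required in formats such as
	// Parquet.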
notNullColsSet, err := getNodeColumnOrdinalSet(inputCols, export.Input.Relational().NotNullCols)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
var ep execPlan
ep.root, err = b.factory.ConstructExport(
input.root,
fileName,
export.FileFormat,
opts,
notNullColsSet,
)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
return ep, b.outputColsFromList(export.Columns), nil
}
// outputColsFromList returns a column ordinal map that assigns each column in
// cols its ordinal position within the list.
func (b *Builder) outputColsFromList(cols opt.ColList) colOrdMap {
outputCols := b.colOrdsAlloc.Alloc()
for i, c := range cols {
outputCols.Set(c, i)
}
return outputCols
}
| pkg/sql/opt/exec/execbuilder/statement.go | 1 | https://github.com/cockroachdb/cockroach/commit/592272ca2aa909fc5dd3aed29eb3fff30c567120 | [
0.0009093167609535158,
0.0001885233650682494,
0.00016417763254139572,
0.00017140075215138495,
0.00010587309952825308
] |
{
"id": 0,
"code_window": [
"query T noticetrace\n",
"WITH cte AS (EXPORT INTO CSV 'nodelocal://1/export1/' FROM SELECT * FROM t) SELECT filename FROM cte;\n",
"----\n",
"NOTICE: EXPORT is not the recommended way to move data out of CockroachDB and may be deprecated in the future. Please consider exporting data with changefeeds instead: https://www.cockroachlabs.com/docs/stable/export-data-with-changefeeds\n"
],
"labels": [
"keep",
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"# Regression test for #115290. Correctly handle the case where the Export's\n",
"# input expression has NOT NULL columns that are not part of the presentation of\n",
"# the expression.\n",
"statement ok\n",
"CREATE TABLE t115290 (\n",
" id INT PRIMARY KEY,\n",
" a INT NOT NULL,\n",
" b INT\n",
");\n",
"\n",
"statement ok\n",
"EXPORT INTO PARQUET 'nodelocal://1/export1/' FROM SELECT b FROM t115290 ORDER BY a;"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/export",
"type": "add",
"edit_start_line_idx": 15
} | # Set up a key with 3 shared locks.
run ok
put k=k1 v=v1 ts=5,0
----
>> at end:
data: "k1"/5.000000000,0 -> /BYTES/v1
run ok
txn_begin t=A ts=10,0
txn_begin t=B ts=11,0
txn_begin t=C ts=12,0
txn_begin t=D ts=13,0
----
>> at end:
txn: "D" meta={id=00000004 key=/Min iso=Serializable pri=0.00000000 epo=0 ts=13.000000000,0 min=0,0 seq=0} lock=true stat=PENDING rts=13.000000000,0 wto=false gul=0,0
run ok
acquire_lock t=A k=k1 str=shared
acquire_lock t=B k=k1 str=shared
acquire_lock t=C k=k1 str=shared
----
>> at end:
lock (Replicated): "k1"/Shared -> txn={id=00000003 key=/Min iso=Serializable pri=0.00000000 epo=0 ts=12.000000000,0 min=0,0 seq=0} ts=12.000000000,0 del=false klen=0 vlen=0 mergeTs=<nil> txnDidNotUpdateMeta=true
lock (Replicated): "k1"/Shared -> txn={id=00000002 key=/Min iso=Serializable pri=0.00000000 epo=0 ts=11.000000000,0 min=0,0 seq=0} ts=11.000000000,0 del=false klen=0 vlen=0 mergeTs=<nil> txnDidNotUpdateMeta=true
lock (Replicated): "k1"/Shared -> txn={id=00000001 key=/Min iso=Serializable pri=0.00000000 epo=0 ts=10.000000000,0 min=0,0 seq=0} ts=10.000000000,0 del=false klen=0 vlen=0 mergeTs=<nil> txnDidNotUpdateMeta=true
# Perform locking and mutation operations with maxLockConflicts set.
run error
check_for_acquire_lock t=D k=k1 str=exclusive maxLockConflicts=0
----
error: (*kvpb.LockConflictError:) conflicting locks on "k1", "k1", "k1"
run error
check_for_acquire_lock t=D k=k1 str=exclusive maxLockConflicts=1
----
error: (*kvpb.LockConflictError:) conflicting locks on "k1"
run error
check_for_acquire_lock t=D k=k1 str=exclusive maxLockConflicts=2
----
error: (*kvpb.LockConflictError:) conflicting locks on "k1", "k1"
run error
check_for_acquire_lock t=D k=k1 str=exclusive maxLockConflicts=3
----
error: (*kvpb.LockConflictError:) conflicting locks on "k1", "k1", "k1"
run error
check_for_acquire_lock t=D k=k1 str=exclusive maxLockConflicts=4
----
error: (*kvpb.LockConflictError:) conflicting locks on "k1", "k1", "k1"
run error
acquire_lock t=D k=k1 str=exclusive maxLockConflicts=2
----
>> at end:
lock (Replicated): "k1"/Shared -> txn={id=00000003 key=/Min iso=Serializable pri=0.00000000 epo=0 ts=12.000000000,0 min=0,0 seq=0} ts=12.000000000,0 del=false klen=0 vlen=0 mergeTs=<nil> txnDidNotUpdateMeta=true
lock (Replicated): "k1"/Shared -> txn={id=00000002 key=/Min iso=Serializable pri=0.00000000 epo=0 ts=11.000000000,0 min=0,0 seq=0} ts=11.000000000,0 del=false klen=0 vlen=0 mergeTs=<nil> txnDidNotUpdateMeta=true
lock (Replicated): "k1"/Shared -> txn={id=00000001 key=/Min iso=Serializable pri=0.00000000 epo=0 ts=10.000000000,0 min=0,0 seq=0} ts=10.000000000,0 del=false klen=0 vlen=0 mergeTs=<nil> txnDidNotUpdateMeta=true
error: (*kvpb.LockConflictError:) conflicting locks on "k1", "k1"
run error
put t=D k=k1 v=v1 maxLockConflicts=2
----
>> at end:
data: "k1"/5.000000000,0 -> /BYTES/v1
error: (*kvpb.LockConflictError:) conflicting locks on "k1", "k1"
run error
increment t=D k=k1 maxLockConflicts=2
----
>> at end:
data: "k1"/5.000000000,0 -> /BYTES/v1
error: (*kvpb.LockConflictError:) conflicting locks on "k1", "k1"
run error
cput t=D k=k1 v=v2 maxLockConflicts=2
----
>> at end:
data: "k1"/5.000000000,0 -> /BYTES/v1
error: (*kvpb.LockConflictError:) conflicting locks on "k1", "k1"
run error
initput t=D k=k1 v=v2 maxLockConflicts=2
----
>> at end:
data: "k1"/5.000000000,0 -> /BYTES/v1
error: (*kvpb.LockConflictError:) conflicting locks on "k1", "k1"
run error
del t=D k=k1 maxLockConflicts=2
----
>> at end:
data: "k1"/5.000000000,0 -> /BYTES/v1
error: (*kvpb.LockConflictError:) conflicting locks on "k1", "k1"
run error
del_range t=D k=k1 k=k2 maxLockConflicts=2
----
>> at end:
data: "k1"/5.000000000,0 -> /BYTES/v1
error: (*kvpb.LockConflictError:) conflicting locks on "k1", "k1"
run error
del_range_ts k=k1 k=k2 ts=10,0 maxLockConflicts=2
----
>> at end:
data: "k1"/5.000000000,0 -> /BYTES/v1
error: (*kvpb.LockConflictError:) conflicting locks on "k1", "k1"
run error
del_range_pred k=k1 k=k2 maxLockConflicts=2
----
>> at end:
data: "k1"/5.000000000,0 -> /BYTES/v1
error: (*kvpb.LockConflictError:) conflicting locks on "k1", "k1"
| pkg/storage/testdata/mvcc_histories/replicated_locks_max_conflicts | 0 | https://github.com/cockroachdb/cockroach/commit/592272ca2aa909fc5dd3aed29eb3fff30c567120 | [
0.0001684406161075458,
0.0001651717902859673,
0.00016071306890808046,
0.0001648259931243956,
0.000002303238943568431
] |
{
"id": 0,
"code_window": [
"query T noticetrace\n",
"WITH cte AS (EXPORT INTO CSV 'nodelocal://1/export1/' FROM SELECT * FROM t) SELECT filename FROM cte;\n",
"----\n",
"NOTICE: EXPORT is not the recommended way to move data out of CockroachDB and may be deprecated in the future. Please consider exporting data with changefeeds instead: https://www.cockroachlabs.com/docs/stable/export-data-with-changefeeds\n"
],
"labels": [
"keep",
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"# Regression test for #115290. Correctly handle the case where the Export's\n",
"# input expression has NOT NULL columns that are not part of the presentation of\n",
"# the expression.\n",
"statement ok\n",
"CREATE TABLE t115290 (\n",
" id INT PRIMARY KEY,\n",
" a INT NOT NULL,\n",
" b INT\n",
");\n",
"\n",
"statement ok\n",
"EXPORT INTO PARQUET 'nodelocal://1/export1/' FROM SELECT b FROM t115290 ORDER BY a;"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/export",
"type": "add",
"edit_start_line_idx": 15
} | Copyright (c) 2015, Dave Cheney <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| licenses/BSD2-vitess.io.vitess.go.vt.vterrors.txt | 0 | https://github.com/cockroachdb/cockroach/commit/592272ca2aa909fc5dd3aed29eb3fff30c567120 | [
0.00016904382209759206,
0.0001637583045521751,
0.00016093773592729121,
0.0001612933847354725,
0.0000037402376165118767
] |
{
"id": 0,
"code_window": [
"query T noticetrace\n",
"WITH cte AS (EXPORT INTO CSV 'nodelocal://1/export1/' FROM SELECT * FROM t) SELECT filename FROM cte;\n",
"----\n",
"NOTICE: EXPORT is not the recommended way to move data out of CockroachDB and may be deprecated in the future. Please consider exporting data with changefeeds instead: https://www.cockroachlabs.com/docs/stable/export-data-with-changefeeds\n"
],
"labels": [
"keep",
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"# Regression test for #115290. Correctly handle the case where the Export's\n",
"# input expression has NOT NULL columns that are not part of the presentation of\n",
"# the expression.\n",
"statement ok\n",
"CREATE TABLE t115290 (\n",
" id INT PRIMARY KEY,\n",
" a INT NOT NULL,\n",
" b INT\n",
");\n",
"\n",
"statement ok\n",
"EXPORT INTO PARQUET 'nodelocal://1/export1/' FROM SELECT b FROM t115290 ORDER BY a;"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/export",
"type": "add",
"edit_start_line_idx": 15
} | // Copyright 2023 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"errors"
"flag"
"fmt"
"go/ast"
"go/parser"
"go/token"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
)
var (
lineComment bool
output, typeName, trimPrefix, stringToValueMapName, enumValuesSliceName string
allowedIntegerTypes = []string{
"byte",
"int",
"int8",
"int16",
"int32",
"int64",
"rune",
"uint",
"uint8",
"uint16",
"uint32",
"uint64",
}
)
type tinyStringer struct {
files []string
typeName, trimPrefix, output, stringToValueMapName, enumValuesSliceName string
lineComment bool
}
func init() {
flag.StringVar(&stringToValueMapName, "stringtovaluemapname", "", "if set, also create a map of enum name -> value of the given name")
flag.StringVar(&enumValuesSliceName, "enumvaluesslicename", "", "if set, also create a slice of all enum values of the given name")
flag.StringVar(&output, "output", "", "name of output file; default srcdir/<type>_string.go")
flag.StringVar(&typeName, "type", "", "the type for which to generate output")
flag.StringVar(&trimPrefix, "trimprefix", "", "trim the given prefix from generated names")
flag.BoolVar(&lineComment, "linecomment", false, "use line comment text as printed text when present")
}
func main() {
flag.Parse()
if err := doMain(); err != nil {
panic(err)
}
}
func doMain() error {
if typeName == "" {
return errors.New("must provide --type")
}
return tinyStringer{
enumValuesSliceName: enumValuesSliceName,
files: flag.Args(),
lineComment: lineComment,
output: output,
stringToValueMapName: stringToValueMapName,
typeName: typeName,
trimPrefix: trimPrefix,
}.stringify()
}
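// stringify parses the input files, validates the target type, and writes the
// generated <type>_string.go file, including the optional name-to-value map
// and enum-values slice when requested.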
func (s tinyStringer) stringify() error {
if len(s.files) == 0 {
return errors.New("must provide at least one file argument")
}
// Make sure all input files are in the same package.
var srcDir, whichFile string
for _, file := range s.files {
dir := filepath.Dir(file)
if srcDir == "" {
srcDir = dir
whichFile = file
} else {
if srcDir != dir {
return fmt.Errorf("all input files must be in the same source directory; got input file %s in directory %s, but input file %s in directory %s", whichFile, srcDir, file, dir)
}
}
}
if s.output == "" {
s.output = filepath.Join(srcDir, strings.ToLower(s.typeName)+"_string.go")
}
parsedFiles, pkgName, err := parseAllFiles(s.files)
if err != nil {
return err
}
if err := validateType(parsedFiles, s.typeName); err != nil {
return err
}
inOrder, nameToInt, nameToPrinted, err := s.computeConstantValues(parsedFiles)
if err != nil {
return err
}
if len(nameToInt) == 0 || len(nameToPrinted) == 0 {
return fmt.Errorf("did not find enough constant values for type %s", s.typeName)
}
// Produce s.output.
outputFile, err := os.Create(s.output)
if err != nil {
return err
}
defer func() {
_ = outputFile.Close()
}()
fmt.Fprintf(outputFile, `// Code generated by "stringer"; DO NOT EDIT.
package %s
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
`, pkgName)
for _, constName := range inOrder {
if constName == "_" {
continue
}
minus := "-"
if nameToInt[constName] < 0 {
// Implement the behavior of gofmt, which wants no space
// between the operands unless the number on the right
// is negative (would probably trigger some parse error).
minus = " - "
}
fmt.Fprintf(outputFile, " _ = x[%s%s%d]\n", constName, minus, nameToInt[constName])
}
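	// Pick a receiver variable name for the generated String method that
	// does not collide with any of the constant names.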
receiverVar := "i"
if _, ok := nameToInt[receiverVar]; ok {
receiverVar = "_i"
if _, ok := nameToInt[receiverVar]; ok {
return fmt.Errorf("don't know how to choose a receiver variable because %s is a constant name", receiverVar)
}
}
fmt.Fprintf(outputFile, `}
func (%s %s) String() string {
switch %s {
`, receiverVar, s.typeName, receiverVar)
seen := make(map[int]struct{})
for _, constName := range inOrder {
if constName == "_" {
continue
}
if _, ok := seen[nameToInt[constName]]; ok {
continue
}
fmt.Fprintf(outputFile, ` case %s:
return "%s"
`, constName, nameToPrinted[constName])
seen[nameToInt[constName]] = struct{}{}
}
	// Use the chosen receiver variable here too; hardcoding `i` would emit
	// uncompilable code whenever the receiver had to be renamed to `_i`.
	fmt.Fprintf(outputFile, `	default:
		return "%[1]s(" + strconv.FormatInt(int64(%[2]s), 10) + ")"
	}
}
`, s.typeName, receiverVar)
if s.stringToValueMapName != "" {
fmt.Fprintf(outputFile, `
var %s = map[string]%s{
`, s.stringToValueMapName, s.typeName)
// Figure out the length of the longest const name to see how
// much we need to pad it out.
var maxLen int
for _, constName := range inOrder {
if len(nameToPrinted[constName]) > maxLen {
maxLen = len(nameToPrinted[constName])
}
}
for _, constName := range inOrder {
if constName == "_" {
continue
}
padding := strings.Repeat(" ", 1+maxLen-len(nameToPrinted[constName]))
fmt.Fprintf(outputFile, ` "%s":%s%d,
`, nameToPrinted[constName], padding, nameToInt[constName])
}
fmt.Fprintf(outputFile, `}
`)
}
if s.enumValuesSliceName != "" {
seen := make(map[int]struct{})
fmt.Fprintf(outputFile, `
var %s = []%s{
`, s.enumValuesSliceName, s.typeName)
inLexicographicOrder := make([]string, len(inOrder))
copy(inLexicographicOrder, inOrder)
	// Remove duplicate values, keeping the first constant declared for each
	// value.
i := 0
for i < len(inLexicographicOrder) {
constName := inLexicographicOrder[i]
if _, ok := seen[nameToInt[constName]]; ok {
inLexicographicOrder = append(inLexicographicOrder[:i], inLexicographicOrder[i+1:]...)
} else {
i += 1
seen[nameToInt[constName]] = struct{}{}
}
}
sort.Slice(inLexicographicOrder, func(i, j int) bool {
return nameToPrinted[inLexicographicOrder[i]] < nameToPrinted[inLexicographicOrder[j]]
})
seen = make(map[int]struct{})
for _, constName := range inLexicographicOrder {
if constName == "_" {
continue
}
if _, ok := seen[nameToInt[constName]]; ok {
continue
}
fmt.Fprintf(outputFile, ` %s,
`, constName)
seen[nameToInt[constName]] = struct{}{}
}
fmt.Fprintf(outputFile, `}
`)
}
return nil
}
// parseAllFiles returns a list of all the files parsed, the name of the package, and an error if one occurred.
func parseAllFiles(files []string) ([]*ast.File, string, error) {
// Parse all files.
fset := token.NewFileSet()
parsedFiles := make([]*ast.File, 0, len(files))
for _, file := range files {
parsed, err := parser.ParseFile(fset, file, nil, parser.SkipObjectResolution|parser.ParseComments)
if err != nil {
return nil, "", err
}
parsedFiles = append(parsedFiles, parsed)
}
// All files should have the same package declaration. This will help us
// determine what package the generated file should be in.
var pkgName, whichFile string
for i, file := range parsedFiles {
if pkgName == "" {
pkgName = file.Name.Name
whichFile = files[i]
} else {
if pkgName != file.Name.Name {
return nil, "", fmt.Errorf("all input files must have the same package name; got input file %s w/ 'package %s', but input file %s w/ 'package %s'", whichFile, pkgName, files[i], file.Name.Name)
}
}
}
return parsedFiles, pkgName, nil
}
func validateType(files []*ast.File, typeName string) error {
// Find the definition of the type. Should be an alias for some
// integer type.
for _, file := range files {
for _, decl := range file.Decls {
var genDecl *ast.GenDecl
genDecl, ok := decl.(*ast.GenDecl)
if !ok {
continue
}
if genDecl.Tok != token.TYPE {
continue
}
for _, spec := range genDecl.Specs {
typeSpec, ok := spec.(*ast.TypeSpec)
if !ok {
// Should never happen.
return fmt.Errorf("unexpected error occurred while processing %+v", spec)
}
if typeSpec.Name.Name != typeName {
continue
}
// Ensure the type is an alias for a built-in integer type.
ident, ok := typeSpec.Type.(*ast.Ident)
if !ok {
return fmt.Errorf("expected identifier for definition of type %s", typeName)
}
var found bool
for _, intType := range allowedIntegerTypes {
if ident.Name == intType {
found = true
break
}
}
if !found {
return fmt.Errorf("expected an integer type for definition of type %s; got %s", typeName, ident.Name)
}
}
}
}
return nil
}
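// computeConstantValues collects the constants declared with the target type,
// returning their names in declaration order along with maps from constant
// name to integer value and from constant name to printed name (after any
// line-comment or prefix-trimming adjustments).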
func (s tinyStringer) computeConstantValues(
files []*ast.File,
) (inOrder []string, nameToInt map[string]int, nameToPrinted map[string]string, err error) {
nameToInt = make(map[string]int)
nameToPrinted = make(map[string]string)
for _, file := range files {
for _, decl := range file.Decls {
var genDecl *ast.GenDecl
genDecl, ok := decl.(*ast.GenDecl)
if !ok {
continue
}
if genDecl.Tok != token.CONST {
continue
}
var inferAppropriateType, inIota bool
var iotaVal int
for _, spec := range genDecl.Specs {
valueSpec, ok := spec.(*ast.ValueSpec)
if !ok {
// Should never happen.
err = fmt.Errorf("unexpected error occurred while processing %+v", spec)
return
}
if valueSpec.Type == nil && !inferAppropriateType {
continue
}
ident, ok := valueSpec.Type.(*ast.Ident)
if (ok && ident.Name != s.typeName) || (!ok && !inferAppropriateType) {
inferAppropriateType = false
continue
}
inferAppropriateType = true
if len(valueSpec.Names) != 1 {
err = fmt.Errorf("expected one name for constant of type %s; found %+v", s.typeName, valueSpec.Names)
return
}
constName := valueSpec.Names[0].Name
inOrder = append(inOrder, constName)
// Check the value to see what value we'll assign to the constant.
if valueSpec.Values == nil {
if inIota {
nameToInt[constName] = iotaVal
iotaVal += 1
} else {
nameToInt[constName] = 0
}
} else if len(valueSpec.Values) != 1 {
err = fmt.Errorf("expected one value for constant %s; found %+v", constName, valueSpec.Values)
return
} else if lit, ok := valueSpec.Values[0].(*ast.BasicLit); ok {
if lit.Kind == token.INT {
var intVal int64
intVal, err = strconv.ParseInt(lit.Value, 0, 0)
if err != nil {
return
}
nameToInt[constName] = int(intVal)
inIota = false
} else if lit.Kind == token.CHAR {
if len(lit.Value) != 3 {
err = fmt.Errorf("expected string of form 'X' for character: got %s", lit.Value)
return
}
if lit.Value[0] != '\'' || lit.Value[2] != '\'' {
err = fmt.Errorf("expected string of form 'X' for character: got %s", lit.Value)
return
}
nameToInt[constName] = int(lit.Value[1])
inIota = false
} else {
err = fmt.Errorf("expected integer value for constant %s; found %s", constName, lit.Value)
return
}
} else if ident, ok := valueSpec.Values[0].(*ast.Ident); ok {
if ident.Name == "iota" {
inIota = true
nameToInt[constName] = iotaVal
iotaVal += 1
} else if otherValue, ok := nameToInt[ident.Name]; ok {
nameToInt[constName] = otherValue
inIota = false
}
} else if binExpr, ok := valueSpec.Values[0].(*ast.BinaryExpr); ok {
// Handle iota + N or iota - N.
iotaIdent, ok := binExpr.X.(*ast.Ident)
if !ok || iotaIdent.Name != "iota" {
err = fmt.Errorf("expected 'iota' in binary expression %+v; found %+v", binExpr, binExpr.X)
return
}
var otherNumParsed int64
if otherNum, ok := binExpr.Y.(*ast.BasicLit); ok && otherNum.Kind == token.INT {
otherNumParsed, err = strconv.ParseInt(otherNum.Value, 0, 0)
if err != nil {
return
}
} else if otherRef, ok := binExpr.Y.(*ast.Ident); ok {
otherNum, ok := nameToInt[otherRef.Name]
if !ok {
err = fmt.Errorf("could not find value of %s", otherRef.Name)
return
}
otherNumParsed = int64(otherNum)
} else {
err = fmt.Errorf("couldn't parse second argument of binary expression %+v; found %+v", binExpr, binExpr.Y)
return
}
if binExpr.Op == token.ADD {
iotaVal = iotaVal + int(otherNumParsed)
} else if binExpr.Op == token.SUB {
iotaVal = iotaVal - int(otherNumParsed)
}
inIota = true
nameToInt[constName] = iotaVal
iotaVal += 1
} else {
err = fmt.Errorf("don't know how to process %+v", valueSpec.Values[0])
return
}
// Determine the printed name of the constant.
printedName := constName
if s.lineComment && valueSpec.Comment != nil {
printedName = strings.TrimSpace(valueSpec.Comment.Text())
}
if s.trimPrefix != "" {
printedName = strings.TrimPrefix(printedName, s.trimPrefix)
}
nameToPrinted[constName] = printedName
}
}
}
return
}
| pkg/build/bazel/util/tinystringer/main.go | 0 | https://github.com/cockroachdb/cockroach/commit/592272ca2aa909fc5dd3aed29eb3fff30c567120 | [
0.00017478283552918583,
0.00016954581951722503,
0.0001630814076634124,
0.00016935930761974305,
0.0000026193386020167964
] |
{
"id": 1,
"code_window": [
"\t\t\treturn execPlan{}, colOrdMap{}, err\n",
"\t\t}\n",
"\t}\n",
"\tnotNullColsSet, err := getNodeColumnOrdinalSet(inputCols, export.Input.Relational().NotNullCols)\n",
"\tif err != nil {\n",
"\t\treturn execPlan{}, colOrdMap{}, err\n",
"\t}\n",
"\n",
"\tvar ep execPlan\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\tvar notNullOrds exec.NodeColumnOrdinalSet\n",
"\tnotNullCols := export.Input.Relational().NotNullCols\n",
"\tfor col, ok := notNullCols.Next(0); ok; col, ok = notNullCols.Next(col + 1) {\n",
"\t\t// Ignore NOT NULL columns that are not part of the input execPlan's\n",
"\t\t// output columns. This can happen when applyPresentation projects-away\n",
"\t\t// some output columns of the input expression. For example, a Sort\n",
"\t\t// expression must output a column it orders by, but that column must be\n",
"\t\t// projected-away after the sort if the presentation does not require\n",
"\t\t// the column.\n",
"\t\tif ord, ok := inputCols.Get(col); ok {\n",
"\t\t\tnotNullOrds.Add(ord)\n",
"\t\t}\n"
],
"file_path": "pkg/sql/opt/exec/execbuilder/statement.go",
"type": "replace",
"edit_start_line_idx": 445
} | # Regression test for incorrectly handling projection on top of the EXPORT
# (#101733).
statement ok
CREATE TABLE t (k PRIMARY KEY) AS SELECT 1;
statement ok
WITH cte AS (EXPORT INTO CSV 'nodelocal://1/export1/' FROM SELECT * FROM t) SELECT filename FROM cte;
statement ok
WITH cte AS (EXPORT INTO PARQUET 'nodelocal://1/export1/' FROM SELECT * FROM t) SELECT filename FROM cte;
query T noticetrace
WITH cte AS (EXPORT INTO CSV 'nodelocal://1/export1/' FROM SELECT * FROM t) SELECT filename FROM cte;
----
NOTICE: EXPORT is not the recommended way to move data out of CockroachDB and may be deprecated in the future. Please consider exporting data with changefeeds instead: https://www.cockroachlabs.com/docs/stable/export-data-with-changefeeds
| pkg/sql/logictest/testdata/logic_test/export | 1 | https://github.com/cockroachdb/cockroach/commit/592272ca2aa909fc5dd3aed29eb3fff30c567120 | [
0.00017171654326375574,
0.0001699578861007467,
0.00016819922893773764,
0.0001699578861007467,
0.0000017586571630090475
] |
{
"id": 1,
"code_window": [
"\t\t\treturn execPlan{}, colOrdMap{}, err\n",
"\t\t}\n",
"\t}\n",
"\tnotNullColsSet, err := getNodeColumnOrdinalSet(inputCols, export.Input.Relational().NotNullCols)\n",
"\tif err != nil {\n",
"\t\treturn execPlan{}, colOrdMap{}, err\n",
"\t}\n",
"\n",
"\tvar ep execPlan\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\tvar notNullOrds exec.NodeColumnOrdinalSet\n",
"\tnotNullCols := export.Input.Relational().NotNullCols\n",
"\tfor col, ok := notNullCols.Next(0); ok; col, ok = notNullCols.Next(col + 1) {\n",
"\t\t// Ignore NOT NULL columns that are not part of the input execPlan's\n",
"\t\t// output columns. This can happen when applyPresentation projects-away\n",
"\t\t// some output columns of the input expression. For example, a Sort\n",
"\t\t// expression must output a column it orders by, but that column must be\n",
"\t\t// projected-away after the sort if the presentation does not require\n",
"\t\t// the column.\n",
"\t\tif ord, ok := inputCols.Get(col); ok {\n",
"\t\t\tnotNullOrds.Add(ord)\n",
"\t\t}\n"
],
"file_path": "pkg/sql/opt/exec/execbuilder/statement.go",
"type": "replace",
"edit_start_line_idx": 445
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"bytes"
"flag"
"io"
"path/filepath"
"strings"
"testing"
"github.com/cockroachdb/datadriven"
"github.com/cockroachdb/errors"
)
var (
testDataGlob = flag.String("d", "testdata/[^.]*", "test data glob")
)
func TestOptgen(t *testing.T) {
paths, err := filepath.Glob(*testDataGlob)
if err != nil {
t.Fatal(err)
}
if len(paths) == 0 {
t.Fatalf("no testfiles found matching: %s", *testDataGlob)
}
for _, path := range paths {
t.Run(filepath.Base(path), func(t *testing.T) {
datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string {
var buf bytes.Buffer
gen := optgen{useGoFmt: true, maxErrors: 2, stdErr: &buf}
gen.globResolver = func(pattern string) ([]string, error) {
switch pattern {
case "test.opt":
return []string{"test.opt"}, nil
case "all":
return []string{"test.opt", "test2.opt"}, nil
case "not-found.opt":
return []string{"notfound.opt"}, nil
default:
return nil, errors.New("invalid source")
}
}
// Resolve input file to the data-driven input text.
gen.fileResolver = func(name string) (io.Reader, error) {
switch name {
case "test.opt":
return strings.NewReader(d.Input), nil
case "test2.opt":
return strings.NewReader(""), nil
default:
return nil, errors.New("invalid filename")
}
}
args := make([]string, len(d.CmdArgs))
for i := range args {
args[i] = d.CmdArgs[i].String()
}
gen.run(args...)
// Suppress DO NOT EDIT so that reviewable will still show the
// file by default.
return strings.Replace(buf.String(), "DO NOT EDIT.", "[omitted]", -1)
})
})
}
}
| pkg/sql/opt/optgen/cmd/optgen/main_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/592272ca2aa909fc5dd3aed29eb3fff30c567120 | [
0.00018010388885159045,
0.00017402137746103108,
0.00016974512254819274,
0.000173551743500866,
0.000003318341896374477
] |
{
"id": 1,
"code_window": [
"\t\t\treturn execPlan{}, colOrdMap{}, err\n",
"\t\t}\n",
"\t}\n",
"\tnotNullColsSet, err := getNodeColumnOrdinalSet(inputCols, export.Input.Relational().NotNullCols)\n",
"\tif err != nil {\n",
"\t\treturn execPlan{}, colOrdMap{}, err\n",
"\t}\n",
"\n",
"\tvar ep execPlan\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\tvar notNullOrds exec.NodeColumnOrdinalSet\n",
"\tnotNullCols := export.Input.Relational().NotNullCols\n",
"\tfor col, ok := notNullCols.Next(0); ok; col, ok = notNullCols.Next(col + 1) {\n",
"\t\t// Ignore NOT NULL columns that are not part of the input execPlan's\n",
"\t\t// output columns. This can happen when applyPresentation projects-away\n",
"\t\t// some output columns of the input expression. For example, a Sort\n",
"\t\t// expression must output a column it orders by, but that column must be\n",
"\t\t// projected-away after the sort if the presentation does not require\n",
"\t\t// the column.\n",
"\t\tif ord, ok := inputCols.Get(col); ok {\n",
"\t\t\tnotNullOrds.Add(ord)\n",
"\t\t}\n"
],
"file_path": "pkg/sql/opt/exec/execbuilder/statement.go",
"type": "replace",
"edit_start_line_idx": 445
} | // Copyright 2023 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package kvtenant
import (
"context"
"fmt"
"net"
"sync/atomic"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/config"
"github.com/cockroachdb/cockroach/pkg/gossip"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord"
"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
"github.com/cockroachdb/cockroach/pkg/multitenant/mtinfopb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/netutil"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/retry"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
grpcstatus "google.golang.org/grpc/status"
)
var rpcRetryOpts = retry.Options{
InitialBackoff: 1 * time.Microsecond,
MaxBackoff: 4 * time.Microsecond,
}
var _ kvpb.InternalServer = &mockServer{}
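// mockServer implements kvpb.InternalServer for tests. The behavior of the
// methods under test is injected through the function fields; all other
// methods panic if called.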
type mockServer struct {
rangeLookupFn func(context.Context, *kvpb.RangeLookupRequest) (*kvpb.RangeLookupResponse, error)
gossipSubFn func(*kvpb.GossipSubscriptionRequest, kvpb.Internal_GossipSubscriptionServer) error
tenantSettingsFn func(request *kvpb.TenantSettingsRequest, server kvpb.Internal_TenantSettingsServer) error
emulateOldVersionSettingServer bool
}
func (m *mockServer) RangeLookup(
ctx context.Context, req *kvpb.RangeLookupRequest,
) (*kvpb.RangeLookupResponse, error) {
return m.rangeLookupFn(ctx, req)
}
func (m *mockServer) GossipSubscription(
req *kvpb.GossipSubscriptionRequest, stream kvpb.Internal_GossipSubscriptionServer,
) error {
return m.gossipSubFn(req, stream)
}
func (m *mockServer) TenantSettings(
req *kvpb.TenantSettingsRequest, stream kvpb.Internal_TenantSettingsServer,
) error {
if m.tenantSettingsFn == nil {
// First message - required by startup protocol.
if err := stream.Send(&kvpb.TenantSettingsEvent{
EventType: kvpb.TenantSettingsEvent_SETTING_EVENT,
Precedence: kvpb.TenantSettingsEvent_TENANT_SPECIFIC_OVERRIDES,
Incremental: false,
Overrides: nil,
}); err != nil {
return err
}
if !m.emulateOldVersionSettingServer {
// Initial tenant metadata.
if err := stream.Send(&kvpb.TenantSettingsEvent{
EventType: kvpb.TenantSettingsEvent_METADATA_EVENT,
Name: "foo",
// TODO(knz): remove cast after the dep cycle has been resolved.
DataState: uint32(mtinfopb.DataStateReady),
ServiceMode: uint32(mtinfopb.ServiceModeExternal),
// Need to ensure this looks like a fake no-op setting override event.
Precedence: kvpb.TenantSettingsEvent_TENANT_SPECIFIC_OVERRIDES,
Incremental: true,
}); err != nil {
return err
}
}
// Finish startup.
if err := stream.Send(&kvpb.TenantSettingsEvent{
EventType: kvpb.TenantSettingsEvent_SETTING_EVENT,
Precedence: kvpb.TenantSettingsEvent_ALL_TENANTS_OVERRIDES,
Incremental: false,
Overrides: nil,
}); err != nil {
return err
}
// Ensure the stream doesn't immediately finish, which can cause
// flakes in tests due to the retry loop in the client.
<-stream.Context().Done()
return nil
}
return m.tenantSettingsFn(req, stream)
}
func (*mockServer) ResetQuorum(
context.Context, *kvpb.ResetQuorumRequest,
) (*kvpb.ResetQuorumResponse, error) {
panic("unimplemented")
}
func (*mockServer) Batch(context.Context, *kvpb.BatchRequest) (*kvpb.BatchResponse, error) {
panic("unimplemented")
}
func (*mockServer) RangeFeed(*kvpb.RangeFeedRequest, kvpb.Internal_RangeFeedServer) error {
panic("unimplemented")
}
func (m *mockServer) MuxRangeFeed(server kvpb.Internal_MuxRangeFeedServer) error {
panic("implement me")
}
func (*mockServer) Join(context.Context, *kvpb.JoinNodeRequest) (*kvpb.JoinNodeResponse, error) {
panic("unimplemented")
}
func (*mockServer) TokenBucket(
ctx context.Context, in *kvpb.TokenBucketRequest,
) (*kvpb.TokenBucketResponse, error) {
panic("unimplemented")
}
func (m *mockServer) GetSpanConfigs(
context.Context, *roachpb.GetSpanConfigsRequest,
) (*roachpb.GetSpanConfigsResponse, error) {
panic("unimplemented")
}
func (m *mockServer) GetAllSystemSpanConfigsThatApply(
context.Context, *roachpb.GetAllSystemSpanConfigsThatApplyRequest,
) (*roachpb.GetAllSystemSpanConfigsThatApplyResponse, error) {
panic("unimplemented")
}
func (m *mockServer) UpdateSpanConfigs(
context.Context, *roachpb.UpdateSpanConfigsRequest,
) (*roachpb.UpdateSpanConfigsResponse, error) {
panic("unimplemented")
}
func (m *mockServer) SpanConfigConformance(
context.Context, *roachpb.SpanConfigConformanceRequest,
) (*roachpb.SpanConfigConformanceResponse, error) {
panic("unimplemented")
}
func (m *mockServer) GetRangeDescriptors(
*kvpb.GetRangeDescriptorsRequest, kvpb.Internal_GetRangeDescriptorsServer,
) error {
panic("unimplemented")
}
func gossipEventForClusterID(clusterID uuid.UUID) *kvpb.GossipSubscriptionEvent {
return &kvpb.GossipSubscriptionEvent{
Key: gossip.KeyClusterID,
Content: roachpb.MakeValueFromBytesAndTimestamp(clusterID.GetBytes(), hlc.Timestamp{}),
PatternMatched: gossip.KeyClusterID,
}
}
func gossipEventForNodeDesc(desc *roachpb.NodeDescriptor) *kvpb.GossipSubscriptionEvent {
val, err := protoutil.Marshal(desc)
if err != nil {
panic(err)
}
return &kvpb.GossipSubscriptionEvent{
Key: gossip.MakeNodeIDKey(desc.NodeID),
Content: roachpb.MakeValueFromBytesAndTimestamp(val, hlc.Timestamp{}),
PatternMatched: gossip.MakePrefixPattern(gossip.KeyNodeDescPrefix),
}
}
func gossipEventForStoreDesc(desc *roachpb.StoreDescriptor) *kvpb.GossipSubscriptionEvent {
val, err := protoutil.Marshal(desc)
if err != nil {
panic(err)
}
return &kvpb.GossipSubscriptionEvent{
Key: gossip.MakeStoreDescKey(desc.StoreID),
Content: roachpb.MakeValueFromBytesAndTimestamp(val, hlc.Timestamp{}),
PatternMatched: gossip.MakePrefixPattern(gossip.KeyStoreDescPrefix),
}
}
func gossipEventForSystemConfig(cfg *config.SystemConfigEntries) *kvpb.GossipSubscriptionEvent {
val, err := protoutil.Marshal(cfg)
if err != nil {
panic(err)
}
return &kvpb.GossipSubscriptionEvent{
Key: gossip.KeyDeprecatedSystemConfig,
Content: roachpb.MakeValueFromBytesAndTimestamp(val, hlc.Timestamp{}),
PatternMatched: gossip.KeyDeprecatedSystemConfig,
}
}
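// waitForNodeDesc blocks until the connector can resolve the descriptor for
// the given node ID, confirming the gossip update has been applied.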
func waitForNodeDesc(t *testing.T, c *connector, nodeID roachpb.NodeID) {
t.Helper()
testutils.SucceedsSoon(t, func() error {
_, err := c.GetNodeDescriptor(nodeID)
return err
})
}
func waitForStoreDesc(t *testing.T, c *connector, storeID roachpb.StoreID) {
t.Helper()
testutils.SucceedsSoon(t, func() error {
_, err := c.GetStoreDescriptor(storeID)
return err
})
}
func newConnector(cfg ConnectorConfig, addrs []string) *connector {
return NewConnector(cfg, addrs).(*connector)
}
// TestConnectorGossipSubscription tests connector's roles as a
// kvcoord.NodeDescStore and as a config.SystemConfigProvider.
func TestConnectorGossipSubscription(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
stopper := stop.NewStopper()
defer stopper.Stop(ctx)
clock := hlc.NewClockForTesting(nil)
rpcContext := rpc.NewInsecureTestingContext(ctx, clock, stopper)
s, err := rpc.NewServer(ctx, rpcContext)
require.NoError(t, err)
// Test setting the cluster ID by setting it to nil then ensuring it's later
// set to the original ID value.
clusterID := rpcContext.StorageClusterID.Get()
rpcContext.StorageClusterID.Reset(uuid.Nil)
gossipSubC := make(chan *kvpb.GossipSubscriptionEvent)
defer close(gossipSubC)
gossipSubFn := func(req *kvpb.GossipSubscriptionRequest, stream kvpb.Internal_GossipSubscriptionServer) error {
assert.Len(t, req.Patterns, 4)
assert.Equal(t, "cluster-id", req.Patterns[0])
assert.Equal(t, "node:.*", req.Patterns[1])
assert.Equal(t, "store:.*", req.Patterns[2])
assert.Equal(t, "system-db", req.Patterns[3])
for gossipSub := range gossipSubC {
if err := stream.Send(gossipSub); err != nil {
return err
}
}
return nil
}
kvpb.RegisterInternalServer(s, &mockServer{gossipSubFn: gossipSubFn})
ln, err := netutil.ListenAndServeGRPC(stopper, s, util.TestAddr)
require.NoError(t, err)
cfg := ConnectorConfig{
AmbientCtx: log.MakeTestingAmbientContext(stopper.Tracer()),
RPCContext: rpcContext,
RPCRetryOptions: rpcRetryOpts,
}
addrs := []string{ln.Addr().String()}
c := newConnector(cfg, addrs)
// Start should block until the first GossipSubscription response.
startedC := make(chan error)
go func() {
startedC <- c.Start(ctx)
}()
select {
case err := <-startedC:
t.Fatalf("Start unexpectedly completed with err=%v", err)
case <-time.After(10 * time.Millisecond):
}
// Return first GossipSubscription response.
node1 := &roachpb.NodeDescriptor{NodeID: 1, Address: util.MakeUnresolvedAddr("tcp", "1.1.1.1")}
node2 := &roachpb.NodeDescriptor{NodeID: 2, Address: util.MakeUnresolvedAddr("tcp", "2.2.2.2")}
gossipSubC <- gossipEventForNodeDesc(node1)
gossipSubC <- gossipEventForNodeDesc(node2)
gossipSubC <- gossipEventForClusterID(clusterID)
select {
case err := <-startedC:
require.NoError(t, err)
case <-time.After(10 * time.Second):
t.Fatalf("failed to see start complete")
}
// Ensure that ClusterID was updated.
require.Equal(t, clusterID, rpcContext.StorageClusterID.Get())
// Test kvcoord.NodeDescStore impl. Wait for full update first.
waitForNodeDesc(t, c, 2)
desc, err := c.GetNodeDescriptor(1)
require.Equal(t, node1, desc)
require.NoError(t, err)
desc, err = c.GetNodeDescriptor(2)
require.Equal(t, node2, desc)
require.NoError(t, err)
desc, err = c.GetNodeDescriptor(3)
require.Nil(t, desc)
require.Regexp(t, "node descriptor with node ID 3 was not found", err)
// Test GetStoreDescriptor.
storeID1 := roachpb.StoreID(1)
store1 := &roachpb.StoreDescriptor{StoreID: storeID1, Node: *node1}
storeID2 := roachpb.StoreID(2)
store2 := &roachpb.StoreDescriptor{StoreID: storeID2, Node: *node2}
gossipSubC <- gossipEventForStoreDesc(store1)
gossipSubC <- gossipEventForStoreDesc(store2)
waitForStoreDesc(t, c, storeID1)
storeDesc, err := c.GetStoreDescriptor(storeID1)
require.NoError(t, err)
require.Equal(t, store1, storeDesc)
waitForStoreDesc(t, c, storeID2)
storeDesc, err = c.GetStoreDescriptor(storeID2)
require.NoError(t, err)
require.Equal(t, store2, storeDesc)
storeDesc, err = c.GetStoreDescriptor(3)
require.Nil(t, storeDesc)
require.Regexp(t, "store descriptor with store ID 3 was not found", err)
// Return updated GossipSubscription response.
node1Up := &roachpb.NodeDescriptor{NodeID: 1, Address: util.MakeUnresolvedAddr("tcp", "1.2.3.4")}
node3 := &roachpb.NodeDescriptor{NodeID: 3, Address: util.MakeUnresolvedAddr("tcp", "2.2.2.2")}
gossipSubC <- gossipEventForNodeDesc(node1Up)
gossipSubC <- gossipEventForNodeDesc(node3)
// Test kvcoord.NodeDescStore impl. Wait for full update first.
waitForNodeDesc(t, c, 3)
desc, err = c.GetNodeDescriptor(1)
require.Equal(t, node1Up, desc)
require.NoError(t, err)
desc, err = c.GetNodeDescriptor(2)
require.Equal(t, node2, desc)
require.NoError(t, err)
desc, err = c.GetNodeDescriptor(3)
require.Equal(t, node3, desc)
require.NoError(t, err)
// Test config.SystemConfigProvider impl. Should not have a SystemConfig yet.
sysCfg := c.GetSystemConfig()
require.Nil(t, sysCfg)
sysCfgC, _ := c.RegisterSystemConfigChannel()
require.Len(t, sysCfgC, 0)
// Return first SystemConfig response.
sysCfgEntries := &config.SystemConfigEntries{Values: []roachpb.KeyValue{
{Key: roachpb.Key("a")},
{Key: roachpb.Key("b")},
}}
gossipSubC <- gossipEventForSystemConfig(sysCfgEntries)
// Test config.SystemConfigProvider impl. Wait for update first.
<-sysCfgC
sysCfg = c.GetSystemConfig()
require.NotNil(t, sysCfg)
require.Equal(t, sysCfgEntries.Values, sysCfg.Values)
// Return updated SystemConfig response.
sysCfgEntriesUp := &config.SystemConfigEntries{Values: []roachpb.KeyValue{
{Key: roachpb.Key("a")},
{Key: roachpb.Key("c")},
}}
gossipSubC <- gossipEventForSystemConfig(sysCfgEntriesUp)
// Test config.SystemConfigProvider impl. Wait for update first.
<-sysCfgC
sysCfg = c.GetSystemConfig()
require.NotNil(t, sysCfg)
require.Equal(t, sysCfgEntriesUp.Values, sysCfg.Values)
// A newly registered SystemConfig channel will be immediately notified.
sysCfgC2, _ := c.RegisterSystemConfigChannel()
require.Len(t, sysCfgC2, 1)
}
// TestConnectorRangeLookup tests connector's role as a
// kvcoord.RangeDescriptorDB.
func TestConnectorRangeLookup(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
stopper := stop.NewStopper()
defer stopper.Stop(ctx)
clock := hlc.NewClockForTesting(nil)
rpcContext := rpc.NewInsecureTestingContext(ctx, clock, stopper)
s, err := rpc.NewServer(ctx, rpcContext)
require.NoError(t, err)
rangeLookupRespC := make(chan *kvpb.RangeLookupResponse, 1)
rangeLookupFn := func(_ context.Context, req *kvpb.RangeLookupRequest) (*kvpb.RangeLookupResponse, error) {
// Validate request.
assert.Equal(t, roachpb.RKey("a"), req.Key)
assert.Equal(t, kvpb.READ_UNCOMMITTED, req.ReadConsistency)
assert.Equal(t, int64(kvcoord.RangeLookupPrefetchCount), req.PrefetchNum)
assert.Equal(t, false, req.PrefetchReverse)
// Respond.
return <-rangeLookupRespC, nil
}
server := &mockServer{rangeLookupFn: rangeLookupFn}
kvpb.RegisterInternalServer(s, server)
ln, err := netutil.ListenAndServeGRPC(stopper, s, util.TestAddr)
require.NoError(t, err)
cfg := ConnectorConfig{
AmbientCtx: log.MakeTestingAmbientContext(stopper.Tracer()),
RPCContext: rpcContext,
RPCRetryOptions: rpcRetryOpts,
}
addrs := []string{ln.Addr().String()}
c := newConnector(cfg, addrs)
// NOTE: we don't actually start the connector worker. That's ok, as
// RangeDescriptorDB methods don't require it to be running.
// Success case.
descs := []roachpb.RangeDescriptor{{RangeID: 1}, {RangeID: 2}}
preDescs := []roachpb.RangeDescriptor{{RangeID: 3}, {RangeID: 4}}
rangeLookupRespC <- &kvpb.RangeLookupResponse{
Descriptors: descs, PrefetchedDescriptors: preDescs,
}
const rc = kvpb.READ_UNCOMMITTED
resDescs, resPreDescs, err := c.RangeLookup(ctx, roachpb.RKey("a"), rc, false /* useReverseScan */)
require.Equal(t, descs, resDescs)
require.Equal(t, preDescs, resPreDescs)
require.NoError(t, err)
// Error case.
rangeLookupRespC <- &kvpb.RangeLookupResponse{
Error: kvpb.NewErrorf("hit error"),
}
resDescs, resPreDescs, err = c.RangeLookup(ctx, roachpb.RKey("a"), rc, false /* useReverseScan */)
require.Nil(t, resDescs)
require.Nil(t, resPreDescs)
require.Regexp(t, "hit error", err)
// Context cancelation.
canceledCtx, cancel := context.WithCancel(ctx)
blockingC := make(chan struct{})
server.rangeLookupFn = func(ctx context.Context, _ *kvpb.RangeLookupRequest) (*kvpb.RangeLookupResponse, error) {
<-blockingC
<-ctx.Done()
return nil, ctx.Err()
}
go func() {
blockingC <- struct{}{}
cancel()
}()
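// Sending on blockingC guarantees the server handler is running before the
// context is canceled, so RangeLookup must observe the cancellation rather
// than a successful response.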
resDescs, resPreDescs, err = c.RangeLookup(canceledCtx, roachpb.RKey("a"), rc, false /* useReverseScan */)
require.Nil(t, resDescs)
require.Nil(t, resPreDescs)
require.Regexp(t, context.Canceled.Error(), err)
}
// TestConnectorRetriesUnreachable tests that the connector iterates over each
// of its provided addresses and retries until it is able to establish a
// connection to one of them.
func TestConnectorRetriesUnreachable(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
stopper := stop.NewStopper()
defer stopper.Stop(ctx)
clock := hlc.NewClockForTesting(nil)
rpcContext := rpc.NewInsecureTestingContext(ctx, clock, stopper)
s, err := rpc.NewServer(ctx, rpcContext)
require.NoError(t, err)
node1 := &roachpb.NodeDescriptor{NodeID: 1, Address: util.MakeUnresolvedAddr("tcp", "1.1.1.1")}
node2 := &roachpb.NodeDescriptor{NodeID: 2, Address: util.MakeUnresolvedAddr("tcp", "2.2.2.2")}
gossipSubEvents := []*kvpb.GossipSubscriptionEvent{
gossipEventForClusterID(rpcContext.StorageClusterID.Get()),
gossipEventForNodeDesc(node1),
gossipEventForNodeDesc(node2),
}
gossipSubFn := func(req *kvpb.GossipSubscriptionRequest, stream kvpb.Internal_GossipSubscriptionServer) error {
assert.Len(t, req.Patterns, 4)
assert.Equal(t, "cluster-id", req.Patterns[0])
assert.Equal(t, "node:.*", req.Patterns[1])
assert.Equal(t, "store:.*", req.Patterns[2])
assert.Equal(t, "system-db", req.Patterns[3])
for _, event := range gossipSubEvents {
if err := stream.Send(event); err != nil {
return err
}
}
<-stream.Context().Done()
return stream.Context().Err()
}
kvpb.RegisterInternalServer(s, &mockServer{gossipSubFn: gossipSubFn})
// Decompose netutil.ListenAndServeGRPC so we can listen before serving.
ln, err := net.Listen(util.TestAddr.Network(), util.TestAddr.String())
require.NoError(t, err)
stopper.AddCloser(stop.CloserFn(s.Stop))
_ = stopper.RunAsyncTask(ctx, "wait-quiesce", func(context.Context) {
<-stopper.ShouldQuiesce()
netutil.FatalIfUnexpected(ln.Close())
})
// Add listen address into list of other bogus addresses.
cfg := ConnectorConfig{
AmbientCtx: log.MakeTestingAmbientContext(stopper.Tracer()),
RPCContext: rpcContext,
RPCRetryOptions: rpcRetryOpts,
}
addrs := []string{"1.1.1.1:9999", ln.Addr().String(), "2.2.2.2:9999"}
c := newConnector(cfg, addrs)
c.rpcDialTimeout = 5 * time.Millisecond // speed up test
// Start should block until the first GossipSubscription response.
startedC := make(chan error)
go func() {
startedC <- c.Start(ctx)
}()
select {
case err := <-startedC:
t.Fatalf("Start unexpectedly completed with err=%v", err)
case <-time.After(25 * time.Millisecond):
}
// Begin serving on the gRPC server. The connector should quickly connect
// and complete startup.
_ = stopper.RunAsyncTask(ctx, "serve", func(context.Context) {
netutil.FatalIfUnexpected(s.Serve(ln))
})
require.NoError(t, <-startedC)
// Test kvcoord.NodeDescStore impl. Wait for full update first.
waitForNodeDesc(t, c, 2)
desc, err := c.GetNodeDescriptor(1)
require.Equal(t, node1, desc)
require.NoError(t, err)
desc, err = c.GetNodeDescriptor(2)
require.Equal(t, node2, desc)
require.NoError(t, err)
desc, err = c.GetNodeDescriptor(3)
require.Nil(t, desc)
require.True(t, errors.HasType(err, &kvpb.DescNotFoundError{}))
require.Regexp(t, "node descriptor with node ID 3 was not found", err)
}
// TestConnectorRetriesError tests that the connector iterates over each of
// its provided addresses and retries if the error is retriable, or bails out
// immediately if it is not.
func TestConnectorRetriesError(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
stopper := stop.NewStopper()
defer stopper.Stop(ctx)
clock := hlc.NewClockForTesting(nil)
rpcContext := rpc.NewInsecureTestingContext(ctx, clock, stopper)
// createServer creates an RPC server that delegates to the given gossip and
// range lookup callbacks. It returns the address on which the server is
// listening, for use in the connector.
createServer := func(
t *testing.T,
gossipSubFn func(req *kvpb.GossipSubscriptionRequest, stream kvpb.Internal_GossipSubscriptionServer) error,
rangeLookupFn func(_ context.Context, req *kvpb.RangeLookupRequest) (*kvpb.RangeLookupResponse, error),
) string {
internalServer, err := rpc.NewServer(ctx, rpcContext)
require.NoError(t, err)
kvpb.RegisterInternalServer(internalServer, &mockServer{rangeLookupFn: rangeLookupFn, gossipSubFn: gossipSubFn})
ln, err := net.Listen(util.TestAddr.Network(), util.TestAddr.String())
require.NoError(t, err)
stopper.AddCloser(stop.CloserFn(internalServer.Stop))
_ = stopper.RunAsyncTask(ctx, "wait-quiesce", func(context.Context) {
<-stopper.ShouldQuiesce()
netutil.FatalIfUnexpected(ln.Close())
})
_ = stopper.RunAsyncTask(ctx, "serve", func(context.Context) {
netutil.FatalIfUnexpected(internalServer.Serve(ln))
})
return ln.Addr().String()
}
for _, spec := range []struct {
code codes.Code
shouldRetry bool
}{
{codes.Unauthenticated, false},
{codes.PermissionDenied, false},
{codes.FailedPrecondition, true},
} {
t.Run(fmt.Sprintf("error %v retries %v", spec.code, spec.shouldRetry), func(t *testing.T) {
gossipSubFn := func(req *kvpb.GossipSubscriptionRequest, stream kvpb.Internal_GossipSubscriptionServer) error {
return stream.Send(gossipEventForClusterID(rpcContext.StorageClusterID.Get()))
}
rangeLookupFn := func(_ context.Context, req *kvpb.RangeLookupRequest) (*kvpb.RangeLookupResponse, error) {
descs := []roachpb.RangeDescriptor{{RangeID: 1}, {RangeID: 2}}
preDescs := []roachpb.RangeDescriptor{{RangeID: 3}, {RangeID: 4}}
return &kvpb.RangeLookupResponse{
Descriptors: descs, PrefetchedDescriptors: preDescs,
}, nil
}
var errorsReported int32
rangeLookupRejectorFn := func(_ context.Context, req *kvpb.RangeLookupRequest) (*kvpb.RangeLookupResponse, error) {
// Always respond with an error.
atomic.AddInt32(&errorsReported, 1)
return nil, grpcstatus.Errorf(spec.code, "range lookup rejected")
}
addr1 := createServer(t, gossipSubFn, rangeLookupFn)
addr2 := createServer(t, gossipSubFn, rangeLookupRejectorFn)
// Add listen address into list of other bogus addresses.
cfg := ConnectorConfig{
AmbientCtx: log.MakeTestingAmbientContext(stopper.Tracer()),
RPCContext: rpcContext,
RPCRetryOptions: rpcRetryOpts,
}
addrs := []string{addr1, addr2}
c := newConnector(cfg, addrs)
c.rpcDialTimeout = 5 * time.Millisecond // speed up test
require.NoError(t, c.Start(ctx), "connector can't start")
// The test makes range lookups until the server returning errors is hit. It
// then checks whether the error was propagated. We use multiple iterations
// because the server choice is random and we only need to hit the failing
// server once to check whether the lookup was retried.
for i := 0; i < 100; i++ {
_, _, err := c.RangeLookup(
ctx, roachpb.RKey("a"), kvpb.READ_UNCOMMITTED, false,
)
if atomic.LoadInt32(&errorsReported) == 0 {
continue
}
if spec.shouldRetry {
require.NoError(t, err, "Lookup should retry instead of failing")
} else {
require.Error(t, err, "Lookup should propagate error immediately")
}
break
}
})
}
}
| pkg/kv/kvclient/kvtenant/connector_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/592272ca2aa909fc5dd3aed29eb3fff30c567120 | [
0.00028205584385432303,
0.00017160350398626179,
0.0001564335252624005,
0.0001694527454674244,
0.000016399921150878072
] |
{
"id": 1,
"code_window": [
"\t\t\treturn execPlan{}, colOrdMap{}, err\n",
"\t\t}\n",
"\t}\n",
"\tnotNullColsSet, err := getNodeColumnOrdinalSet(inputCols, export.Input.Relational().NotNullCols)\n",
"\tif err != nil {\n",
"\t\treturn execPlan{}, colOrdMap{}, err\n",
"\t}\n",
"\n",
"\tvar ep execPlan\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\tvar notNullOrds exec.NodeColumnOrdinalSet\n",
"\tnotNullCols := export.Input.Relational().NotNullCols\n",
"\tfor col, ok := notNullCols.Next(0); ok; col, ok = notNullCols.Next(col + 1) {\n",
"\t\t// Ignore NOT NULL columns that are not part of the input execPlan's\n",
"\t\t// output columns. This can happen when applyPresentation projects-away\n",
"\t\t// some output columns of the input expression. For example, a Sort\n",
"\t\t// expression must output a column it orders by, but that column must be\n",
"\t\t// projected-away after the sort if the presentation does not require\n",
"\t\t// the column.\n",
"\t\tif ord, ok := inputCols.Get(col); ok {\n",
"\t\t\tnotNullOrds.Add(ord)\n",
"\t\t}\n"
],
"file_path": "pkg/sql/opt/exec/execbuilder/statement.go",
"type": "replace",
"edit_start_line_idx": 445
} | parse
CREATE SEQUENCE a
----
CREATE SEQUENCE a
CREATE SEQUENCE a -- fully parenthesized
CREATE SEQUENCE a -- literals removed
CREATE SEQUENCE _ -- identifiers removed
parse
EXPLAIN CREATE SEQUENCE a
----
EXPLAIN CREATE SEQUENCE a
EXPLAIN CREATE SEQUENCE a -- fully parenthesized
EXPLAIN CREATE SEQUENCE a -- literals removed
EXPLAIN CREATE SEQUENCE _ -- identifiers removed
parse
CREATE SEQUENCE IF NOT EXISTS a
----
CREATE SEQUENCE IF NOT EXISTS a
CREATE SEQUENCE IF NOT EXISTS a -- fully parenthesized
CREATE SEQUENCE IF NOT EXISTS a -- literals removed
CREATE SEQUENCE IF NOT EXISTS _ -- identifiers removed
parse
CREATE SEQUENCE a CYCLE
----
CREATE SEQUENCE a CYCLE
CREATE SEQUENCE a CYCLE -- fully parenthesized
CREATE SEQUENCE a CYCLE -- literals removed
CREATE SEQUENCE _ CYCLE -- identifiers removed
parse
CREATE SEQUENCE a NO CYCLE
----
CREATE SEQUENCE a NO CYCLE
CREATE SEQUENCE a NO CYCLE -- fully parenthesized
CREATE SEQUENCE a NO CYCLE -- literals removed
CREATE SEQUENCE _ NO CYCLE -- identifiers removed
parse
CREATE SEQUENCE a CACHE 0
----
CREATE SEQUENCE a CACHE 0
CREATE SEQUENCE a CACHE 0 -- fully parenthesized
CREATE SEQUENCE a CACHE 0 -- literals removed
CREATE SEQUENCE _ CACHE 0 -- identifiers removed
parse
CREATE SEQUENCE a CACHE 1
----
CREATE SEQUENCE a CACHE 1
CREATE SEQUENCE a CACHE 1 -- fully parenthesized
CREATE SEQUENCE a CACHE 0 -- literals removed
CREATE SEQUENCE _ CACHE 1 -- identifiers removed
parse
CREATE SEQUENCE a CACHE 2
----
CREATE SEQUENCE a CACHE 2
CREATE SEQUENCE a CACHE 2 -- fully parenthesized
CREATE SEQUENCE a CACHE 0 -- literals removed
CREATE SEQUENCE _ CACHE 2 -- identifiers removed
parse
CREATE SEQUENCE a INCREMENT 5
----
CREATE SEQUENCE a INCREMENT 5
CREATE SEQUENCE a INCREMENT 5 -- fully parenthesized
CREATE SEQUENCE a INCREMENT 0 -- literals removed
CREATE SEQUENCE _ INCREMENT 5 -- identifiers removed
parse
CREATE SEQUENCE a INCREMENT BY 5
----
CREATE SEQUENCE a INCREMENT BY 5
CREATE SEQUENCE a INCREMENT BY 5 -- fully parenthesized
CREATE SEQUENCE a INCREMENT BY 0 -- literals removed
CREATE SEQUENCE _ INCREMENT BY 5 -- identifiers removed
parse
CREATE SEQUENCE a NO MAXVALUE
----
CREATE SEQUENCE a NO MAXVALUE
CREATE SEQUENCE a NO MAXVALUE -- fully parenthesized
CREATE SEQUENCE a NO MAXVALUE -- literals removed
CREATE SEQUENCE _ NO MAXVALUE -- identifiers removed
parse
CREATE SEQUENCE a MAXVALUE 1000
----
CREATE SEQUENCE a MAXVALUE 1000
CREATE SEQUENCE a MAXVALUE 1000 -- fully parenthesized
CREATE SEQUENCE a MAXVALUE 0 -- literals removed
CREATE SEQUENCE _ MAXVALUE 1000 -- identifiers removed
parse
CREATE SEQUENCE a NO MINVALUE
----
CREATE SEQUENCE a NO MINVALUE
CREATE SEQUENCE a NO MINVALUE -- fully parenthesized
CREATE SEQUENCE a NO MINVALUE -- literals removed
CREATE SEQUENCE _ NO MINVALUE -- identifiers removed
parse
CREATE SEQUENCE a MINVALUE 1000
----
CREATE SEQUENCE a MINVALUE 1000
CREATE SEQUENCE a MINVALUE 1000 -- fully parenthesized
CREATE SEQUENCE a MINVALUE 0 -- literals removed
CREATE SEQUENCE _ MINVALUE 1000 -- identifiers removed
parse
CREATE SEQUENCE a START 1000
----
CREATE SEQUENCE a START 1000
CREATE SEQUENCE a START 1000 -- fully parenthesized
CREATE SEQUENCE a START 0 -- literals removed
CREATE SEQUENCE _ START 1000 -- identifiers removed
parse
CREATE SEQUENCE a START WITH 1000
----
CREATE SEQUENCE a START WITH 1000
CREATE SEQUENCE a START WITH 1000 -- fully parenthesized
CREATE SEQUENCE a START WITH 0 -- literals removed
CREATE SEQUENCE _ START WITH 1000 -- identifiers removed
parse
CREATE SEQUENCE a INCREMENT 5 NO MAXVALUE MINVALUE 1 START 3
----
CREATE SEQUENCE a INCREMENT 5 NO MAXVALUE MINVALUE 1 START 3
CREATE SEQUENCE a INCREMENT 5 NO MAXVALUE MINVALUE 1 START 3 -- fully parenthesized
CREATE SEQUENCE a INCREMENT 0 NO MAXVALUE MINVALUE 0 START 0 -- literals removed
CREATE SEQUENCE _ INCREMENT 5 NO MAXVALUE MINVALUE 1 START 3 -- identifiers removed
parse
CREATE SEQUENCE a INCREMENT 5 NO CYCLE NO MAXVALUE MINVALUE 1 START 3 CACHE 1
----
CREATE SEQUENCE a INCREMENT 5 NO CYCLE NO MAXVALUE MINVALUE 1 START 3 CACHE 1
CREATE SEQUENCE a INCREMENT 5 NO CYCLE NO MAXVALUE MINVALUE 1 START 3 CACHE 1 -- fully parenthesized
CREATE SEQUENCE a INCREMENT 0 NO CYCLE NO MAXVALUE MINVALUE 0 START 0 CACHE 0 -- literals removed
CREATE SEQUENCE _ INCREMENT 5 NO CYCLE NO MAXVALUE MINVALUE 1 START 3 CACHE 1 -- identifiers removed
parse
CREATE SEQUENCE a VIRTUAL
----
CREATE SEQUENCE a VIRTUAL
CREATE SEQUENCE a VIRTUAL -- fully parenthesized
CREATE SEQUENCE a VIRTUAL -- literals removed
CREATE SEQUENCE _ VIRTUAL -- identifiers removed
parse
CREATE TEMPORARY SEQUENCE a
----
CREATE TEMPORARY SEQUENCE a
CREATE TEMPORARY SEQUENCE a -- fully parenthesized
CREATE TEMPORARY SEQUENCE a -- literals removed
CREATE TEMPORARY SEQUENCE _ -- identifiers removed
parse
CREATE SEQUENCE a OWNED BY b
----
CREATE SEQUENCE a OWNED BY b
CREATE SEQUENCE a OWNED BY (b) -- fully parenthesized
CREATE SEQUENCE a OWNED BY b -- literals removed
CREATE SEQUENCE _ OWNED BY _ -- identifiers removed
parse
CREATE SEQUENCE a OWNED BY NONE
----
CREATE SEQUENCE a OWNED BY NONE
CREATE SEQUENCE a OWNED BY NONE -- fully parenthesized
CREATE SEQUENCE a OWNED BY NONE -- literals removed
CREATE SEQUENCE _ OWNED BY NONE -- identifiers removed
parse
CREATE SEQUENCE a AS integer
----
CREATE SEQUENCE a AS INT8 -- normalized!
CREATE SEQUENCE a AS INT8 -- fully parenthesized
CREATE SEQUENCE a AS INT8 -- literals removed
CREATE SEQUENCE _ AS INT8 -- identifiers removed
parse
CREATE SEQUENCE a AS int
----
CREATE SEQUENCE a AS INT8 -- normalized!
CREATE SEQUENCE a AS INT8 -- fully parenthesized
CREATE SEQUENCE a AS INT8 -- literals removed
CREATE SEQUENCE _ AS INT8 -- identifiers removed
parse
CREATE SEQUENCE a AS bigint
----
CREATE SEQUENCE a AS INT8 -- normalized!
CREATE SEQUENCE a AS INT8 -- fully parenthesized
CREATE SEQUENCE a AS INT8 -- literals removed
CREATE SEQUENCE _ AS INT8 -- identifiers removed
parse
CREATE SEQUENCE a AS smallint
----
CREATE SEQUENCE a AS INT2 -- normalized!
CREATE SEQUENCE a AS INT2 -- fully parenthesized
CREATE SEQUENCE a AS INT2 -- literals removed
CREATE SEQUENCE _ AS INT2 -- identifiers removed
parse
CREATE SEQUENCE a RESTART
----
CREATE SEQUENCE a RESTART
CREATE SEQUENCE a RESTART -- fully parenthesized
CREATE SEQUENCE a RESTART -- literals removed
CREATE SEQUENCE _ RESTART -- identifiers removed
parse
CREATE SEQUENCE a RESTART WITH 1000
----
CREATE SEQUENCE a RESTART WITH 1000
CREATE SEQUENCE a RESTART WITH 1000 -- fully parenthesized
CREATE SEQUENCE a RESTART WITH 0 -- literals removed
CREATE SEQUENCE _ RESTART WITH 1000 -- identifiers removed
parse
EXPLAIN CREATE SEQUENCE a INCREMENT BY 5 RESTART WITH 1000
----
EXPLAIN CREATE SEQUENCE a INCREMENT BY 5 RESTART WITH 1000
EXPLAIN CREATE SEQUENCE a INCREMENT BY 5 RESTART WITH 1000 -- fully parenthesized
EXPLAIN CREATE SEQUENCE a INCREMENT BY 0 RESTART WITH 0 -- literals removed
EXPLAIN CREATE SEQUENCE _ INCREMENT BY 5 RESTART WITH 1000 -- identifiers removed
parse
CREATE SEQUENCE IF NOT EXISTS a INCREMENT BY 5 RESTART WITH 1000
----
CREATE SEQUENCE IF NOT EXISTS a INCREMENT BY 5 RESTART WITH 1000
CREATE SEQUENCE IF NOT EXISTS a INCREMENT BY 5 RESTART WITH 1000 -- fully parenthesized
CREATE SEQUENCE IF NOT EXISTS a INCREMENT BY 0 RESTART WITH 0 -- literals removed
CREATE SEQUENCE IF NOT EXISTS _ INCREMENT BY 5 RESTART WITH 1000 -- identifiers removed
error
CREATE SEQUENCE s1 AS abc
----
at or near "EOF": syntax error: type "abc" does not exist
DETAIL: source SQL:
CREATE SEQUENCE s1 AS abc
^
| pkg/sql/parser/testdata/create_sequence | 0 | https://github.com/cockroachdb/cockroach/commit/592272ca2aa909fc5dd3aed29eb3fff30c567120 | [
0.0001896302419481799,
0.00016642695118207484,
0.0001616783265490085,
0.00016468498506583273,
0.000006390386261045933
] |
{
"id": 2,
"code_window": [
"\tep.root, err = b.factory.ConstructExport(\n",
"\t\tinput.root,\n",
"\t\tfileName,\n",
"\t\texport.FileFormat,\n",
"\t\topts,\n",
"\t\tnotNullColsSet,\n",
"\t)\n",
"\tif err != nil {\n",
"\t\treturn execPlan{}, colOrdMap{}, err\n",
"\t}\n",
"\treturn ep, b.outputColsFromList(export.Columns), nil\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tnotNullOrds,\n"
],
"file_path": "pkg/sql/opt/exec/execbuilder/statement.go",
"type": "replace",
"edit_start_line_idx": 456
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package execbuilder
import (
"bytes"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/cat"
"github.com/cockroachdb/cockroach/pkg/sql/opt/exec"
"github.com/cockroachdb/cockroach/pkg/sql/opt/exec/explain"
"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
"github.com/cockroachdb/cockroach/pkg/sql/opt/xform"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/util/treeprinter"
"github.com/cockroachdb/redact"
)
func (b *Builder) buildCreateTable(
ct *memo.CreateTableExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
schema := b.mem.Metadata().Schema(ct.Schema)
if !ct.Syntax.As() {
root, err := b.factory.ConstructCreateTable(schema, ct.Syntax)
return execPlan{root: root}, colOrdMap{}, err
}
// Construct AS input to CREATE TABLE.
input, inputCols, err := b.buildRelational(ct.Input)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
// Impose ordering and naming on input columns, so that they match the
// order and names of the table columns into which values will be
// inserted.
input, _, err = b.applyPresentation(input, inputCols, ct.InputCols)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
root, err := b.factory.ConstructCreateTableAs(input.root, schema, ct.Syntax)
return execPlan{root: root}, colOrdMap{}, err
}
func (b *Builder) buildCreateView(
cv *memo.CreateViewExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
md := b.mem.Metadata()
schema := md.Schema(cv.Schema)
cols := make(colinfo.ResultColumns, len(cv.Columns))
for i := range cols {
cols[i].Name = cv.Columns[i].Alias
cols[i].Typ = md.ColumnMeta(cv.Columns[i].ID).Type
}
root, err := b.factory.ConstructCreateView(
cv.Syntax,
schema,
cv.ViewQuery,
cols,
cv.Deps,
cv.TypeDeps,
)
return execPlan{root: root}, colOrdMap{}, err
}
func (b *Builder) buildCreateFunction(
cf *memo.CreateFunctionExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
md := b.mem.Metadata()
schema := md.Schema(cf.Schema)
root, err := b.factory.ConstructCreateFunction(
schema,
cf.Syntax,
cf.Deps,
cf.TypeDeps,
)
return execPlan{root: root}, colOrdMap{}, err
}
func (b *Builder) buildExplainOpt(
explain *memo.ExplainExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
fmtFlags := memo.ExprFmtHideAll
switch {
case explain.Options.Flags[tree.ExplainFlagVerbose]:
fmtFlags = memo.ExprFmtHideQualifications | memo.ExprFmtHideScalars |
memo.ExprFmtHideTypes | memo.ExprFmtHideNotNull | memo.ExprFmtHideNotVisibleIndexInfo |
memo.ExprFmtHideFastPathChecks
case explain.Options.Flags[tree.ExplainFlagTypes]:
fmtFlags = memo.ExprFmtHideQualifications | memo.ExprFmtHideNotVisibleIndexInfo
}
redactValues := explain.Options.Flags[tree.ExplainFlagRedact]
// Format the plan here and pass it through to the exec factory.
// If catalog option was passed, show catalog object details for all tables.
var planText bytes.Buffer
if explain.Options.Flags[tree.ExplainFlagCatalog] {
for _, t := range b.mem.Metadata().AllTables() {
tp := treeprinter.New()
cat.FormatTable(b.ctx, b.catalog, t.Table, tp, redactValues)
catStr := tp.String()
if redactValues {
catStr = string(redact.RedactableString(catStr).Redact())
}
planText.WriteString(catStr)
}
// TODO(radu): add views, sequences
}
// If MEMO option was passed, show the memo.
if explain.Options.Flags[tree.ExplainFlagMemo] {
memoStr := b.optimizer.FormatMemo(xform.FmtPretty, redactValues)
if redactValues {
memoStr = string(redact.RedactableString(memoStr).Redact())
}
planText.WriteString(memoStr)
}
f := memo.MakeExprFmtCtx(b.ctx, fmtFlags, redactValues, b.mem, b.catalog)
f.FormatExpr(explain.Input)
planStr := f.Buffer.String()
if redactValues {
planStr = string(redact.RedactableString(planStr).Redact())
}
planText.WriteString(planStr)
// If we're going to display the environment, there's a bunch of queries we
// need to run to get that information, and we can't run them from here, so
// tell the exec factory what information it needs to fetch.
var envOpts exec.ExplainEnvData
if explain.Options.Flags[tree.ExplainFlagEnv] {
var err error
envOpts, err = b.getEnvData()
if err != nil {
return execPlan{}, colOrdMap{}, err
}
}
var ep execPlan
ep.root, err = b.factory.ConstructExplainOpt(planText.String(), envOpts)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
return ep, b.outputColsFromList(explain.ColList), nil
}
func (b *Builder) buildExplain(
explainExpr *memo.ExplainExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
if explainExpr.Options.Mode == tree.ExplainOpt {
return b.buildExplainOpt(explainExpr)
}
var ep execPlan
ep.root, err = b.factory.ConstructExplain(
&explainExpr.Options,
explainExpr.StmtType,
func(f exec.Factory) (exec.Plan, error) {
// Create a separate builder for the explain query. buildRelational
// annotates nodes with extra information when the factory is an
// exec.ExplainFactory so it must be the outer factory and the gist
// factory must be the inner factory.
gf := explain.NewPlanGistFactory(f)
ef := explain.NewFactory(gf, b.semaCtx, b.evalCtx)
explainBld := New(
b.ctx, ef, b.optimizer, b.mem, b.catalog, explainExpr.Input,
b.semaCtx, b.evalCtx, b.initialAllowAutoCommit, b.IsANSIDML,
)
explainBld.disableTelemetry = true
plan, err := explainBld.Build()
if err != nil {
return nil, err
}
explainPlan := plan.(*explain.Plan)
explainPlan.Gist = gf.PlanGist()
return plan, nil
},
)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
return ep, b.outputColsFromList(explainExpr.ColList), nil
}
func (b *Builder) buildShowTrace(
show *memo.ShowTraceForSessionExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
var ep execPlan
ep.root, err = b.factory.ConstructShowTrace(show.TraceType, show.Compact)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
return ep, b.outputColsFromList(show.ColList), nil
}
func (b *Builder) buildAlterTableSplit(
split *memo.AlterTableSplitExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
input, _, err := b.buildRelational(split.Input)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
scalarCtx := buildScalarCtx{}
expiration, err := b.buildScalar(&scalarCtx, split.Expiration)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
table := b.mem.Metadata().Table(split.Table)
var ep execPlan
ep.root, err = b.factory.ConstructAlterTableSplit(
table.Index(split.Index),
input.root,
expiration,
)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
return ep, b.outputColsFromList(split.Columns), nil
}
func (b *Builder) buildAlterTableUnsplit(
unsplit *memo.AlterTableUnsplitExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
input, _, err := b.buildRelational(unsplit.Input)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
table := b.mem.Metadata().Table(unsplit.Table)
var ep execPlan
ep.root, err = b.factory.ConstructAlterTableUnsplit(
table.Index(unsplit.Index),
input.root,
)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
return ep, b.outputColsFromList(unsplit.Columns), nil
}
func (b *Builder) buildAlterTableUnsplitAll(
unsplitAll *memo.AlterTableUnsplitAllExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
table := b.mem.Metadata().Table(unsplitAll.Table)
var ep execPlan
ep.root, err = b.factory.ConstructAlterTableUnsplitAll(table.Index(unsplitAll.Index))
if err != nil {
return execPlan{}, colOrdMap{}, err
}
return ep, b.outputColsFromList(unsplitAll.Columns), nil
}
func (b *Builder) buildAlterTableRelocate(
relocate *memo.AlterTableRelocateExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
input, _, err := b.buildRelational(relocate.Input)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
table := b.mem.Metadata().Table(relocate.Table)
var ep execPlan
ep.root, err = b.factory.ConstructAlterTableRelocate(
table.Index(relocate.Index),
input.root,
relocate.SubjectReplicas,
)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
return ep, b.outputColsFromList(relocate.Columns), nil
}
func (b *Builder) buildAlterRangeRelocate(
relocate *memo.AlterRangeRelocateExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
input, _, err := b.buildRelational(relocate.Input)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
scalarCtx := buildScalarCtx{}
toStoreID, err := b.buildScalar(&scalarCtx, relocate.ToStoreID)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
fromStoreID, err := b.buildScalar(&scalarCtx, relocate.FromStoreID)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
var ep execPlan
ep.root, err = b.factory.ConstructAlterRangeRelocate(
input.root,
relocate.SubjectReplicas,
toStoreID,
fromStoreID,
)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
return ep, b.outputColsFromList(relocate.Columns), nil
}
func (b *Builder) buildControlJobs(
ctl *memo.ControlJobsExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
input, _, err := b.buildRelational(ctl.Input)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
scalarCtx := buildScalarCtx{}
reason, err := b.buildScalar(&scalarCtx, ctl.Reason)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
var ep execPlan
ep.root, err = b.factory.ConstructControlJobs(
ctl.Command,
input.root,
reason,
)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
// ControlJobs returns no columns.
return ep, colOrdMap{}, nil
}
func (b *Builder) buildControlSchedules(
ctl *memo.ControlSchedulesExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
input, _, err := b.buildRelational(ctl.Input)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
var ep execPlan
ep.root, err = b.factory.ConstructControlSchedules(
ctl.Command,
input.root,
)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
// ControlSchedules returns no columns.
return ep, colOrdMap{}, nil
}
func (b *Builder) buildShowCompletions(
ctl *memo.ShowCompletionsExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
var ep execPlan
ep.root, err = b.factory.ConstructShowCompletions(
ctl.Command,
)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
return ep, b.outputColsFromList(ctl.Columns), nil
}
func (b *Builder) buildCancelQueries(
cancel *memo.CancelQueriesExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
input, _, err := b.buildRelational(cancel.Input)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
var ep execPlan
ep.root, err = b.factory.ConstructCancelQueries(input.root, cancel.IfExists)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
if !b.disableTelemetry {
telemetry.Inc(sqltelemetry.CancelQueriesUseCounter)
}
// CancelQueries returns no columns.
return ep, colOrdMap{}, nil
}
func (b *Builder) buildCancelSessions(
cancel *memo.CancelSessionsExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
input, _, err := b.buildRelational(cancel.Input)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
node, err := b.factory.ConstructCancelSessions(input.root, cancel.IfExists)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
if !b.disableTelemetry {
telemetry.Inc(sqltelemetry.CancelSessionsUseCounter)
}
// CancelSessions returns no columns.
return execPlan{root: node}, colOrdMap{}, nil
}
func (b *Builder) buildCreateStatistics(
c *memo.CreateStatisticsExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
node, err := b.factory.ConstructCreateStatistics(c.Syntax)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
// CreateStatistics returns no columns.
return execPlan{root: node}, colOrdMap{}, nil
}
func (b *Builder) buildExport(
export *memo.ExportExpr,
) (_ execPlan, outputCols colOrdMap, err error) {
input, inputCols, err := b.buildRelational(export.Input)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
scalarCtx := buildScalarCtx{}
fileName, err := b.buildScalar(&scalarCtx, export.FileName)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
opts := make([]exec.KVOption, len(export.Options))
for i, o := range export.Options {
opts[i].Key = o.Key
var err error
opts[i].Value, err = b.buildScalar(&scalarCtx, o.Value)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
}
notNullColsSet, err := getNodeColumnOrdinalSet(inputCols, export.Input.Relational().NotNullCols)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
var ep execPlan
ep.root, err = b.factory.ConstructExport(
input.root,
fileName,
export.FileFormat,
opts,
notNullColsSet,
)
if err != nil {
return execPlan{}, colOrdMap{}, err
}
return ep, b.outputColsFromList(export.Columns), nil
}
// outputColsFromList returns a column ordinal map for a node that has a
// fixed list of output columns.
func (b *Builder) outputColsFromList(cols opt.ColList) colOrdMap {
outputCols := b.colOrdsAlloc.Alloc()
for i, c := range cols {
outputCols.Set(c, i)
}
return outputCols
}
| pkg/sql/opt/exec/execbuilder/statement.go | 1 | https://github.com/cockroachdb/cockroach/commit/592272ca2aa909fc5dd3aed29eb3fff30c567120 | [
0.9982183575630188,
0.025502102449536324,
0.00016273639630526304,
0.003727935953065753,
0.14197544753551483
] |
{
"id": 2,
"code_window": [
"\tep.root, err = b.factory.ConstructExport(\n",
"\t\tinput.root,\n",
"\t\tfileName,\n",
"\t\texport.FileFormat,\n",
"\t\topts,\n",
"\t\tnotNullColsSet,\n",
"\t)\n",
"\tif err != nil {\n",
"\t\treturn execPlan{}, colOrdMap{}, err\n",
"\t}\n",
"\treturn ep, b.outputColsFromList(export.Columns), nil\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tnotNullOrds,\n"
],
"file_path": "pkg/sql/opt/exec/execbuilder/statement.go",
"type": "replace",
"edit_start_line_idx": 456
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import React from "react";
import { Alert, Icon } from "antd";
import "antd/lib/alert/style";
import "antd/lib/icon/style";
import { Link } from "react-router-dom";
import { AlertInfo, AlertLevel } from "src/redux/alerts";
import "./alertMessage.styl";
interface AlertMessageProps extends AlertInfo {
autoClose: boolean;
autoCloseTimeout: number;
closable: boolean;
dismiss(): void;
}
type AlertType = "success" | "info" | "warning" | "error";
const mapAlertLevelToType = (alertLevel: AlertLevel): AlertType => {
switch (alertLevel) {
case AlertLevel.SUCCESS:
return "success";
case AlertLevel.NOTIFICATION:
return "info";
case AlertLevel.WARNING:
return "warning";
case AlertLevel.CRITICAL:
return "error";
default:
return "info";
}
};
const getIconType = (alertLevel: AlertLevel): string => {
switch (alertLevel) {
case AlertLevel.SUCCESS:
return "check-circle";
case AlertLevel.NOTIFICATION:
return "info-circle";
case AlertLevel.WARNING:
return "warning";
case AlertLevel.CRITICAL:
return "close-circle";
default:
return "info-circle";
}
};
export class AlertMessage extends React.Component<AlertMessageProps> {
static defaultProps = {
closable: true,
autoCloseTimeout: 6000,
};
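// Handle for the auto-close timer so it can be cleared on unmount.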
timeoutHandler: number;
componentDidMount() {
const { autoClose, dismiss, autoCloseTimeout } = this.props;
if (autoClose) {
this.timeoutHandler = window.setTimeout(dismiss, autoCloseTimeout);
}
}
componentWillUnmount() {
clearTimeout(this.timeoutHandler);
}
render() {
const { level, dismiss, link, title, text, closable } = this.props;
let description: React.ReactNode = text;
if (link) {
description = (
<Link to={link} onClick={dismiss}>
{text}
</Link>
);
}
const type = mapAlertLevelToType(level);
const iconType = getIconType(level);
return (
<Alert
className="alert-massage"
message={title}
description={description}
showIcon
icon={
<Icon
type={iconType}
theme="filled"
className="alert-massage__icon"
/>
}
closable={closable}
onClose={dismiss}
closeText={
closable && <div className="alert-massage__close-text">×</div>
}
type={type}
/>
);
}
}
| pkg/ui/workspaces/db-console/src/views/shared/components/alertMessage/alertMessage.tsx | 0 | https://github.com/cockroachdb/cockroach/commit/592272ca2aa909fc5dd3aed29eb3fff30c567120 | [
0.00017943812417797744,
0.0001748341164784506,
0.00016680570843163878,
0.0001752180978655815,
0.0000029170441848691553
] |
{
"id": 2,
"code_window": [
"\tep.root, err = b.factory.ConstructExport(\n",
"\t\tinput.root,\n",
"\t\tfileName,\n",
"\t\texport.FileFormat,\n",
"\t\topts,\n",
"\t\tnotNullColsSet,\n",
"\t)\n",
"\tif err != nil {\n",
"\t\treturn execPlan{}, colOrdMap{}, err\n",
"\t}\n",
"\treturn ep, b.outputColsFromList(export.Columns), nil\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tnotNullOrds,\n"
],
"file_path": "pkg/sql/opt/exec/execbuilder/statement.go",
"type": "replace",
"edit_start_line_idx": 456
} | // Copyright 2022 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package load
import (
"fmt"
"github.com/cockroachdb/redact"
)
// Vector is a static container which implements the Load interface.
type Vector [nDimensions]float64
var _ Load = Vector{}
// Dim returns the value of the Dimension given.
func (v Vector) Dim(dim Dimension) float64 {
if int(dim) > len(v) || dim < 0 {
panic(fmt.Sprintf("Unknown load dimension access, %d", dim))
}
return v[dim]
}
// String returns a string representation of Load.
func (v Vector) String() string {
return redact.StringWithoutMarkers(v)
}
// SafeFormat implements the redact.SafeFormatter interface.
func (v Vector) SafeFormat(w redact.SafePrinter, _ rune) {
var buf redact.StringBuilder
buf.SafeRune('(')
for i, val := range v {
if i > 0 {
buf.SafeRune(' ')
}
dim := Dimension(i)
buf.Printf("%v=%v", dim, dim.format(val))
}
buf.SafeRune(')')
w.Print(buf)
}
| pkg/kv/kvserver/allocator/load/vector.go | 0 | https://github.com/cockroachdb/cockroach/commit/592272ca2aa909fc5dd3aed29eb3fff30c567120 | [
0.00017984768783207983,
0.00017618823039811105,
0.0001672168873483315,
0.00017758479225449264,
0.000004277114840078866
] |
{
"id": 2,
"code_window": [
"\tep.root, err = b.factory.ConstructExport(\n",
"\t\tinput.root,\n",
"\t\tfileName,\n",
"\t\texport.FileFormat,\n",
"\t\topts,\n",
"\t\tnotNullColsSet,\n",
"\t)\n",
"\tif err != nil {\n",
"\t\treturn execPlan{}, colOrdMap{}, err\n",
"\t}\n",
"\treturn ep, b.outputColsFromList(export.Columns), nil\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tnotNullOrds,\n"
],
"file_path": "pkg/sql/opt/exec/execbuilder/statement.go",
"type": "replace",
"edit_start_line_idx": 456
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package netutil
import (
"fmt"
"io"
"net"
"syscall"
"testing"
"github.com/cockroachdb/cmux"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
)
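// TestIsClosedConnection exercises IsClosedConnection against a variety of
// network, gRPC, and syscall errors, checking which are classified as
// closed-connection errors.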
func TestIsClosedConnection(t *testing.T) {
for _, tc := range []struct {
err error
isClosedError bool
}{
{
fmt.Errorf("an error"),
false,
},
{
net.ErrClosed,
true,
},
{
cmux.ErrListenerClosed,
true,
},
{
grpc.ErrServerStopped,
true,
},
{
io.EOF,
true,
},
{
// TODO(rafi): should this be treated the same as EOF?
io.ErrUnexpectedEOF,
false,
},
{
&net.AddrError{Err: "addr", Addr: "err"},
true,
},
{
syscall.ECONNRESET,
true,
},
{
syscall.EADDRINUSE,
true,
},
{
syscall.ECONNABORTED,
true,
},
{
syscall.ECONNREFUSED,
true,
},
{
syscall.EBADMSG,
true,
},
{
syscall.EINTR,
false,
},
{
&timeutil.TimeoutError{},
false,
},
} {
assert.Equalf(t, tc.isClosedError, IsClosedConnection(tc.err),
"expected %q to be evaluated as %v", tc.err, tc.isClosedError,
)
}
}
| pkg/util/netutil/net_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/592272ca2aa909fc5dd3aed29eb3fff30c567120 | [
0.00017935122014023364,
0.0001737373968353495,
0.00017021450912579894,
0.00017290518735535443,
0.0000027953399239777355
] |
{
"id": 0,
"code_window": [
"}\n",
"\n",
"// doModifyColumn updates the column information and reorders all columns.\n",
"func (w *worker) doModifyColumn(t *meta.Meta, job *model.Job, newCol *model.ColumnInfo, oldName *model.CIStr, pos *ast.ColumnPosition, modifyColumnTp byte) (ver int64, _ error) {\n",
"\tdbInfo, err := t.GetDatabase(job.SchemaID)\n",
"\tif err != nil {\n",
"\t\treturn ver, errors.Trace(err)\n",
"\t}\n",
"\n",
"\ttblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tdbInfo, err := checkSchemaExistAndCancelNotExistJob(t, job)\n"
],
"file_path": "ddl/column.go",
"type": "replace",
"edit_start_line_idx": 598
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"github.com/pingcap/errors"
"github.com/pingcap/parser/model"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/meta"
)
func onCreateSchema(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) {
schemaID := job.SchemaID
dbInfo := &model.DBInfo{}
if err := job.DecodeArgs(dbInfo); err != nil {
// Invalid arguments, cancel this job.
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
dbInfo.ID = schemaID
dbInfo.State = model.StateNone
err := checkSchemaNotExists(d, t, schemaID, dbInfo)
if err != nil {
if infoschema.ErrDatabaseExists.Equal(err) {
// The database already exists, can't create it, we should cancel this job now.
job.State = model.JobStateCancelled
}
return ver, errors.Trace(err)
}
ver, err = updateSchemaVersion(t, job)
if err != nil {
return ver, errors.Trace(err)
}
switch dbInfo.State {
case model.StateNone:
// none -> public
dbInfo.State = model.StatePublic
err = t.CreateDatabase(dbInfo)
if err != nil {
return ver, errors.Trace(err)
}
// Finish this job.
job.FinishDBJob(model.JobStateDone, model.StatePublic, ver, dbInfo)
return ver, nil
default:
// We can't enter here.
return ver, errors.Errorf("invalid db state %v", dbInfo.State)
}
}
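// checkSchemaNotExists checks that no database with the same name or ID
// already exists. It consults the in-memory info schema when it is current
// and falls back to listing databases from the store otherwise.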
func checkSchemaNotExists(d *ddlCtx, t *meta.Meta, schemaID int64, dbInfo *model.DBInfo) error {
// d.infoHandle maybe nil in some test.
if d.infoHandle == nil {
return checkSchemaNotExistsFromStore(t, schemaID, dbInfo)
}
// Try to use memory schema info to check first.
currVer, err := t.GetSchemaVersion()
if err != nil {
return err
}
is := d.infoHandle.Get()
if is.SchemaMetaVersion() == currVer {
return checkSchemaNotExistsFromInfoSchema(is, schemaID, dbInfo)
}
return checkSchemaNotExistsFromStore(t, schemaID, dbInfo)
}
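// checkSchemaNotExistsFromInfoSchema checks for an existing database by name
// and by ID using the in-memory info schema.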
func checkSchemaNotExistsFromInfoSchema(is infoschema.InfoSchema, schemaID int64, dbInfo *model.DBInfo) error {
// Check database exists by name.
if is.SchemaExists(dbInfo.Name) {
return infoschema.ErrDatabaseExists.GenWithStackByArgs(dbInfo.Name)
}
// Check database exists by ID.
if _, ok := is.SchemaByID(schemaID); ok {
return infoschema.ErrDatabaseExists.GenWithStackByArgs(dbInfo.Name)
}
return nil
}
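// checkSchemaNotExistsFromStore checks for a database name collision by
// listing all databases from the store.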
func checkSchemaNotExistsFromStore(t *meta.Meta, schemaID int64, dbInfo *model.DBInfo) error {
dbs, err := t.ListDatabases()
if err != nil {
return errors.Trace(err)
}
for _, db := range dbs {
if db.Name.L == dbInfo.Name.L {
if db.ID != schemaID {
return infoschema.ErrDatabaseExists.GenWithStackByArgs(db.Name)
}
dbInfo = db
}
}
return nil
}
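// onModifySchemaCharsetAndCollate updates a database's charset and collation,
// bumping the schema version only when something actually changes.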
func onModifySchemaCharsetAndCollate(t *meta.Meta, job *model.Job) (ver int64, _ error) {
var toCharset, toCollate string
if err := job.DecodeArgs(&toCharset, &toCollate); err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
dbInfo, err := t.GetDatabase(job.SchemaID)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
if dbInfo.Charset == toCharset && dbInfo.Collate == toCollate {
job.FinishDBJob(model.JobStateDone, model.StatePublic, ver, dbInfo)
return ver, nil
}
dbInfo.Charset = toCharset
dbInfo.Collate = toCollate
if err = t.UpdateDatabase(dbInfo); err != nil {
return ver, errors.Trace(err)
}
if ver, err = updateSchemaVersion(t, job); err != nil {
return ver, errors.Trace(err)
}
job.FinishDBJob(model.JobStateDone, model.StatePublic, ver, dbInfo)
return ver, nil
}
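// onDropSchema steps a database through the write-only and delete-only states
// before removing it from the store and recording its table IDs in the job
// arguments.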
func onDropSchema(t *meta.Meta, job *model.Job) (ver int64, _ error) {
dbInfo, err := checkSchemaExistAndCancelNotExistJob(t, job)
if err != nil {
return ver, errors.Trace(err)
}
ver, err = updateSchemaVersion(t, job)
if err != nil {
return ver, errors.Trace(err)
}
switch dbInfo.State {
case model.StatePublic:
// public -> write only
job.SchemaState = model.StateWriteOnly
dbInfo.State = model.StateWriteOnly
err = t.UpdateDatabase(dbInfo)
case model.StateWriteOnly:
// write only -> delete only
job.SchemaState = model.StateDeleteOnly
dbInfo.State = model.StateDeleteOnly
err = t.UpdateDatabase(dbInfo)
case model.StateDeleteOnly:
dbInfo.State = model.StateNone
var tables []*model.TableInfo
tables, err = t.ListTables(job.SchemaID)
if err != nil {
return ver, errors.Trace(err)
}
err = t.UpdateDatabase(dbInfo)
if err != nil {
return ver, errors.Trace(err)
}
if err = t.DropDatabase(dbInfo.ID); err != nil {
break
}
// Finish this job.
if len(tables) > 0 {
job.Args = append(job.Args, getIDs(tables))
}
job.FinishDBJob(model.JobStateDone, model.StateNone, ver, dbInfo)
default:
// We can't enter here.
err = errors.Errorf("invalid db state %v", dbInfo.State)
}
return ver, errors.Trace(err)
}
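// checkSchemaExistAndCancelNotExistJob fetches the job's database and cancels
// the job if the database no longer exists.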
func checkSchemaExistAndCancelNotExistJob(t *meta.Meta, job *model.Job) (*model.DBInfo, error) {
dbInfo, err := t.GetDatabase(job.SchemaID)
if err != nil {
return nil, errors.Trace(err)
}
if dbInfo == nil {
job.State = model.JobStateCancelled
return nil, infoschema.ErrDatabaseDropExists.GenWithStackByArgs("")
}
return dbInfo, nil
}
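// getIDs collects the IDs of the given tables, including the IDs of any of
// their partitions.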
func getIDs(tables []*model.TableInfo) []int64 {
ids := make([]int64, 0, len(tables))
for _, t := range tables {
ids = append(ids, t.ID)
if t.GetPartitionInfo() != nil {
ids = append(ids, getPartitionIDs(t)...)
}
}
return ids
}
| ddl/schema.go | 1 | https://github.com/pingcap/tidb/commit/788dc28a7423d4b85ebe11bbce632733b056d63a | [
0.9969032406806946,
0.5970633029937744,
0.00017309165559709072,
0.9785999655723572,
0.4721469581127167
] |
{
"id": 0,
"code_window": [
"}\n",
"\n",
"// doModifyColumn updates the column information and reorders all columns.\n",
"func (w *worker) doModifyColumn(t *meta.Meta, job *model.Job, newCol *model.ColumnInfo, oldName *model.CIStr, pos *ast.ColumnPosition, modifyColumnTp byte) (ver int64, _ error) {\n",
"\tdbInfo, err := t.GetDatabase(job.SchemaID)\n",
"\tif err != nil {\n",
"\t\treturn ver, errors.Trace(err)\n",
"\t}\n",
"\n",
"\ttblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tdbInfo, err := checkSchemaExistAndCancelNotExistJob(t, job)\n"
],
"file_path": "ddl/column.go",
"type": "replace",
"edit_start_line_idx": 598
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"strconv"
"strings"
"github.com/pingcap/errors"
"github.com/pingcap/parser"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/model"
"github.com/pingcap/tidb/ddl"
_ "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/mock"
log "github.com/sirupsen/logrus"
)
type column struct {
idx int
name string
data *datum
tp *types.FieldType
comment string
min string
max string
incremental bool
set []string
table *table
hist *histogram
}
func (col *column) String() string {
if col == nil {
return "<nil>"
}
return fmt.Sprintf("[column]idx: %d, name: %s, tp: %v, min: %s, max: %s, step: %d, set: %v\n",
col.idx, col.name, col.tp, col.min, col.max, col.data.step, col.set)
}
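// parseRule applies a single key=value rule (range, step, set, incremental,
// repeats, or probability) to the column's data generator.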
func (col *column) parseRule(kvs []string, uniq bool) {
if len(kvs) != 2 {
return
}
key := strings.TrimSpace(kvs[0])
value := strings.TrimSpace(kvs[1])
if key == "range" {
fields := strings.Split(value, ",")
if len(fields) == 1 {
col.min = strings.TrimSpace(fields[0])
} else if len(fields) == 2 {
col.min = strings.TrimSpace(fields[0])
col.max = strings.TrimSpace(fields[1])
}
} else if key == "step" {
var err error
col.data.step, err = strconv.ParseInt(value, 10, 64)
if err != nil {
log.Fatal(err)
}
} else if key == "set" {
fields := strings.Split(value, ",")
for _, field := range fields {
col.set = append(col.set, strings.TrimSpace(field))
}
} else if key == "incremental" {
var err error
col.incremental, err = strconv.ParseBool(value)
if err != nil {
log.Fatal(err)
}
} else if key == "repeats" {
repeats, err := strconv.ParseUint(value, 10, 64)
if err != nil {
log.Fatal(err)
}
if uniq && repeats > 1 {
log.Fatal("cannot repeat more than once on unique columns")
}
col.data.repeats = repeats
col.data.remains = repeats
} else if key == "probability" {
prob, err := strconv.ParseUint(value, 10, 32)
if err != nil {
log.Fatal(err)
}
if prob > 100 || prob == 0 {
log.Fatal("probability must be in (0, 100]")
}
col.data.probability = uint32(prob)
}
}
// parseColumnComment parses the data-generation rules embedded in a column
// comment. For a column like `a int unique comment '[[range=1,10;step=1]]'`,
// values are generated as 1, 2, ..., 10.
func (col *column) parseColumnComment(uniq bool) {
comment := strings.TrimSpace(col.comment)
start := strings.Index(comment, "[[")
end := strings.Index(comment, "]]")
var content string
if start < end {
content = comment[start+2 : end]
}
fields := strings.Split(content, ";")
for _, field := range fields {
field = strings.TrimSpace(field)
kvs := strings.Split(field, "=")
col.parseRule(kvs, uniq)
}
}
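// parseColumn extracts the name, type, options, and comment rules from a
// column definition and registers the column with its table.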
func (col *column) parseColumn(cd *ast.ColumnDef) {
col.name = cd.Name.Name.L
col.tp = cd.Tp
col.parseColumnOptions(cd.Options)
_, uniq := col.table.uniqIndices[col.name]
col.parseColumnComment(uniq)
col.table.columns = append(col.table.columns, col)
}
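// parseColumnOptions records unique constraints and captures the column
// comment from the column's options.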
func (col *column) parseColumnOptions(ops []*ast.ColumnOption) {
for _, op := range ops {
switch op.Tp {
case ast.ColumnOptionPrimaryKey, ast.ColumnOptionUniqKey, ast.ColumnOptionAutoIncrement:
col.table.uniqIndices[col.name] = col
case ast.ColumnOptionComment:
col.comment = op.Expr.(ast.ValueExpr).GetDatumString()
}
}
}
type table struct {
name string
columns []*column
columnList string
indices map[string]*column
uniqIndices map[string]*column
tblInfo *model.TableInfo
}
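// printColumns concatenates the debug representation of every column in the
// table.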
func (t *table) printColumns() string {
ret := ""
for _, col := range t.columns {
ret += fmt.Sprintf("%v", col)
}
return ret
}
func (t *table) String() string {
if t == nil {
return "<nil>"
}
ret := fmt.Sprintf("[table]name: %s\n", t.name)
ret += "[table]columns:\n"
ret += t.printColumns()
ret += fmt.Sprintf("[table]column list: %s\n", t.columnList)
ret += "[table]indices:\n"
for k, v := range t.indices {
ret += fmt.Sprintf("key->%s, value->%v", k, v)
}
ret += "[table]unique indices:\n"
for k, v := range t.uniqIndices {
ret += fmt.Sprintf("key->%s, value->%v", k, v)
}
return ret
}
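// newTable returns a table with its index maps initialized.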
func newTable() *table {
return &table{
indices: make(map[string]*column),
uniqIndices: make(map[string]*column),
}
}
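// findCol returns the column with the given name from cols, or nil if no such
// column exists.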
func (t *table) findCol(cols []*column, name string) *column {
for _, col := range cols {
if col.name == name {
return col
}
}
return nil
}
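// parseTableConstraint records which columns take part in unique and
// non-unique indices declared as table constraints.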
func (t *table) parseTableConstraint(cons *ast.Constraint) {
switch cons.Tp {
case ast.ConstraintPrimaryKey, ast.ConstraintKey, ast.ConstraintUniq,
ast.ConstraintUniqKey, ast.ConstraintUniqIndex:
for _, indexCol := range cons.Keys {
name := indexCol.Column.Name.L
t.uniqIndices[name] = t.findCol(t.columns, name)
}
case ast.ConstraintIndex:
for _, indexCol := range cons.Keys {
name := indexCol.Column.Name.L
t.indices[name] = t.findCol(t.columns, name)
}
}
}
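// buildColumnList caches a comma-separated list of the table's column names.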
func (t *table) buildColumnList() {
columns := make([]string, 0, len(t.columns))
for _, column := range t.columns {
columns = append(columns, column.name)
}
t.columnList = strings.Join(columns, ",")
}
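// parseTable populates the table from a CREATE TABLE statement: it mocks the
// table info and parses the columns and constraints.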
func parseTable(t *table, stmt *ast.CreateTableStmt) error {
t.name = stmt.Table.Name.L
t.columns = make([]*column, 0, len(stmt.Cols))
mockTbl, err := ddl.MockTableInfo(mock.NewContext(), stmt, 1)
if err != nil {
return errors.Trace(err)
}
t.tblInfo = mockTbl
for i, col := range stmt.Cols {
column := &column{idx: i + 1, table: t, data: newDatum()}
column.parseColumn(col)
}
for _, cons := range stmt.Constraints {
t.parseTableConstraint(cons)
}
t.buildColumnList()
return nil
}
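// parseTableSQL parses sql as a CREATE TABLE statement and populates the
// table from it.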
func parseTableSQL(table *table, sql string) error {
stmt, err := parser.New().ParseOneStmt(sql, "", "")
if err != nil {
return errors.Trace(err)
}
switch node := stmt.(type) {
case *ast.CreateTableStmt:
err = parseTable(table, node)
default:
err = errors.Errorf("invalid statement - %v", stmt.Text())
}
return errors.Trace(err)
}
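// parseIndex records the indexed columns of a CREATE INDEX statement,
// distinguishing unique from non-unique indices.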
func parseIndex(table *table, stmt *ast.CreateIndexStmt) error {
if table.name != stmt.Table.Name.L {
return errors.Errorf("mismatch table name for create index - %s : %s", table.name, stmt.Table.Name.L)
}
for _, indexCol := range stmt.IndexPartSpecifications {
name := indexCol.Column.Name.L
if stmt.KeyType == ast.IndexKeyTypeUnique {
table.uniqIndices[name] = table.findCol(table.columns, name)
} else if stmt.KeyType == ast.IndexKeyTypeNone {
table.indices[name] = table.findCol(table.columns, name)
} else {
return errors.Errorf("unsupported index type on column %s.%s", table.name, name)
}
}
return nil
}
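// parseIndexSQL parses an optional CREATE INDEX statement and applies it to
// the table; an empty sql is a no-op.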
func parseIndexSQL(table *table, sql string) error {
if len(sql) == 0 {
return nil
}
stmt, err := parser.New().ParseOneStmt(sql, "", "")
if err != nil {
return errors.Trace(err)
}
switch node := stmt.(type) {
case *ast.CreateIndexStmt:
err = parseIndex(table, node)
default:
err = errors.Errorf("invalid statement - %v", stmt.Text())
}
return errors.Trace(err)
}
| cmd/importer/parser.go | 0 | https://github.com/pingcap/tidb/commit/788dc28a7423d4b85ebe11bbce632733b056d63a | [
0.008972355164587498,
0.0013749462086707354,
0.00016508952830918133,
0.00025596050545573235,
0.0019605744164437056
] |
{
"id": 0,
"code_window": [
"}\n",
"\n",
"// doModifyColumn updates the column information and reorders all columns.\n",
"func (w *worker) doModifyColumn(t *meta.Meta, job *model.Job, newCol *model.ColumnInfo, oldName *model.CIStr, pos *ast.ColumnPosition, modifyColumnTp byte) (ver int64, _ error) {\n",
"\tdbInfo, err := t.GetDatabase(job.SchemaID)\n",
"\tif err != nil {\n",
"\t\treturn ver, errors.Trace(err)\n",
"\t}\n",
"\n",
"\ttblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tdbInfo, err := checkSchemaExistAndCancelNotExistJob(t, job)\n"
],
"file_path": "ddl/column.go",
"type": "replace",
"edit_start_line_idx": 598
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tracing
import (
"context"
"github.com/opentracing/basictracer-go"
"github.com/opentracing/opentracing-go"
)
// TiDBTrace is set as Baggage on traces which are used for tidb tracing.
const TiDBTrace = "tr"
// A CallbackRecorder immediately invokes itself on received trace spans.
type CallbackRecorder func(sp basictracer.RawSpan)
// RecordSpan implements basictracer.SpanRecorder.
func (cr CallbackRecorder) RecordSpan(sp basictracer.RawSpan) {
cr(sp)
}
// NewRecordedTrace returns a Span which records directly via the specified
// callback.
func NewRecordedTrace(opName string, callback func(sp basictracer.RawSpan)) opentracing.Span {
tr := basictracer.New(CallbackRecorder(callback))
opentracing.SetGlobalTracer(tr)
sp := tr.StartSpan(opName)
sp.SetBaggageItem(TiDBTrace, "1")
return sp
}
// noopSpan returns a Span which discards all operations.
func noopSpan() opentracing.Span {
return (opentracing.NoopTracer{}).StartSpan("DefaultSpan")
}
// SpanFromContext returns the span obtained from the context or, if none is found, a new one started through tracer.
func SpanFromContext(ctx context.Context) (sp opentracing.Span) {
if sp = opentracing.SpanFromContext(ctx); sp == nil {
return noopSpan()
}
return sp
}
// ChildSpanFromContxt returns a non-nil span. If a span can be obtained from ctx, the returned
// span is a child of that span. Otherwise, a noop span is returned.
func ChildSpanFromContxt(ctx context.Context, opName string) (opentracing.Span, context.Context) {
if sp := opentracing.SpanFromContext(ctx); sp != nil {
if _, ok := sp.Tracer().(opentracing.NoopTracer); !ok {
child := opentracing.StartSpan(opName, opentracing.ChildOf(sp.Context()))
return child, opentracing.ContextWithSpan(ctx, child)
}
}
return noopSpan(), ctx
}
| util/tracing/util.go | 0 | https://github.com/pingcap/tidb/commit/788dc28a7423d4b85ebe11bbce632733b056d63a | [
0.0001799150777515024,
0.00017081026453524828,
0.00016427440277766436,
0.0001699884160188958,
0.000005436658739199629
] |
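A hypothetical end-to-end use of the helpers above (not in the repository), assuming they are imported from github.com/pingcap/tidb/util/tracing: record a root span via the callback recorder, then derive a child span through the context.

package main

import (
	"context"
	"fmt"

	"github.com/opentracing/basictracer-go"
	"github.com/opentracing/opentracing-go"
	"github.com/pingcap/tidb/util/tracing"
)

func main() {
	var finished []basictracer.RawSpan
	root := tracing.NewRecordedTrace("demo-root", func(sp basictracer.RawSpan) {
		finished = append(finished, sp) // collect spans as they finish
	})
	ctx := opentracing.ContextWithSpan(context.Background(), root)
	// ChildSpanFromContxt (sic) returns a child of the span carried in ctx.
	child, _ := tracing.ChildSpanFromContxt(ctx, "demo-child")
	child.Finish()
	root.Finish()
	for _, sp := range finished {
		fmt.Println(sp.Operation) // demo-child, then demo-root
	}
}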
{
"id": 0,
"code_window": [
"}\n",
"\n",
"// doModifyColumn updates the column information and reorders all columns.\n",
"func (w *worker) doModifyColumn(t *meta.Meta, job *model.Job, newCol *model.ColumnInfo, oldName *model.CIStr, pos *ast.ColumnPosition, modifyColumnTp byte) (ver int64, _ error) {\n",
"\tdbInfo, err := t.GetDatabase(job.SchemaID)\n",
"\tif err != nil {\n",
"\t\treturn ver, errors.Trace(err)\n",
"\t}\n",
"\n",
"\ttblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tdbInfo, err := checkSchemaExistAndCancelNotExistJob(t, job)\n"
],
"file_path": "ddl/column.go",
"type": "replace",
"edit_start_line_idx": 598
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package mocktikv
import (
"bytes"
"context"
"fmt"
"math"
"strconv"
"sync"
"time"
"github.com/golang/protobuf/proto"
"github.com/opentracing/opentracing-go"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/coprocessor"
"github.com/pingcap/kvproto/pkg/debugpb"
"github.com/pingcap/kvproto/pkg/errorpb"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
)
// For gofail injection.
var undeterminedErr = terror.ErrResultUndetermined
const requestMaxSize = 8 * 1024 * 1024
func checkGoContext(ctx context.Context) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
return nil
}
}
func convertToKeyError(err error) *kvrpcpb.KeyError {
if locked, ok := errors.Cause(err).(*ErrLocked); ok {
return &kvrpcpb.KeyError{
Locked: &kvrpcpb.LockInfo{
Key: locked.Key.Raw(),
PrimaryLock: locked.Primary,
LockVersion: locked.StartTS,
LockTtl: locked.TTL,
TxnSize: locked.TxnSize,
LockType: locked.LockType,
LockForUpdateTs: locked.ForUpdateTS,
},
}
}
if alreadyExist, ok := errors.Cause(err).(*ErrKeyAlreadyExist); ok {
return &kvrpcpb.KeyError{
AlreadyExist: &kvrpcpb.AlreadyExist{
Key: alreadyExist.Key,
},
}
}
if writeConflict, ok := errors.Cause(err).(*ErrConflict); ok {
return &kvrpcpb.KeyError{
Conflict: &kvrpcpb.WriteConflict{
Key: writeConflict.Key,
ConflictTs: writeConflict.ConflictTS,
ConflictCommitTs: writeConflict.ConflictCommitTS,
StartTs: writeConflict.StartTS,
},
}
}
if dead, ok := errors.Cause(err).(*ErrDeadlock); ok {
return &kvrpcpb.KeyError{
Deadlock: &kvrpcpb.Deadlock{
LockTs: dead.LockTS,
LockKey: dead.LockKey,
DeadlockKeyHash: dead.DealockKeyHash,
},
}
}
if retryable, ok := errors.Cause(err).(ErrRetryable); ok {
return &kvrpcpb.KeyError{
Retryable: retryable.Error(),
}
}
if expired, ok := errors.Cause(err).(*ErrCommitTSExpired); ok {
return &kvrpcpb.KeyError{
CommitTsExpired: &expired.CommitTsExpired,
}
}
if tmp, ok := errors.Cause(err).(*ErrTxnNotFound); ok {
return &kvrpcpb.KeyError{
TxnNotFound: &tmp.TxnNotFound,
}
}
return &kvrpcpb.KeyError{
Abort: err.Error(),
}
}
func convertToKeyErrors(errs []error) []*kvrpcpb.KeyError {
var keyErrors = make([]*kvrpcpb.KeyError, 0)
for _, err := range errs {
if err != nil {
keyErrors = append(keyErrors, convertToKeyError(err))
}
}
return keyErrors
}
func convertToPbPairs(pairs []Pair) []*kvrpcpb.KvPair {
kvPairs := make([]*kvrpcpb.KvPair, 0, len(pairs))
for _, p := range pairs {
var kvPair *kvrpcpb.KvPair
if p.Err == nil {
kvPair = &kvrpcpb.KvPair{
Key: p.Key,
Value: p.Value,
}
} else {
kvPair = &kvrpcpb.KvPair{
Error: convertToKeyError(p.Err),
}
}
kvPairs = append(kvPairs, kvPair)
}
return kvPairs
}
// rpcHandler mocks the handler behavior on TiKV's side. In general, you may assume
// TiKV just translates this logic from Go to Rust.
type rpcHandler struct {
cluster *Cluster
mvccStore MVCCStore
// storeID stores id for current request
storeID uint64
// startKey is used for handling normal request.
startKey []byte
endKey []byte
// rawStartKey is used for handling coprocessor request.
rawStartKey []byte
rawEndKey []byte
// isolationLevel is used for current request.
isolationLevel kvrpcpb.IsolationLevel
resolvedLocks []uint64
}
func (h *rpcHandler) checkRequestContext(ctx *kvrpcpb.Context) *errorpb.Error {
ctxPeer := ctx.GetPeer()
if ctxPeer != nil && ctxPeer.GetStoreId() != h.storeID {
return &errorpb.Error{
Message: *proto.String("store not match"),
StoreNotMatch: &errorpb.StoreNotMatch{},
}
}
region, leaderID := h.cluster.GetRegion(ctx.GetRegionId())
// No region found.
if region == nil {
return &errorpb.Error{
Message: *proto.String("region not found"),
RegionNotFound: &errorpb.RegionNotFound{
RegionId: *proto.Uint64(ctx.GetRegionId()),
},
}
}
var storePeer, leaderPeer *metapb.Peer
for _, p := range region.Peers {
if p.GetStoreId() == h.storeID {
storePeer = p
}
if p.GetId() == leaderID {
leaderPeer = p
}
}
// The Store does not contain a Peer of the Region.
if storePeer == nil {
return &errorpb.Error{
Message: *proto.String("region not found"),
RegionNotFound: &errorpb.RegionNotFound{
RegionId: *proto.Uint64(ctx.GetRegionId()),
},
}
}
// No leader.
if leaderPeer == nil {
return &errorpb.Error{
Message: *proto.String("no leader"),
NotLeader: &errorpb.NotLeader{
RegionId: *proto.Uint64(ctx.GetRegionId()),
},
}
}
// The Peer on the Store is not leader.
if storePeer.GetId() != leaderPeer.GetId() {
return &errorpb.Error{
Message: *proto.String("not leader"),
NotLeader: &errorpb.NotLeader{
RegionId: *proto.Uint64(ctx.GetRegionId()),
Leader: leaderPeer,
},
}
}
// Region epoch does not match.
if !proto.Equal(region.GetRegionEpoch(), ctx.GetRegionEpoch()) {
nextRegion, _ := h.cluster.GetRegionByKey(region.GetEndKey())
currentRegions := []*metapb.Region{region}
if nextRegion != nil {
currentRegions = append(currentRegions, nextRegion)
}
return &errorpb.Error{
Message: *proto.String("epoch not match"),
EpochNotMatch: &errorpb.EpochNotMatch{
CurrentRegions: currentRegions,
},
}
}
h.startKey, h.endKey = region.StartKey, region.EndKey
h.isolationLevel = ctx.IsolationLevel
h.resolvedLocks = ctx.ResolvedLocks
return nil
}
func (h *rpcHandler) checkRequestSize(size int) *errorpb.Error {
// TiKV has a limitation on raft log size.
// mocktikv has no raft inside, so we check the request's size instead.
if size >= requestMaxSize {
return &errorpb.Error{
RaftEntryTooLarge: &errorpb.RaftEntryTooLarge{},
}
}
return nil
}
func (h *rpcHandler) checkRequest(ctx *kvrpcpb.Context, size int) *errorpb.Error {
if err := h.checkRequestContext(ctx); err != nil {
return err
}
return h.checkRequestSize(size)
}
func (h *rpcHandler) checkKeyInRegion(key []byte) bool {
return regionContains(h.startKey, h.endKey, NewMvccKey(key))
}
func (h *rpcHandler) handleKvGet(req *kvrpcpb.GetRequest) *kvrpcpb.GetResponse {
if !h.checkKeyInRegion(req.Key) {
panic("KvGet: key not in region")
}
val, err := h.mvccStore.Get(req.Key, req.GetVersion(), h.isolationLevel, req.Context.GetResolvedLocks())
if err != nil {
return &kvrpcpb.GetResponse{
Error: convertToKeyError(err),
}
}
return &kvrpcpb.GetResponse{
Value: val,
}
}
func (h *rpcHandler) handleKvScan(req *kvrpcpb.ScanRequest) *kvrpcpb.ScanResponse {
endKey := MvccKey(h.endKey).Raw()
var pairs []Pair
if !req.Reverse {
if !h.checkKeyInRegion(req.GetStartKey()) {
panic("KvScan: startKey not in region")
}
if len(req.EndKey) > 0 && (len(endKey) == 0 || bytes.Compare(NewMvccKey(req.EndKey), h.endKey) < 0) {
endKey = req.EndKey
}
pairs = h.mvccStore.Scan(req.GetStartKey(), endKey, int(req.GetLimit()), req.GetVersion(), h.isolationLevel, req.Context.ResolvedLocks)
} else {
		// TiKV uses the range [end_key, start_key) for a reverse scan,
		// so req.EndKey is the key to check against the region.
if !h.checkKeyInRegion(req.GetEndKey()) {
panic("KvScan: startKey not in region")
}
		// TiKV uses the range [end_key, start_key) for a reverse scan,
		// so req.StartKey is actually the end_key.
if len(req.StartKey) > 0 && (len(endKey) == 0 || bytes.Compare(NewMvccKey(req.StartKey), h.endKey) < 0) {
endKey = req.StartKey
}
pairs = h.mvccStore.ReverseScan(req.EndKey, endKey, int(req.GetLimit()), req.GetVersion(), h.isolationLevel, req.Context.ResolvedLocks)
}
return &kvrpcpb.ScanResponse{
Pairs: convertToPbPairs(pairs),
}
}
func (h *rpcHandler) handleKvPrewrite(req *kvrpcpb.PrewriteRequest) *kvrpcpb.PrewriteResponse {
regionID := req.Context.RegionId
h.cluster.handleDelay(req.StartVersion, regionID)
for _, m := range req.Mutations {
if !h.checkKeyInRegion(m.Key) {
panic("KvPrewrite: key not in region")
}
}
errs := h.mvccStore.Prewrite(req)
return &kvrpcpb.PrewriteResponse{
Errors: convertToKeyErrors(errs),
}
}
func (h *rpcHandler) handleKvPessimisticLock(req *kvrpcpb.PessimisticLockRequest) *kvrpcpb.PessimisticLockResponse {
for _, m := range req.Mutations {
if !h.checkKeyInRegion(m.Key) {
panic("KvPessimisticLock: key not in region")
}
}
startTS := req.StartVersion
regionID := req.Context.RegionId
h.cluster.handleDelay(startTS, regionID)
return h.mvccStore.PessimisticLock(req)
}
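// simulateServerSideWaitLock mimics the real TiKV server briefly waiting on a
// lock before replying, so pessimistic-lock retries behave more realistically.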
func simulateServerSideWaitLock(errs []error) {
for _, err := range errs {
if _, ok := err.(*ErrLocked); ok {
time.Sleep(time.Millisecond * 5)
break
}
}
}
func (h *rpcHandler) handleKvPessimisticRollback(req *kvrpcpb.PessimisticRollbackRequest) *kvrpcpb.PessimisticRollbackResponse {
for _, key := range req.Keys {
if !h.checkKeyInRegion(key) {
panic("KvPessimisticRollback: key not in region")
}
}
errs := h.mvccStore.PessimisticRollback(req.Keys, req.StartVersion, req.ForUpdateTs)
return &kvrpcpb.PessimisticRollbackResponse{
Errors: convertToKeyErrors(errs),
}
}
func (h *rpcHandler) handleKvCommit(req *kvrpcpb.CommitRequest) *kvrpcpb.CommitResponse {
for _, k := range req.Keys {
if !h.checkKeyInRegion(k) {
panic("KvCommit: key not in region")
}
}
var resp kvrpcpb.CommitResponse
err := h.mvccStore.Commit(req.Keys, req.GetStartVersion(), req.GetCommitVersion())
if err != nil {
resp.Error = convertToKeyError(err)
}
return &resp
}
func (h *rpcHandler) handleKvCleanup(req *kvrpcpb.CleanupRequest) *kvrpcpb.CleanupResponse {
if !h.checkKeyInRegion(req.Key) {
panic("KvCleanup: key not in region")
}
var resp kvrpcpb.CleanupResponse
err := h.mvccStore.Cleanup(req.Key, req.GetStartVersion(), req.GetCurrentTs())
if err != nil {
if commitTS, ok := errors.Cause(err).(ErrAlreadyCommitted); ok {
resp.CommitVersion = uint64(commitTS)
} else {
resp.Error = convertToKeyError(err)
}
}
return &resp
}
func (h *rpcHandler) handleKvCheckTxnStatus(req *kvrpcpb.CheckTxnStatusRequest) *kvrpcpb.CheckTxnStatusResponse {
if !h.checkKeyInRegion(req.PrimaryKey) {
panic("KvCheckTxnStatus: key not in region")
}
var resp kvrpcpb.CheckTxnStatusResponse
ttl, commitTS, action, err := h.mvccStore.CheckTxnStatus(req.GetPrimaryKey(), req.GetLockTs(), req.GetCallerStartTs(), req.GetCurrentTs(), req.GetRollbackIfNotExist())
if err != nil {
resp.Error = convertToKeyError(err)
} else {
resp.LockTtl, resp.CommitVersion, resp.Action = ttl, commitTS, action
}
return &resp
}
func (h *rpcHandler) handleTxnHeartBeat(req *kvrpcpb.TxnHeartBeatRequest) *kvrpcpb.TxnHeartBeatResponse {
if !h.checkKeyInRegion(req.PrimaryLock) {
panic("KvTxnHeartBeat: key not in region")
}
var resp kvrpcpb.TxnHeartBeatResponse
ttl, err := h.mvccStore.TxnHeartBeat(req.PrimaryLock, req.StartVersion, req.AdviseLockTtl)
if err != nil {
resp.Error = convertToKeyError(err)
}
resp.LockTtl = ttl
return &resp
}
func (h *rpcHandler) handleKvBatchGet(req *kvrpcpb.BatchGetRequest) *kvrpcpb.BatchGetResponse {
for _, k := range req.Keys {
if !h.checkKeyInRegion(k) {
panic("KvBatchGet: key not in region")
}
}
pairs := h.mvccStore.BatchGet(req.Keys, req.GetVersion(), h.isolationLevel, req.Context.GetResolvedLocks())
return &kvrpcpb.BatchGetResponse{
Pairs: convertToPbPairs(pairs),
}
}
func (h *rpcHandler) handleMvccGetByKey(req *kvrpcpb.MvccGetByKeyRequest) *kvrpcpb.MvccGetByKeyResponse {
debugger, ok := h.mvccStore.(MVCCDebugger)
if !ok {
return &kvrpcpb.MvccGetByKeyResponse{
Error: "not implement",
}
}
if !h.checkKeyInRegion(req.Key) {
panic("MvccGetByKey: key not in region")
}
var resp kvrpcpb.MvccGetByKeyResponse
resp.Info = debugger.MvccGetByKey(req.Key)
return &resp
}
func (h *rpcHandler) handleMvccGetByStartTS(req *kvrpcpb.MvccGetByStartTsRequest) *kvrpcpb.MvccGetByStartTsResponse {
debugger, ok := h.mvccStore.(MVCCDebugger)
if !ok {
return &kvrpcpb.MvccGetByStartTsResponse{
Error: "not implement",
}
}
var resp kvrpcpb.MvccGetByStartTsResponse
resp.Info, resp.Key = debugger.MvccGetByStartTS(req.StartTs)
return &resp
}
func (h *rpcHandler) handleKvBatchRollback(req *kvrpcpb.BatchRollbackRequest) *kvrpcpb.BatchRollbackResponse {
err := h.mvccStore.Rollback(req.Keys, req.StartVersion)
if err != nil {
return &kvrpcpb.BatchRollbackResponse{
Error: convertToKeyError(err),
}
}
return &kvrpcpb.BatchRollbackResponse{}
}
func (h *rpcHandler) handleKvScanLock(req *kvrpcpb.ScanLockRequest) *kvrpcpb.ScanLockResponse {
startKey := MvccKey(h.startKey).Raw()
endKey := MvccKey(h.endKey).Raw()
locks, err := h.mvccStore.ScanLock(startKey, endKey, req.GetMaxVersion())
if err != nil {
return &kvrpcpb.ScanLockResponse{
Error: convertToKeyError(err),
}
}
return &kvrpcpb.ScanLockResponse{
Locks: locks,
}
}
func (h *rpcHandler) handleKvResolveLock(req *kvrpcpb.ResolveLockRequest) *kvrpcpb.ResolveLockResponse {
startKey := MvccKey(h.startKey).Raw()
endKey := MvccKey(h.endKey).Raw()
err := h.mvccStore.ResolveLock(startKey, endKey, req.GetStartVersion(), req.GetCommitVersion())
if err != nil {
return &kvrpcpb.ResolveLockResponse{
Error: convertToKeyError(err),
}
}
return &kvrpcpb.ResolveLockResponse{}
}
func (h *rpcHandler) handleKvGC(req *kvrpcpb.GCRequest) *kvrpcpb.GCResponse {
startKey := MvccKey(h.startKey).Raw()
endKey := MvccKey(h.endKey).Raw()
err := h.mvccStore.GC(startKey, endKey, req.GetSafePoint())
if err != nil {
return &kvrpcpb.GCResponse{
Error: convertToKeyError(err),
}
}
return &kvrpcpb.GCResponse{}
}
func (h *rpcHandler) handleKvDeleteRange(req *kvrpcpb.DeleteRangeRequest) *kvrpcpb.DeleteRangeResponse {
if !h.checkKeyInRegion(req.StartKey) {
panic("KvDeleteRange: key not in region")
}
var resp kvrpcpb.DeleteRangeResponse
err := h.mvccStore.DeleteRange(req.StartKey, req.EndKey)
if err != nil {
resp.Error = err.Error()
}
return &resp
}
func (h *rpcHandler) handleKvRawGet(req *kvrpcpb.RawGetRequest) *kvrpcpb.RawGetResponse {
rawKV, ok := h.mvccStore.(RawKV)
if !ok {
return &kvrpcpb.RawGetResponse{
Error: "not implemented",
}
}
return &kvrpcpb.RawGetResponse{
Value: rawKV.RawGet(req.GetKey()),
}
}
func (h *rpcHandler) handleKvRawBatchGet(req *kvrpcpb.RawBatchGetRequest) *kvrpcpb.RawBatchGetResponse {
rawKV, ok := h.mvccStore.(RawKV)
if !ok {
		// TODO: should we return an error here?
return &kvrpcpb.RawBatchGetResponse{
RegionError: &errorpb.Error{
Message: "not implemented",
},
}
}
values := rawKV.RawBatchGet(req.Keys)
kvPairs := make([]*kvrpcpb.KvPair, len(values))
for i, key := range req.Keys {
kvPairs[i] = &kvrpcpb.KvPair{
Key: key,
Value: values[i],
}
}
return &kvrpcpb.RawBatchGetResponse{
Pairs: kvPairs,
}
}
func (h *rpcHandler) handleKvRawPut(req *kvrpcpb.RawPutRequest) *kvrpcpb.RawPutResponse {
rawKV, ok := h.mvccStore.(RawKV)
if !ok {
return &kvrpcpb.RawPutResponse{
Error: "not implemented",
}
}
rawKV.RawPut(req.GetKey(), req.GetValue())
return &kvrpcpb.RawPutResponse{}
}
func (h *rpcHandler) handleKvRawBatchPut(req *kvrpcpb.RawBatchPutRequest) *kvrpcpb.RawBatchPutResponse {
rawKV, ok := h.mvccStore.(RawKV)
if !ok {
return &kvrpcpb.RawBatchPutResponse{
Error: "not implemented",
}
}
keys := make([][]byte, 0, len(req.Pairs))
values := make([][]byte, 0, len(req.Pairs))
for _, pair := range req.Pairs {
keys = append(keys, pair.Key)
values = append(values, pair.Value)
}
rawKV.RawBatchPut(keys, values)
return &kvrpcpb.RawBatchPutResponse{}
}
func (h *rpcHandler) handleKvRawDelete(req *kvrpcpb.RawDeleteRequest) *kvrpcpb.RawDeleteResponse {
rawKV, ok := h.mvccStore.(RawKV)
if !ok {
return &kvrpcpb.RawDeleteResponse{
Error: "not implemented",
}
}
rawKV.RawDelete(req.GetKey())
return &kvrpcpb.RawDeleteResponse{}
}
func (h *rpcHandler) handleKvRawBatchDelete(req *kvrpcpb.RawBatchDeleteRequest) *kvrpcpb.RawBatchDeleteResponse {
rawKV, ok := h.mvccStore.(RawKV)
if !ok {
return &kvrpcpb.RawBatchDeleteResponse{
Error: "not implemented",
}
}
rawKV.RawBatchDelete(req.Keys)
return &kvrpcpb.RawBatchDeleteResponse{}
}
func (h *rpcHandler) handleKvRawDeleteRange(req *kvrpcpb.RawDeleteRangeRequest) *kvrpcpb.RawDeleteRangeResponse {
rawKV, ok := h.mvccStore.(RawKV)
if !ok {
return &kvrpcpb.RawDeleteRangeResponse{
Error: "not implemented",
}
}
rawKV.RawDeleteRange(req.GetStartKey(), req.GetEndKey())
return &kvrpcpb.RawDeleteRangeResponse{}
}
func (h *rpcHandler) handleKvRawScan(req *kvrpcpb.RawScanRequest) *kvrpcpb.RawScanResponse {
rawKV, ok := h.mvccStore.(RawKV)
if !ok {
errStr := "not implemented"
return &kvrpcpb.RawScanResponse{
RegionError: &errorpb.Error{
Message: errStr,
},
}
}
var pairs []Pair
if req.Reverse {
lowerBound := h.startKey
if bytes.Compare(req.EndKey, lowerBound) > 0 {
lowerBound = req.EndKey
}
pairs = rawKV.RawReverseScan(
req.StartKey,
lowerBound,
int(req.GetLimit()),
)
} else {
upperBound := h.endKey
if len(req.EndKey) > 0 && (len(upperBound) == 0 || bytes.Compare(req.EndKey, upperBound) < 0) {
upperBound = req.EndKey
}
pairs = rawKV.RawScan(
req.StartKey,
upperBound,
int(req.GetLimit()),
)
}
return &kvrpcpb.RawScanResponse{
Kvs: convertToPbPairs(pairs),
}
}
func (h *rpcHandler) handleSplitRegion(req *kvrpcpb.SplitRegionRequest) *kvrpcpb.SplitRegionResponse {
keys := req.GetSplitKeys()
resp := &kvrpcpb.SplitRegionResponse{Regions: make([]*metapb.Region, 0, len(keys)+1)}
for i, key := range keys {
k := NewMvccKey(key)
region, _ := h.cluster.GetRegionByKey(k)
if bytes.Equal(region.GetStartKey(), key) {
continue
}
if i == 0 {
// Set the leftmost region.
resp.Regions = append(resp.Regions, region)
}
newRegionID, newPeerIDs := h.cluster.AllocID(), h.cluster.AllocIDs(len(region.Peers))
newRegion := h.cluster.SplitRaw(region.GetId(), newRegionID, k, newPeerIDs, newPeerIDs[0])
resp.Regions = append(resp.Regions, newRegion)
}
return resp
}
// Client is a client that sends RPCs.
// It is the same as tikv.Client, defined again here to avoid a circular import.
type Client interface {
// Close should release all data.
Close() error
// SendRequest sends Request.
SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error)
}
// RPCClient sends kv RPC calls to mock cluster. RPCClient mocks the behavior of
// a rpc client at tikv's side.
type RPCClient struct {
Cluster *Cluster
MvccStore MVCCStore
streamTimeout chan *tikvrpc.Lease
done chan struct{}
	// rpcCli is used to redirect RPC requests to the TiDB RPC server; it is only used in tests.
	// Mocking the TiDB RPC service would create a circular import, so a real RPC client is used to reach the server.
	// sync.Once is used to avoid initializing rpcCli concurrently.
sync.Once
rpcCli Client
}
// NewRPCClient creates an RPCClient.
// Note that closing the RPCClient may close the underlying MvccStore.
func NewRPCClient(cluster *Cluster, mvccStore MVCCStore) *RPCClient {
ch := make(chan *tikvrpc.Lease, 1024)
done := make(chan struct{})
go tikvrpc.CheckStreamTimeoutLoop(ch, done)
return &RPCClient{
Cluster: cluster,
MvccStore: mvccStore,
streamTimeout: ch,
done: done,
}
}
func (c *RPCClient) getAndCheckStoreByAddr(addr string) (*metapb.Store, error) {
store, err := c.Cluster.GetAndCheckStoreByAddr(addr)
if err != nil {
return nil, err
}
if store == nil {
return nil, errors.New("connect fail")
}
if store.GetState() == metapb.StoreState_Offline ||
store.GetState() == metapb.StoreState_Tombstone {
return nil, errors.New("connection refused")
}
return store, nil
}
func (c *RPCClient) checkArgs(ctx context.Context, addr string) (*rpcHandler, error) {
if err := checkGoContext(ctx); err != nil {
return nil, err
}
store, err := c.getAndCheckStoreByAddr(addr)
if err != nil {
return nil, err
}
handler := &rpcHandler{
cluster: c.Cluster,
mvccStore: c.MvccStore,
// set store id for current request
storeID: store.GetId(),
}
return handler, nil
}
// GRPCClientFactory is the GRPC client factory.
// A global variable is used to avoid a circular import.
// TODO: remove this global variable.
var GRPCClientFactory func() Client
// redirectRequestToRPCServer redirects an RPC request to the TiDB RPC server; it is only used in tests.
// Mocking the TiDB RPC service would create a circular import, so a real RPC client is used to reach the server.
func (c *RPCClient) redirectRequestToRPCServer(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
c.Once.Do(func() {
if GRPCClientFactory != nil {
c.rpcCli = GRPCClientFactory()
}
})
if c.rpcCli == nil {
return nil, errors.Errorf("GRPCClientFactory is nil")
}
return c.rpcCli.SendRequest(ctx, addr, req, timeout)
}
// SendRequest sends a request to mock cluster.
func (c *RPCClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span1 := span.Tracer().StartSpan("RPCClient.SendRequest", opentracing.ChildOf(span.Context()))
defer span1.Finish()
ctx = opentracing.ContextWithSpan(ctx, span1)
}
failpoint.Inject("rpcServerBusy", func(val failpoint.Value) {
if val.(bool) {
failpoint.Return(tikvrpc.GenRegionErrorResp(req, &errorpb.Error{ServerIsBusy: &errorpb.ServerIsBusy{}}))
}
})
// increase coverage for mock tikv
_ = req.Type.String()
_ = req.ToBatchCommandsRequest()
reqCtx := &req.Context
resp := &tikvrpc.Response{}
	// When the store type is TiDB, the request should be handed over to the TiDB RPC server to handle.
if req.StoreTp == kv.TiDB {
return c.redirectRequestToRPCServer(ctx, addr, req, timeout)
}
handler, err := c.checkArgs(ctx, addr)
if err != nil {
return nil, err
}
switch req.Type {
case tikvrpc.CmdGet:
r := req.Get()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.GetResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvGet(r)
case tikvrpc.CmdScan:
r := req.Scan()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.ScanResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvScan(r)
case tikvrpc.CmdPrewrite:
failpoint.Inject("rpcPrewriteResult", func(val failpoint.Value) {
switch val.(string) {
case "notLeader":
failpoint.Return(&tikvrpc.Response{
Resp: &kvrpcpb.PrewriteResponse{RegionError: &errorpb.Error{NotLeader: &errorpb.NotLeader{}}},
}, nil)
}
})
r := req.Prewrite()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.PrewriteResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvPrewrite(r)
case tikvrpc.CmdPessimisticLock:
r := req.PessimisticLock()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.PessimisticLockResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvPessimisticLock(r)
case tikvrpc.CmdPessimisticRollback:
r := req.PessimisticRollback()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.PessimisticRollbackResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvPessimisticRollback(r)
case tikvrpc.CmdCommit:
failpoint.Inject("rpcCommitResult", func(val failpoint.Value) {
switch val.(string) {
case "timeout":
failpoint.Return(nil, errors.New("timeout"))
case "notLeader":
failpoint.Return(&tikvrpc.Response{
Resp: &kvrpcpb.CommitResponse{RegionError: &errorpb.Error{NotLeader: &errorpb.NotLeader{}}},
}, nil)
case "keyError":
failpoint.Return(&tikvrpc.Response{
Resp: &kvrpcpb.CommitResponse{Error: &kvrpcpb.KeyError{}},
}, nil)
}
})
r := req.Commit()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.CommitResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvCommit(r)
failpoint.Inject("rpcCommitTimeout", func(val failpoint.Value) {
if val.(bool) {
failpoint.Return(nil, undeterminedErr)
}
})
case tikvrpc.CmdCleanup:
r := req.Cleanup()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.CleanupResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvCleanup(r)
case tikvrpc.CmdCheckTxnStatus:
r := req.CheckTxnStatus()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.CheckTxnStatusResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvCheckTxnStatus(r)
case tikvrpc.CmdTxnHeartBeat:
r := req.TxnHeartBeat()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.TxnHeartBeatResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleTxnHeartBeat(r)
case tikvrpc.CmdBatchGet:
r := req.BatchGet()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.BatchGetResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvBatchGet(r)
case tikvrpc.CmdBatchRollback:
r := req.BatchRollback()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.BatchRollbackResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvBatchRollback(r)
case tikvrpc.CmdScanLock:
r := req.ScanLock()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.ScanLockResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvScanLock(r)
case tikvrpc.CmdResolveLock:
r := req.ResolveLock()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.ResolveLockResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvResolveLock(r)
case tikvrpc.CmdGC:
r := req.GC()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.GCResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvGC(r)
case tikvrpc.CmdDeleteRange:
r := req.DeleteRange()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.DeleteRangeResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvDeleteRange(r)
case tikvrpc.CmdRawGet:
r := req.RawGet()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.RawGetResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvRawGet(r)
case tikvrpc.CmdRawBatchGet:
r := req.RawBatchGet()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.RawBatchGetResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvRawBatchGet(r)
case tikvrpc.CmdRawPut:
r := req.RawPut()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.RawPutResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvRawPut(r)
case tikvrpc.CmdRawBatchPut:
r := req.RawBatchPut()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.RawBatchPutResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvRawBatchPut(r)
case tikvrpc.CmdRawDelete:
r := req.RawDelete()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.RawDeleteResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvRawDelete(r)
case tikvrpc.CmdRawBatchDelete:
r := req.RawBatchDelete()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.RawBatchDeleteResponse{RegionError: err}
}
resp.Resp = handler.handleKvRawBatchDelete(r)
case tikvrpc.CmdRawDeleteRange:
r := req.RawDeleteRange()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.RawDeleteRangeResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvRawDeleteRange(r)
case tikvrpc.CmdRawScan:
r := req.RawScan()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.RawScanResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleKvRawScan(r)
case tikvrpc.CmdUnsafeDestroyRange:
panic("unimplemented")
case tikvrpc.CmdRegisterLockObserver:
return nil, errors.New("unimplemented")
case tikvrpc.CmdCheckLockObserver:
return nil, errors.New("unimplemented")
case tikvrpc.CmdRemoveLockObserver:
return nil, errors.New("unimplemented")
case tikvrpc.CmdPhysicalScanLock:
return nil, errors.New("unimplemented")
case tikvrpc.CmdCop:
r := req.Cop()
if err := handler.checkRequestContext(reqCtx); err != nil {
resp.Resp = &coprocessor.Response{RegionError: err}
return resp, nil
}
handler.rawStartKey = MvccKey(handler.startKey).Raw()
handler.rawEndKey = MvccKey(handler.endKey).Raw()
var res *coprocessor.Response
switch r.GetTp() {
case kv.ReqTypeDAG:
res = handler.handleCopDAGRequest(r)
case kv.ReqTypeAnalyze:
res = handler.handleCopAnalyzeRequest(r)
case kv.ReqTypeChecksum:
res = handler.handleCopChecksumRequest(r)
default:
panic(fmt.Sprintf("unknown coprocessor request type: %v", r.GetTp()))
}
resp.Resp = res
case tikvrpc.CmdCopStream:
r := req.Cop()
if err := handler.checkRequestContext(reqCtx); err != nil {
resp.Resp = &tikvrpc.CopStreamResponse{
Tikv_CoprocessorStreamClient: &mockCopStreamErrClient{Error: err},
Response: &coprocessor.Response{
RegionError: err,
},
}
return resp, nil
}
handler.rawStartKey = MvccKey(handler.startKey).Raw()
handler.rawEndKey = MvccKey(handler.endKey).Raw()
ctx1, cancel := context.WithCancel(ctx)
copStream, err := handler.handleCopStream(ctx1, r)
if err != nil {
cancel()
return nil, errors.Trace(err)
}
streamResp := &tikvrpc.CopStreamResponse{
Tikv_CoprocessorStreamClient: copStream,
}
streamResp.Lease.Cancel = cancel
streamResp.Timeout = timeout
c.streamTimeout <- &streamResp.Lease
first, err := streamResp.Recv()
if err != nil {
return nil, errors.Trace(err)
}
streamResp.Response = first
resp.Resp = streamResp
case tikvrpc.CmdMvccGetByKey:
r := req.MvccGetByKey()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.MvccGetByKeyResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleMvccGetByKey(r)
case tikvrpc.CmdMvccGetByStartTs:
r := req.MvccGetByStartTs()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.MvccGetByStartTsResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleMvccGetByStartTS(r)
case tikvrpc.CmdSplitRegion:
r := req.SplitRegion()
if err := handler.checkRequest(reqCtx, r.Size()); err != nil {
resp.Resp = &kvrpcpb.SplitRegionResponse{RegionError: err}
return resp, nil
}
resp.Resp = handler.handleSplitRegion(r)
// DebugGetRegionProperties is for fast analyze in mock tikv.
case tikvrpc.CmdDebugGetRegionProperties:
r := req.DebugGetRegionProperties()
region, _ := c.Cluster.GetRegion(r.RegionId)
var reqCtx kvrpcpb.Context
scanResp := handler.handleKvScan(&kvrpcpb.ScanRequest{
Context: &reqCtx,
StartKey: MvccKey(region.StartKey).Raw(),
EndKey: MvccKey(region.EndKey).Raw(),
Version: math.MaxUint64,
Limit: math.MaxUint32})
resp.Resp = &debugpb.GetRegionPropertiesResponse{
Props: []*debugpb.Property{{
Name: "mvcc.num_rows",
Value: strconv.Itoa(len(scanResp.Pairs)),
}}}
default:
return nil, errors.Errorf("unsupported this request type %v", req.Type)
}
return resp, nil
}
// Close closes the client.
func (c *RPCClient) Close() error {
close(c.done)
var err error
if c.MvccStore != nil {
err = c.MvccStore.Close()
if err != nil {
return err
}
}
if c.rpcCli != nil {
err = c.rpcCli.Close()
if err != nil {
return err
}
}
return nil
}
| store/mockstore/mocktikv/rpc.go | 0 | https://github.com/pingcap/tidb/commit/788dc28a7423d4b85ebe11bbce632733b056d63a | [
0.0005281163030304015,
0.0001749081420712173,
0.00016003349446691573,
0.00017265978385694325,
0.00003450331132626161
] |
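convertToKeyError near the top of rpc.go above relies on one pattern throughout: unwrap with errors.Cause, then type-assert to a concrete error type. Below is a self-contained sketch of that pattern; errLocked here is a hypothetical stand-in, not the real mocktikv ErrLocked.

package main

import (
	"fmt"

	"github.com/pingcap/errors"
)

// errLocked is a hypothetical typed error, standing in for mocktikv's ErrLocked.
type errLocked struct{ key []byte }

func (e *errLocked) Error() string { return fmt.Sprintf("key %q is locked", e.key) }

func classify(err error) string {
	// errors.Cause strips errors.Trace/Wrap layers before the assertion,
	// exactly as convertToKeyError does.
	if locked, ok := errors.Cause(err).(*errLocked); ok {
		return "locked: " + string(locked.key)
	}
	return "abort: " + err.Error()
}

func main() {
	fmt.Println(classify(errors.Trace(&errLocked{key: []byte("k1")}))) // locked: k1
	fmt.Println(classify(errors.New("boom")))                          // abort: boom
}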
{
"id": 1,
"code_window": [
"\t}\n",
"\ts.testControlParallelExecSQL(c, sql1, sql2, f)\n",
"}\n",
"\n",
"type checkRet func(c *C, err1, err2 error)\n",
"\n",
"func (s *testStateChangeSuiteBase) prepareTestControlParallelExecSQL(c *C) (session.Session, session.Session, chan struct{}, ddl.Callback) {\n",
"\tcallback := &ddl.TestDDLCallback{}\n",
"\ttimes := 0\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (s *testStateChangeSuite) TestParallelAlterAndDropSchema(c *C) {\n",
"\t_, err := s.se.Execute(context.Background(), \"create database db_drop_db\")\n",
"\tc.Assert(err, IsNil)\n",
"\tsql1 := \"DROP SCHEMA db_drop_db\"\n",
"\tsql2 := \"ALTER SCHEMA db_drop_db CHARSET utf8mb4 COLLATE utf8mb4_general_ci\"\n",
"\tf := func(c *C, err1, err2 error) {\n",
"\t\tc.Assert(err1, IsNil)\n",
"\t\tc.Assert(err2, NotNil)\n",
"\t\tc.Assert(err2.Error(), Equals, \"[schema:1008]Can't drop database ''; database doesn't exist\")\n",
"\t}\n",
"\ts.testControlParallelExecSQL(c, sql1, sql2, f)\n",
"}\n",
"\n"
],
"file_path": "ddl/db_change_test.go",
"type": "add",
"edit_start_line_idx": 952
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl_test
import (
"context"
"fmt"
"strings"
"sync"
"sync/atomic"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/log"
"github.com/pingcap/parser"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/util/admin"
"github.com/pingcap/tidb/util/gcutil"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/pingcap/tidb/util/testkit"
"go.uber.org/zap"
)
var _ = Suite(&testStateChangeSuite{})
var _ = SerialSuites(&serialTestStateChangeSuite{})
type serialTestStateChangeSuite struct {
testStateChangeSuiteBase
}
type testStateChangeSuite struct {
testStateChangeSuiteBase
}
type testStateChangeSuiteBase struct {
lease time.Duration
store kv.Storage
dom *domain.Domain
se session.Session
p *parser.Parser
preSQL string
}
func (s *testStateChangeSuiteBase) SetUpSuite(c *C) {
s.lease = 200 * time.Millisecond
ddl.SetWaitTimeWhenErrorOccurred(1 * time.Microsecond)
var err error
s.store, err = mockstore.NewMockStore()
c.Assert(err, IsNil)
session.SetSchemaLease(s.lease)
s.dom, err = session.BootstrapSession(s.store)
c.Assert(err, IsNil)
s.se, err = session.CreateSession4Test(s.store)
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "create database test_db_state default charset utf8 default collate utf8_bin")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
s.p = parser.New()
}
func (s *testStateChangeSuiteBase) TearDownSuite(c *C) {
s.se.Execute(context.Background(), "drop database if exists test_db_state")
s.se.Close()
s.dom.Close()
s.store.Close()
}
// TestShowCreateTable tests the result of "show create table" when we are running "add index" or "add column".
func (s *serialTestStateChangeSuite) TestShowCreateTable(c *C) {
config.GetGlobalConfig().Experimental.AllowsExpressionIndex = true
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (id int)")
tk.MustExec("create table t2 (a int, b varchar(10)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci")
	// tkInternal is used to execute additional SQL (here, show create table) in the DDL change callback.
	// Using the same `tk` in different goroutines may lead to a data race.
tkInternal := testkit.NewTestKit(c, s.store)
tkInternal.MustExec("use test")
var checkErr error
testCases := []struct {
sql string
expectedRet string
}{
{"alter table t add index idx(id)",
"CREATE TABLE `t` (\n `id` int(11) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"},
{"alter table t add index idx1(id)",
"CREATE TABLE `t` (\n `id` int(11) DEFAULT NULL,\n KEY `idx` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"},
{"alter table t add column c int",
"CREATE TABLE `t` (\n `id` int(11) DEFAULT NULL,\n KEY `idx` (`id`),\n KEY `idx1` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"},
{"alter table t2 add column c varchar(1)",
"CREATE TABLE `t2` (\n `a` int(11) DEFAULT NULL,\n `b` varchar(10) COLLATE utf8mb4_general_ci DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci"},
{"alter table t2 add column d varchar(1)",
"CREATE TABLE `t2` (\n `a` int(11) DEFAULT NULL,\n `b` varchar(10) COLLATE utf8mb4_general_ci DEFAULT NULL,\n `c` varchar(1) COLLATE utf8mb4_general_ci DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci"},
}
prevState := model.StateNone
callback := &ddl.TestDDLCallback{}
currTestCaseOffset := 0
callback.OnJobUpdatedExported = func(job *model.Job) {
if job.SchemaState == prevState || checkErr != nil {
return
}
if job.State == model.JobStateDone {
currTestCaseOffset++
}
if job.SchemaState != model.StatePublic {
var result sqlexec.RecordSet
tbl2 := testGetTableByName(c, tkInternal.Se, "test", "t2")
if job.TableID == tbl2.Meta().ID {
				// Try not to use mustQuery in a hook function: an assertion failure inside mustQuery would leave the DDL job hanging.
result, checkErr = tkInternal.Exec("show create table t2")
if checkErr != nil {
return
}
} else {
result, checkErr = tkInternal.Exec("show create table t")
if checkErr != nil {
return
}
}
req := result.NewChunk()
checkErr = result.Next(context.Background(), req)
if checkErr != nil {
return
}
got := req.GetRow(0).GetString(1)
expected := testCases[currTestCaseOffset].expectedRet
if got != expected {
checkErr = errors.Errorf("got %s, expected %s", got, expected)
}
}
}
d := s.dom.DDL()
originalCallback := d.GetHook()
defer d.(ddl.DDLForTest).SetHook(originalCallback)
d.(ddl.DDLForTest).SetHook(callback)
for _, tc := range testCases {
tk.MustExec(tc.sql)
c.Assert(checkErr, IsNil)
}
}
// TestDropNotNullColumn is used to test issue #8654.
func (s *testStateChangeSuite) TestDropNotNullColumn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (id int, a int not null default 11)")
tk.MustExec("insert into t values(1, 1)")
tk.MustExec("create table t1 (id int, b varchar(255) not null)")
tk.MustExec("insert into t1 values(2, '')")
tk.MustExec("create table t2 (id int, c time not null)")
tk.MustExec("insert into t2 values(3, '11:22:33')")
tk.MustExec("create table t3 (id int, d json not null)")
tk.MustExec("insert into t3 values(4, d)")
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
var checkErr error
d := s.dom.DDL()
originalCallback := d.GetHook()
callback := &ddl.TestDDLCallback{}
sqlNum := 0
callback.OnJobUpdatedExported = func(job *model.Job) {
if checkErr != nil {
return
}
originalCallback.OnChanged(nil)
if job.SchemaState == model.StateWriteOnly {
switch sqlNum {
case 0:
_, checkErr = tk1.Exec("insert into t set id = 1")
case 1:
_, checkErr = tk1.Exec("insert into t1 set id = 2")
case 2:
_, checkErr = tk1.Exec("insert into t2 set id = 3")
case 3:
_, checkErr = tk1.Exec("insert into t3 set id = 4")
}
}
}
d.(ddl.DDLForTest).SetHook(callback)
tk.MustExec("alter table t drop column a")
c.Assert(checkErr, IsNil)
sqlNum++
tk.MustExec("alter table t1 drop column b")
c.Assert(checkErr, IsNil)
sqlNum++
tk.MustExec("alter table t2 drop column c")
c.Assert(checkErr, IsNil)
sqlNum++
tk.MustExec("alter table t3 drop column d")
c.Assert(checkErr, IsNil)
d.(ddl.DDLForTest).SetHook(originalCallback)
tk.MustExec("drop table t, t1, t2, t3")
}
func (s *testStateChangeSuite) TestTwoStates(c *C) {
cnt := 5
// New the testExecInfo.
testInfo := &testExecInfo{
execCases: cnt,
sqlInfos: make([]*sqlInfo, 4),
}
for i := 0; i < len(testInfo.sqlInfos); i++ {
sqlInfo := &sqlInfo{cases: make([]*stateCase, cnt)}
for j := 0; j < cnt; j++ {
sqlInfo.cases[j] = new(stateCase)
}
testInfo.sqlInfos[i] = sqlInfo
}
err := testInfo.createSessions(s.store, "test_db_state")
c.Assert(err, IsNil)
// Fill the SQLs and expected error messages.
testInfo.sqlInfos[0].sql = "insert into t (c1, c2, c3, c4) value(2, 'b', 'N', '2017-07-02')"
testInfo.sqlInfos[1].sql = "insert into t (c1, c2, c3, d3, c4) value(3, 'b', 'N', 'a', '2017-07-03')"
unknownColErr := "[planner:1054]Unknown column 'd3' in 'field list'"
testInfo.sqlInfos[1].cases[0].expectedCompileErr = unknownColErr
testInfo.sqlInfos[1].cases[1].expectedCompileErr = unknownColErr
testInfo.sqlInfos[1].cases[2].expectedCompileErr = unknownColErr
testInfo.sqlInfos[1].cases[3].expectedCompileErr = unknownColErr
testInfo.sqlInfos[2].sql = "update t set c2 = 'c2_update'"
testInfo.sqlInfos[3].sql = "replace into t values(5, 'e', 'N', '2017-07-05')"
testInfo.sqlInfos[3].cases[4].expectedCompileErr = "[planner:1136]Column count doesn't match value count at row 1"
alterTableSQL := "alter table t add column d3 enum('a', 'b') not null default 'a' after c3"
s.test(c, "", alterTableSQL, testInfo)
// TODO: Add more DDL statements.
}
func (s *testStateChangeSuite) test(c *C, tableName, alterTableSQL string, testInfo *testExecInfo) {
_, err := s.se.Execute(context.Background(), `create table t (
c1 int,
c2 varchar(64),
c3 enum('N','Y') not null default 'N',
c4 timestamp on update current_timestamp,
key(c1, c2))`)
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table t")
_, err = s.se.Execute(context.Background(), "insert into t values(1, 'a', 'N', '2017-07-01')")
c.Assert(err, IsNil)
callback := &ddl.TestDDLCallback{}
prevState := model.StateNone
var checkErr error
err = testInfo.parseSQLs(s.p)
c.Assert(err, IsNil, Commentf("error stack %v", errors.ErrorStack(err)))
times := 0
callback.OnJobUpdatedExported = func(job *model.Job) {
if job.SchemaState == prevState || checkErr != nil || times >= 3 {
return
}
times++
switch job.SchemaState {
case model.StateDeleteOnly:
			// In this state we execute every sqlInfo once, using the first session and its associated information.
err = testInfo.compileSQL(0)
if err != nil {
checkErr = err
break
}
err = testInfo.execSQL(0)
if err != nil {
checkErr = err
}
case model.StateWriteOnly:
			// In this state we put the schema information into the second case.
err = testInfo.compileSQL(1)
if err != nil {
checkErr = err
}
case model.StateWriteReorganization:
			// In this state we execute every sqlInfo once, using the third session and its associated information.
err = testInfo.compileSQL(2)
if err != nil {
checkErr = err
break
}
err = testInfo.execSQL(2)
if err != nil {
checkErr = err
break
}
			// Mock that the server is in the `write only` state.
err = testInfo.execSQL(1)
if err != nil {
checkErr = err
break
}
			// In this state we put the schema information into the fourth case.
err = testInfo.compileSQL(3)
if err != nil {
checkErr = err
}
}
}
d := s.dom.DDL()
originalCallback := d.GetHook()
defer d.(ddl.DDLForTest).SetHook(originalCallback)
d.(ddl.DDLForTest).SetHook(callback)
_, err = s.se.Execute(context.Background(), alterTableSQL)
c.Assert(err, IsNil)
err = testInfo.compileSQL(4)
c.Assert(err, IsNil)
err = testInfo.execSQL(4)
c.Assert(err, IsNil)
	// Mock that the server is in the `write reorg` state.
err = testInfo.execSQL(3)
c.Assert(err, IsNil)
c.Assert(errors.ErrorStack(checkErr), Equals, "")
}
type stateCase struct {
session session.Session
rawStmt ast.StmtNode
stmt sqlexec.Statement
expectedExecErr string
expectedCompileErr string
}
type sqlInfo struct {
sql string
	// cases holds multiple stateCases.
	// Every case needs to be executed with a different schema state.
cases []*stateCase
}
// testExecInfo contains some SQL information and the number of times each SQL is executed
// during a DDL statement.
type testExecInfo struct {
	// execCases means every SQL needs to be executed execCases times,
	// and the schema state is different at each execution.
execCases int
	// sqlInfos holds the multiple SQLs this test needs to run.
sqlInfos []*sqlInfo
}
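// createSessions creates a dedicated session for every test case and switches
// each of them to the useDB database.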
func (t *testExecInfo) createSessions(store kv.Storage, useDB string) error {
var err error
for i, info := range t.sqlInfos {
for j, c := range info.cases {
c.session, err = session.CreateSession4Test(store)
if err != nil {
return errors.Trace(err)
}
_, err = c.session.Execute(context.Background(), "use "+useDB)
if err != nil {
return errors.Trace(err)
}
			// It's used for debugging.
c.session.SetConnectionID(uint64(i*10 + j))
}
}
return nil
}
func (t *testExecInfo) parseSQLs(p *parser.Parser) error {
if t.execCases <= 0 {
return nil
}
var err error
for _, sqlInfo := range t.sqlInfos {
seVars := sqlInfo.cases[0].session.GetSessionVars()
charset, collation := seVars.GetCharsetInfo()
for j := 0; j < t.execCases; j++ {
sqlInfo.cases[j].rawStmt, err = p.ParseOneStmt(sqlInfo.sql, charset, collation)
if err != nil {
return errors.Trace(err)
}
}
}
return nil
}
func (t *testExecInfo) compileSQL(idx int) (err error) {
for _, info := range t.sqlInfos {
c := info.cases[idx]
compiler := executor.Compiler{Ctx: c.session}
se := c.session
ctx := context.TODO()
se.PrepareTxnCtx(ctx)
sctx := se.(sessionctx.Context)
if err = executor.ResetContextOfStmt(sctx, c.rawStmt); err != nil {
return errors.Trace(err)
}
c.stmt, err = compiler.Compile(ctx, c.rawStmt)
if c.expectedCompileErr != "" {
if err == nil {
err = errors.Errorf("expected error %s but got nil", c.expectedCompileErr)
} else if err.Error() == c.expectedCompileErr {
err = nil
}
}
if err != nil {
return errors.Trace(err)
}
}
return nil
}
func (t *testExecInfo) execSQL(idx int) error {
for _, sqlInfo := range t.sqlInfos {
c := sqlInfo.cases[idx]
if c.expectedCompileErr != "" {
continue
}
_, err := c.stmt.Exec(context.TODO())
if c.expectedExecErr != "" {
if err == nil {
err = errors.Errorf("expected error %s but got nil", c.expectedExecErr)
} else if err.Error() == c.expectedExecErr {
err = nil
}
}
if err != nil {
return errors.Trace(err)
}
err = c.session.CommitTxn(context.TODO())
if err != nil {
return errors.Trace(err)
}
}
return nil
}
type sqlWithErr struct {
sql string
expectErr error
}
type expectQuery struct {
sql string
rows []string
}
func (s *testStateChangeSuite) TestAppendEnum(c *C) {
_, err := s.se.Execute(context.Background(), `create table t (
c1 varchar(64),
c2 enum('N','Y') not null default 'N',
c3 timestamp on update current_timestamp,
c4 int primary key,
unique key idx2 (c2, c3))`)
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table t")
_, err = s.se.Execute(context.Background(), "insert into t values('a', 'N', '2017-07-01', 8)")
c.Assert(err, IsNil)
	// Make sure these SQLs use the index scan plan.
_, err = s.se.Execute(context.Background(), "drop stats t")
c.Assert(err, IsNil)
se, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into t values('a', 'A', '2018-09-19', 9)")
c.Assert(err.Error(), Equals, "[table:1366]Incorrect enum value: 'A' for column 'c2' at row 1")
failAlterTableSQL1 := "alter table t change c2 c2 enum('N') DEFAULT 'N'"
_, err = s.se.Execute(context.Background(), failAlterTableSQL1)
c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported modify column: the number of enum column's elements is less than the original: 2")
failAlterTableSQL2 := "alter table t change c2 c2 int default 0"
_, err = s.se.Execute(context.Background(), failAlterTableSQL2)
c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported modify column: cannot modify enum type column's to type int(11)")
alterTableSQL := "alter table t change c2 c2 enum('N','Y','A') DEFAULT 'A'"
_, err = s.se.Execute(context.Background(), alterTableSQL)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "insert into t values('a', 'A', '2018-09-20', 10)")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "insert into t (c1, c3, c4) values('a', '2018-09-21', 11)")
c.Assert(err, IsNil)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test_db_state")
result, err := s.execQuery(tk, "select c4, c2 from t order by c4 asc")
c.Assert(err, IsNil)
expected := []string{"8 N", "10 A", "11 A"}
	err = checkResult(result, testkit.Rows(expected...))
	c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "update t set c2='N' where c4 = 10")
c.Assert(err, IsNil)
result, err = s.execQuery(tk, "select c2 from t where c4 = 10")
c.Assert(err, IsNil)
expected = []string{"8 N", "10 N", "11 A"}
checkResult(result, testkit.Rows(expected...))
}
// https://github.com/pingcap/tidb/pull/6249 fixes the following two test cases.
func (s *testStateChangeSuite) TestWriteOnlyWriteNULL(c *C) {
sqls := make([]sqlWithErr, 1)
sqls[0] = sqlWithErr{"insert t set c1 = 'c1_new', c3 = '2019-02-12', c4 = 8 on duplicate key update c1 = values(c1)", nil}
addColumnSQL := "alter table t add column c5 int not null default 1 after c4"
expectQuery := &expectQuery{"select c4, c5 from t", []string{"8 1"}}
	// TODO: This case should always fail in write-only state, but it doesn't. We use write-reorganization state here to keep it running stably. It needs a double check.
s.runTestInSchemaState(c, model.StateWriteReorganization, true, addColumnSQL, sqls, expectQuery)
}
func (s *testStateChangeSuite) TestWriteOnlyOnDupUpdate(c *C) {
sqls := make([]sqlWithErr, 3)
sqls[0] = sqlWithErr{"delete from t", nil}
sqls[1] = sqlWithErr{"insert t set c1 = 'c1_dup', c3 = '2018-02-12', c4 = 2 on duplicate key update c1 = values(c1)", nil}
sqls[2] = sqlWithErr{"insert t set c1 = 'c1_new', c3 = '2019-02-12', c4 = 2 on duplicate key update c1 = values(c1)", nil}
addColumnSQL := "alter table t add column c5 int not null default 1 after c4"
expectQuery := &expectQuery{"select c4, c5 from t", []string{"2 1"}}
	// TODO: This case should always fail in write-only state, but it doesn't. We use write-reorganization state here to keep it running stably. It needs a double check.
s.runTestInSchemaState(c, model.StateWriteReorganization, true, addColumnSQL, sqls, expectQuery)
}
func (s *testStateChangeSuite) TestWriteOnlyOnDupUpdateForAddColumns(c *C) {
sqls := make([]sqlWithErr, 3)
sqls[0] = sqlWithErr{"delete from t", nil}
sqls[1] = sqlWithErr{"insert t set c1 = 'c1_dup', c3 = '2018-02-12', c4 = 2 on duplicate key update c1 = values(c1)", nil}
sqls[2] = sqlWithErr{"insert t set c1 = 'c1_new', c3 = '2019-02-12', c4 = 2 on duplicate key update c1 = values(c1)", nil}
addColumnsSQL := "alter table t add column c5 int not null default 1 after c4, add column c44 int not null default 1"
expectQuery := &expectQuery{"select c4, c5, c44 from t", []string{"2 1 1"}}
	// TODO: This case should always fail in write-only state, but it doesn't. We use write-reorganization state here to keep it running stably. It needs a double check.
s.runTestInSchemaState(c, model.StateWriteReorganization, true, addColumnsSQL, sqls, expectQuery)
}
// TestWriteOnly tests whether the correct columns are used in PhysicalIndexScan's ToPB function.
func (s *testStateChangeSuite) TestWriteOnly(c *C) {
sqls := make([]sqlWithErr, 3)
sqls[0] = sqlWithErr{"delete from t where c1 = 'a'", nil}
sqls[1] = sqlWithErr{"update t use index(idx2) set c1 = 'c1_update' where c1 = 'a'", nil}
sqls[2] = sqlWithErr{"insert t set c1 = 'c1_insert', c3 = '2018-02-12', c4 = 1", nil}
addColumnSQL := "alter table t add column c5 int not null default 1 first"
s.runTestInSchemaState(c, model.StateWriteOnly, true, addColumnSQL, sqls, nil)
}
// TestWriteOnlyForAddColumns tests whether the correct columns are used in PhysicalIndexScan's ToPB function.
func (s *testStateChangeSuite) TestWriteOnlyForAddColumns(c *C) {
sqls := make([]sqlWithErr, 3)
sqls[0] = sqlWithErr{"delete from t where c1 = 'a'", nil}
sqls[1] = sqlWithErr{"update t use index(idx2) set c1 = 'c1_update' where c1 = 'a'", nil}
sqls[2] = sqlWithErr{"insert t set c1 = 'c1_insert', c3 = '2018-02-12', c4 = 1", nil}
addColumnsSQL := "alter table t add column c5 int not null default 1 first, add column c6 int not null default 1"
s.runTestInSchemaState(c, model.StateWriteOnly, true, addColumnsSQL, sqls, nil)
}
// TestDeleteOnly tests whether the correct columns are used in PhysicalIndexScan's ToPB function.
func (s *testStateChangeSuite) TestDeleteOnly(c *C) {
sqls := make([]sqlWithErr, 1)
sqls[0] = sqlWithErr{"insert t set c1 = 'c1_insert', c3 = '2018-02-12', c4 = 1",
errors.Errorf("Can't find column c1")}
dropColumnSQL := "alter table t drop column c1"
s.runTestInSchemaState(c, model.StateDeleteOnly, true, dropColumnSQL, sqls, nil)
}
// TestDeleteOnlyForDropColumns tests whether the correct columns are used in PhysicalIndexScan's ToPB function.
func (s *testStateChangeSuite) TestDeleteOnlyForDropColumns(c *C) {
sqls := make([]sqlWithErr, 1)
sqls[0] = sqlWithErr{"insert t set c1 = 'c1_insert', c3 = '2018-02-12', c4 = 1",
errors.Errorf("Can't find column c1")}
dropColumnsSQL := "alter table t drop column c1, drop column c3"
s.runTestInSchemaState(c, model.StateDeleteOnly, true, dropColumnsSQL, sqls, nil)
}
func (s *testStateChangeSuite) TestWriteOnlyForDropColumn(c *C) {
_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), `create table tt (c1 int, c4 int)`)
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into tt (c1, c4) values(8, 8)")
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table tt")
sqls := make([]sqlWithErr, 2)
sqls[0] = sqlWithErr{"update t set c1='5', c3='2020-03-01';", errors.New("[planner:1054]Unknown column 'c3' in 'field list'")}
sqls[1] = sqlWithErr{"update t t1, tt t2 set t1.c1='5', t1.c3='2020-03-01', t2.c1='10' where t1.c4=t2.c4",
errors.New("[planner:1054]Unknown column 'c3' in 'field list'")}
// TODO: Fix the case of sqls[2].
// sqls[2] = sqlWithErr{"update t set c1='5' where c3='2017-07-01';", errors.New("[planner:1054]Unknown column 'c3' in 'field list'")}
dropColumnSQL := "alter table t drop column c3"
query := &expectQuery{sql: "select * from t;", rows: []string{"a N 8"}}
s.runTestInSchemaState(c, model.StateWriteOnly, false, dropColumnSQL, sqls, query)
}
func (s *testStateChangeSuite) TestWriteOnlyForDropColumns(c *C) {
_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), `create table t_drop_columns (c1 int, c4 int)`)
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into t_drop_columns (c1, c4) values(8, 8)")
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table t_drop_columns")
sqls := make([]sqlWithErr, 2)
sqls[0] = sqlWithErr{"update t set c1='5', c3='2020-03-01';", errors.New("[planner:1054]Unknown column 'c3' in 'field list'")}
sqls[1] = sqlWithErr{"update t t1, t_drop_columns t2 set t1.c1='5', t1.c3='2020-03-01', t2.c1='10' where t1.c4=t2.c4",
errors.New("[planner:1054]Unknown column 'c3' in 'field list'")}
// TODO: Fix the case of sqls[2].
// sqls[2] = sqlWithErr{"update t set c1='5' where c3='2017-07-01';", errors.New("[planner:1054]Unknown column 'c3' in 'field list'")}
dropColumnsSQL := "alter table t drop column c3, drop column c1"
query := &expectQuery{sql: "select * from t;", rows: []string{"N 8"}}
s.runTestInSchemaState(c, model.StateWriteOnly, false, dropColumnsSQL, sqls, query)
}
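// runTestInSchemaState creates a fresh table t, installs a DDL hook that runs
// sqlWithErrs in a second session once the DDL job reaches the given schema
// state, executes alterTableSQL, and optionally verifies expectQuery after the
// DDL finishes.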
func (s *testStateChangeSuiteBase) runTestInSchemaState(c *C, state model.SchemaState, isOnJobUpdated bool, alterTableSQL string,
sqlWithErrs []sqlWithErr, expectQuery *expectQuery) {
_, err := s.se.Execute(context.Background(), `create table t (
c1 varchar(64),
c2 enum('N','Y') not null default 'N',
c3 timestamp on update current_timestamp,
c4 int primary key,
unique key idx2 (c2))`)
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table t")
_, err = s.se.Execute(context.Background(), "insert into t values('a', 'N', '2017-07-01', 8)")
c.Assert(err, IsNil)
// Make sure these SQLs use the plan of index scan.
_, err = s.se.Execute(context.Background(), "drop stats t")
c.Assert(err, IsNil)
callback := &ddl.TestDDLCallback{}
prevState := model.StateNone
var checkErr error
times := 0
se, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
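// cbFunc runs the test statements once the job reaches the target schema
// state and records the first unexpected error in checkErr.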
cbFunc := func(job *model.Job) {
if job.SchemaState == prevState || checkErr != nil || times >= 3 {
return
}
times++
if job.SchemaState != state {
return
}
for _, sqlWithErr := range sqlWithErrs {
_, err = se.Execute(context.Background(), sqlWithErr.sql)
if !terror.ErrorEqual(err, sqlWithErr.expectErr) {
checkErr = err
if checkErr == nil {
checkErr = errors.New("err can't be nil")
}
break
}
}
}
if isOnJobUpdated {
callback.OnJobUpdatedExported = cbFunc
} else {
callback.OnJobRunBeforeExported = cbFunc
}
d := s.dom.DDL()
originalCallback := d.GetHook()
d.(ddl.DDLForTest).SetHook(callback)
_, err = s.se.Execute(context.Background(), alterTableSQL)
c.Assert(err, IsNil)
c.Assert(errors.ErrorStack(checkErr), Equals, "")
d.(ddl.DDLForTest).SetHook(originalCallback)
if expectQuery != nil {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test_db_state")
result, err := s.execQuery(tk, expectQuery.sql)
c.Assert(err, IsNil)
err = checkResult(result, testkit.Rows(expectQuery.rows...))
c.Assert(err, IsNil)
}
}
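// execQuery executes sql through the testkit and wraps the record set into a
// testkit.Result, returning any error instead of asserting, so it is safe to
// call from inside DDL hooks.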
func (s *testStateChangeSuiteBase) execQuery(tk *testkit.TestKit, sql string, args ...interface{}) (*testkit.Result, error) {
comment := Commentf("sql:%s, args:%v", sql, args)
rs, err := tk.Exec(sql, args...)
if err != nil {
return nil, err
}
result := tk.ResultSetToResult(rs, comment)
return result, nil
}
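// checkResult compares a query result with the expected rows and returns an
// error instead of asserting, so a mismatch inside a DDL hook cannot hang the
// job.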
func checkResult(result *testkit.Result, expected [][]interface{}) error {
got := fmt.Sprintf("%s", result.Rows())
need := fmt.Sprintf("%s", expected)
if got != need {
return fmt.Errorf("need %v, but got %v", need, got)
}
return nil
}
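// CheckResult is the exported counterpart of execQuery: it executes sql and
// wraps the record set into a testkit.Result.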
func (s *testStateChangeSuiteBase) CheckResult(tk *testkit.TestKit, sql string, args ...interface{}) (*testkit.Result, error) {
comment := Commentf("sql:%s, args:%v", sql, args)
rs, err := tk.Exec(sql, args...)
if err != nil {
return nil, err
}
result := tk.ResultSetToResult(rs, comment)
return result, nil
}
func (s *testStateChangeSuite) TestShowIndex(c *C) {
_, err := s.se.Execute(context.Background(), `create table t(c1 int primary key, c2 int)`)
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table t")
callback := &ddl.TestDDLCallback{}
prevState := model.StateNone
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test_db_state")
showIndexSQL := `show index from t`
var checkErr error
callback.OnJobUpdatedExported = func(job *model.Job) {
if job.SchemaState == prevState || checkErr != nil {
return
}
switch job.SchemaState {
case model.StateDeleteOnly, model.StateWriteOnly, model.StateWriteReorganization:
result, err1 := s.execQuery(tk, showIndexSQL)
if err1 != nil {
checkErr = err1
break
}
checkErr = checkResult(result, testkit.Rows("t 0 PRIMARY 1 c1 A 0 <nil> <nil> BTREE YES NULL"))
}
}
d := s.dom.DDL()
originalCallback := d.GetHook()
d.(ddl.DDLForTest).SetHook(callback)
alterTableSQL := `alter table t add index c2(c2)`
_, err = s.se.Execute(context.Background(), alterTableSQL)
c.Assert(err, IsNil)
c.Assert(errors.ErrorStack(checkErr), Equals, "")
result, err := s.execQuery(tk, showIndexSQL)
c.Assert(err, IsNil)
err = checkResult(result, testkit.Rows(
"t 0 PRIMARY 1 c1 A 0 <nil> <nil> BTREE YES NULL",
"t 1 c2 1 c2 A 0 <nil> <nil> YES BTREE YES NULL",
))
c.Assert(err, IsNil)
d.(ddl.DDLForTest).SetHook(originalCallback)
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), `create table tr(
id int, name varchar(50),
purchased date
)
partition by range( year(purchased) ) (
partition p0 values less than (1990),
partition p1 values less than (1995),
partition p2 values less than (2000),
partition p3 values less than (2005),
partition p4 values less than (2010),
partition p5 values less than (2015)
);`)
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table tr")
_, err = s.se.Execute(context.Background(), "create index idx1 on tr (purchased);")
c.Assert(err, IsNil)
result, err = s.execQuery(tk, "show index from tr;")
c.Assert(err, IsNil)
err = checkResult(result, testkit.Rows("tr 1 idx1 1 purchased A 0 <nil> <nil> YES BTREE YES NULL"))
c.Assert(err, IsNil)
}
func (s *testStateChangeSuite) TestParallelAlterModifyColumn(c *C) {
sql := "ALTER TABLE t MODIFY COLUMN b int FIRST;"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, IsNil)
_, err := s.se.Execute(context.Background(), "select * from t")
c.Assert(err, IsNil)
}
s.testControlParallelExecSQL(c, sql, sql, f)
}
// TODO: This test is not a test that performs two DDLs in parallel.
// So we should not use the function of testControlParallelExecSQL. We will handle this test in the next PR.
// func (s *testStateChangeSuite) TestParallelColumnModifyingDefinition(c *C) {
// sql1 := "insert into t(b) values (null);"
// sql2 := "alter table t change b b2 bigint not null;"
// f := func(c *C, err1, err2 error) {
// c.Assert(err1, IsNil)
// if err2 != nil {
// c.Assert(err2.Error(), Equals, "[ddl:1265]Data truncated for column 'b2' at row 1")
// }
// }
// s.testControlParallelExecSQL(c, sql1, sql2, f)
// }
func (s *testStateChangeSuite) TestParallelAddColumAndSetDefaultValue(c *C) {
_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), `create table tx (
c1 varchar(64),
c2 enum('N','Y') not null default 'N',
primary key idx2 (c2, c1))`)
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into tx values('a', 'N')")
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table tx")
sql1 := "alter table tx add column cx int after c1"
sql2 := "alter table tx alter c2 set default 'N'"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, IsNil)
_, err := s.se.Execute(context.Background(), "delete from tx where c1='a'")
c.Assert(err, IsNil)
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *testStateChangeSuite) TestParallelChangeColumnName(c *C) {
sql1 := "ALTER TABLE t CHANGE a aa int;"
sql2 := "ALTER TABLE t CHANGE b aa int;"
f := func(c *C, err1, err2 error) {
// Make sure exactly one of the two DDLs encounters the 'duplicate column name' error.
var oneErr error
if (err1 != nil && err2 == nil) || (err1 == nil && err2 != nil) {
if err1 != nil {
oneErr = err1
} else {
oneErr = err2
}
}
c.Assert(oneErr.Error(), Equals, "[schema:1060]Duplicate column name 'aa'")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *testStateChangeSuite) TestParallelAlterAddIndex(c *C) {
sql1 := "ALTER TABLE t add index index_b(b);"
sql2 := "CREATE INDEX index_b ON t (c);"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:1061]index already exist index_b")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *serialTestStateChangeSuite) TestParallelAlterAddExpressionIndex(c *C) {
config.GetGlobalConfig().Experimental.AllowsExpressionIndex = true
sql1 := "ALTER TABLE t add index expr_index_b((b+1));"
sql2 := "CREATE INDEX expr_index_b ON t ((c+1));"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:1061]index already exist expr_index_b")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *testStateChangeSuite) TestParallelAddPrimaryKey(c *C) {
sql1 := "ALTER TABLE t add primary key index_b(b);"
sql2 := "ALTER TABLE t add primary key index_b(c);"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[schema:1068]Multiple primary key defined")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *testStateChangeSuite) TestParallelAlterAddPartition(c *C) {
sql1 := `alter table t_part add partition (
partition p2 values less than (30)
);`
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:1493]VALUES LESS THAN value must be strictly increasing for each partition")
}
s.testControlParallelExecSQL(c, sql1, sql1, f)
}
func (s *testStateChangeSuite) TestParallelDropColumn(c *C) {
sql := "ALTER TABLE t drop COLUMN c ;"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:1091]column c doesn't exist")
}
s.testControlParallelExecSQL(c, sql, sql, f)
}
func (s *testStateChangeSuite) TestParallelDropColumns(c *C) {
sql := "ALTER TABLE t drop COLUMN b, drop COLUMN c;"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:1091]column b doesn't exist")
}
s.testControlParallelExecSQL(c, sql, sql, f)
}
func (s *testStateChangeSuite) TestParallelDropIfExistsColumns(c *C) {
sql := "ALTER TABLE t drop COLUMN if exists b, drop COLUMN if exists c;"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, IsNil)
}
s.testControlParallelExecSQL(c, sql, sql, f)
}
func (s *testStateChangeSuite) TestParallelDropIndex(c *C) {
sql1 := "alter table t drop index idx1 ;"
sql2 := "alter table t drop index idx2 ;"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[autoid:1075]Incorrect table definition; there can be only one auto column and it must be defined as a key")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *testStateChangeSuite) TestParallelDropPrimaryKey(c *C) {
s.preSQL = "ALTER TABLE t add primary key index_b(c);"
defer func() {
s.preSQL = ""
}()
sql1 := "alter table t drop primary key;"
sql2 := "alter table t drop primary key;"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:1091]index PRIMARY doesn't exist")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *testStateChangeSuite) TestParallelCreateAndRename(c *C) {
sql1 := "create table t_exists(c int);"
sql2 := "alter table t rename to t_exists;"
defer s.se.Execute(context.Background(), "drop table t_exists")
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[schema:1050]Table 't_exists' already exists")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
type checkRet func(c *C, err1, err2 error)
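// prepareTestControlParallelExecSQL creates two sessions and installs a DDL
// hook that parks the first job until both jobs are visible in the DDL job
// queue. The returned channel is closed once the first job is enqueued, so
// callers can start the second statement strictly after the first.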
func (s *testStateChangeSuiteBase) prepareTestControlParallelExecSQL(c *C) (session.Session, session.Session, chan struct{}, ddl.Callback) {
callback := &ddl.TestDDLCallback{}
times := 0
callback.OnJobUpdatedExported = func(job *model.Job) {
if times != 0 {
return
}
var qLen int
for {
kv.RunInNewTxn(s.store, false, func(txn kv.Transaction) error {
jobs, err1 := admin.GetDDLJobs(txn)
if err1 != nil {
return err1
}
qLen = len(jobs)
return nil
})
if qLen == 2 {
break
}
time.Sleep(5 * time.Millisecond)
}
times++
}
d := s.dom.DDL()
originalCallback := d.GetHook()
d.(ddl.DDLForTest).SetHook(callback)
se, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
se1, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se1.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
ch := make(chan struct{})
// Make sure sql1 is put into the DDLJobQueue.
go func() {
var qLen int
for {
kv.RunInNewTxn(s.store, false, func(txn kv.Transaction) error {
jobs, err3 := admin.GetDDLJobs(txn)
if err3 != nil {
return err3
}
qLen = len(jobs)
return nil
})
if qLen == 1 {
// Make sure sql2 is executed after sql1.
close(ch)
break
}
time.Sleep(5 * time.Millisecond)
}
}()
return se, se1, ch, originalCallback
}
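// testControlParallelExecSQL creates the test tables, runs sql1 and sql2 in
// two sessions so that sql2 is submitted only after sql1 has entered the DDL
// job queue, and hands both errors to f for verification.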
func (s *testStateChangeSuiteBase) testControlParallelExecSQL(c *C, sql1, sql2 string, f checkRet) {
_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "create table t(a int, b int, c int, d int auto_increment,e int, index idx1(d), index idx2(d,e))")
c.Assert(err, IsNil)
if len(s.preSQL) != 0 {
_, err := s.se.Execute(context.Background(), s.preSQL)
c.Assert(err, IsNil)
}
defer s.se.Execute(context.Background(), "drop table t")
_, err = s.se.Execute(context.Background(), "drop database if exists t_part")
c.Assert(err, IsNil)
s.se.Execute(context.Background(), `create table t_part (a int key)
partition by range(a) (
partition p0 values less than (10),
partition p1 values less than (20)
);`)
se, se1, ch, originalCallback := s.prepareTestControlParallelExecSQL(c)
defer s.dom.DDL().(ddl.DDLForTest).SetHook(originalCallback)
var err1 error
var err2 error
wg := sync.WaitGroup{}
wg.Add(2)
go func() {
defer wg.Done()
_, err1 = se.Execute(context.Background(), sql1)
}()
go func() {
defer wg.Done()
<-ch
_, err2 = se1.Execute(context.Background(), sql2)
}()
wg.Wait()
f(c, err1, err2)
}
func (s *testStateChangeSuite) TestParallelUpdateTableReplica(c *C) {
ctx := context.Background()
_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(ctx, "drop table if exists t1;")
c.Assert(err, IsNil)
_, err = s.se.Execute(ctx, "create table t1 (a int);")
c.Assert(err, IsNil)
_, err = s.se.Execute(ctx, "alter table t1 set tiflash replica 3 location labels 'a','b';")
c.Assert(err, IsNil)
se, se1, ch, originalCallback := s.prepareTestControlParallelExecSQL(c)
defer s.dom.DDL().(ddl.DDLForTest).SetHook(originalCallback)
t1 := testGetTableByName(c, se, "test_db_state", "t1")
var err1 error
var err2 error
wg := sync.WaitGroup{}
wg.Add(2)
go func() {
defer wg.Done()
// Mock for table tiflash replica was available.
err1 = domain.GetDomain(se).DDL().UpdateTableReplicaInfo(se, t1.Meta().ID, true)
}()
go func() {
defer wg.Done()
<-ch
// Mock for table tiflash replica was available.
err2 = domain.GetDomain(se1).DDL().UpdateTableReplicaInfo(se1, t1.Meta().ID, true)
}()
wg.Wait()
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:-1]the replica available status of table t1 is already updated")
}
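// testParallelExecSQL executes the same statement from two sessions at once.
// A hook briefly delays the first job so both jobs can enqueue; both
// executions are expected to succeed.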
func (s *testStateChangeSuite) testParallelExecSQL(c *C, sql string) {
se, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
se1, err1 := session.CreateSession(s.store)
c.Assert(err1, IsNil)
_, err = se1.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
var err2, err3 error
wg := sync.WaitGroup{}
callback := &ddl.TestDDLCallback{}
once := sync.Once{}
callback.OnJobUpdatedExported = func(job *model.Job) {
// Sleep for a while to let the other job enqueue.
once.Do(func() {
time.Sleep(time.Millisecond * 10)
})
}
d := s.dom.DDL()
originalCallback := d.GetHook()
defer d.(ddl.DDLForTest).SetHook(originalCallback)
d.(ddl.DDLForTest).SetHook(callback)
wg.Add(2)
go func() {
defer wg.Done()
_, err2 = se.Execute(context.Background(), sql)
}()
go func() {
defer wg.Done()
_, err3 = se1.Execute(context.Background(), sql)
}()
wg.Wait()
c.Assert(err2, IsNil)
c.Assert(err3, IsNil)
}
// TestCreateTableIfNotExists executes create table if not exists xxx in parallel. No error is expected.
func (s *testStateChangeSuite) TestCreateTableIfNotExists(c *C) {
defer s.se.Execute(context.Background(), "drop table test_not_exists")
s.testParallelExecSQL(c, "create table if not exists test_not_exists(a int);")
}
// TestCreateDBIfNotExists executes create database if not exists xxx in parallel. No error is expected.
func (s *testStateChangeSuite) TestCreateDBIfNotExists(c *C) {
defer s.se.Execute(context.Background(), "drop database test_not_exists")
s.testParallelExecSQL(c, "create database if not exists test_not_exists;")
}
// TestDDLIfNotExists executes some DDLs with the `if not exists` clause in parallel. No error is expected.
func (s *testStateChangeSuite) TestDDLIfNotExists(c *C) {
defer s.se.Execute(context.Background(), "drop table test_not_exists")
_, err := s.se.Execute(context.Background(), "create table if not exists test_not_exists(a int)")
c.Assert(err, IsNil)
// ADD COLUMN
s.testParallelExecSQL(c, "alter table test_not_exists add column if not exists b int")
// ADD COLUMNS
s.testParallelExecSQL(c, "alter table test_not_exists add column if not exists (c11 int, d11 int)")
// ADD INDEX
s.testParallelExecSQL(c, "alter table test_not_exists add index if not exists idx_b (b)")
// CREATE INDEX
s.testParallelExecSQL(c, "create index if not exists idx_b on test_not_exists (b)")
}
// TestDDLIfExists executes some DDLs with the `if exists` clause in parallel. No error is expected.
func (s *testStateChangeSuite) TestDDLIfExists(c *C) {
defer func() {
s.se.Execute(context.Background(), "drop table test_exists")
s.se.Execute(context.Background(), "drop table test_exists_2")
}()
_, err := s.se.Execute(context.Background(), "create table if not exists test_exists (a int key, b int)")
c.Assert(err, IsNil)
// DROP COLUMNS
s.testParallelExecSQL(c, "alter table test_exists drop column if exists c, drop column if exists d")
// DROP COLUMN
s.testParallelExecSQL(c, "alter table test_exists drop column if exists b") // only `a` exists now
// CHANGE COLUMN
s.testParallelExecSQL(c, "alter table test_exists change column if exists a c int") // only, `c` exists now
// MODIFY COLUMN
s.testParallelExecSQL(c, "alter table test_exists modify column if exists a bigint")
// DROP INDEX
_, err = s.se.Execute(context.Background(), "alter table test_exists add index idx_c (c)")
c.Assert(err, IsNil)
s.testParallelExecSQL(c, "alter table test_exists drop index if exists idx_c")
// DROP PARTITION (ADD PARTITION tested in TestParallelAlterAddPartition)
_, err = s.se.Execute(context.Background(), "create table test_exists_2 (a int key) partition by range(a) (partition p0 values less than (10), partition p1 values less than (20))")
c.Assert(err, IsNil)
s.testParallelExecSQL(c, "alter table test_exists_2 drop partition if exists p1")
}
// TestParallelDDLBeforeRunDDLJob tests executing DDL through a session that holds an outdated information schema.
// It simulates the following scenario:
// In a cluster, TiDB "a" executes a DDL statement.
// TiDB "b" fails to reload the schema, then executes the DDL statement associated with the one executed by "a".
func (s *testStateChangeSuite) TestParallelDDLBeforeRunDDLJob(c *C) {
defer s.se.Execute(context.Background(), "drop table test_table")
_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "create table test_table (c1 int, c2 int default 1, index (c1))")
c.Assert(err, IsNil)
// Create two sessions.
se, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
se1, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se1.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
intercept := &ddl.TestInterceptor{}
firstConnID := uint64(1)
finishedCnt := int32(0)
interval := 5 * time.Millisecond
var sessionCnt int32 // sessionCnt is the number of sessions that have entered OnGetInfoSchema.
intercept.OnGetInfoSchemaExported = func(ctx sessionctx.Context, is infoschema.InfoSchema) infoschema.InfoSchema {
// The following code is for testing.
// Make sure the two sessions get the same information schema before executing DDL.
// After the first session executes its DDL, the second session executes its DDL.
var info infoschema.InfoSchema
atomic.AddInt32(&sessionCnt, 1)
for {
// Make sure there are two sessions running here.
if atomic.LoadInt32(&sessionCnt) == 2 {
info = is
break
}
// Print a log to help diagnose if TestParallelDDLBeforeRunDDLJob hangs.
log.Info("sleep in TestParallelDDLBeforeRunDDLJob", zap.String("interval", interval.String()))
time.Sleep(interval)
}
currID := ctx.GetSessionVars().ConnectionID
for {
seCnt := atomic.LoadInt32(&sessionCnt)
// Make sure the two sessions have got the same information schema. Then the first session can go on,
// or, once the first session has finished this SQL (seCnt == finishedCnt), the other sessions can go on.
if currID == firstConnID || seCnt == finishedCnt {
break
}
// Print a log to help diagnose if TestParallelDDLBeforeRunDDLJob hangs.
log.Info("sleep in TestParallelDDLBeforeRunDDLJob", zap.String("interval", interval.String()))
time.Sleep(interval)
}
return info
}
d := s.dom.DDL()
d.(ddl.DDLForTest).SetInterceptoror(intercept)
// Make sure connection 1 executes its SQL before connection 2,
// so that connection 2 executes its SQL with an outdated information schema.
wg := sync.WaitGroup{}
wg.Add(2)
go func() {
defer wg.Done()
se.SetConnectionID(firstConnID)
_, err1 := se.Execute(context.Background(), "alter table test_table drop column c2")
c.Assert(err1, IsNil)
atomic.StoreInt32(&sessionCnt, finishedCnt)
}()
go func() {
defer wg.Done()
se1.SetConnectionID(2)
_, err2 := se1.Execute(context.Background(), "alter table test_table add column c2 int")
c.Assert(err2, NotNil)
c.Assert(strings.Contains(err2.Error(), "Information schema is changed"), IsTrue)
}()
wg.Wait()
intercept = &ddl.TestInterceptor{}
d.(ddl.DDLForTest).SetInterceptoror(intercept)
}
func (s *testStateChangeSuite) TestParallelAlterSchemaCharsetAndCollate(c *C) {
sql := "ALTER SCHEMA test_db_state CHARSET utf8mb4 COLLATE utf8mb4_general_ci"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, IsNil)
}
s.testControlParallelExecSQL(c, sql, sql, f)
sql = `SELECT default_character_set_name, default_collation_name
FROM information_schema.schemata
WHERE schema_name='test_db_state'`
tk := testkit.NewTestKit(c, s.store)
tk.MustQuery(sql).Check(testkit.Rows("utf8mb4 utf8mb4_general_ci"))
}
// TestParallelTruncateTableAndAddColumn tests adding a column while the table is being truncated.
func (s *testStateChangeSuite) TestParallelTruncateTableAndAddColumn(c *C) {
sql1 := "truncate table t"
sql2 := "alter table t add column c3 int"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, NotNil)
c.Assert(err2.Error(), Equals, "[domain:8028]Information schema is changed during the execution of the statement(for example, table definition may be updated by other DDL ran in parallel). If you see this error often, try increasing `tidb_max_delta_schema_count`. [try again later]")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
// TestParallelTruncateTableAndAddColumns tests adding columns while the table is being truncated.
func (s *testStateChangeSuite) TestParallelTruncateTableAndAddColumns(c *C) {
sql1 := "truncate table t"
sql2 := "alter table t add column c3 int, add column c4 int"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, NotNil)
c.Assert(err2.Error(), Equals, "[domain:8028]Information schema is changed during the execution of the statement(for example, table definition may be updated by other DDL ran in parallel). If you see this error often, try increasing `tidb_max_delta_schema_count`. [try again later]")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
// TestParallelFlashbackTable tests parallel flashback table.
func (s *serialTestStateChangeSuite) TestParallelFlashbackTable(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil)
defer func(originGC bool) {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange"), IsNil)
if originGC {
ddl.EmulatorGCEnable()
} else {
ddl.EmulatorGCDisable()
}
}(ddl.IsEmulatorGCEnable())
// Disable emulator GC; otherwise, emulator GC will delete the table record as soon as possible after the drop table DDL is executed.
ddl.EmulatorGCDisable()
gcTimeFormat := "20060102-15:04:05 -0700 MST"
timeBeforeDrop := time.Now().Add(0 - 48*60*60*time.Second).Format(gcTimeFormat)
safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '')
ON DUPLICATE KEY
UPDATE variable_value = '%[1]s'`
tk := testkit.NewTestKit(c, s.store)
// clear GC variables first.
tk.MustExec("delete from mysql.tidb where variable_name in ( 'tikv_gc_safe_point','tikv_gc_enable' )")
// set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// Enable GC.
err := gcutil.EnableGC(tk.Se)
c.Assert(err, IsNil)
// prepare dropped table.
tk.MustExec("use test_db_state")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int);")
tk.MustExec("drop table if exists t")
// Test parallel flashback table.
sql1 := "flashback table t to t_flashback"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, NotNil)
c.Assert(err2.Error(), Equals, "[schema:1050]Table 't_flashback' already exists")
}
s.testControlParallelExecSQL(c, sql1, sql1, f)
// Test parallel flashback table with a different name.
tk.MustExec("drop table t_flashback")
sql1 = "flashback table t_flashback"
sql2 := "flashback table t_flashback to t_flashback2"
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
| ddl/db_change_test.go | 1 | https://github.com/pingcap/tidb/commit/788dc28a7423d4b85ebe11bbce632733b056d63a | [
0.9989818930625916,
0.105166956782341,
0.000163150456501171,
0.002612474374473095,
0.2543219029903412
] |
{
"id": 1,
"code_window": [
"\t}\n",
"\ts.testControlParallelExecSQL(c, sql1, sql2, f)\n",
"}\n",
"\n",
"type checkRet func(c *C, err1, err2 error)\n",
"\n",
"func (s *testStateChangeSuiteBase) prepareTestControlParallelExecSQL(c *C) (session.Session, session.Session, chan struct{}, ddl.Callback) {\n",
"\tcallback := &ddl.TestDDLCallback{}\n",
"\ttimes := 0\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (s *testStateChangeSuite) TestParallelAlterAndDropSchema(c *C) {\n",
"\t_, err := s.se.Execute(context.Background(), \"create database db_drop_db\")\n",
"\tc.Assert(err, IsNil)\n",
"\tsql1 := \"DROP SCHEMA db_drop_db\"\n",
"\tsql2 := \"ALTER SCHEMA db_drop_db CHARSET utf8mb4 COLLATE utf8mb4_general_ci\"\n",
"\tf := func(c *C, err1, err2 error) {\n",
"\t\tc.Assert(err1, IsNil)\n",
"\t\tc.Assert(err2, NotNil)\n",
"\t\tc.Assert(err2.Error(), Equals, \"[schema:1008]Can't drop database ''; database doesn't exist\")\n",
"\t}\n",
"\ts.testControlParallelExecSQL(c, sql1, sql2, f)\n",
"}\n",
"\n"
],
"file_path": "ddl/db_change_test.go",
"type": "add",
"edit_start_line_idx": 952
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor_test
import (
"flag"
"fmt"
. "github.com/pingcap/check"
"github.com/pingcap/parser"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/mockstore/cluster"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/testkit"
)
type testUpdateSuite struct {
cluster cluster.Cluster
store kv.Storage
domain *domain.Domain
*parser.Parser
ctx *mock.Context
}
func (s *testUpdateSuite) SetUpSuite(c *C) {
s.Parser = parser.New()
flag.Lookup("mockTikv")
useMockTikv := *mockTikv
if useMockTikv {
store, err := mockstore.NewMockStore(
mockstore.WithClusterInspector(func(c cluster.Cluster) {
mockstore.BootstrapWithSingleStore(c)
s.cluster = c
}),
)
c.Assert(err, IsNil)
s.store = store
session.SetSchemaLease(0)
session.DisableStats4Test()
}
d, err := session.BootstrapSession(s.store)
c.Assert(err, IsNil)
d.SetStatsUpdating(true)
s.domain = d
}
func (s *testUpdateSuite) TearDownSuite(c *C) {
s.domain.Close()
s.store.Close()
}
func (s *testUpdateSuite) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show tables")
for _, tb := range r.Rows() {
tableName := tb[0]
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
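// TestUpdateGenColInTxn checks that explicitly assigning a value to the
// generated column b inside a transaction is rejected with error 3105 and
// leaves the stored row unchanged.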
func (s *testUpdateSuite) TestUpdateGenColInTxn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`create table t(a bigint, b bigint as (a+1));`)
tk.MustExec(`begin;`)
tk.MustExec(`insert into t(a) values(1);`)
err := tk.ExecToErr(`update t set b=6 where b=2;`)
c.Assert(err.Error(), Equals, "[planner:3105]The value specified for generated column 'b' in table 't' is not allowed.")
tk.MustExec(`commit;`)
tk.MustQuery(`select * from t;`).Check(testkit.Rows(
`1 2`))
}
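// TestUpdateWithAutoidSchema runs table-driven cases over auto_increment
// columns of int, float, and double types, checking how inserts and updates
// interact with the allocated auto IDs.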
func (s *testUpdateSuite) TestUpdateWithAutoidSchema(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`create table t1(id int primary key auto_increment, n int);`)
tk.MustExec(`create table t2(id int primary key, n float auto_increment, key I_n(n));`)
tk.MustExec(`create table t3(id int primary key, n double auto_increment, key I_n(n));`)
tests := []struct {
exec string
query string
result [][]interface{}
}{
{
`insert into t1 set n = 1`,
`select * from t1 where id = 1`,
testkit.Rows(`1 1`),
},
{
`update t1 set id = id+1`,
`select * from t1 where id = 2`,
testkit.Rows(`2 1`),
},
{
`insert into t1 set n = 2`,
`select * from t1 where id = 3`,
testkit.Rows(`3 2`),
},
{
`update t1 set id = id + '1.1' where id = 3`,
`select * from t1 where id = 4`,
testkit.Rows(`4 2`),
},
{
`insert into t1 set n = 3`,
`select * from t1 where id = 5`,
testkit.Rows(`5 3`),
},
{
`update t1 set id = id + '0.5' where id = 5`,
`select * from t1 where id = 6`,
testkit.Rows(`6 3`),
},
{
`insert into t1 set n = 4`,
`select * from t1 where id = 7`,
testkit.Rows(`7 4`),
},
{
`insert into t2 set id = 1`,
`select * from t2 where id = 1`,
testkit.Rows(`1 1`),
},
{
`update t2 set n = n+1`,
`select * from t2 where id = 1`,
testkit.Rows(`1 2`),
},
{
`insert into t2 set id = 2`,
`select * from t2 where id = 2`,
testkit.Rows(`2 3`),
},
{
`update t2 set n = n + '2.2'`,
`select * from t2 where id = 2`,
testkit.Rows(`2 5.2`),
},
{
`insert into t2 set id = 3`,
`select * from t2 where id = 3`,
testkit.Rows(`3 6`),
},
{
`update t2 set n = n + '0.5' where id = 3`,
`select * from t2 where id = 3`,
testkit.Rows(`3 6.5`),
},
{
`insert into t2 set id = 4`,
`select * from t2 where id = 4`,
testkit.Rows(`4 7`),
},
{
`insert into t3 set id = 1`,
`select * from t3 where id = 1`,
testkit.Rows(`1 1`),
},
{
`update t3 set n = n+1`,
`select * from t3 where id = 1`,
testkit.Rows(`1 2`),
},
{
`insert into t3 set id = 2`,
`select * from t3 where id = 2`,
testkit.Rows(`2 3`),
},
{
`update t3 set n = n + '3.3'`,
`select * from t3 where id = 2`,
testkit.Rows(`2 6.3`),
},
{
`insert into t3 set id = 3`,
`select * from t3 where id = 3`,
testkit.Rows(`3 7`),
},
{
`update t3 set n = n + '0.5' where id = 3`,
`select * from t3 where id = 3`,
testkit.Rows(`3 7.5`),
},
{
`insert into t3 set id = 4`,
`select * from t3 where id = 4`,
testkit.Rows(`4 8`),
},
}
for _, tt := range tests {
tk.MustExec(tt.exec)
tk.MustQuery(tt.query).Check(tt.result)
}
}
func (s *testUpdateSuite) TestUpdateSchemaChange(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`create table t(a bigint, b bigint as (a+1));`)
tk.MustExec(`begin;`)
tk.MustExec(`insert into t(a) values(1);`)
err := tk.ExecToErr(`update t set b=6 where b=2;`)
c.Assert(err.Error(), Equals, "[planner:3105]The value specified for generated column 'b' in table 't' is not allowed.")
tk.MustExec(`commit;`)
tk.MustQuery(`select * from t;`).Check(testkit.Rows(
`1 2`))
}
func (s *testUpdateSuite) TestUpdateMultiDatabaseTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop database if exists test2")
tk.MustExec("create database test2")
tk.MustExec("create table t(a int, b int generated always as (a+1) virtual)")
tk.MustExec("create table test2.t(a int, b int generated always as (a+1) virtual)")
tk.MustExec("update t, test2.t set test.t.a=1")
}
| executor/update_test.go | 0 | https://github.com/pingcap/tidb/commit/788dc28a7423d4b85ebe11bbce632733b056d63a | [
0.003091587219387293,
0.0003115398867521435,
0.00016233083442784846,
0.000171207488165237,
0.0005839371006004512
] |
{
"id": 1,
"code_window": [
"\t}\n",
"\ts.testControlParallelExecSQL(c, sql1, sql2, f)\n",
"}\n",
"\n",
"type checkRet func(c *C, err1, err2 error)\n",
"\n",
"func (s *testStateChangeSuiteBase) prepareTestControlParallelExecSQL(c *C) (session.Session, session.Session, chan struct{}, ddl.Callback) {\n",
"\tcallback := &ddl.TestDDLCallback{}\n",
"\ttimes := 0\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (s *testStateChangeSuite) TestParallelAlterAndDropSchema(c *C) {\n",
"\t_, err := s.se.Execute(context.Background(), \"create database db_drop_db\")\n",
"\tc.Assert(err, IsNil)\n",
"\tsql1 := \"DROP SCHEMA db_drop_db\"\n",
"\tsql2 := \"ALTER SCHEMA db_drop_db CHARSET utf8mb4 COLLATE utf8mb4_general_ci\"\n",
"\tf := func(c *C, err1, err2 error) {\n",
"\t\tc.Assert(err1, IsNil)\n",
"\t\tc.Assert(err2, NotNil)\n",
"\t\tc.Assert(err2.Error(), Equals, \"[schema:1008]Can't drop database ''; database doesn't exist\")\n",
"\t}\n",
"\ts.testControlParallelExecSQL(c, sql1, sql2, f)\n",
"}\n",
"\n"
],
"file_path": "ddl/db_change_test.go",
"type": "add",
"edit_start_line_idx": 952
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"context"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/sessionctx"
)
// extractJoinGroup extracts all the join nodes connected with continuous
// InnerJoins to construct a join group. This join group is further used to
// construct a new join order based on a reorder algorithm.
//
// For example: "InnerJoin(InnerJoin(a, b), LeftJoin(c, d))"
// results in a join group {a, b, LeftJoin(c, d)}.
func extractJoinGroup(p LogicalPlan) (group []LogicalPlan, eqEdges []*expression.ScalarFunction, otherConds []expression.Expression) {
join, isJoin := p.(*LogicalJoin)
if !isJoin || join.preferJoinType > uint(0) || join.JoinType != InnerJoin || join.StraightJoin {
return []LogicalPlan{p}, nil, nil
}
lhsGroup, lhsEqualConds, lhsOtherConds := extractJoinGroup(join.children[0])
rhsGroup, rhsEqualConds, rhsOtherConds := extractJoinGroup(join.children[1])
group = append(group, lhsGroup...)
group = append(group, rhsGroup...)
eqEdges = append(eqEdges, join.EqualConditions...)
eqEdges = append(eqEdges, lhsEqualConds...)
eqEdges = append(eqEdges, rhsEqualConds...)
otherConds = append(otherConds, join.OtherConditions...)
otherConds = append(otherConds, lhsOtherConds...)
otherConds = append(otherConds, rhsOtherConds...)
return group, eqEdges, otherConds
}
type joinReOrderSolver struct {
}
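// jrNode wraps a join group member together with its cumulative cost.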
type jrNode struct {
p LogicalPlan
cumCost float64
}
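// optimize is the entry point of the join reorder rule; it recursively
// reorders every join group found in the plan tree.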
func (s *joinReOrderSolver) optimize(ctx context.Context, p LogicalPlan) (LogicalPlan, error) {
return s.optimizeRecursive(p.SCtx(), p)
}
// optimizeRecursive recursively collects join groups and applies the join reorder algorithm to each group.
func (s *joinReOrderSolver) optimizeRecursive(ctx sessionctx.Context, p LogicalPlan) (LogicalPlan, error) {
var err error
curJoinGroup, eqEdges, otherConds := extractJoinGroup(p)
if len(curJoinGroup) > 1 {
for i := range curJoinGroup {
curJoinGroup[i], err = s.optimizeRecursive(ctx, curJoinGroup[i])
if err != nil {
return nil, err
}
}
baseGroupSolver := &baseSingleGroupJoinOrderSolver{
ctx: ctx,
otherConds: otherConds,
}
if len(curJoinGroup) > ctx.GetSessionVars().TiDBOptJoinReorderThreshold {
groupSolver := &joinReorderGreedySolver{
baseSingleGroupJoinOrderSolver: baseGroupSolver,
eqEdges: eqEdges,
}
p, err = groupSolver.solve(curJoinGroup)
} else {
dpSolver := &joinReorderDPSolver{
baseSingleGroupJoinOrderSolver: baseGroupSolver,
}
dpSolver.newJoin = dpSolver.newJoinWithEdges
p, err = dpSolver.solve(curJoinGroup, expression.ScalarFuncs2Exprs(eqEdges))
}
if err != nil {
return nil, err
}
return p, nil
}
newChildren := make([]LogicalPlan, 0, len(p.Children()))
for _, child := range p.Children() {
newChild, err := s.optimizeRecursive(ctx, child)
if err != nil {
return nil, err
}
newChildren = append(newChildren, newChild)
}
p.SetChildren(newChildren...)
return p, nil
}
type baseSingleGroupJoinOrderSolver struct {
ctx sessionctx.Context
curJoinGroup []*jrNode
otherConds []expression.Expression
}
// baseNodeCumCost calculates the cumulative cost of the node in the join group.
func (s *baseSingleGroupJoinOrderSolver) baseNodeCumCost(groupNode LogicalPlan) float64 {
cost := groupNode.statsInfo().RowCount
for _, child := range groupNode.Children() {
cost += s.baseNodeCumCost(child)
}
return cost
}
// makeBushyJoin builds a bushy join tree for the nodes that have no equal conditions to connect them.
func (s *baseSingleGroupJoinOrderSolver) makeBushyJoin(cartesianJoinGroup []LogicalPlan) LogicalPlan {
resultJoinGroup := make([]LogicalPlan, 0, (len(cartesianJoinGroup)+1)/2)
for len(cartesianJoinGroup) > 1 {
resultJoinGroup = resultJoinGroup[:0]
for i := 0; i < len(cartesianJoinGroup); i += 2 {
if i+1 == len(cartesianJoinGroup) {
resultJoinGroup = append(resultJoinGroup, cartesianJoinGroup[i])
break
}
newJoin := s.newCartesianJoin(cartesianJoinGroup[i], cartesianJoinGroup[i+1])
for i := len(s.otherConds) - 1; i >= 0; i-- {
cols := expression.ExtractColumns(s.otherConds[i])
if newJoin.schema.ColumnsIndices(cols) != nil {
newJoin.OtherConditions = append(newJoin.OtherConditions, s.otherConds[i])
s.otherConds = append(s.otherConds[:i], s.otherConds[i+1:]...)
}
}
resultJoinGroup = append(resultJoinGroup, newJoin)
}
cartesianJoinGroup, resultJoinGroup = resultJoinGroup, cartesianJoinGroup
}
return cartesianJoinGroup[0]
}
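// newCartesianJoin builds an inner join node without any join conditions.
// The select block offset is kept only when both children belong to the same
// query block; otherwise it is reset to -1.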
func (s *baseSingleGroupJoinOrderSolver) newCartesianJoin(lChild, rChild LogicalPlan) *LogicalJoin {
offset := lChild.SelectBlockOffset()
if offset != rChild.SelectBlockOffset() {
offset = -1
}
join := LogicalJoin{
JoinType: InnerJoin,
reordered: true,
}.Init(s.ctx, offset)
join.SetSchema(expression.MergeSchema(lChild.Schema(), rChild.Schema()))
join.SetChildren(lChild, rChild)
return join
}
func (s *baseSingleGroupJoinOrderSolver) newJoinWithEdges(lChild, rChild LogicalPlan, eqEdges []*expression.ScalarFunction, otherConds []expression.Expression) LogicalPlan {
newJoin := s.newCartesianJoin(lChild, rChild)
newJoin.EqualConditions = eqEdges
newJoin.OtherConditions = otherConds
return newJoin
}
// calcJoinCumCost calculates the cumulative cost of the join node.
func (s *baseSingleGroupJoinOrderSolver) calcJoinCumCost(join LogicalPlan, lNode, rNode *jrNode) float64 {
return join.statsInfo().RowCount + lNode.cumCost + rNode.cumCost
}
func (*joinReOrderSolver) name() string {
return "join_reorder"
}
| planner/core/rule_join_reorder.go | 0 | https://github.com/pingcap/tidb/commit/788dc28a7423d4b85ebe11bbce632733b056d63a | [
0.0009015739196911454,
0.000239757020608522,
0.00016484776278957725,
0.0001732638047542423,
0.00017247637151740491
] |
{
"id": 1,
"code_window": [
"\t}\n",
"\ts.testControlParallelExecSQL(c, sql1, sql2, f)\n",
"}\n",
"\n",
"type checkRet func(c *C, err1, err2 error)\n",
"\n",
"func (s *testStateChangeSuiteBase) prepareTestControlParallelExecSQL(c *C) (session.Session, session.Session, chan struct{}, ddl.Callback) {\n",
"\tcallback := &ddl.TestDDLCallback{}\n",
"\ttimes := 0\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (s *testStateChangeSuite) TestParallelAlterAndDropSchema(c *C) {\n",
"\t_, err := s.se.Execute(context.Background(), \"create database db_drop_db\")\n",
"\tc.Assert(err, IsNil)\n",
"\tsql1 := \"DROP SCHEMA db_drop_db\"\n",
"\tsql2 := \"ALTER SCHEMA db_drop_db CHARSET utf8mb4 COLLATE utf8mb4_general_ci\"\n",
"\tf := func(c *C, err1, err2 error) {\n",
"\t\tc.Assert(err1, IsNil)\n",
"\t\tc.Assert(err2, NotNil)\n",
"\t\tc.Assert(err2.Error(), Equals, \"[schema:1008]Can't drop database ''; database doesn't exist\")\n",
"\t}\n",
"\ts.testControlParallelExecSQL(c, sql1, sql2, f)\n",
"}\n",
"\n"
],
"file_path": "ddl/db_change_test.go",
"type": "add",
"edit_start_line_idx": 952
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package plugin
import (
"context"
"io"
"strconv"
"testing"
"github.com/pingcap/check"
"github.com/pingcap/tidb/sessionctx/variable"
)
func TestT(t *testing.T) {
check.TestingT(t)
}
func TestLoadPluginSuccess(t *testing.T) {
ctx := context.Background()
pluginName := "tplugin"
pluginVersion := uint16(1)
pluginSign := pluginName + "-" + strconv.Itoa(int(pluginVersion))
cfg := Config{
Plugins: []string{pluginSign},
PluginDir: "",
GlobalSysVar: &variable.SysVars,
PluginVarNames: &variable.PluginVarNames,
EnvVersion: map[string]uint16{"go": 1112},
}
// setup load test hook.
testHook = &struct{ loadOne loadFn }{loadOne: func(plugin *Plugin, dir string, pluginID ID) (manifest func() *Manifest, err error) {
return func() *Manifest {
m := &AuditManifest{
Manifest: Manifest{
Kind: Authentication,
Name: pluginName,
Version: pluginVersion,
SysVars: map[string]*variable.SysVar{pluginName + "_key": {Scope: variable.ScopeGlobal, Name: pluginName + "_key", Value: "v1"}},
OnInit: func(ctx context.Context, manifest *Manifest) error {
return nil
},
OnShutdown: func(ctx context.Context, manifest *Manifest) error {
return nil
},
Validate: func(ctx context.Context, manifest *Manifest) error {
return nil
},
},
OnGeneralEvent: func(ctx context.Context, sctx *variable.SessionVars, event GeneralEvent, cmd string) {
},
}
return ExportManifest(m)
}, nil
}}
defer func() {
testHook = nil
}()
// trigger load.
err := Load(ctx, cfg)
if err != nil {
t.Errorf("load plugin [%s] fail", pluginSign)
}
err = Init(ctx, cfg)
if err != nil {
t.Errorf("init plugin [%s] fail", pluginSign)
}
// load all.
ps := GetAll()
if len(ps) != 1 {
t.Errorf("loaded plugins is empty")
}
// find plugin by type and name
p := Get(Authentication, "tplugin")
if p == nil {
t.Errorf("tplugin can not be load")
}
p = Get(Authentication, "tplugin2")
if p != nil {
t.Errorf("found miss plugin")
}
p = getByName("tplugin")
if p == nil {
t.Errorf("can not find miss plugin")
}
// foreach plugin
err = ForeachPlugin(Authentication, func(plugin *Plugin) error {
return nil
})
if err != nil {
t.Errorf("foreach error %v", err)
}
err = ForeachPlugin(Authentication, func(plugin *Plugin) error {
return io.EOF
})
if err != io.EOF {
t.Errorf("foreach should return EOF error")
}
Shutdown(ctx)
}
func TestLoadPluginSkipError(t *testing.T) {
ctx := context.Background()
pluginName := "tplugin"
pluginVersion := uint16(1)
pluginSign := pluginName + "-" + strconv.Itoa(int(pluginVersion))
cfg := Config{
Plugins: []string{pluginSign, pluginSign, "notExists-2"},
PluginDir: "",
PluginVarNames: &variable.PluginVarNames,
EnvVersion: map[string]uint16{"go": 1112},
SkipWhenFail: true,
}
// setup load test hook.
testHook = &struct{ loadOne loadFn }{loadOne: func(plugin *Plugin, dir string, pluginID ID) (manifest func() *Manifest, err error) {
return func() *Manifest {
m := &AuditManifest{
Manifest: Manifest{
Kind: Audit,
Name: pluginName,
Version: pluginVersion,
SysVars: map[string]*variable.SysVar{pluginName + "_key": {Scope: variable.ScopeGlobal, Name: pluginName + "_key", Value: "v1"}},
OnInit: func(ctx context.Context, manifest *Manifest) error {
return io.EOF
},
OnShutdown: func(ctx context.Context, manifest *Manifest) error {
return io.EOF
},
Validate: func(ctx context.Context, manifest *Manifest) error {
return io.EOF
},
},
OnGeneralEvent: func(ctx context.Context, sctx *variable.SessionVars, event GeneralEvent, cmd string) {
},
}
return ExportManifest(m)
}, nil
}}
defer func() {
testHook = nil
}()
// trigger load.
err := Load(ctx, cfg)
if err != nil {
t.Errorf("load plugin [%s] fail %v", pluginSign, err)
}
err = Init(ctx, cfg)
if err != nil {
t.Errorf("init plugin [%s] fail", pluginSign)
}
// load all.
ps := GetAll()
if len(ps) != 1 {
t.Errorf("loaded plugins is empty")
}
// find plugin by type and name
p := Get(Audit, "tplugin")
if p == nil {
t.Errorf("tplugin can not be load")
}
p = Get(Audit, "tplugin2")
if p != nil {
t.Errorf("found miss plugin")
}
p = getByName("tplugin")
if p == nil {
t.Errorf("can not find miss plugin")
}
p = getByName("not exists")
if p != nil {
t.Errorf("got not exists plugin")
}
// foreach plugin
readyCount := 0
err = ForeachPlugin(Authentication, func(plugin *Plugin) error {
readyCount++
return nil
})
if err != nil {
t.Errorf("foreach meet error %v", err)
}
if readyCount != 0 {
t.Errorf("validate fail can be load but no ready")
}
Shutdown(ctx)
}
func TestLoadFail(t *testing.T) {
ctx := context.Background()
pluginName := "tplugin"
pluginVersion := uint16(1)
pluginSign := pluginName + "-" + strconv.Itoa(int(pluginVersion))
cfg := Config{
Plugins: []string{pluginSign, pluginSign, "notExists-2"},
PluginDir: "",
PluginVarNames: &variable.PluginVarNames,
EnvVersion: map[string]uint16{"go": 1112},
SkipWhenFail: false,
}
// setup load test hook.
testHook = &struct{ loadOne loadFn }{loadOne: func(plugin *Plugin, dir string, pluginID ID) (manifest func() *Manifest, err error) {
return func() *Manifest {
m := &AuditManifest{
Manifest: Manifest{
Kind: Audit,
Name: pluginName,
Version: pluginVersion,
SysVars: map[string]*variable.SysVar{pluginName + "_key": {Scope: variable.ScopeGlobal, Name: pluginName + "_key", Value: "v1"}},
OnInit: func(ctx context.Context, manifest *Manifest) error {
return io.EOF
},
OnShutdown: func(ctx context.Context, manifest *Manifest) error {
return io.EOF
},
Validate: func(ctx context.Context, manifest *Manifest) error {
return io.EOF
},
},
OnGeneralEvent: func(ctx context.Context, sctx *variable.SessionVars, event GeneralEvent, cmd string) {
},
}
return ExportManifest(m)
}, nil
}}
defer func() {
testHook = nil
}()
err := Load(ctx, cfg)
if err == nil {
t.Errorf("load plugin should fail")
}
}
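// TestPluginsClone verifies that clone makes a sufficiently deep copy:
// mutating the original plugins, versions, and dyingPlugins must not affect
// the cloned snapshot.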
func TestPluginsClone(t *testing.T) {
ps := &plugins{
plugins: map[Kind][]Plugin{
Audit: {{}},
},
versions: map[string]uint16{
"whitelist": 1,
},
dyingPlugins: []Plugin{{}},
}
cps := ps.clone()
ps.dyingPlugins = append(ps.dyingPlugins, Plugin{})
ps.versions["w"] = 2
as := ps.plugins[Audit]
ps.plugins[Audit] = append(as, Plugin{})
if len(cps.plugins) != 1 || len(cps.plugins[Audit]) != 1 || len(cps.versions) != 1 || len(cps.dyingPlugins) != 1 {
t.Errorf("clone plugins failure")
}
}
| plugin/plugin_test.go | 0 | https://github.com/pingcap/tidb/commit/788dc28a7423d4b85ebe11bbce632733b056d63a | [
0.0004239229019731283,
0.0001891355641419068,
0.00016590220911893994,
0.00017041637329384685,
0.000054366886615753174
] |
{
"id": 2,
"code_window": [
"\t\treturn nil, nil\n",
"\t}\n",
"\n",
"\tdbInfo, err := t.GetDatabase(job.SchemaID)\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tdbInfo, err := checkSchemaExistAndCancelNotExistJob(t, job)\n"
],
"file_path": "ddl/index.go",
"type": "replace",
"edit_start_line_idx": 353
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl_test
import (
"context"
"fmt"
"strings"
"sync"
"sync/atomic"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/log"
"github.com/pingcap/parser"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/util/admin"
"github.com/pingcap/tidb/util/gcutil"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/pingcap/tidb/util/testkit"
"go.uber.org/zap"
)
var _ = Suite(&testStateChangeSuite{})
var _ = SerialSuites(&serialTestStateChangeSuite{})
type serialTestStateChangeSuite struct {
testStateChangeSuiteBase
}
type testStateChangeSuite struct {
testStateChangeSuiteBase
}
type testStateChangeSuiteBase struct {
lease time.Duration
store kv.Storage
dom *domain.Domain
se session.Session
p *parser.Parser
preSQL string
}
func (s *testStateChangeSuiteBase) SetUpSuite(c *C) {
s.lease = 200 * time.Millisecond
ddl.SetWaitTimeWhenErrorOccurred(1 * time.Microsecond)
var err error
s.store, err = mockstore.NewMockStore()
c.Assert(err, IsNil)
session.SetSchemaLease(s.lease)
s.dom, err = session.BootstrapSession(s.store)
c.Assert(err, IsNil)
s.se, err = session.CreateSession4Test(s.store)
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "create database test_db_state default charset utf8 default collate utf8_bin")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
s.p = parser.New()
}
func (s *testStateChangeSuiteBase) TearDownSuite(c *C) {
s.se.Execute(context.Background(), "drop database if exists test_db_state")
s.se.Close()
s.dom.Close()
s.store.Close()
}
// TestShowCreateTable tests the result of "show create table" when we are running "add index" or "add column".
func (s *serialTestStateChangeSuite) TestShowCreateTable(c *C) {
config.GetGlobalConfig().Experimental.AllowsExpressionIndex = true
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (id int)")
tk.MustExec("create table t2 (a int, b varchar(10)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci")
// tkInternal is used to execute additional sql (here show create table) in ddl change callback.
// Using same `tk` in different goroutines may lead to data race.
tkInternal := testkit.NewTestKit(c, s.store)
tkInternal.MustExec("use test")
var checkErr error
testCases := []struct {
sql string
expectedRet string
}{
{"alter table t add index idx(id)",
"CREATE TABLE `t` (\n `id` int(11) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"},
{"alter table t add index idx1(id)",
"CREATE TABLE `t` (\n `id` int(11) DEFAULT NULL,\n KEY `idx` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"},
{"alter table t add column c int",
"CREATE TABLE `t` (\n `id` int(11) DEFAULT NULL,\n KEY `idx` (`id`),\n KEY `idx1` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"},
{"alter table t2 add column c varchar(1)",
"CREATE TABLE `t2` (\n `a` int(11) DEFAULT NULL,\n `b` varchar(10) COLLATE utf8mb4_general_ci DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci"},
{"alter table t2 add column d varchar(1)",
"CREATE TABLE `t2` (\n `a` int(11) DEFAULT NULL,\n `b` varchar(10) COLLATE utf8mb4_general_ci DEFAULT NULL,\n `c` varchar(1) COLLATE utf8mb4_general_ci DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci"},
}
prevState := model.StateNone
callback := &ddl.TestDDLCallback{}
currTestCaseOffset := 0
callback.OnJobUpdatedExported = func(job *model.Job) {
if job.SchemaState == prevState || checkErr != nil {
return
}
if job.State == model.JobStateDone {
currTestCaseOffset++
}
if job.SchemaState != model.StatePublic {
var result sqlexec.RecordSet
tbl2 := testGetTableByName(c, tkInternal.Se, "test", "t2")
if job.TableID == tbl2.Meta().ID {
// Try to do not use mustQuery in hook func, cause assert fail in mustQuery will cause ddl job hung.
result, checkErr = tkInternal.Exec("show create table t2")
if checkErr != nil {
return
}
} else {
result, checkErr = tkInternal.Exec("show create table t")
if checkErr != nil {
return
}
}
req := result.NewChunk()
checkErr = result.Next(context.Background(), req)
if checkErr != nil {
return
}
got := req.GetRow(0).GetString(1)
expected := testCases[currTestCaseOffset].expectedRet
if got != expected {
checkErr = errors.Errorf("got %s, expected %s", got, expected)
}
}
}
d := s.dom.DDL()
originalCallback := d.GetHook()
defer d.(ddl.DDLForTest).SetHook(originalCallback)
d.(ddl.DDLForTest).SetHook(callback)
for _, tc := range testCases {
tk.MustExec(tc.sql)
c.Assert(checkErr, IsNil)
}
}
// TestDropNotNullColumn is used to test issue #8654.
func (s *testStateChangeSuite) TestDropNotNullColumn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (id int, a int not null default 11)")
tk.MustExec("insert into t values(1, 1)")
tk.MustExec("create table t1 (id int, b varchar(255) not null)")
tk.MustExec("insert into t1 values(2, '')")
tk.MustExec("create table t2 (id int, c time not null)")
tk.MustExec("insert into t2 values(3, '11:22:33')")
tk.MustExec("create table t3 (id int, d json not null)")
tk.MustExec("insert into t3 values(4, d)")
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
var checkErr error
d := s.dom.DDL()
originalCallback := d.GetHook()
callback := &ddl.TestDDLCallback{}
sqlNum := 0
callback.OnJobUpdatedExported = func(job *model.Job) {
if checkErr != nil {
return
}
originalCallback.OnChanged(nil)
if job.SchemaState == model.StateWriteOnly {
switch sqlNum {
case 0:
_, checkErr = tk1.Exec("insert into t set id = 1")
case 1:
_, checkErr = tk1.Exec("insert into t1 set id = 2")
case 2:
_, checkErr = tk1.Exec("insert into t2 set id = 3")
case 3:
_, checkErr = tk1.Exec("insert into t3 set id = 4")
}
}
}
d.(ddl.DDLForTest).SetHook(callback)
tk.MustExec("alter table t drop column a")
c.Assert(checkErr, IsNil)
sqlNum++
tk.MustExec("alter table t1 drop column b")
c.Assert(checkErr, IsNil)
sqlNum++
tk.MustExec("alter table t2 drop column c")
c.Assert(checkErr, IsNil)
sqlNum++
tk.MustExec("alter table t3 drop column d")
c.Assert(checkErr, IsNil)
d.(ddl.DDLForTest).SetHook(originalCallback)
tk.MustExec("drop table t, t1, t2, t3")
}
func (s *testStateChangeSuite) TestTwoStates(c *C) {
cnt := 5
// New the testExecInfo.
testInfo := &testExecInfo{
execCases: cnt,
sqlInfos: make([]*sqlInfo, 4),
}
for i := 0; i < len(testInfo.sqlInfos); i++ {
sqlInfo := &sqlInfo{cases: make([]*stateCase, cnt)}
for j := 0; j < cnt; j++ {
sqlInfo.cases[j] = new(stateCase)
}
testInfo.sqlInfos[i] = sqlInfo
}
err := testInfo.createSessions(s.store, "test_db_state")
c.Assert(err, IsNil)
// Fill the SQLs and expected error messages.
testInfo.sqlInfos[0].sql = "insert into t (c1, c2, c3, c4) value(2, 'b', 'N', '2017-07-02')"
testInfo.sqlInfos[1].sql = "insert into t (c1, c2, c3, d3, c4) value(3, 'b', 'N', 'a', '2017-07-03')"
unknownColErr := "[planner:1054]Unknown column 'd3' in 'field list'"
testInfo.sqlInfos[1].cases[0].expectedCompileErr = unknownColErr
testInfo.sqlInfos[1].cases[1].expectedCompileErr = unknownColErr
testInfo.sqlInfos[1].cases[2].expectedCompileErr = unknownColErr
testInfo.sqlInfos[1].cases[3].expectedCompileErr = unknownColErr
testInfo.sqlInfos[2].sql = "update t set c2 = 'c2_update'"
testInfo.sqlInfos[3].sql = "replace into t values(5, 'e', 'N', '2017-07-05')"
testInfo.sqlInfos[3].cases[4].expectedCompileErr = "[planner:1136]Column count doesn't match value count at row 1"
alterTableSQL := "alter table t add column d3 enum('a', 'b') not null default 'a' after c3"
s.test(c, "", alterTableSQL, testInfo)
// TODO: Add more DDL statements.
}
func (s *testStateChangeSuite) test(c *C, tableName, alterTableSQL string, testInfo *testExecInfo) {
_, err := s.se.Execute(context.Background(), `create table t (
c1 int,
c2 varchar(64),
c3 enum('N','Y') not null default 'N',
c4 timestamp on update current_timestamp,
key(c1, c2))`)
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table t")
_, err = s.se.Execute(context.Background(), "insert into t values(1, 'a', 'N', '2017-07-01')")
c.Assert(err, IsNil)
callback := &ddl.TestDDLCallback{}
prevState := model.StateNone
var checkErr error
err = testInfo.parseSQLs(s.p)
c.Assert(err, IsNil, Commentf("error stack %v", errors.ErrorStack(err)))
times := 0
callback.OnJobUpdatedExported = func(job *model.Job) {
if job.SchemaState == prevState || checkErr != nil || times >= 3 {
return
}
times++
switch job.SchemaState {
case model.StateDeleteOnly:
			// In this state we execute every sqlInfo once using the first session and its related information.
err = testInfo.compileSQL(0)
if err != nil {
checkErr = err
break
}
err = testInfo.execSQL(0)
if err != nil {
checkErr = err
}
case model.StateWriteOnly:
			// In this state we bind the schema information to the second case.
err = testInfo.compileSQL(1)
if err != nil {
checkErr = err
}
case model.StateWriteReorganization:
			// In this state we execute every sqlInfo once using the third session and its related information.
err = testInfo.compileSQL(2)
if err != nil {
checkErr = err
break
}
err = testInfo.execSQL(2)
if err != nil {
checkErr = err
break
}
			// Mock that the server is in the `write only` state.
err = testInfo.execSQL(1)
if err != nil {
checkErr = err
break
}
			// In this state we bind the schema information to the fourth case.
err = testInfo.compileSQL(3)
if err != nil {
checkErr = err
}
}
}
d := s.dom.DDL()
originalCallback := d.GetHook()
defer d.(ddl.DDLForTest).SetHook(originalCallback)
d.(ddl.DDLForTest).SetHook(callback)
_, err = s.se.Execute(context.Background(), alterTableSQL)
c.Assert(err, IsNil)
err = testInfo.compileSQL(4)
c.Assert(err, IsNil)
err = testInfo.execSQL(4)
c.Assert(err, IsNil)
	// Mock that the server is in the `write reorg` state.
err = testInfo.execSQL(3)
c.Assert(err, IsNil)
c.Assert(errors.ErrorStack(checkErr), Equals, "")
}
type stateCase struct {
session session.Session
rawStmt ast.StmtNode
stmt sqlexec.Statement
expectedExecErr string
expectedCompileErr string
}
type sqlInfo struct {
sql string
	// cases holds multiple stateCases.
	// Every case needs to be executed with a different schema state.
cases []*stateCase
}
// testExecInfo contains the SQL statements under test and the number of times each SQL
// is executed during a DDL statement.
type testExecInfo struct {
	// execCases is the number of times every SQL needs to be executed.
	// The schema state is different at each execution.
execCases int
	// sqlInfos holds the multiple SQLs that this test covers.
sqlInfos []*sqlInfo
}
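// newTestExecInfo is an illustrative helper sketch (not part of the original
// suite; the name is hypothetical) showing how a testExecInfo matrix is
// typically assembled: one sqlInfo per statement, each holding execCases
// stateCase slots, one slot per schema state observed during the DDL.
func newTestExecInfo(sqlCnt, execCases int) *testExecInfo {
	info := &testExecInfo{
		execCases: execCases,
		sqlInfos:  make([]*sqlInfo, sqlCnt),
	}
	for i := range info.sqlInfos {
		si := &sqlInfo{cases: make([]*stateCase, execCases)}
		for j := range si.cases {
			si.cases[j] = new(stateCase)
		}
		info.sqlInfos[i] = si
	}
	return info
}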
func (t *testExecInfo) createSessions(store kv.Storage, useDB string) error {
var err error
for i, info := range t.sqlInfos {
for j, c := range info.cases {
c.session, err = session.CreateSession4Test(store)
if err != nil {
return errors.Trace(err)
}
_, err = c.session.Execute(context.Background(), "use "+useDB)
if err != nil {
return errors.Trace(err)
}
			// It's used for debugging.
c.session.SetConnectionID(uint64(i*10 + j))
}
}
return nil
}
func (t *testExecInfo) parseSQLs(p *parser.Parser) error {
if t.execCases <= 0 {
return nil
}
var err error
for _, sqlInfo := range t.sqlInfos {
seVars := sqlInfo.cases[0].session.GetSessionVars()
charset, collation := seVars.GetCharsetInfo()
for j := 0; j < t.execCases; j++ {
sqlInfo.cases[j].rawStmt, err = p.ParseOneStmt(sqlInfo.sql, charset, collation)
if err != nil {
return errors.Trace(err)
}
}
}
return nil
}
func (t *testExecInfo) compileSQL(idx int) (err error) {
for _, info := range t.sqlInfos {
c := info.cases[idx]
compiler := executor.Compiler{Ctx: c.session}
se := c.session
ctx := context.TODO()
se.PrepareTxnCtx(ctx)
sctx := se.(sessionctx.Context)
if err = executor.ResetContextOfStmt(sctx, c.rawStmt); err != nil {
return errors.Trace(err)
}
c.stmt, err = compiler.Compile(ctx, c.rawStmt)
if c.expectedCompileErr != "" {
if err == nil {
err = errors.Errorf("expected error %s but got nil", c.expectedCompileErr)
} else if err.Error() == c.expectedCompileErr {
err = nil
}
}
if err != nil {
return errors.Trace(err)
}
}
return nil
}
func (t *testExecInfo) execSQL(idx int) error {
for _, sqlInfo := range t.sqlInfos {
c := sqlInfo.cases[idx]
if c.expectedCompileErr != "" {
continue
}
_, err := c.stmt.Exec(context.TODO())
if c.expectedExecErr != "" {
if err == nil {
err = errors.Errorf("expected error %s but got nil", c.expectedExecErr)
} else if err.Error() == c.expectedExecErr {
err = nil
}
}
if err != nil {
return errors.Trace(err)
}
err = c.session.CommitTxn(context.TODO())
if err != nil {
return errors.Trace(err)
}
}
return nil
}
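// Illustrative note: the expected-error pattern in compileSQL and execSQL above
// inverts the usual check: a matching error is swallowed (err is reset to nil)
// and a missing expected error is surfaced, so a case fails both when an
// unexpected error appears and when an expected one does not.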
type sqlWithErr struct {
sql string
expectErr error
}
type expectQuery struct {
sql string
rows []string
}
func (s *testStateChangeSuite) TestAppendEnum(c *C) {
_, err := s.se.Execute(context.Background(), `create table t (
c1 varchar(64),
c2 enum('N','Y') not null default 'N',
c3 timestamp on update current_timestamp,
c4 int primary key,
unique key idx2 (c2, c3))`)
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table t")
_, err = s.se.Execute(context.Background(), "insert into t values('a', 'N', '2017-07-01', 8)")
c.Assert(err, IsNil)
	// Make sure these SQLs use the index scan plan.
_, err = s.se.Execute(context.Background(), "drop stats t")
c.Assert(err, IsNil)
se, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into t values('a', 'A', '2018-09-19', 9)")
c.Assert(err.Error(), Equals, "[table:1366]Incorrect enum value: 'A' for column 'c2' at row 1")
failAlterTableSQL1 := "alter table t change c2 c2 enum('N') DEFAULT 'N'"
_, err = s.se.Execute(context.Background(), failAlterTableSQL1)
c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported modify column: the number of enum column's elements is less than the original: 2")
failAlterTableSQL2 := "alter table t change c2 c2 int default 0"
_, err = s.se.Execute(context.Background(), failAlterTableSQL2)
c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported modify column: cannot modify enum type column's to type int(11)")
alterTableSQL := "alter table t change c2 c2 enum('N','Y','A') DEFAULT 'A'"
_, err = s.se.Execute(context.Background(), alterTableSQL)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "insert into t values('a', 'A', '2018-09-20', 10)")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "insert into t (c1, c3, c4) values('a', '2018-09-21', 11)")
c.Assert(err, IsNil)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test_db_state")
result, err := s.execQuery(tk, "select c4, c2 from t order by c4 asc")
c.Assert(err, IsNil)
expected := []string{"8 N", "10 A", "11 A"}
checkResult(result, testkit.Rows(expected...))
_, err = s.se.Execute(context.Background(), "update t set c2='N' where c4 = 10")
c.Assert(err, IsNil)
result, err = s.execQuery(tk, "select c2 from t where c4 = 10")
c.Assert(err, IsNil)
expected = []string{"8 N", "10 N", "11 A"}
checkResult(result, testkit.Rows(expected...))
}
// https://github.com/pingcap/tidb/pull/6249 fixes the following two test cases.
func (s *testStateChangeSuite) TestWriteOnlyWriteNULL(c *C) {
sqls := make([]sqlWithErr, 1)
sqls[0] = sqlWithErr{"insert t set c1 = 'c1_new', c3 = '2019-02-12', c4 = 8 on duplicate key update c1 = values(c1)", nil}
addColumnSQL := "alter table t add column c5 int not null default 1 after c4"
expectQuery := &expectQuery{"select c4, c5 from t", []string{"8 1"}}
	// TODO: This case should always fail in write-only state, but it doesn't. We use the write-reorganization state here to keep it running stably. It needs a double check.
s.runTestInSchemaState(c, model.StateWriteReorganization, true, addColumnSQL, sqls, expectQuery)
}
func (s *testStateChangeSuite) TestWriteOnlyOnDupUpdate(c *C) {
sqls := make([]sqlWithErr, 3)
sqls[0] = sqlWithErr{"delete from t", nil}
sqls[1] = sqlWithErr{"insert t set c1 = 'c1_dup', c3 = '2018-02-12', c4 = 2 on duplicate key update c1 = values(c1)", nil}
sqls[2] = sqlWithErr{"insert t set c1 = 'c1_new', c3 = '2019-02-12', c4 = 2 on duplicate key update c1 = values(c1)", nil}
addColumnSQL := "alter table t add column c5 int not null default 1 after c4"
expectQuery := &expectQuery{"select c4, c5 from t", []string{"2 1"}}
	// TODO: This case should always fail in write-only state, but it doesn't. We use the write-reorganization state here to keep it running stably. It needs a double check.
s.runTestInSchemaState(c, model.StateWriteReorganization, true, addColumnSQL, sqls, expectQuery)
}
func (s *testStateChangeSuite) TestWriteOnlyOnDupUpdateForAddColumns(c *C) {
sqls := make([]sqlWithErr, 3)
sqls[0] = sqlWithErr{"delete from t", nil}
sqls[1] = sqlWithErr{"insert t set c1 = 'c1_dup', c3 = '2018-02-12', c4 = 2 on duplicate key update c1 = values(c1)", nil}
sqls[2] = sqlWithErr{"insert t set c1 = 'c1_new', c3 = '2019-02-12', c4 = 2 on duplicate key update c1 = values(c1)", nil}
addColumnsSQL := "alter table t add column c5 int not null default 1 after c4, add column c44 int not null default 1"
expectQuery := &expectQuery{"select c4, c5, c44 from t", []string{"2 1 1"}}
	// TODO: This case should always fail in write-only state, but it doesn't. We use the write-reorganization state here to keep it running stably. It needs a double check.
s.runTestInSchemaState(c, model.StateWriteReorganization, true, addColumnsSQL, sqls, expectQuery)
}
// TestWriteOnly tests whether the correct columns are used in PhysicalIndexScan's ToPB function.
func (s *testStateChangeSuite) TestWriteOnly(c *C) {
sqls := make([]sqlWithErr, 3)
sqls[0] = sqlWithErr{"delete from t where c1 = 'a'", nil}
sqls[1] = sqlWithErr{"update t use index(idx2) set c1 = 'c1_update' where c1 = 'a'", nil}
sqls[2] = sqlWithErr{"insert t set c1 = 'c1_insert', c3 = '2018-02-12', c4 = 1", nil}
addColumnSQL := "alter table t add column c5 int not null default 1 first"
s.runTestInSchemaState(c, model.StateWriteOnly, true, addColumnSQL, sqls, nil)
}
// TestWriteOnlyForAddColumns tests whether the correct columns are used in PhysicalIndexScan's ToPB function.
func (s *testStateChangeSuite) TestWriteOnlyForAddColumns(c *C) {
sqls := make([]sqlWithErr, 3)
sqls[0] = sqlWithErr{"delete from t where c1 = 'a'", nil}
sqls[1] = sqlWithErr{"update t use index(idx2) set c1 = 'c1_update' where c1 = 'a'", nil}
sqls[2] = sqlWithErr{"insert t set c1 = 'c1_insert', c3 = '2018-02-12', c4 = 1", nil}
addColumnsSQL := "alter table t add column c5 int not null default 1 first, add column c6 int not null default 1"
s.runTestInSchemaState(c, model.StateWriteOnly, true, addColumnsSQL, sqls, nil)
}
// TestDeleteOnly tests whether the correct columns are used in PhysicalIndexScan's ToPB function.
func (s *testStateChangeSuite) TestDeleteOnly(c *C) {
sqls := make([]sqlWithErr, 1)
sqls[0] = sqlWithErr{"insert t set c1 = 'c1_insert', c3 = '2018-02-12', c4 = 1",
errors.Errorf("Can't find column c1")}
dropColumnSQL := "alter table t drop column c1"
s.runTestInSchemaState(c, model.StateDeleteOnly, true, dropColumnSQL, sqls, nil)
}
// TestDeleteOnlyForDropColumns tests whether the correct columns are used in PhysicalIndexScan's ToPB function.
func (s *testStateChangeSuite) TestDeleteOnlyForDropColumns(c *C) {
sqls := make([]sqlWithErr, 1)
sqls[0] = sqlWithErr{"insert t set c1 = 'c1_insert', c3 = '2018-02-12', c4 = 1",
errors.Errorf("Can't find column c1")}
dropColumnsSQL := "alter table t drop column c1, drop column c3"
s.runTestInSchemaState(c, model.StateDeleteOnly, true, dropColumnsSQL, sqls, nil)
}
func (s *testStateChangeSuite) TestWriteOnlyForDropColumn(c *C) {
_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), `create table tt (c1 int, c4 int)`)
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into tt (c1, c4) values(8, 8)")
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table tt")
sqls := make([]sqlWithErr, 2)
sqls[0] = sqlWithErr{"update t set c1='5', c3='2020-03-01';", errors.New("[planner:1054]Unknown column 'c3' in 'field list'")}
sqls[1] = sqlWithErr{"update t t1, tt t2 set t1.c1='5', t1.c3='2020-03-01', t2.c1='10' where t1.c4=t2.c4",
errors.New("[planner:1054]Unknown column 'c3' in 'field list'")}
// TODO: Fix the case of sqls[2].
// sqls[2] = sqlWithErr{"update t set c1='5' where c3='2017-07-01';", errors.New("[planner:1054]Unknown column 'c3' in 'field list'")}
dropColumnSQL := "alter table t drop column c3"
query := &expectQuery{sql: "select * from t;", rows: []string{"a N 8"}}
s.runTestInSchemaState(c, model.StateWriteOnly, false, dropColumnSQL, sqls, query)
}
func (s *testStateChangeSuite) TestWriteOnlyForDropColumns(c *C) {
_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), `create table t_drop_columns (c1 int, c4 int)`)
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into t_drop_columns (c1, c4) values(8, 8)")
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table t_drop_columns")
sqls := make([]sqlWithErr, 2)
sqls[0] = sqlWithErr{"update t set c1='5', c3='2020-03-01';", errors.New("[planner:1054]Unknown column 'c3' in 'field list'")}
sqls[1] = sqlWithErr{"update t t1, t_drop_columns t2 set t1.c1='5', t1.c3='2020-03-01', t2.c1='10' where t1.c4=t2.c4",
errors.New("[planner:1054]Unknown column 'c3' in 'field list'")}
// TODO: Fix the case of sqls[2].
// sqls[2] = sqlWithErr{"update t set c1='5' where c3='2017-07-01';", errors.New("[planner:1054]Unknown column 'c3' in 'field list'")}
dropColumnsSQL := "alter table t drop column c3, drop column c1"
query := &expectQuery{sql: "select * from t;", rows: []string{"N 8"}}
s.runTestInSchemaState(c, model.StateWriteOnly, false, dropColumnsSQL, sqls, query)
}
func (s *testStateChangeSuiteBase) runTestInSchemaState(c *C, state model.SchemaState, isOnJobUpdated bool, alterTableSQL string,
sqlWithErrs []sqlWithErr, expectQuery *expectQuery) {
_, err := s.se.Execute(context.Background(), `create table t (
c1 varchar(64),
c2 enum('N','Y') not null default 'N',
c3 timestamp on update current_timestamp,
c4 int primary key,
unique key idx2 (c2))`)
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table t")
_, err = s.se.Execute(context.Background(), "insert into t values('a', 'N', '2017-07-01', 8)")
c.Assert(err, IsNil)
	// Make sure these SQLs use the index scan plan.
_, err = s.se.Execute(context.Background(), "drop stats t")
c.Assert(err, IsNil)
callback := &ddl.TestDDLCallback{}
prevState := model.StateNone
var checkErr error
times := 0
se, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
cbFunc := func(job *model.Job) {
if job.SchemaState == prevState || checkErr != nil || times >= 3 {
return
}
times++
if job.SchemaState != state {
return
}
for _, sqlWithErr := range sqlWithErrs {
_, err = se.Execute(context.Background(), sqlWithErr.sql)
if !terror.ErrorEqual(err, sqlWithErr.expectErr) {
checkErr = err
if checkErr == nil {
checkErr = errors.New("err can't be nil")
}
break
}
}
}
if isOnJobUpdated {
callback.OnJobUpdatedExported = cbFunc
} else {
callback.OnJobRunBeforeExported = cbFunc
}
d := s.dom.DDL()
originalCallback := d.GetHook()
d.(ddl.DDLForTest).SetHook(callback)
_, err = s.se.Execute(context.Background(), alterTableSQL)
c.Assert(err, IsNil)
c.Assert(errors.ErrorStack(checkErr), Equals, "")
d.(ddl.DDLForTest).SetHook(originalCallback)
if expectQuery != nil {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test_db_state")
result, err := s.execQuery(tk, expectQuery.sql)
c.Assert(err, IsNil)
err = checkResult(result, testkit.Rows(expectQuery.rows...))
c.Assert(err, IsNil)
}
}
func (s *testStateChangeSuiteBase) execQuery(tk *testkit.TestKit, sql string, args ...interface{}) (*testkit.Result, error) {
comment := Commentf("sql:%s, args:%v", sql, args)
rs, err := tk.Exec(sql, args...)
if err != nil {
return nil, err
}
result := tk.ResultSetToResult(rs, comment)
return result, nil
}
func checkResult(result *testkit.Result, expected [][]interface{}) error {
got := fmt.Sprintf("%s", result.Rows())
need := fmt.Sprintf("%s", expected)
if got != need {
return fmt.Errorf("need %v, but got %v", need, got)
}
return nil
}
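// Illustrative note: checkResult compares fmt-rendered row sets, so the
// comparison is order-sensitive and treats all values as strings; e.g.
// testkit.Rows("8 N", "10 A") only matches a result whose rows render in
// exactly that order.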
func (s *testStateChangeSuiteBase) CheckResult(tk *testkit.TestKit, sql string, args ...interface{}) (*testkit.Result, error) {
comment := Commentf("sql:%s, args:%v", sql, args)
rs, err := tk.Exec(sql, args...)
if err != nil {
return nil, err
}
result := tk.ResultSetToResult(rs, comment)
return result, nil
}
func (s *testStateChangeSuite) TestShowIndex(c *C) {
_, err := s.se.Execute(context.Background(), `create table t(c1 int primary key, c2 int)`)
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table t")
callback := &ddl.TestDDLCallback{}
prevState := model.StateNone
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test_db_state")
showIndexSQL := `show index from t`
var checkErr error
callback.OnJobUpdatedExported = func(job *model.Job) {
if job.SchemaState == prevState || checkErr != nil {
return
}
switch job.SchemaState {
case model.StateDeleteOnly, model.StateWriteOnly, model.StateWriteReorganization:
result, err1 := s.execQuery(tk, showIndexSQL)
if err1 != nil {
checkErr = err1
break
}
checkErr = checkResult(result, testkit.Rows("t 0 PRIMARY 1 c1 A 0 <nil> <nil> BTREE YES NULL"))
}
}
d := s.dom.DDL()
originalCallback := d.GetHook()
d.(ddl.DDLForTest).SetHook(callback)
alterTableSQL := `alter table t add index c2(c2)`
_, err = s.se.Execute(context.Background(), alterTableSQL)
c.Assert(err, IsNil)
c.Assert(errors.ErrorStack(checkErr), Equals, "")
result, err := s.execQuery(tk, showIndexSQL)
c.Assert(err, IsNil)
err = checkResult(result, testkit.Rows(
"t 0 PRIMARY 1 c1 A 0 <nil> <nil> BTREE YES NULL",
"t 1 c2 1 c2 A 0 <nil> <nil> YES BTREE YES NULL",
))
c.Assert(err, IsNil)
d.(ddl.DDLForTest).SetHook(originalCallback)
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), `create table tr(
id int, name varchar(50),
purchased date
)
partition by range( year(purchased) ) (
partition p0 values less than (1990),
partition p1 values less than (1995),
partition p2 values less than (2000),
partition p3 values less than (2005),
partition p4 values less than (2010),
partition p5 values less than (2015)
);`)
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table tr")
_, err = s.se.Execute(context.Background(), "create index idx1 on tr (purchased);")
c.Assert(err, IsNil)
result, err = s.execQuery(tk, "show index from tr;")
c.Assert(err, IsNil)
err = checkResult(result, testkit.Rows("tr 1 idx1 1 purchased A 0 <nil> <nil> YES BTREE YES NULL"))
c.Assert(err, IsNil)
}
func (s *testStateChangeSuite) TestParallelAlterModifyColumn(c *C) {
sql := "ALTER TABLE t MODIFY COLUMN b int FIRST;"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, IsNil)
_, err := s.se.Execute(context.Background(), "select * from t")
c.Assert(err, IsNil)
}
s.testControlParallelExecSQL(c, sql, sql, f)
}
// TODO: This test does not actually perform two DDLs in parallel,
// so it should not use testControlParallelExecSQL. We will handle this test in the next PR.
// func (s *testStateChangeSuite) TestParallelColumnModifyingDefinition(c *C) {
// sql1 := "insert into t(b) values (null);"
// sql2 := "alter table t change b b2 bigint not null;"
// f := func(c *C, err1, err2 error) {
// c.Assert(err1, IsNil)
// if err2 != nil {
// c.Assert(err2.Error(), Equals, "[ddl:1265]Data truncated for column 'b2' at row 1")
// }
// }
// s.testControlParallelExecSQL(c, sql1, sql2, f)
// }
func (s *testStateChangeSuite) TestParallelAddColumAndSetDefaultValue(c *C) {
_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), `create table tx (
c1 varchar(64),
c2 enum('N','Y') not null default 'N',
primary key idx2 (c2, c1))`)
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into tx values('a', 'N')")
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table tx")
sql1 := "alter table tx add column cx int after c1"
sql2 := "alter table tx alter c2 set default 'N'"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, IsNil)
_, err := s.se.Execute(context.Background(), "delete from tx where c1='a'")
c.Assert(err, IsNil)
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *testStateChangeSuite) TestParallelChangeColumnName(c *C) {
sql1 := "ALTER TABLE t CHANGE a aa int;"
sql2 := "ALTER TABLE t CHANGE b aa int;"
f := func(c *C, err1, err2 error) {
		// Make sure exactly one of the two DDLs encounters the 'duplicate column name' error.
var oneErr error
if (err1 != nil && err2 == nil) || (err1 == nil && err2 != nil) {
if err1 != nil {
oneErr = err1
} else {
oneErr = err2
}
}
		c.Assert(oneErr, NotNil)
		c.Assert(oneErr.Error(), Equals, "[schema:1060]Duplicate column name 'aa'")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *testStateChangeSuite) TestParallelAlterAddIndex(c *C) {
sql1 := "ALTER TABLE t add index index_b(b);"
sql2 := "CREATE INDEX index_b ON t (c);"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:1061]index already exist index_b")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *serialTestStateChangeSuite) TestParallelAlterAddExpressionIndex(c *C) {
config.GetGlobalConfig().Experimental.AllowsExpressionIndex = true
sql1 := "ALTER TABLE t add index expr_index_b((b+1));"
sql2 := "CREATE INDEX expr_index_b ON t ((c+1));"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:1061]index already exist expr_index_b")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *testStateChangeSuite) TestParallelAddPrimaryKey(c *C) {
sql1 := "ALTER TABLE t add primary key index_b(b);"
sql2 := "ALTER TABLE t add primary key index_b(c);"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[schema:1068]Multiple primary key defined")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *testStateChangeSuite) TestParallelAlterAddPartition(c *C) {
sql1 := `alter table t_part add partition (
partition p2 values less than (30)
);`
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:1493]VALUES LESS THAN value must be strictly increasing for each partition")
}
s.testControlParallelExecSQL(c, sql1, sql1, f)
}
func (s *testStateChangeSuite) TestParallelDropColumn(c *C) {
sql := "ALTER TABLE t drop COLUMN c ;"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:1091]column c doesn't exist")
}
s.testControlParallelExecSQL(c, sql, sql, f)
}
func (s *testStateChangeSuite) TestParallelDropColumns(c *C) {
sql := "ALTER TABLE t drop COLUMN b, drop COLUMN c;"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:1091]column b doesn't exist")
}
s.testControlParallelExecSQL(c, sql, sql, f)
}
func (s *testStateChangeSuite) TestParallelDropIfExistsColumns(c *C) {
sql := "ALTER TABLE t drop COLUMN if exists b, drop COLUMN if exists c;"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, IsNil)
}
s.testControlParallelExecSQL(c, sql, sql, f)
}
func (s *testStateChangeSuite) TestParallelDropIndex(c *C) {
sql1 := "alter table t drop index idx1 ;"
sql2 := "alter table t drop index idx2 ;"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[autoid:1075]Incorrect table definition; there can be only one auto column and it must be defined as a key")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *testStateChangeSuite) TestParallelDropPrimaryKey(c *C) {
s.preSQL = "ALTER TABLE t add primary key index_b(c);"
defer func() {
s.preSQL = ""
}()
sql1 := "alter table t drop primary key;"
sql2 := "alter table t drop primary key;"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:1091]index PRIMARY doesn't exist")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *testStateChangeSuite) TestParallelCreateAndRename(c *C) {
sql1 := "create table t_exists(c int);"
sql2 := "alter table t rename to t_exists;"
defer s.se.Execute(context.Background(), "drop table t_exists")
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[schema:1050]Table 't_exists' already exists")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
type checkRet func(c *C, err1, err2 error)
func (s *testStateChangeSuiteBase) prepareTestControlParallelExecSQL(c *C) (session.Session, session.Session, chan struct{}, ddl.Callback) {
callback := &ddl.TestDDLCallback{}
times := 0
callback.OnJobUpdatedExported = func(job *model.Job) {
if times != 0 {
return
}
var qLen int
for {
kv.RunInNewTxn(s.store, false, func(txn kv.Transaction) error {
jobs, err1 := admin.GetDDLJobs(txn)
if err1 != nil {
return err1
}
qLen = len(jobs)
return nil
})
if qLen == 2 {
break
}
time.Sleep(5 * time.Millisecond)
}
times++
}
d := s.dom.DDL()
originalCallback := d.GetHook()
d.(ddl.DDLForTest).SetHook(callback)
se, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
se1, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se1.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
ch := make(chan struct{})
	// Make sure sql1 is put into the DDLJobQueue.
go func() {
var qLen int
for {
kv.RunInNewTxn(s.store, false, func(txn kv.Transaction) error {
jobs, err3 := admin.GetDDLJobs(txn)
if err3 != nil {
return err3
}
qLen = len(jobs)
return nil
})
if qLen == 1 {
				// Make sure sql2 is executed after sql1.
close(ch)
break
}
time.Sleep(5 * time.Millisecond)
}
}()
return se, se1, ch, originalCallback
}
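// Illustrative timing note for the helper above: the hook blocks the first DDL
// worker until both jobs are visible in the queue (qLen == 2), while the
// goroutine closes ch only once a single job is enqueued (qLen == 1), so the
// second statement reliably starts after the first one has been queued.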
func (s *testStateChangeSuiteBase) testControlParallelExecSQL(c *C, sql1, sql2 string, f checkRet) {
_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "create table t(a int, b int, c int, d int auto_increment,e int, index idx1(d), index idx2(d,e))")
c.Assert(err, IsNil)
if len(s.preSQL) != 0 {
_, err := s.se.Execute(context.Background(), s.preSQL)
c.Assert(err, IsNil)
}
defer s.se.Execute(context.Background(), "drop table t")
_, err = s.se.Execute(context.Background(), "drop database if exists t_part")
c.Assert(err, IsNil)
s.se.Execute(context.Background(), `create table t_part (a int key)
partition by range(a) (
partition p0 values less than (10),
partition p1 values less than (20)
);`)
se, se1, ch, originalCallback := s.prepareTestControlParallelExecSQL(c)
defer s.dom.DDL().(ddl.DDLForTest).SetHook(originalCallback)
var err1 error
var err2 error
wg := sync.WaitGroup{}
wg.Add(2)
go func() {
defer wg.Done()
_, err1 = se.Execute(context.Background(), sql1)
}()
go func() {
defer wg.Done()
<-ch
_, err2 = se1.Execute(context.Background(), sql2)
}()
wg.Wait()
f(c, err1, err2)
}
func (s *testStateChangeSuite) TestParallelUpdateTableReplica(c *C) {
ctx := context.Background()
_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(ctx, "drop table if exists t1;")
c.Assert(err, IsNil)
_, err = s.se.Execute(ctx, "create table t1 (a int);")
c.Assert(err, IsNil)
_, err = s.se.Execute(ctx, "alter table t1 set tiflash replica 3 location labels 'a','b';")
c.Assert(err, IsNil)
se, se1, ch, originalCallback := s.prepareTestControlParallelExecSQL(c)
defer s.dom.DDL().(ddl.DDLForTest).SetHook(originalCallback)
t1 := testGetTableByName(c, se, "test_db_state", "t1")
var err1 error
var err2 error
wg := sync.WaitGroup{}
wg.Add(2)
go func() {
defer wg.Done()
// Mock for table tiflash replica was available.
err1 = domain.GetDomain(se).DDL().UpdateTableReplicaInfo(se, t1.Meta().ID, true)
}()
go func() {
defer wg.Done()
<-ch
// Mock for table tiflash replica was available.
err2 = domain.GetDomain(se1).DDL().UpdateTableReplicaInfo(se1, t1.Meta().ID, true)
}()
wg.Wait()
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:-1]the replica available status of table t1 is already updated")
}
func (s *testStateChangeSuite) testParallelExecSQL(c *C, sql string) {
se, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
se1, err1 := session.CreateSession(s.store)
c.Assert(err1, IsNil)
_, err = se1.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
var err2, err3 error
wg := sync.WaitGroup{}
callback := &ddl.TestDDLCallback{}
once := sync.Once{}
callback.OnJobUpdatedExported = func(job *model.Job) {
		// Sleep a while to let the other job enqueue.
once.Do(func() {
time.Sleep(time.Millisecond * 10)
})
}
d := s.dom.DDL()
originalCallback := d.GetHook()
defer d.(ddl.DDLForTest).SetHook(originalCallback)
d.(ddl.DDLForTest).SetHook(callback)
wg.Add(2)
go func() {
defer wg.Done()
_, err2 = se.Execute(context.Background(), sql)
}()
go func() {
defer wg.Done()
_, err3 = se1.Execute(context.Background(), sql)
}()
wg.Wait()
c.Assert(err2, IsNil)
c.Assert(err3, IsNil)
}
// TestCreateTableIfNotExists executes "create table if not exists xxx" in parallel. No error is expected.
func (s *testStateChangeSuite) TestCreateTableIfNotExists(c *C) {
defer s.se.Execute(context.Background(), "drop table test_not_exists")
s.testParallelExecSQL(c, "create table if not exists test_not_exists(a int);")
}
// TestCreateDBIfNotExists executes "create database if not exists xxx" in parallel. No error is expected.
func (s *testStateChangeSuite) TestCreateDBIfNotExists(c *C) {
defer s.se.Execute(context.Background(), "drop database test_not_exists")
s.testParallelExecSQL(c, "create database if not exists test_not_exists;")
}
// TestDDLIfNotExists executes some DDLs with the `if not exists` clause in parallel. No error is expected.
func (s *testStateChangeSuite) TestDDLIfNotExists(c *C) {
defer s.se.Execute(context.Background(), "drop table test_not_exists")
_, err := s.se.Execute(context.Background(), "create table if not exists test_not_exists(a int)")
c.Assert(err, IsNil)
// ADD COLUMN
s.testParallelExecSQL(c, "alter table test_not_exists add column if not exists b int")
// ADD COLUMNS
s.testParallelExecSQL(c, "alter table test_not_exists add column if not exists (c11 int, d11 int)")
// ADD INDEX
s.testParallelExecSQL(c, "alter table test_not_exists add index if not exists idx_b (b)")
// CREATE INDEX
s.testParallelExecSQL(c, "create index if not exists idx_b on test_not_exists (b)")
}
// TestDDLIfExists executes some DDLs with the `if exists` clause in parallel. No error is expected.
func (s *testStateChangeSuite) TestDDLIfExists(c *C) {
defer func() {
s.se.Execute(context.Background(), "drop table test_exists")
s.se.Execute(context.Background(), "drop table test_exists_2")
}()
_, err := s.se.Execute(context.Background(), "create table if not exists test_exists (a int key, b int)")
c.Assert(err, IsNil)
// DROP COLUMNS
s.testParallelExecSQL(c, "alter table test_exists drop column if exists c, drop column if exists d")
// DROP COLUMN
s.testParallelExecSQL(c, "alter table test_exists drop column if exists b") // only `a` exists now
// CHANGE COLUMN
s.testParallelExecSQL(c, "alter table test_exists change column if exists a c int") // only, `c` exists now
// MODIFY COLUMN
s.testParallelExecSQL(c, "alter table test_exists modify column if exists a bigint")
// DROP INDEX
_, err = s.se.Execute(context.Background(), "alter table test_exists add index idx_c (c)")
c.Assert(err, IsNil)
s.testParallelExecSQL(c, "alter table test_exists drop index if exists idx_c")
// DROP PARTITION (ADD PARTITION tested in TestParallelAlterAddPartition)
_, err = s.se.Execute(context.Background(), "create table test_exists_2 (a int key) partition by range(a) (partition p0 values less than (10), partition p1 values less than (20))")
c.Assert(err, IsNil)
s.testParallelExecSQL(c, "alter table test_exists_2 drop partition if exists p1")
}
// TestParallelDDLBeforeRunDDLJob tests a session executing DDL with an outdated information schema.
// This test is used to simulate the following conditions:
// In a cluster, TiDB "a" executes a DDL statement.
// TiDB "b" fails to load the schema, then TiDB "b" executes a DDL statement related to the one executed by "a".
func (s *testStateChangeSuite) TestParallelDDLBeforeRunDDLJob(c *C) {
defer s.se.Execute(context.Background(), "drop table test_table")
_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "create table test_table (c1 int, c2 int default 1, index (c1))")
c.Assert(err, IsNil)
// Create two sessions.
se, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
se1, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se1.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
intercept := &ddl.TestInterceptor{}
firstConnID := uint64(1)
finishedCnt := int32(0)
interval := 5 * time.Millisecond
	var sessionCnt int32 // sessionCnt is the number of sessions that go into the OnGetInfoSchema function.
intercept.OnGetInfoSchemaExported = func(ctx sessionctx.Context, is infoschema.InfoSchema) infoschema.InfoSchema {
// The following code is for testing.
// Make sure the two sessions get the same information schema before executing DDL.
		// After the first session executes its DDL, the second session executes its DDL.
var info infoschema.InfoSchema
atomic.AddInt32(&sessionCnt, 1)
for {
// Make sure there are two sessions running here.
if atomic.LoadInt32(&sessionCnt) == 2 {
info = is
break
}
			// Print a log to notify us if TestParallelDDLBeforeRunDDLJob hangs.
log.Info("sleep in TestParallelDDLBeforeRunDDLJob", zap.String("interval", interval.String()))
time.Sleep(interval)
}
currID := ctx.GetSessionVars().ConnectionID
for {
seCnt := atomic.LoadInt32(&sessionCnt)
			// Make sure the two sessions have got the same information schema. Then the first session can continue,
			// or once the first session has finished this SQL (seCnt == finishedCnt), the other sessions can continue.
if currID == firstConnID || seCnt == finishedCnt {
break
}
			// Print a log to notify us if TestParallelDDLBeforeRunDDLJob hangs.
log.Info("sleep in TestParallelDDLBeforeRunDDLJob", zap.String("interval", interval.String()))
time.Sleep(interval)
}
return info
}
d := s.dom.DDL()
d.(ddl.DDLForTest).SetInterceptoror(intercept)
	// Make sure connection 1 executes a SQL statement before connection 2,
	// and connection 2 executes a SQL statement with an outdated information schema.
wg := sync.WaitGroup{}
wg.Add(2)
go func() {
defer wg.Done()
se.SetConnectionID(firstConnID)
_, err1 := se.Execute(context.Background(), "alter table test_table drop column c2")
c.Assert(err1, IsNil)
atomic.StoreInt32(&sessionCnt, finishedCnt)
}()
go func() {
defer wg.Done()
se1.SetConnectionID(2)
_, err2 := se1.Execute(context.Background(), "alter table test_table add column c2 int")
c.Assert(err2, NotNil)
c.Assert(strings.Contains(err2.Error(), "Information schema is changed"), IsTrue)
}()
wg.Wait()
intercept = &ddl.TestInterceptor{}
d.(ddl.DDLForTest).SetInterceptoror(intercept)
}
func (s *testStateChangeSuite) TestParallelAlterSchemaCharsetAndCollate(c *C) {
sql := "ALTER SCHEMA test_db_state CHARSET utf8mb4 COLLATE utf8mb4_general_ci"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, IsNil)
}
s.testControlParallelExecSQL(c, sql, sql, f)
sql = `SELECT default_character_set_name, default_collation_name
FROM information_schema.schemata
WHERE schema_name='test_db_state'`
tk := testkit.NewTestKit(c, s.store)
tk.MustQuery(sql).Check(testkit.Rows("utf8mb4 utf8mb4_general_ci"))
}
// TestParallelTruncateTableAndAddColumn tests adding a column while truncating the table.
func (s *testStateChangeSuite) TestParallelTruncateTableAndAddColumn(c *C) {
sql1 := "truncate table t"
sql2 := "alter table t add column c3 int"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, NotNil)
c.Assert(err2.Error(), Equals, "[domain:8028]Information schema is changed during the execution of the statement(for example, table definition may be updated by other DDL ran in parallel). If you see this error often, try increasing `tidb_max_delta_schema_count`. [try again later]")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
// TestParallelTruncateTableAndAddColumns tests adding columns while truncating the table.
func (s *testStateChangeSuite) TestParallelTruncateTableAndAddColumns(c *C) {
sql1 := "truncate table t"
sql2 := "alter table t add column c3 int, add column c4 int"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, NotNil)
c.Assert(err2.Error(), Equals, "[domain:8028]Information schema is changed during the execution of the statement(for example, table definition may be updated by other DDL ran in parallel). If you see this error often, try increasing `tidb_max_delta_schema_count`. [try again later]")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
// TestParallelFlashbackTable tests parallel flashback table.
func (s *serialTestStateChangeSuite) TestParallelFlashbackTable(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil)
defer func(originGC bool) {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange"), IsNil)
if originGC {
ddl.EmulatorGCEnable()
} else {
ddl.EmulatorGCDisable()
}
}(ddl.IsEmulatorGCEnable())
	// Disable emulator GC; otherwise, emulator GC will delete the table record as soon as possible after executing the drop table DDL.
ddl.EmulatorGCDisable()
gcTimeFormat := "20060102-15:04:05 -0700 MST"
timeBeforeDrop := time.Now().Add(0 - 48*60*60*time.Second).Format(gcTimeFormat)
safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '')
ON DUPLICATE KEY
UPDATE variable_value = '%[1]s'`
tk := testkit.NewTestKit(c, s.store)
// clear GC variables first.
tk.MustExec("delete from mysql.tidb where variable_name in ( 'tikv_gc_safe_point','tikv_gc_enable' )")
// set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// set GC enable.
err := gcutil.EnableGC(tk.Se)
c.Assert(err, IsNil)
// prepare dropped table.
tk.MustExec("use test_db_state")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int);")
tk.MustExec("drop table if exists t")
// Test parallel flashback table.
sql1 := "flashback table t to t_flashback"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, NotNil)
c.Assert(err2.Error(), Equals, "[schema:1050]Table 't_flashback' already exists")
}
s.testControlParallelExecSQL(c, sql1, sql1, f)
// Test parallel flashback table with different name
tk.MustExec("drop table t_flashback")
sql1 = "flashback table t_flashback"
sql2 := "flashback table t_flashback to t_flashback2"
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
| ddl/db_change_test.go | 1 | https://github.com/pingcap/tidb/commit/788dc28a7423d4b85ebe11bbce632733b056d63a | [
0.9652205109596252,
0.015189855359494686,
0.00016381312161684036,
0.00017504773859400302,
0.11437883973121643
] |
{
"id": 2,
"code_window": [
"\t\treturn nil, nil\n",
"\t}\n",
"\n",
"\tdbInfo, err := t.GetDatabase(job.SchemaID)\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tdbInfo, err := checkSchemaExistAndCancelNotExistJob(t, job)\n"
],
"file_path": "ddl/index.go",
"type": "replace",
"edit_start_line_idx": 353
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"context"
"github.com/cznic/mathutil"
"github.com/pingcap/errors"
"github.com/pingcap/parser/ast"
"github.com/pingcap/tidb/executor/aggfuncs"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/util/chunk"
)
// WindowExec is the executor for window functions.
type WindowExec struct {
baseExecutor
groupChecker *vecGroupChecker
// childResult stores the child chunk
childResult *chunk.Chunk
// executed indicates the child executor is drained or something unexpected happened.
executed bool
// resultChunks stores the chunks to return
resultChunks []*chunk.Chunk
	// remainingRowsInChunk[i] indicates how many rows in resultChunks[i] are not prepared yet.
remainingRowsInChunk []int
numWindowFuncs int
processor windowProcessor
}
// Close implements the Executor Close interface.
func (e *WindowExec) Close() error {
return errors.Trace(e.baseExecutor.Close())
}
// Next implements the Executor Next interface.
func (e *WindowExec) Next(ctx context.Context, chk *chunk.Chunk) error {
chk.Reset()
for !e.executed && !e.preparedChunkAvailable() {
err := e.consumeOneGroup(ctx)
if err != nil {
e.executed = true
return err
}
}
if len(e.resultChunks) > 0 {
chk.SwapColumns(e.resultChunks[0])
e.resultChunks[0] = nil // GC it. TODO: Reuse it.
e.resultChunks = e.resultChunks[1:]
e.remainingRowsInChunk = e.remainingRowsInChunk[1:]
}
return nil
}
func (e *WindowExec) preparedChunkAvailable() bool {
return len(e.resultChunks) > 0 && e.remainingRowsInChunk[0] == 0
}
func (e *WindowExec) consumeOneGroup(ctx context.Context) error {
var groupRows []chunk.Row
if e.groupChecker.isExhausted() {
eof, err := e.fetchChild(ctx)
if err != nil {
return errors.Trace(err)
}
if eof {
e.executed = true
return e.consumeGroupRows(groupRows)
}
_, err = e.groupChecker.splitIntoGroups(e.childResult)
if err != nil {
return errors.Trace(err)
}
}
begin, end := e.groupChecker.getNextGroup()
for i := begin; i < end; i++ {
groupRows = append(groupRows, e.childResult.GetRow(i))
}
for meetLastGroup := end == e.childResult.NumRows(); meetLastGroup; {
meetLastGroup = false
eof, err := e.fetchChild(ctx)
if err != nil {
return errors.Trace(err)
}
if eof {
e.executed = true
return e.consumeGroupRows(groupRows)
}
isFirstGroupSameAsPrev, err := e.groupChecker.splitIntoGroups(e.childResult)
if err != nil {
return errors.Trace(err)
}
if isFirstGroupSameAsPrev {
begin, end = e.groupChecker.getNextGroup()
for i := begin; i < end; i++ {
groupRows = append(groupRows, e.childResult.GetRow(i))
}
meetLastGroup = end == e.childResult.NumRows()
}
}
return e.consumeGroupRows(groupRows)
}
func (e *WindowExec) consumeGroupRows(groupRows []chunk.Row) (err error) {
remainingRowsInGroup := len(groupRows)
if remainingRowsInGroup == 0 {
return nil
}
for i := 0; i < len(e.resultChunks); i++ {
remained := mathutil.Min(e.remainingRowsInChunk[i], remainingRowsInGroup)
e.remainingRowsInChunk[i] -= remained
remainingRowsInGroup -= remained
		// TODO: Combine these three methods.
		// The old implementation required the processor to have these three methods,
		// but now it does not have to.
groupRows, err = e.processor.consumeGroupRows(e.ctx, groupRows)
if err != nil {
return errors.Trace(err)
}
_, err = e.processor.appendResult2Chunk(e.ctx, groupRows, e.resultChunks[i], remained)
if err != nil {
return errors.Trace(err)
}
if remainingRowsInGroup == 0 {
e.processor.resetPartialResult()
break
}
}
return nil
}
func (e *WindowExec) fetchChild(ctx context.Context) (EOF bool, err error) {
childResult := newFirstChunk(e.children[0])
err = Next(ctx, e.children[0], childResult)
if err != nil {
return false, errors.Trace(err)
}
// No more data.
numRows := childResult.NumRows()
if numRows == 0 {
return true, nil
}
resultChk := chunk.New(e.retFieldTypes, 0, numRows)
err = e.copyChk(childResult, resultChk)
if err != nil {
return false, err
}
e.resultChunks = append(e.resultChunks, resultChk)
e.remainingRowsInChunk = append(e.remainingRowsInChunk, numRows)
e.childResult = childResult
return false, nil
}
func (e *WindowExec) copyChk(src, dst *chunk.Chunk) error {
columns := e.Schema().Columns[:len(e.Schema().Columns)-e.numWindowFuncs]
for i, col := range columns {
if err := dst.MakeRefTo(i, src, col.Index); err != nil {
return err
}
}
return nil
}
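// Illustrative note: with a child schema of (a, b) and two window functions,
// e.Schema() is (a, b, w1, w2) and numWindowFuncs is 2, so copyChk makes dst
// columns 0 and 1 reference the child's a and b; the trailing window-result
// columns are filled in later by the processor.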
// windowProcessor is the interface for processing different kinds of windows.
type windowProcessor interface {
	// consumeGroupRows updates the result for a window function using the input rows
// which belong to the same partition.
consumeGroupRows(ctx sessionctx.Context, rows []chunk.Row) ([]chunk.Row, error)
// appendResult2Chunk appends the final results to chunk.
	// It is called when there are no more rows in the current partition.
appendResult2Chunk(ctx sessionctx.Context, rows []chunk.Row, chk *chunk.Chunk, remained int) ([]chunk.Row, error)
// resetPartialResult resets the partial result to the original state for a specific window function.
resetPartialResult()
}
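// noopWindowProcessor is a minimal sketch (hypothetical; not used by the
// executor) illustrating the windowProcessor contract: rows are buffered per
// partition, results are appended once the partition is exhausted, and the
// partial state is reset between partitions.
type noopWindowProcessor struct{}
func (p *noopWindowProcessor) consumeGroupRows(ctx sessionctx.Context, rows []chunk.Row) ([]chunk.Row, error) {
	return rows, nil // keep the rows buffered; nothing to aggregate
}
func (p *noopWindowProcessor) appendResult2Chunk(ctx sessionctx.Context, rows []chunk.Row, chk *chunk.Chunk, remained int) ([]chunk.Row, error) {
	return rows, nil // a real processor writes `remained` result rows into chk
}
func (p *noopWindowProcessor) resetPartialResult() {}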
type aggWindowProcessor struct {
windowFuncs []aggfuncs.AggFunc
partialResults []aggfuncs.PartialResult
}
func (p *aggWindowProcessor) consumeGroupRows(ctx sessionctx.Context, rows []chunk.Row) ([]chunk.Row, error) {
for i, windowFunc := range p.windowFuncs {
err := windowFunc.UpdatePartialResult(ctx, rows, p.partialResults[i])
if err != nil {
return nil, err
}
}
rows = rows[:0]
return rows, nil
}
func (p *aggWindowProcessor) appendResult2Chunk(ctx sessionctx.Context, rows []chunk.Row, chk *chunk.Chunk, remained int) ([]chunk.Row, error) {
for remained > 0 {
for i, windowFunc := range p.windowFuncs {
// TODO: We can extend the agg func interface to avoid the `for` loop here.
err := windowFunc.AppendFinalResult2Chunk(ctx, p.partialResults[i], chk)
if err != nil {
return nil, err
}
}
remained--
}
return rows, nil
}
func (p *aggWindowProcessor) resetPartialResult() {
for i, windowFunc := range p.windowFuncs {
windowFunc.ResetPartialResult(p.partialResults[i])
}
}
type rowFrameWindowProcessor struct {
windowFuncs []aggfuncs.AggFunc
partialResults []aggfuncs.PartialResult
start *core.FrameBound
end *core.FrameBound
curRowIdx uint64
}
func (p *rowFrameWindowProcessor) getStartOffset(numRows uint64) uint64 {
if p.start.UnBounded {
return 0
}
switch p.start.Type {
case ast.Preceding:
if p.curRowIdx >= p.start.Num {
return p.curRowIdx - p.start.Num
}
return 0
case ast.Following:
offset := p.curRowIdx + p.start.Num
if offset >= numRows {
return numRows
}
return offset
case ast.CurrentRow:
return p.curRowIdx
}
// It will never reach here.
return 0
}
func (p *rowFrameWindowProcessor) getEndOffset(numRows uint64) uint64 {
if p.end.UnBounded {
return numRows
}
switch p.end.Type {
case ast.Preceding:
if p.curRowIdx >= p.end.Num {
return p.curRowIdx - p.end.Num + 1
}
return 0
case ast.Following:
offset := p.curRowIdx + p.end.Num
if offset >= numRows {
return numRows
}
return offset + 1
case ast.CurrentRow:
return p.curRowIdx + 1
}
// It will never reach here.
return 0
}
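// Worked example (illustrative): for ROWS BETWEEN 2 PRECEDING AND 1 FOLLOWING
// with numRows = 10 and curRowIdx = 5, getStartOffset returns 5-2 = 3 and
// getEndOffset returns 5+1+1 = 7, so the frame covers rows[3:7].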
func (p *rowFrameWindowProcessor) consumeGroupRows(ctx sessionctx.Context, rows []chunk.Row) ([]chunk.Row, error) {
return rows, nil
}
func (p *rowFrameWindowProcessor) appendResult2Chunk(ctx sessionctx.Context, rows []chunk.Row, chk *chunk.Chunk, remained int) ([]chunk.Row, error) {
numRows := uint64(len(rows))
var (
err error
initializedSlidingWindow bool
start uint64
end uint64
lastStart uint64
lastEnd uint64
shiftStart uint64
shiftEnd uint64
)
slidingWindowAggFuncs := make([]aggfuncs.SlidingWindowAggFunc, len(p.windowFuncs))
for i, windowFunc := range p.windowFuncs {
if slidingWindowAggFunc, ok := windowFunc.(aggfuncs.SlidingWindowAggFunc); ok {
slidingWindowAggFuncs[i] = slidingWindowAggFunc
}
}
for ; remained > 0; lastStart, lastEnd = start, end {
start = p.getStartOffset(numRows)
end = p.getEndOffset(numRows)
p.curRowIdx++
remained--
shiftStart = start - lastStart
shiftEnd = end - lastEnd
if start >= end {
for i, windowFunc := range p.windowFuncs {
slidingWindowAggFunc := slidingWindowAggFuncs[i]
if slidingWindowAggFunc != nil && initializedSlidingWindow {
err = slidingWindowAggFunc.Slide(ctx, rows, lastStart, lastEnd, shiftStart, shiftEnd, p.partialResults[i])
if err != nil {
return nil, err
}
}
err = windowFunc.AppendFinalResult2Chunk(ctx, p.partialResults[i], chk)
if err != nil {
return nil, err
}
}
continue
}
for i, windowFunc := range p.windowFuncs {
slidingWindowAggFunc := slidingWindowAggFuncs[i]
if slidingWindowAggFunc != nil && initializedSlidingWindow {
err = slidingWindowAggFunc.Slide(ctx, rows, lastStart, lastEnd, shiftStart, shiftEnd, p.partialResults[i])
} else {
err = windowFunc.UpdatePartialResult(ctx, rows[start:end], p.partialResults[i])
}
if err != nil {
return nil, err
}
err = windowFunc.AppendFinalResult2Chunk(ctx, p.partialResults[i], chk)
if err != nil {
return nil, err
}
if slidingWindowAggFunc == nil {
windowFunc.ResetPartialResult(p.partialResults[i])
}
}
if !initializedSlidingWindow {
initializedSlidingWindow = true
}
}
for i, windowFunc := range p.windowFuncs {
windowFunc.ResetPartialResult(p.partialResults[i])
}
return rows, nil
}
func (p *rowFrameWindowProcessor) resetPartialResult() {
p.curRowIdx = 0
}
type rangeFrameWindowProcessor struct {
windowFuncs []aggfuncs.AggFunc
partialResults []aggfuncs.PartialResult
start *core.FrameBound
end *core.FrameBound
curRowIdx uint64
lastStartOffset uint64
lastEndOffset uint64
orderByCols []*expression.Column
// expectedCmpResult is used to decide if one value is included in the frame.
expectedCmpResult int64
}
func (p *rangeFrameWindowProcessor) getStartOffset(ctx sessionctx.Context, rows []chunk.Row) (uint64, error) {
if p.start.UnBounded {
return 0, nil
}
numRows := uint64(len(rows))
for ; p.lastStartOffset < numRows; p.lastStartOffset++ {
var res int64
var err error
for i := range p.orderByCols {
res, _, err = p.start.CmpFuncs[i](ctx, p.orderByCols[i], p.start.CalcFuncs[i], rows[p.lastStartOffset], rows[p.curRowIdx])
if err != nil {
return 0, err
}
if res != 0 {
break
}
}
		// For asc, break when the current value is greater than or equal to the calculated result;
		// For desc, break when the current value is less than or equal to the calculated result.
if res != p.expectedCmpResult {
break
}
}
return p.lastStartOffset, nil
}
func (p *rangeFrameWindowProcessor) getEndOffset(ctx sessionctx.Context, rows []chunk.Row) (uint64, error) {
numRows := uint64(len(rows))
if p.end.UnBounded {
return numRows, nil
}
for ; p.lastEndOffset < numRows; p.lastEndOffset++ {
var res int64
var err error
for i := range p.orderByCols {
res, _, err = p.end.CmpFuncs[i](ctx, p.end.CalcFuncs[i], p.orderByCols[i], rows[p.curRowIdx], rows[p.lastEndOffset])
if err != nil {
return 0, err
}
if res != 0 {
break
}
}
// For asc, break when the calculated result is greater than the current value.
// For desc, break when the calculated result is less than the current value.
if res == p.expectedCmpResult {
break
}
}
return p.lastEndOffset, nil
}
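// Illustrative note: both offsets above only ever move forward within a
// partition, so they are cached in lastStartOffset/lastEndOffset across
// consecutive current rows; expectedCmpResult encodes which comparison
// outcome ("still below the bound" for asc, the reverse for desc) keeps the
// scan advancing.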
func (p *rangeFrameWindowProcessor) appendResult2Chunk(ctx sessionctx.Context, rows []chunk.Row, chk *chunk.Chunk, remained int) ([]chunk.Row, error) {
var (
err error
initializedSlidingWindow bool
start uint64
end uint64
lastStart uint64
lastEnd uint64
shiftStart uint64
shiftEnd uint64
)
slidingWindowAggFuncs := make([]aggfuncs.SlidingWindowAggFunc, len(p.windowFuncs))
for i, windowFunc := range p.windowFuncs {
if slidingWindowAggFunc, ok := windowFunc.(aggfuncs.SlidingWindowAggFunc); ok {
slidingWindowAggFuncs[i] = slidingWindowAggFunc
}
}
for ; remained > 0; lastStart, lastEnd = start, end {
start, err = p.getStartOffset(ctx, rows)
if err != nil {
return nil, err
}
end, err = p.getEndOffset(ctx, rows)
if err != nil {
return nil, err
}
p.curRowIdx++
remained--
shiftStart = start - lastStart
shiftEnd = end - lastEnd
if start >= end {
for i, windowFunc := range p.windowFuncs {
slidingWindowAggFunc := slidingWindowAggFuncs[i]
if slidingWindowAggFunc != nil && initializedSlidingWindow {
err = slidingWindowAggFunc.Slide(ctx, rows, lastStart, lastEnd, shiftStart, shiftEnd, p.partialResults[i])
if err != nil {
return nil, err
}
}
err = windowFunc.AppendFinalResult2Chunk(ctx, p.partialResults[i], chk)
if err != nil {
return nil, err
}
}
continue
}
for i, windowFunc := range p.windowFuncs {
slidingWindowAggFunc := slidingWindowAggFuncs[i]
if slidingWindowAggFunc != nil && initializedSlidingWindow {
err = slidingWindowAggFunc.Slide(ctx, rows, lastStart, lastEnd, shiftStart, shiftEnd, p.partialResults[i])
} else {
err = windowFunc.UpdatePartialResult(ctx, rows[start:end], p.partialResults[i])
}
if err != nil {
return nil, err
}
err = windowFunc.AppendFinalResult2Chunk(ctx, p.partialResults[i], chk)
if err != nil {
return nil, err
}
if slidingWindowAggFunc == nil {
windowFunc.ResetPartialResult(p.partialResults[i])
}
}
if !initializedSlidingWindow {
initializedSlidingWindow = true
}
}
for i, windowFunc := range p.windowFuncs {
windowFunc.ResetPartialResult(p.partialResults[i])
}
return rows, nil
}
func (p *rangeFrameWindowProcessor) consumeGroupRows(ctx sessionctx.Context, rows []chunk.Row) ([]chunk.Row, error) {
return rows, nil
}
func (p *rangeFrameWindowProcessor) resetPartialResult() {
p.curRowIdx = 0
p.lastStartOffset = 0
p.lastEndOffset = 0
}
| executor/window.go | 0 | https://github.com/pingcap/tidb/commit/788dc28a7423d4b85ebe11bbce632733b056d63a | [
0.8348392248153687,
0.01791558973491192,
0.0001651230559218675,
0.00017289721290580928,
0.11470532417297363
] |
{
"id": 2,
"code_window": [
"\t\treturn nil, nil\n",
"\t}\n",
"\n",
"\tdbInfo, err := t.GetDatabase(job.SchemaID)\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tdbInfo, err := checkSchemaExistAndCancelNotExistJob(t, job)\n"
],
"file_path": "ddl/index.go",
"type": "replace",
"edit_start_line_idx": 353
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package expression
import (
"bytes"
"encoding/binary"
"fmt"
"math"
"net"
"strings"
"sync/atomic"
"time"
"github.com/google/uuid"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
)
func (b *builtinInetNtoaSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
buf, err := b.bufAllocator.get(types.ETInt, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalInt(b.ctx, input, buf); err != nil {
return err
}
result.ReserveString(n)
i64s := buf.Int64s()
ip := make(net.IP, net.IPv4len)
for i := 0; i < n; i++ {
val := i64s[i]
if buf.IsNull(i) || val < 0 || uint64(val) > math.MaxUint32 {
result.AppendNull()
continue
}
binary.BigEndian.PutUint32(ip, uint32(val))
ipv4 := ip.To4()
if ipv4 == nil {
			// Not a valid IPv4 address.
result.AppendNull()
continue
}
result.AppendString(ipv4.String())
}
return nil
}
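// Worked example (illustrative): INET_NTOA(3232235521) writes the big-endian
// bytes 0xC0, 0xA8, 0x00, 0x01 into the 4-byte buffer above, producing
// "192.168.0.1"; negative inputs and values above MaxUint32 yield NULL.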
func (b *builtinInetNtoaSig) vectorized() bool {
return true
}
func (b *builtinIsIPv4Sig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
buf, err := b.bufAllocator.get(types.ETString, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalString(b.ctx, input, buf); err != nil {
return err
}
result.ResizeInt64(n, false)
i64s := result.Int64s()
for i := 0; i < n; i++ {
// Note that even when the i-th input string is null, the output is
// 0 instead of null, therefore we do not set the null bit mask in
// result's corresponding row.
// See https://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_is-ipv4
if isIPv4(buf.GetString(i)) {
i64s[i] = 1
} else {
i64s[i] = 0
}
}
return nil
}
func (b *builtinIsIPv4Sig) vectorized() bool {
return true
}
func (b *builtinJSONAnyValueSig) vectorized() bool {
return true
}
func (b *builtinJSONAnyValueSig) vecEvalJSON(input *chunk.Chunk, result *chunk.Column) error {
return b.args[0].VecEvalJSON(b.ctx, input, result)
}
func (b *builtinRealAnyValueSig) vectorized() bool {
return true
}
func (b *builtinRealAnyValueSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error {
return b.args[0].VecEvalReal(b.ctx, input, result)
}
func (b *builtinStringAnyValueSig) vectorized() bool {
return true
}
func (b *builtinStringAnyValueSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error {
return b.args[0].VecEvalString(b.ctx, input, result)
}
func (b *builtinIsIPv6Sig) vectorized() bool {
return true
}
func (b *builtinIsIPv6Sig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
buf, err := b.bufAllocator.get(types.ETString, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalString(b.ctx, input, buf); err != nil {
return err
}
result.ResizeInt64(n, false)
i64s := result.Int64s()
for i := 0; i < n; i++ {
// Note that even when the i-th input string is null, the output is
// 0 instead of null, therefore we do not set the null bit mask in
// result's corresponding row.
// See https://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_is-ipv6
if buf.IsNull(i) {
i64s[i] = 0
} else {
ipStr := buf.GetString(i)
if ip := net.ParseIP(ipStr); ip != nil && !isIPv4(ipStr) {
i64s[i] = 1
} else {
i64s[i] = 0
}
}
}
return nil
}
func (b *builtinNameConstStringSig) vectorized() bool {
return true
}
func (b *builtinNameConstStringSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error {
return b.args[1].VecEvalString(b.ctx, input, result)
}
func (b *builtinDecimalAnyValueSig) vectorized() bool {
return true
}
func (b *builtinDecimalAnyValueSig) vecEvalDecimal(input *chunk.Chunk, result *chunk.Column) error {
return b.args[0].VecEvalDecimal(b.ctx, input, result)
}
func (b *builtinUUIDSig) vectorized() bool {
return true
}
func (b *builtinUUIDSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
result.ReserveString(n)
var id uuid.UUID
var err error
for i := 0; i < n; i++ {
id, err = uuid.NewUUID()
if err != nil {
return err
}
result.AppendString(id.String())
}
return nil
}
func (b *builtinNameConstDurationSig) vectorized() bool {
return true
}
func (b *builtinNameConstDurationSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error {
return b.args[1].VecEvalDuration(b.ctx, input, result)
}
func (b *builtinLockSig) vectorized() bool {
return true
}
// See https://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_get-lock
// The lock function will do nothing.
// Warning: get_lock() function is parsed but ignored.
func (b *builtinLockSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
result.ResizeInt64(n, false)
i64s := result.Int64s()
for i := range i64s {
i64s[i] = 1
}
return nil
}
func (b *builtinDurationAnyValueSig) vectorized() bool {
return true
}
func (b *builtinDurationAnyValueSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error {
return b.args[0].VecEvalDuration(b.ctx, input, result)
}
func (b *builtinIntAnyValueSig) vectorized() bool {
return true
}
func (b *builtinIntAnyValueSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
return b.args[0].VecEvalInt(b.ctx, input, result)
}
func (b *builtinIsIPv4CompatSig) vectorized() bool {
return true
}
func (b *builtinIsIPv4CompatSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
buf, err := b.bufAllocator.get(types.ETString, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalString(b.ctx, input, buf); err != nil {
return err
}
result.ResizeInt64(n, false)
i64s := result.Int64s()
prefixCompat := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
for i := 0; i < n; i++ {
if buf.IsNull(i) {
i64s[i] = 0
} else {
// Note that the input should be IP address in byte format.
// For IPv4, it should be byte slice with 4 bytes.
// For IPv6, it should be byte slice with 16 bytes.
// See example https://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_is-ipv4-compat
ipAddress := buf.GetBytes(i)
if len(ipAddress) != net.IPv6len || !bytes.HasPrefix(ipAddress, prefixCompat) {
				// Not an IPv4-compatible address, return false.
i64s[i] = 0
} else {
i64s[i] = 1
}
}
}
return nil
}
func (b *builtinNameConstIntSig) vectorized() bool {
return true
}
func (b *builtinNameConstIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
return b.args[1].VecEvalInt(b.ctx, input, result)
}
func (b *builtinNameConstTimeSig) vectorized() bool {
return true
}
func (b *builtinNameConstTimeSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error {
return b.args[1].VecEvalTime(b.ctx, input, result)
}
func (b *builtinSleepSig) vectorized() bool {
return true
}
// vecEvalInt evals a builtinSleepSig in a vectorized manner.
// See https://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_sleep
func (b *builtinSleepSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
buf, err := b.bufAllocator.get(types.ETReal, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
err = b.args[0].VecEvalReal(b.ctx, input, buf)
if err != nil {
return err
}
result.ResizeInt64(n, false)
i64s := result.Int64s()
for i := 0; i < n; i++ {
isNull := buf.IsNull(i)
val := buf.GetFloat64(i)
sessVars := b.ctx.GetSessionVars()
if isNull || val < 0 {
if sessVars.StrictSQLMode {
return errIncorrectArgs.GenWithStackByArgs("sleep")
}
err := errIncorrectArgs.GenWithStackByArgs("sleep")
sessVars.StmtCtx.AppendWarning(err)
continue
}
if val > math.MaxFloat64/float64(time.Second.Nanoseconds()) {
return errIncorrectArgs.GenWithStackByArgs("sleep")
}
if isKilled := doSleep(val, sessVars); isKilled {
for j := i; j < n; j++ {
i64s[j] = 1
}
return nil
}
}
return nil
}
func doSleep(secs float64, sessVars *variable.SessionVars) (isKilled bool) {
if secs <= 0.0 {
return false
}
dur := time.Duration(secs * float64(time.Second.Nanoseconds()))
ticker := time.NewTicker(10 * time.Millisecond)
defer ticker.Stop()
timer := time.NewTimer(dur)
for {
select {
case <-ticker.C:
if atomic.CompareAndSwapUint32(&sessVars.Killed, 1, 0) {
timer.Stop()
return true
}
case <-timer.C:
return false
}
}
}
func (b *builtinIsIPv4MappedSig) vectorized() bool {
return true
}
func (b *builtinIsIPv4MappedSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
buf, err := b.bufAllocator.get(types.ETString, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalString(b.ctx, input, buf); err != nil {
return err
}
result.ResizeInt64(n, false)
i64s := result.Int64s()
prefixMapped := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff}
for i := 0; i < n; i++ {
if buf.IsNull(i) {
i64s[i] = 0
} else {
// Note that the input should be IP address in byte format.
// For IPv4, it should be byte slice with 4 bytes.
// For IPv6, it should be byte slice with 16 bytes.
// See example https://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_is-ipv4-mapped
ipAddress := buf.GetBytes(i)
if len(ipAddress) != net.IPv6len || !bytes.HasPrefix(ipAddress, prefixMapped) {
				// Not an IPv4-mapped address, return false.
i64s[i] = 0
} else {
i64s[i] = 1
}
}
}
return nil
}
func (b *builtinNameConstDecimalSig) vectorized() bool {
return true
}
func (b *builtinNameConstDecimalSig) vecEvalDecimal(input *chunk.Chunk, result *chunk.Column) error {
return b.args[1].VecEvalDecimal(b.ctx, input, result)
}
func (b *builtinNameConstJSONSig) vectorized() bool {
return true
}
func (b *builtinNameConstJSONSig) vecEvalJSON(input *chunk.Chunk, result *chunk.Column) error {
return b.args[1].VecEvalJSON(b.ctx, input, result)
}
func (b *builtinInet6AtonSig) vectorized() bool {
return true
}
// vecEvalString evals a builtinInet6AtonSig.
// See https://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_inet6-aton
func (b *builtinInet6AtonSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
buf, err := b.bufAllocator.get(types.ETString, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalString(b.ctx, input, buf); err != nil {
return err
}
var (
resv4 []byte
resv6 []byte
res []byte
)
result.ReserveString(n)
for i := 0; i < n; i++ {
if buf.IsNull(i) {
result.AppendNull()
continue
}
val := buf.GetString(i)
if len(val) == 0 {
result.AppendNull()
continue
}
ip := net.ParseIP(val)
if ip == nil {
result.AppendNull()
continue
}
var isMappedIpv6 bool
ipTo4 := ip.To4()
if ipTo4 != nil && strings.Contains(val, ":") {
			// Mapped IPv6 address.
isMappedIpv6 = true
}
if isMappedIpv6 || ipTo4 == nil {
if resv6 == nil {
resv6 = make([]byte, net.IPv6len)
}
res = resv6
} else {
if resv4 == nil {
resv4 = make([]byte, net.IPv4len)
}
res = resv4
}
if isMappedIpv6 {
copy(res[12:], ipTo4)
res[11] = 0xff
res[10] = 0xff
} else if ipTo4 == nil {
copy(res, ip.To16())
} else {
copy(res, ipTo4)
}
result.AppendBytes(res)
}
return nil
}
func (b *builtinTimeAnyValueSig) vectorized() bool {
return true
}
func (b *builtinTimeAnyValueSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error {
return b.args[0].VecEvalTime(b.ctx, input, result)
}
func (b *builtinInetAtonSig) vectorized() bool {
return true
}
func (b *builtinInetAtonSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
buf, err := b.bufAllocator.get(types.ETString, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalString(b.ctx, input, buf); err != nil {
return err
}
var (
byteResult, res uint64
dotCount int
)
result.ResizeInt64(n, false)
i64s := result.Int64s()
result.MergeNulls(buf)
for i := 0; i < n; i++ {
if result.IsNull(i) {
continue
}
ipAddr := buf.GetString(i)
if len(ipAddr) == 0 || ipAddr[len(ipAddr)-1] == '.' {
// ip address should not end with '.'.
result.SetNull(i, true)
continue
}
		// Reset the accumulators for this address.
byteResult = 0
res = 0
dotCount = 0
for _, c := range ipAddr {
if c >= '0' && c <= '9' {
digit := uint64(c - '0')
byteResult = byteResult*10 + digit
if byteResult > 255 {
result.SetNull(i, true)
break
}
} else if c == '.' {
dotCount++
if dotCount > 3 {
result.SetNull(i, true)
break
}
res = (res << 8) + byteResult
byteResult = 0
} else {
result.SetNull(i, true)
break // illegal char (not number or .)
}
}
// 127 -> 0.0.0.127
// 127.255 -> 127.0.0.255
// 127.256 -> NULL
// 127.2.1 -> 127.2.0.1
if !result.IsNull(i) {
if dotCount == 1 {
res <<= 16
}
if dotCount == 2 {
res <<= 8
}
i64s[i] = int64((res << 8) + byteResult)
}
}
return nil
}
func (b *builtinInet6NtoaSig) vectorized() bool {
return true
}
func (b *builtinInet6NtoaSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
val, err := b.bufAllocator.get(types.ETString, n)
if err != nil {
return err
}
defer b.bufAllocator.put(val)
if err := b.args[0].VecEvalString(b.ctx, input, val); err != nil {
return err
}
result.ReserveString(n)
for i := 0; i < n; i++ {
if val.IsNull(i) {
result.AppendNull()
continue
}
valI := val.GetString(i)
ip := net.IP(valI).String()
if len(valI) == net.IPv6len && !strings.Contains(ip, ":") {
ip = fmt.Sprintf("::ffff:%s", ip)
}
if net.ParseIP(ip) == nil {
result.AppendNull()
continue
}
result.AppendString(ip)
}
return nil
}
func (b *builtinNameConstRealSig) vectorized() bool {
return true
}
func (b *builtinNameConstRealSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error {
return b.args[1].VecEvalReal(b.ctx, input, result)
}
func (b *builtinReleaseLockSig) vectorized() bool {
return true
}
// See https://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_release-lock
// The release lock function will do nothing.
// Warning: release_lock() function is parsed but ignored.
func (b *builtinReleaseLockSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
result.ResizeInt64(n, false)
i64s := result.Int64s()
for i := range i64s {
i64s[i] = 1
}
return nil
}
| expression/builtin_miscellaneous_vec.go | 0 | https://github.com/pingcap/tidb/commit/788dc28a7423d4b85ebe11bbce632733b056d63a | [
0.00048059289110824466,
0.00017684133490547538,
0.00016573729226365685,
0.00016983045497909188,
0.00003999685213784687
] |
{
"id": 2,
"code_window": [
"\t\treturn nil, nil\n",
"\t}\n",
"\n",
"\tdbInfo, err := t.GetDatabase(job.SchemaID)\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tdbInfo, err := checkSchemaExistAndCancelNotExistJob(t, job)\n"
],
"file_path": "ddl/index.go",
"type": "replace",
"edit_start_line_idx": 353
} | # RFC: TiDB Built-in SQL Diagnostics
## Summary
Currently, TiDB obtains diagnostic information mainly by relying on external tools (perf/iosnoop/iotop/iostat/vmstat/sar/...), monitoring systems (Prometheus/Grafana), log files, HTTP APIs, and the system tables provided by TiDB. The scattered toolchains and cumbersome acquisition methods raise the barrier to using TiDB clusters, make operation and maintenance difficult, and prevent problems from being detected in advance and investigated, diagnosed, and recovered from in time.
This document proposes a new way for TiDB to acquire diagnostic information and expose it in system tables, so that users can query it with SQL statements.
## Motivation
This proposal mainly solves the following problems in TiDB's process of obtaining diagnostic information:
- The toolchains are scattered, so users need to switch back and forth between different tools. In addition, some Linux distributions do not ship the corresponding tools; even when they do, the tool versions may not be the expected ones.
- The information acquisition methods are inconsistent, for example, SQL, HTTP, exported monitoring, viewing logs by logging into each node, and so on.
- A TiDB cluster has many components, and comparing and correlating the monitoring information of different components is inefficient and cumbersome.
- TiDB does not have centralized log management components, so there are no efficient ways to filter, retrieve, analyze, and aggregate logs of the entire cluster.
- The system table only contains the information of the current node, which does not reflect the state of the entire cluster, such as: SLOW_QUERY, PROCESSLIST, STATEMENTS_SUMMARY.
Providing multi-dimensional cluster-level system tables and a cluster diagnostic rule framework improves the efficiency of cluster-scale information queries, state acquisition, log retrieval, one-click inspection, and fault diagnosis, and supplies the basic data for subsequent exception alerting.
## Detailed design
### System overview
The implementation of this proposal is divided into four layers:
- L1: The lowest level implements the information collection module at each node, including monitoring information, hardware information, network IO recorded in the kernel, disk IO information, CPU usage, memory usage, and so on, for TiDB/TiKV/PD.
- L2: The second layer can obtain the information collected by the current node by calling the underlying information collection module and providing data to the upper layer through external service interfaces (HTTP API/gRPC Service).
- L3: In the third layer, TiDB pulls the information from each node for aggregation and summarizing, and provides the data to the upper layer in the form of the system table.
- L4: The fourth layer implements the diagnostic framework. The diagnostic framework obtains the status of the entire cluster by querying the system table and obtains the diagnostic result according to the diagnostic rules.
The following chart shows the data flow from information collection to analysis using the diagnostic rules:
```
+-L1--------------+ +-L3-----+
| +-------------+ | | |
| | Metrics | | | |
| +-------------+ | | |
| +-------------+ | | |
| | Disk IO | +---L2:gRPC-->+ |
| +-------------+ | | |
| +-------------+ | | TiDB |
| | Network IO | | | |
| +-------------+ | | |
| +-------------+ | | |
| | Hardware | +---L2:HTTP-->+ |
| +-------------+ | | |
| +-------------+ | | |
| | System Info | | | |
| +-------------+ | | |
+-----------------+ +---+----+
|
+---infoschema---+
|
v
+-L4---------------+---------------------+
| |
| Diagnosis Framework |
| |
| +---------+ +---------+ +---------+ |
| | rule1 | | rule2 | | rule3 | |
| +---------+ +---------+ +---------+ |
+----------------------------------------+
```
### System information collection
The system information collection module needs to be implemented for all of the TiDB/TiKV/PD components. TiDB and PD are implemented in Go and can share the underlying logic; TiKV needs a separate implementation in Rust.
#### Node hardware information
The hardware information that each node needs to obtain includes:
- CPU information: physical core number, logical core number, NUMA information, CPU frequency, CPU vendor, L1/L2/L3 cache
- NIC information: NIC device name, NIC enabled status, manufacturer, model, bandwidth, driver version, number of interface queues (optional)
- Disk information: disk name, disk capacity, disk usage, disk partition, mount information
- USB device list
- Memory information
#### Node system information
The system information that each node needs to obtain includes:
- CPU usage and load in the last 1/5/15 minutes
- Memory: Total/Free/Available/Buffers/Cached/Active/Inactive/Swap
- Disk IO:
- tps: number of transfers per second that were issued to the device.
- rrqm/s: number of read requests merged per second that were queued to the device.
- wrqm/s: number of write requests merged per second that were queued to the device.
- r/s: number (after merges) of read requests completed per second for the device.
- w/s: number (after merges) of write requests completed per second for the device.
- rsec/s: number of sectors (kilobytes, megabytes) read from the device per second.
- wsec/s: number of sectors (kilobytes, megabytes) written to the device per second.
- await: average time (in milliseconds) for I/O requests issued to the device to be served.
- %util: percentage of elapsed time during which I/O requests were issued to the device (bandwidth utilization for the device)
- Network IO
- IFACE: name of the network interface for which statistics are reported.
- rxpck/s: total number of packets received per second.
- txpck/s: total number of packets transmitted per second.
- rxkB/s: total number of kilobytes received per second.
- txkB/s: total number of kilobytes transmitted per second.
- rxcmp/s: number of compressed packets received per second.
- txcmp/s: number of compressed packets transmitted per second.
- rxmcst/s: number of multicast packets received per second.
- System configuration: `sysctl -a`
#### Node configuration information
All nodes contain the effective configuration for the current node, and no additional steps are required to get the configuration information.
#### Node log information
Currently, the logs generated by TiDB/TiKV/PD are saved on their respective nodes, and no additional log collection components are deployed during TiDB cluster deployment, so there are the following problems in log retrieval:
- Logs are distributed on each node. You need to log in to each node to search using keywords.
- Log files are rotated every day, so we need to search among multiple log files even on a single node.
- There is no easy way to merge the logs of multiple nodes into a single stream sorted by time.
This proposal provides the following two solutions to the above problems:
- Introduce a third-party log collection component to collect logs from all nodes
- Advantages: with a unified log management mechanism, logs can be saved for a long time, and are easy to retrieve; logs of multiple components can be sorted by time.
- Disadvantages: third-party components are not easy to integrate with TiDB SQL engine, which may increase the difficulty of cluster operation and maintenance; the log collection tool collects logs fully, so the collection process will take up system resources (Disk IO, Network IO).
- Each node provides a log service. TiDB pushes the predicate to the log retrieval interface through the log service of each node, and directly merges the logs returned by each node.
- Advantages: no third-party component is introduced. Only logs that have been filtered by the pushdown predicates are returned; the implementation can easily be integrated with TiDB SQL and reuse SQL engine functions such as filter and aggregation.
- Disadvantages: If the log files are deleted in some nodes, the corresponding log cannot be retrieved.
This proposal takes the second approach after weighing the above advantages and disadvantages. That is, each node provides a log search service, and TiDB pushes the predicates in the log search SQL down to each node. The semantics of the log search service are: search the local log files, filter them with the predicates, and return the matched results.
The following are the predicates that the log interface needs to process (a node-side filtering sketch follows the list):
- `start_time`: start time of the log retrieval (Unix timestamp, in milliseconds). If there is no such predicate, the default is 0.
- `end_time`: end time of the log retrieval (Unix timestamp, in milliseconds). If there is no such predicate, the default is `int64::MAX`.
- `pattern`: the filter pattern determined by the keyword. For example, in `SELECT * FROM cluster_log WHERE message LIKE '%gc%'`, `%gc%` is the filter keyword.
- `level`: log level; can be selected as DEBUG/INFO/WARN/WARNING/TRACE/CRITICAL/ERROR
- `limit`: the maximum number of log items to return, which prevents the result set from being too large and occupying too much network bandwidth. If not specified, the default limit is 64k.
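
To make the pushdown semantics concrete, below is a minimal node-side filtering sketch in Go. It assumes a line-oriented log format and a caller-supplied `parseLine` helper (both are assumptions, not part of this proposal's interface), and it simplifies the SQL `LIKE` pattern to a substring match:

```go
package logsvc

import (
	"bufio"
	"os"
	"strings"
)

// LogItem is one matched log entry returned to TiDB.
type LogItem struct {
	Time    int64  // Unix timestamp in milliseconds
	Level   string // DEBUG/INFO/WARN/...
	Message string
}

// searchLog scans one local log file and applies the pushed-down predicates,
// so that only matched entries travel back over the network.
// parseLine is an assumed helper that extracts (time, level, message) from a line.
func searchLog(path string, startTime, endTime int64, level, pattern string, limit int,
	parseLine func(string) (int64, string, string, bool)) ([]LogItem, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var items []LogItem
	scanner := bufio.NewScanner(f)
	for scanner.Scan() && len(items) < limit {
		ts, lv, msg, ok := parseLine(scanner.Text())
		if !ok {
			continue // skip lines that do not parse, e.g. multi-line stack traces
		}
		if ts < startTime || ts > endTime {
			continue
		}
		if level != "" && lv != level {
			continue
		}
		// Simplification: treat the LIKE pattern "%gc%" as a substring match.
		if pattern != "" && !strings.Contains(msg, strings.Trim(pattern, "%")) {
			continue
		}
		items = append(items, LogItem{Time: ts, Level: lv, Message: msg})
	}
	return items, scanner.Err()
}
```

A real implementation would also need to select the rotated log files that overlap the requested time range before scanning them.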
#### Node performance sampling data
In a TiDB cluster, when performance bottlenecks occur, we usually need a way to quickly locate the problem. The Flame Graph was invented by Brendan Gregg. Unlike other trace and profiling methods, Flame Graph looks at the time distribution in a global view, listing all possible call stacks from bottom to top. Other rendering methods generally only list a single call stack or a non-hierarchical time distribution.
TiKV and TiDB currently have different ways of obtaining a flame graph and all of them rely on external tools.
- TiKV retrieves the flame graph via:
```
perf record -F 99 -p proc_pid -g -- sleep 60
perf script > out.perf
/opt/FlameGraph/stackcollapse-perf.pl out.perf > out.folded
/opt/FlameGraph/flamegraph.pl out.folded > cpu.svg
```
- TiDB retrieves the flame graph via:
```
curl http://127.0.0.1:10080/debug/pprof/profile > cpu.pprof
go tool pprof -svg cpu.pprof > cpu.svg
```
There are two main problems currently:
- The production environment may not contain the corresponding external tool (perf/flamegraph.pl/go)
- There is no unified way for TiKV and TiDB.
In order to solve the above two problems, this proposal proposes building flame graph generation into TiDB, so that both TiDB and TiKV can trigger sampling through SQL, and the sampled data can be turned into flame-graph-style query results. This reduces the dependency on external tools and greatly improves efficiency. Each node implements a sampling data acquisition function and provides a sampling interface through which data in a specified format is output to the upper layer. The tentative output is the protobuf format defined by [pprof](https://github.com/google/pprof).
Sampling data acquisition methods (a consumer-side fetching sketch follows this list):
- TiDB/PD: use the sample data acquisition interface built into the Go runtime
- TiKV: collect sample data using the [pprof-rs](https://github.com/tikv/pprof-rs) library
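
For illustration, the sketch below shows how a collector might consume such a sampling interface: it fetches a CPU profile from a node's status address over HTTP and decodes it with the `github.com/google/pprof/profile` package. The leaf-function aggregation shown here is a simplification of what the profiling tables need:

```go
package collector

import (
	"fmt"
	"net/http"

	"github.com/google/pprof/profile"
)

// fetchCPUProfile pulls a 10-second CPU profile from one node's status
// address and counts samples per leaf function, which is the raw material
// for the flame-graph-style system tables.
func fetchCPUProfile(statusAddr string) (map[string]int64, error) {
	resp, err := http.Get(fmt.Sprintf("http://%s/debug/pprof/profile?seconds=10", statusAddr))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	// profile.Parse understands the pprof protobuf format produced by
	// the Go runtime (TiDB/PD) and, per this proposal, by TiKV's pprof-rs.
	p, err := profile.Parse(resp.Body)
	if err != nil {
		return nil, err
	}
	counts := make(map[string]int64)
	for _, s := range p.Sample {
		if len(s.Location) == 0 || len(s.Value) == 0 {
			continue
		}
		loc := s.Location[0] // Location[0] is the leaf frame of the call stack
		if len(loc.Line) > 0 && loc.Line[0].Function != nil {
			counts[loc.Line[0].Function.Name] += s.Value[0]
		}
	}
	return counts, nil
}
```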
#### Node monitoring information
Monitoring information mainly includes the monitoring metrics defined internally by each component. At present, TiDB/TiKV/PD provide the `/metrics` HTTP API, through which the deployed Prometheus component periodically pulls the monitoring metrics of each node of the cluster (at a 15s interval by default), and the deployed Grafana component pulls the monitoring data from Prometheus for visualization.
Monitoring information differs from the system information acquired in real time: monitoring data is time-series data that contains the data of each node at each time point. It is very useful for troubleshooting and diagnosing problems, so how the monitoring information is saved and queried is very important to this proposal. In order to query monitoring data with SQL in TiDB, there are currently the following options:
- Use Prometheus client and PromQL to query the data from the Prometheus server
- Advantages: there is a ready-made solution; just register the address of Prometheus server to TiDB, which is simple to implement.
- Disadvantages: TiDB will rely more on Prometheus, which increases the difficulty for subsequent removal of Prometheus.
- Save the monitoring data of the most recent period (tentatively 1 day) to PD and query the monitoring data from PD.
- Advantages: this solution does not depend on the Prometheus server. Easy for subsequent removal of Prometheus.
- Disadvantages: requires implementation of time-series saving logic in PD and the corresponding query engine. The workload and difficulty are high.
In this proposal, we opt for the second solution. Although it is more difficult to implement, it benefits the follow-up work. Considering the difficulty and the long development cycle, this function can be implemented in three stages (whether to carry out the third stage depends on the specific situation):
1. Add the `remote-metrics-storage` configuration to PD and temporarily configure it as the address of the Prometheus server. PD acts as a proxy and forwards the requests to Prometheus for execution. The main considerations are as follows:
    - PD will have its own implementation of the query interface to realize bootstrapping. No other changes are needed for TiDB.
    - With bootstrapping realized, users can still use SQL to query monitoring information and the diagnostic framework without relying on the Prometheus component deployed with TiDB
2. Extract the modules for persisting and querying Prometheus time-series data and embed them in PD.
3. PD internally implements its own module for persisting and querying time-series data (currently following CockroachDB's solution)
##### PD performance analysis
PD mainly handles scheduling and TSO services for TiDB clusters, where:
1. TSO fetching only updates a single atomic variable in the leader's memory
2. The Operator and OperatorStep generated by the scheduler are only stored in memory, and the in-memory state is updated according to the heartbeat information of the Regions.
From the above, it can be concluded that in most cases the performance impact of the new monitoring function on PD is negligible.
### Retrieve system information
Since the TiDB/TiKV/PD components already expose some system information through HTTP APIs, and PD mainly provides external services through its HTTP API, some interfaces in this proposal reuse the existing logic and use the HTTP API to obtain data (for example, configuration information) from the various components.
However, because TiKV plans to completely remove its HTTP API in the future, only the existing interfaces are reused for TiKV and no new HTTP APIs are added. A unified gRPC service is defined for log retrieval and for hardware, system, and load information acquisition. Each component implements its own service and registers it to the gRPC server during startup.
#### gRPC service definition
```proto
// Diagnostics service for TiDB cluster components.
service Diagnostics {
	// Searches logs on the target node
rpc search_log(SearchLogRequest) returns (SearchLogResponse) {};
// Retrieves server info in the target node
rpc server_info(ServerInfoRequest) returns (ServerInfoResponse) {};
}
enum LogLevel {
Debug = 0;
Info = 1;
Warn = 2;
Trace = 3;
Critical = 4;
Error = 5;
}
message SearchLogRequest {
int64 start_time = 1;
int64 end_time = 2;
LogLevel level = 3;
string pattern = 4;
int64 limit = 5;
}
message SearchLogResponse {
repeated LogMessage messages = 1;
}
message LogMessage {
int64 time = 1;
LogLevel level = 2;
string message = 3;
}
enum ServerInfoType {
All = 0;
HardwareInfo = 1;
SystemInfo = 2;
LoadInfo = 3;
}
message ServerInfoRequest {
ServerInfoType tp = 1;
}
message ServerInfoItem {
// cpu, memory, disk, network ...
string tp = 1;
// eg. network: lo1/eth0, cpu: core1/core2, disk: sda1/sda2
    string name = 2;
    string key = 3;
    string value = 4;
}
message ServerInfoResponse {
repeated ServerInfoItem items = 1;
}
```
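
As an illustration of the caller side, the sketch below shows how TiDB might push log predicates to one node through this service. The `diagnosticspb` import path and the generated identifiers are assumptions about what protoc would emit from the definition above:

```go
package fetcher

import (
	"context"
	"time"

	"google.golang.org/grpc"

	// Assumed import path for the stubs generated from the service above.
	pb "github.com/pingcap/kvproto/pkg/diagnosticspb"
)

// searchRemoteLog pushes the log predicates down to one node and returns
// only the entries that matched on the remote side.
func searchRemoteLog(addr, pattern string) ([]*pb.LogMessage, error) {
	conn, err := grpc.Dial(addr, grpc.WithInsecure())
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	now := time.Now()
	resp, err := pb.NewDiagnosticsClient(conn).SearchLog(ctx, &pb.SearchLogRequest{
		StartTime: now.Add(-time.Hour).UnixNano() / int64(time.Millisecond),
		EndTime:   now.UnixNano() / int64(time.Millisecond),
		Level:     pb.LogLevel_Warn,
		Pattern:   pattern,
		Limit:     1024,
	})
	if err != nil {
		return nil, err
	}
	return resp.Messages, nil
}
```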
#### Reusable HTTP API
Currently, TiDB/TiKV/PD include some partially reusable HTTP APIs. This proposal does not migrate the corresponding interfaces to the gRPC service; the migration will be completed by subsequent plans. All HTTP APIs need to return data in JSON format. The following is a list of HTTP APIs that may be used in this proposal:
- Retrieve configuration information
- PD: /pd/api/v1/config
- TiDB/TiKV: /config
- Performance sampling interface: TiDB and PD contain all the following interfaces, while TiKV temporarily only contains the CPU performance sampling interface
- CPU: /debug/pprof/profile
- Memory: /debug/pprof/heap
- Allocs: /debug/pprof/allocs
- Mutex: /debug/pprof/mutex
- Block: /debug/pprof/block
#### Cluster information system tables
Each TiDB instance can access the information of other nodes through the HTTP API or gRPC service provided by the first two layers. This way we can implement a global view of the cluster. In this proposal, the collected cluster information is provided to the upper layer by creating a series of related system tables. The upper layer includes, but is not limited to:
- End users: users can obtain cluster information directly through SQL queries to troubleshoot problems
- Operation and maintenance system: The ability to obtain cluster information through SQL will make it easier for users to integrate TiDB into their own operation and maintenance systems.
- Ecosystem tools: external tools obtain cluster information through SQL to customize their functionality. For example, [sqltop](https://github.com/ngaut/sqltop) can directly obtain the SQL sampling information of the entire cluster through the cluster's `statements_summary` table.
#### Cluster Topology System Table
To give every TiDB instance a **global view**, we need to provide a topology system table from which the HTTP API address and gRPC service address of each node can be obtained. With these addresses, the endpoints for the remote APIs can easily be constructed, and each endpoint can then be used to acquire the information collected by the target node.
With this proposal implemented, the following results can be queried through SQL:
```
mysql> use information_schema;
Database changed
mysql> desc CLUSTER_INFO;
+----------------+---------------------+------+------+---------+-------+
| Field | Type | Null | Key | Default | Extra |
+----------------+---------------------+------+------+---------+-------+
| TYPE | varchar(64) | YES | | NULL | |
| ADDRESS | varchar(64) | YES | | NULL | |
| STATUS_ADDRESS | varchar(64) | YES | | NULL | |
| VERSION | varchar(64) | YES | | NULL | |
| GIT_HASH | varchar(64) | YES | | NULL | |
+----------------+---------------------+------+------+---------+-------+
5 rows in set (0.00 sec)
mysql> select TYPE, ADDRESS, STATUS_ADDRESS,VERSION from CLUSTER_INFO;
+------+-----------------+-----------------+-----------------------------------------------+
| TYPE | ADDRESS | STATUS_ADDRESS | VERSION |
+------+-----------------+-----------------+-----------------------------------------------+
| tidb | 127.0.0.1:4000 | 127.0.0.1:10080 | 5.7.25-TiDB-v4.0.0-alpha-793-g79eef48a3-dirty |
| pd | 127.0.0.1:2379 | 127.0.0.1:2379 | 4.0.0-alpha |
| tikv | 127.0.0.1:20160 | 127.0.0.1:20180 | 4.0.0-alpha |
+------+-----------------+-----------------+-----------------------------------------------+
3 rows in set (0.00 sec)
```
#### Monitoring information system table
Monitoring metrics are added and deleted as the program evolves, and the same monitoring metric may be queried with different PromQL expressions to observe different dimensions. Therefore, a flexible monitoring system table framework is necessary. This proposal tentatively adopts the following scheme: mapping expressions to system tables in the `metrics_schema` database. The relationship between expressions and system tables can be defined in the following ways:
- Define in the configuration file
```
# tidb.toml
[metrics_schema]
qps = `sum(rate(tidb_server_query_total[$INTERVAL] offset $OFFSET_TIME)) by (result)`
memory_usage = `process_resident_memory_bytes{job="tidb"}`
goroutines = `rate(go_gc_duration_seconds_sum{job="tidb"}[$INTERVAL] offset $OFFSET_TIME)`
```
- Inject via HTTP API
```
curl -XPOST http://host:port/metrics_schema?name=distsql_duration&expr=`histogram_quantile(0.999,
sum(rate(tidb_distsql_handle_query_duration_seconds_bucket[$INTERVAL] offset $OFFSET_TIME)) by (le, type))`
```
- Use special SQL commands
```
mysql> admin metrics_schema add parse_duration `histogram_quantile(0.95, sum(rate(tidb_session_parse_duration_seconds_bucket[$INTERVAL] offset $OFFSET_TIME)) by (le, sql_type))`
```
- Load from file
```
mysql> admin metrics_schema load external_metrics.txt
#external_metrics.txt
execution_duration = `histogram_quantile(0.95, sum(rate(tidb_session_execute_duration_seconds_bucket[$INTERVAL] offset $OFFSET_TIME)) by (le, sql_type))`
pd_client_cmd_ops = `sum(rate(pd_client_cmd_handle_cmds_duration_seconds_count{type!="tso"}[$INTERVAL] offset $OFFSET_TIME)) by (type)`
```
After the above mappings take effect, the following tables can be seen in the `metrics_schema` database:
```
mysql> use metrics_schema;
Database changed
mysql> show tables;
+-------------------------------------+
| Tables_in_metrics_schema |
+-------------------------------------+
| qps |
| memory_usage |
| goroutines |
| distsql_duration |
| parse_duration |
| execution_duration |
| pd_client_cmd_ops |
+-------------------------------------+
7 rows in set (0.00 sec)
```
The fields of the mapped system table are determined mainly by the result data of the expression execution. Take the expression `sum(rate(pd_client_cmd_handle_cmds_duration_seconds_count{type!="tso"}[1m] offset 0)) by (type)` as an example; the result of the query is:
| Element | Value |
|---------|-------|
| {type="update_gc_safe_point"} | 0 |
| {type="wait"} | 2.910521666666667 |
| {type="get_all_stores"} | 0 |
| {type="get_prev_region"} | 0 |
| {type="get_region"} | 0 |
| {type="get_region_byid"} | 0 |
| {type="scan_regions"} | 0 |
| {type="tso_async_wait"} | 2.910521666666667 |
| {type="get_operator"} | 0 |
| {type="get_store"} | 0 |
| {type="scatter_region"} | 0 |
The following are the query results mapped to the system table schema:
```
mysql> desc pd_client_cmd_ops;
+------------+-------------+------+-----+-------------------+-------+
| Field | Type | Null | Key | Default | Extra |
+------------+-------------+------+-----+-------------------+-------+
| address | varchar(32) | YES | | NULL | |
| type | varchar(32) | YES | | NULL | |
| value | float | YES | | NULL | |
| interval | int | YES | | 60 | |
| start_time | int | YES | | CURRENT_TIMESTAMP | |
+------------+-------------+------+-----+-------------------+-------+
5 rows in set (0.02 sec)
mysql> select address, type, value from pd_client_cmd_ops;
+------------------+----------------------+---------+
| address | type | value |
+------------------+----------------------+---------+
| 172.16.5.33:2379 | update_gc_safe_point | 0 |
| 172.16.5.33:2379 | wait | 2.91052 |
| 172.16.5.33:2379 | get_all_stores | 0 |
| 172.16.5.33:2379 | get_prev_region | 0 |
| 172.16.5.33:2379 | get_region | 0 |
| 172.16.5.33:2379 | get_region_byid | 0 |
| 172.16.5.33:2379 | scan_regions | 0 |
| 172.16.5.33:2379 | tso_async_wait | 2.91052 |
| 172.16.5.33:2379 | get_operator | 0 |
| 172.16.5.33:2379 | get_store | 0 |
| 172.16.5.33:2379 | scatter_region | 0 |
+------------------+----------------------+---------+
11 rows in set (0.00 sec)
mysql> select address, type, value from pd_client_cmd_ops where start_time='2019-11-14 10:00:00';
+------------------+----------------------+---------+
| address | type | value |
+------------------+----------------------+---------+
| 172.16.5.33:2379 | update_gc_safe_point | 0 |
| 172.16.5.33:2379 | wait | 0.82052 |
| 172.16.5.33:2379 | get_all_stores | 0 |
| 172.16.5.33:2379 | get_prev_region | 0 |
| 172.16.5.33:2379 | get_region | 0 |
| 172.16.5.33:2379 | get_region_byid | 0 |
| 172.16.5.33:2379 | scan_regions | 0 |
| 172.16.5.33:2379 | tso_async_wait | 0.82052 |
| 172.16.5.33:2379 | get_operator | 0 |
| 172.16.5.33:2379 | get_store | 0 |
| 172.16.5.33:2379 | scatter_region | 0 |
+------------------+----------------------+---------+
11 rows in set (0.00 sec)
```
PromQL query statements with multiple labels will be mapped to multiple columns of data, which can be easily filtered and aggregated using existing SQL execution engines.
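
A minimal sketch of this result-to-row mapping, using the stable `github.com/prometheus/common/model` types (the exact row layout here is an assumption consistent with the `pd_client_cmd_ops` example above):

```go
package metricsschema

import "github.com/prometheus/common/model"

// vectorToRows converts one instant-vector PromQL result into rows of the
// mapped system table: the node address, one column per chosen label, and
// the sample value, matching the pd_client_cmd_ops layout shown above.
func vectorToRows(address string, v model.Vector, labels []model.LabelName) [][]string {
	rows := make([][]string, 0, len(v))
	for _, sample := range v {
		row := []string{address}
		for _, l := range labels {
			row = append(row, string(sample.Metric[l]))
		}
		row = append(row, sample.Value.String())
		rows = append(rows, row)
	}
	return rows
}
```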
#### Performance profiling system table
The performance sampling data of each node is obtained through its `/debug/pprof/profile` endpoint, and the aggregated profiling results are then output to the user as SQL query results. Since SQL query results cannot be rendered in SVG format, the display of the output needs to be solved.
Our proposed solution is to aggregate the sampled data and display all call paths line by line in a tree structure, borrowing the presentation idea of the flame graph. The core ideas and the corresponding implementations are described below (a minimal aggregation sketch follows the table):
- Provide a global view: use a separate column for each aggregated result to show its global usage proportion, which makes filtering and sorting easy
- Show all call paths: all call paths are returned as query results, and a separate column numbers the subtree each call path belongs to, so a single subtree can be viewed simply by filtering
- Hierarchical display: display the stacks in a tree structure, and use a separate column to record the depth of each stack frame, which makes it convenient to filter on stack depth
This proposal needs to implement the following performance profiling tables:
| Table | Description |
|------|-----|
| tidb_profile_cpu | TiDB CPU flame graph |
| tikv_profile_cpu | TiKV CPU flame graph |
| tidb_profile_block | Stack traces that led to blocking on synchronization primitives |
| tidb_profile_memory | A sampling of memory allocations of live objects |
| tidb_profile_allocs | A sampling of all past memory allocations |
| tidb_profile_mutex | Stack traces of holders of contended mutexes |
| tidb_profile_goroutines | Stack traces of all current goroutines |
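
The following is a minimal aggregation sketch of the display scheme described above: sampled call stacks are merged into a tree, and each node is emitted as one row with its global percentage and depth (the subtree-numbering column is omitted for brevity):

```go
package collector

import "fmt"

// stackNode is one frame in the aggregated call tree; count is the number
// of samples whose stacks pass through this node.
type stackNode struct {
	name     string
	count    int
	children map[string]*stackNode
}

// addStack merges one sampled call stack (root frame first) into the tree.
func (n *stackNode) addStack(frames []string) {
	n.count++
	if len(frames) == 0 {
		return
	}
	child, ok := n.children[frames[0]]
	if !ok {
		child = &stackNode{name: frames[0], children: map[string]*stackNode{}}
		n.children[frames[0]] = child
	}
	child.addStack(frames[1:])
}

// emitRows prints one row per call path with its global percentage and
// depth, mirroring the columns of the profiling tables described above.
func (n *stackNode) emitRows(total, depth int) {
	fmt.Printf("%6.2f%%  depth=%-2d  %s\n", float64(n.count)*100/float64(total), depth, n.name)
	for _, c := range n.children {
		c.emitRows(total, depth+1)
	}
}
```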
#### Globalized memory system table
Currently, the `slow_query`/`statements_summary`/`processlist` memory tables only contain single-node data. This proposal allows any TiDB instance to view information about the entire cluster by adding the following three cluster-level system tables:
| Table Name | Description |
|------|-----|
| cluster_slow_query | slow_query table data for all TiDB nodes |
| cluster_statements_summary | statements_summary table data for all TiDB nodes |
| cluster_processlist | processlist table data for all TiDB nodes |
#### Configuration information of all nodes
For a large cluster, obtaining the configuration of every node through the HTTP API is cumbersome and inefficient. This proposal provides a cluster-wide configuration system table, which simplifies acquiring, filtering, and aggregating the configuration information of the entire cluster.
See the following example for some expected results of this proposal:
```
mysql> use information_schema;
Database changed
mysql> select * from cluster_config where `key` like 'log%';
+------+-----------------+-----------------------------+---------------+
| TYPE | ADDRESS | KEY | VALUE |
+------+-----------------+-----------------------------+---------------+
| pd | 127.0.0.1:2379 | log-file | |
| pd | 127.0.0.1:2379 | log-level | |
| pd | 127.0.0.1:2379 | log.development | false |
| pd | 127.0.0.1:2379 | log.disable-caller | false |
| pd | 127.0.0.1:2379 | log.disable-error-verbose | true |
| pd | 127.0.0.1:2379 | log.disable-stacktrace | false |
| pd | 127.0.0.1:2379 | log.disable-timestamp | false |
| pd | 127.0.0.1:2379 | log.file.filename | |
| pd | 127.0.0.1:2379 | log.file.log-rotate | true |
| pd | 127.0.0.1:2379 | log.file.max-backups | 0 |
| pd | 127.0.0.1:2379 | log.file.max-days | 0 |
| pd | 127.0.0.1:2379 | log.file.max-size | 0 |
| pd | 127.0.0.1:2379 | log.format | text |
| pd | 127.0.0.1:2379 | log.level | |
| pd | 127.0.0.1:2379 | log.sampling | <nil> |
| tidb | 127.0.0.1:4000 | log.disable-error-stack | <nil> |
| tidb | 127.0.0.1:4000 | log.disable-timestamp | <nil> |
| tidb | 127.0.0.1:4000 | log.enable-error-stack | <nil> |
| tidb | 127.0.0.1:4000 | log.enable-timestamp | <nil> |
| tidb | 127.0.0.1:4000 | log.expensive-threshold | 10000 |
| tidb | 127.0.0.1:4000 | log.file.filename | |
| tidb | 127.0.0.1:4000 | log.file.max-backups | 0 |
| tidb | 127.0.0.1:4000 | log.file.max-days | 0 |
| tidb | 127.0.0.1:4000 | log.file.max-size | 300 |
| tidb | 127.0.0.1:4000 | log.format | text |
| tidb | 127.0.0.1:4000 | log.level | info |
| tidb | 127.0.0.1:4000 | log.query-log-max-len | 4096 |
| tidb | 127.0.0.1:4000 | log.record-plan-in-slow-log | 1 |
| tidb | 127.0.0.1:4000 | log.slow-query-file | tidb-slow.log |
| tidb | 127.0.0.1:4000 | log.slow-threshold | 300 |
| tikv | 127.0.0.1:20160 | log-file | |
| tikv | 127.0.0.1:20160 | log-level | info |
| tikv | 127.0.0.1:20160 | log-rotation-timespan | 1d |
+------+-----------------+-----------------------------+---------------+
33 rows in set (0.00 sec)
mysql> select * from cluster_config where type='tikv' and `key` like 'raftdb.wal%';
+------+-----------------+---------------------------+--------+
| TYPE | ADDRESS | KEY | VALUE |
+------+-----------------+---------------------------+--------+
| tikv | 127.0.0.1:20160 | raftdb.wal-bytes-per-sync | 512KiB |
| tikv | 127.0.0.1:20160 | raftdb.wal-dir | |
| tikv | 127.0.0.1:20160 | raftdb.wal-recovery-mode | 2 |
| tikv | 127.0.0.1:20160 | raftdb.wal-size-limit | 0KiB |
| tikv | 127.0.0.1:20160 | raftdb.wal-ttl-seconds | 0 |
+------+-----------------+---------------------------+--------+
5 rows in set (0.01 sec)
```
#### Node hardware/system/load information system tables
According to the gRPC service definition, each `ServerInfoItem` contains the name of the item and the corresponding key-value pair. When presented to the user, the node type and node address are added as well.
```
mysql> use information_schema;
Database changed
mysql> select * from cluster_hardware
+------+-----------------+----------+----------+-------------+--------+
| TYPE | ADDRESS | HW_TYPE | HW_NAME | KEY | VALUE |
+------+-----------------+----------+----------+-------------+--------+
| tikv | 127.0.0.1:20160 | cpu | cpu-1 | frequency | 3.3GHz |
| tikv | 127.0.0.1:20160 | cpu | cpu-2 | frequency | 3.6GHz |
| tikv | 127.0.0.1:20160 | cpu | cpu-1 | core | 40 |
| tikv | 127.0.0.1:20160 | cpu | cpu-2 | core | 48 |
| tikv | 127.0.0.1:20160 | cpu | cpu-1 | vcore | 80 |
| tikv | 127.0.0.1:20160 | cpu | cpu-2 | vcore | 96 |
| tikv | 127.0.0.1:20160 | memory | memory | capacity | 256GB |
| tikv | 127.0.0.1:20160 | network | lo0 | bandwidth | 10000M |
| tikv | 127.0.0.1:20160 | network | eth0 | bandwidth | 1000M |
| tikv | 127.0.0.1:20160 | disk | /dev/sda | capacity | 4096GB |
+------+-----------------+----------+----------+-------------+--------+
10 rows in set (0.01 sec)
mysql> select * from cluster_systeminfo
+------+-----------------+----------+--------------+--------+
| TYPE | ADDRESS | MODULE | KEY | VALUE |
+------+-----------------+----------+--------------+--------+
| tikv | 127.0.0.1:20160 | sysctl | ktrace.state | 0 |
| tikv | 127.0.0.1:20160 | sysctl | hw.byteorder | 1234 |
| ... |
+------+-----------------+----------+--------------+--------+
20 rows in set (0.01 sec)
mysql> select * from cluster_load
+------+-----------------+----------+-------------+--------+
| TYPE | ADDRESS | MODULE | KEY | VALUE |
+------+-----------------+----------+-------------+--------+
| tikv | 127.0.0.1:20160 | network | rsec/s | 1000Kb |
| ... |
+------+-----------------+----------+-------------+--------+
100 rows in set (0.01 sec)
```
#### Full-chain log system table
Currently, to search the logs, users need to log in to multiple machines and retrieve them separately, and there is no easy way to sort the retrieval results from multiple machines by time. This proposal creates a new `cluster_log` system table to provide full-link logs, thereby simplifying log-based troubleshooting and improving its efficiency. This is achieved by pushing the log-filtering predicates down to the nodes through the `search_log` interface of the gRPC Diagnosis Service; the filtered logs are finally merged by time.
The following example shows the expected results of this proposal:
```
mysql> use information_schema;
Database changed
mysql> desc cluster_log;
+---------+-------------+------+------+---------+-------+
| Field | Type | Null | Key | Default | Extra |
+---------+-------------+------+------+---------+-------+
| type | varchar(16) | YES | | NULL | |
| address | varchar(32) | YES | | NULL | |
| time | varchar(32) | YES | | NULL | |
| level | varchar(8) | YES | | NULL | |
| message | text | YES | | NULL | |
+---------+-------------+------+------+---------+-------+
5 rows in set (0.00 sec)
mysql> select * from cluster_log where content like '%412134239937495042%'; -- Query the full link log related to TSO 412134239937495042
+------+--------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| TYPE | ADDRESS | LEVEL | CONTENT |
+------+------------------------+-------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:501.60574ms txnStartTS:412134239937495042 region_id:180 store_addr:10.9.82.29:20160 kv_process_ms:416 scan_total_write:340807 scan_processed_write:340806 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:698.095216ms txnStartTS:412134239937495042 region_id:88 store_addr:10.9.1.128:20160 kv_process_ms:583 scan_total_write:491123 scan_processed_write:491122 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.529574387s txnStartTS:412134239937495042 region_id:112 store_addr:10.9.1.128:20160 kv_process_ms:945 scan_total_write:831931 scan_processed_write:831930 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.55722114s txnStartTS:412134239937495042 region_id:100 store_addr:10.9.82.29:20160 kv_process_ms:1000 scan_total_write:831929 scan_processed_write:831928 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.608597018s txnStartTS:412134239937495042 region_id:96 store_addr:10.9.137.171:20160 kv_process_ms:1048 scan_total_write:831929 scan_processed_write:831928 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.614233631s txnStartTS:412134239937495042 region_id:92 store_addr:10.9.137.171:20160 kv_process_ms:1000 scan_total_write:831931 scan_processed_write:831930 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.67587146s txnStartTS:412134239937495042 region_id:116 store_addr:10.9.137.171:20160 kv_process_ms:950 scan_total_write:831929 scan_processed_write:831928 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.693188495s txnStartTS:412134239937495042 region_id:108 store_addr:10.9.1.128:20160 kv_process_ms:949 scan_total_write:831929 scan_processed_write:831928 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.693383633s txnStartTS:412134239937495042 region_id:120 store_addr:10.9.1.128:20160 kv_process_ms:951 scan_total_write:831929 scan_processed_write:831928 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.731990066s txnStartTS:412134239937495042 region_id:128 store_addr:10.9.82.29:20160 kv_process_ms:1035 scan_total_write:831931 scan_processed_write:831930 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.744524732s txnStartTS:412134239937495042 region_id:104 store_addr:10.9.137.171:20160 kv_process_ms:1030 scan_total_write:831929 scan_processed_write:831928 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.786915459s txnStartTS:412134239937495042 region_id:132 store_addr:10.9.82.29:20160 kv_process_ms:1014 scan_total_write:831929 scan_processed_write:831928 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.786978732s txnStartTS:412134239937495042 region_id:124 store_addr:10.9.82.29:20160 kv_process_ms:1002 scan_total_write:831929 scan_processed_write:831928 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tikv | 10.9.82.29:20180 | WARN | [tracker.rs:150] [slow-query] [internal_key_skipped_count=831928] [internal_delete_skipped_count=0] [block_cache_hit_count=17] [block_read_count=1810] [block_read_byte=114945337] [scan_first_range="Some(start: 74800000000000002B5F728000000000130A96 end: 74800000000000002B5F728000000000196372)"] [scan_ranges=1] [scan_iter_processed=831928] [scan_iter_ops=831930] [scan_is_desc=false] [tag=select] [table_id=43] [txn_start_ts=412134239937495042] [wait_time=1ms] [total_process_time=1.001s] [peer_id=ipv4:10.9.120.251:47968] [region_id=100] |
| tikv | 10.9.82.29:20180 | WARN | [tracker.rs:150] [slow-query] [internal_key_skipped_count=831928] [internal_delete_skipped_count=0] [block_cache_hit_count=19] [block_read_count=1793] [block_read_byte=96014381] [scan_first_range="Some(start: 74800000000000002B5F728000000000393526 end: 74800000000000002B5F7280000000003F97A6)"] [scan_ranges=1] [scan_iter_processed=831928] [scan_iter_ops=831930] [scan_is_desc=false] [tag=select] [table_id=43] [txn_start_ts=412134239937495042] [wait_time=1ms] [total_process_time=1.002s] [peer_id=ipv4:10.9.120.251:47994] [region_id=124] |
| tikv | 10.9.82.29:20180 | WARN | [tracker.rs:150] [slow-query] [internal_key_skipped_count=831928] [internal_delete_skipped_count=0] [block_cache_hit_count=17] [block_read_count=1811] [block_read_byte=96620574] [scan_first_range="Some(start: 74800000000000002B5F72800000000045F083 end: 74800000000000002B5F7280000000004C51E4)"] [scan_ranges=1] [scan_iter_processed=831928] [scan_iter_ops=831930] [scan_is_desc=false] [tag=select] [table_id=43] [txn_start_ts=412134239937495042] [wait_time=1ms] [total_process_time=1.014s] [peer_id=ipv4:10.9.120.251:47998] [region_id=132] |
| tikv | 10.9.137.171:20180 | WARN | [tracker.rs:150] [slow-query] [internal_key_skipped_count=831928] [internal_delete_skipped_count=0] [block_cache_hit_count=17] [block_read_count=1779] [block_read_byte=95095959] [scan_first_range="Some(start: 74800000000000002B5F7280000000004C51E4 end: 74800000000000002B5F72800000000052B456)"] [scan_ranges=1] [scan_iter_processed=831928] [scan_iter_ops=831930] [scan_is_desc=false] [tag=select] [table_id=43] [txn_start_ts=412134239937495042] [wait_time=2ms] [total_process_time=1.025s] [peer_id=ipv4:10.9.120.251:34926] [region_id=136] |
| tikv | 10.9.137.171:20180 | WARN | [tracker.rs:150] [slow-query] [internal_key_skipped_count=831928] [internal_delete_skipped_count=0] [block_cache_hit_count=15] [block_read_count=1793] [block_read_byte=114024055] [scan_first_range="Some(start: 74800000000000002B5F728000000000196372 end: 74800000000000002B5F7280000000001FC628)"] [scan_ranges=1] [scan_iter_processed=831928] [scan_iter_ops=831930] [scan_is_desc=false] [tag=select] [table_id=43] [txn_start_ts=412134239937495042] [wait_time=2ms] [total_process_time=1.03s] [peer_id=ipv4:10.9.120.251:34954] [region_id=104] |
| tikv | 10.9.82.29:20180 | WARN | [tracker.rs:150] [slow-query] [internal_key_skipped_count=831930] [internal_delete_skipped_count=0] [block_cache_hit_count=18] [block_read_count=1796] [block_read_byte=96116255] [scan_first_range="Some(start: 74800000000000002B5F7280000000003F97A6 end: 74800000000000002B5F72800000000045F083)"] [scan_ranges=1] [scan_iter_processed=831930] [scan_iter_ops=831932] [scan_is_desc=false] [tag=select] [table_id=43] [txn_start_ts=412134239937495042] [wait_time=1ms] [total_process_time=1.035s] [peer_id=ipv4:10.9.120.251:47996] [region_id=128] |
| tikv | 10.9.137.171:20180 | WARN | [tracker.rs:150] [slow-query] [internal_key_skipped_count=831928] [internal_delete_skipped_count=0] [block_cache_hit_count=15] [block_read_count=1792] [block_read_byte=113958562] [scan_first_range="Some(start: 74800000000000002B5F7280000000000CB1BA end: 74800000000000002B5F728000000000130A96)"] [scan_ranges=1] [scan_iter_processed=831928] [scan_iter_ops=831930] [scan_is_desc=false] [tag=select] [table_id=43] [txn_start_ts=412134239937495042] [wait_time=1ms] [total_process_time=1.048s] [peer_id=ipv4:10.9.120.251:34924] [region_id=96] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.841528722s txnStartTS:412134239937495042 region_id:140 store_addr:10.9.137.171:20160 kv_process_ms:991 scan_total_write:831929 scan_processed_write:831928 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.410650751s txnStartTS:412134239937495042 region_id:144 store_addr:10.9.82.29:20160 kv_process_ms:1000 scan_total_write:831929 scan_processed_write:831928 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.930478221s txnStartTS:412134239937495042 region_id:136 store_addr:10.9.137.171:20160 kv_process_ms:1025 scan_total_write:831929 scan_processed_write:831928 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.26929792s txnStartTS:412134239937495042 region_id:148 store_addr:10.9.82.29:20160 kv_process_ms:901 scan_total_write:831931 scan_processed_write:831930 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.116672983s txnStartTS:412134239937495042 region_id:152 store_addr:10.9.82.29:20160 kv_process_ms:828 scan_total_write:831929 scan_processed_write:831928 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.642668083s txnStartTS:412134239937495042 region_id:156 store_addr:10.9.1.128:20160 kv_process_ms:888 scan_total_write:831929 scan_processed_write:831928 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.537375971s txnStartTS:412134239937495042 region_id:168 store_addr:10.9.137.171:20160 kv_process_ms:728 scan_total_write:831931 scan_processed_write:831930 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.602765417s txnStartTS:412134239937495042 region_id:164 store_addr:10.9.82.29:20160 kv_process_ms:871 scan_total_write:831929 scan_processed_write:831928 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.583965975s txnStartTS:412134239937495042 region_id:172 store_addr:10.9.1.128:20160 kv_process_ms:933 scan_total_write:831929 scan_processed_write:831928 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.712528952s txnStartTS:412134239937495042 region_id:160 store_addr:10.9.1.128:20160 kv_process_ms:959 scan_total_write:831929 scan_processed_write:831928 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.664343044s txnStartTS:412134239937495042 region_id:220 store_addr:10.9.1.128:20160 kv_process_ms:976 scan_total_write:865647 scan_processed_write:865646 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
| tidb | 10.9.120.251:10080 | INFO | [coprocessor.go:725] ["[TIME_COP_PROCESS] resp_time:1.713342373s txnStartTS:412134239937495042 region_id:176 store_addr:10.9.1.128:20160 kv_process_ms:950 scan_total_write:831929 scan_processed_write:831928 scan_total_data:0 scan_processed_data:0 scan_total_lock:1 scan_processed_lock:0"] |
+------+--------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
31 rows in set (0.01 sec)
mysql> select * from cluster_log where type='pd' and content like '%scheduler%'; -- Query scheduler logs of PD
mysql> select * from cluster_log where type='tidb' and content like '%ddl%'; -- Query DDL logs of TiDB
```
### Cluster diagnostics
In the current cluster topology, the components are dispersed, and their data sources and data formats are heterogeneous, so cluster diagnosis is hard to perform programmatically and must be done manually. With the system tables provided by the previous layers, each TiDB node has a stable global view of the full cluster. Based on this, a problem diagnosis framework can be implemented: by defining diagnostic rules, we can quickly discover existing and potential problems in the cluster.
**Diagnostic rule definition**: Diagnostic rules are the logic for finding problems; they read data from the various system tables and detect abnormal values.
Diagnostic rules can be divided into three levels:
- Discover potential problems: for example, identify insufficient disk capacity from the ratio of disk usage to disk capacity
- Locate an existing problem: for example, the load metrics show that the Coprocessor thread pool has become a bottleneck
- Provide fix suggestions: for example, if disk I/O analysis shows consistently high latency, a recommendation to replace the disk can be issued
This proposal mainly focuses on implementing the diagnostic framework and some diagnostic rules. More rules need to be accumulated gradually from experience, with the ultimate goal of an expert system that lowers the bar for using and operating TiDB. The following content does not detail specific diagnostic rules; it focuses on the implementation of the diagnostic framework.
#### Diagnostic framework design
A variety of user scenarios must be considered in the diagnostic framework design, including but not limited to:
- Once users settle on a fixed version, they do not upgrade the TiDB cluster version easily.
- User-defined diagnostic rules
- Loading new diagnostic rules without restarting the cluster
- The diagnosis framework needs to be easily integrated with the existing operation and maintenance system.
- Users may block some diagnostics. For example, a user who deliberately runs a heterogeneous system will block the rules that flag heterogeneity.
- ...
Therefore, implementing a diagnostic system that supports hot rule loading is necessary. Currently there are the following options:
- Golang Plugin: Use different plugins to define diagnostic rules and load them into the TiDB processes
- Advantages: Low development threshold in Golang
- Disadvantages: Version management is error-prone, and plugins must be compiled with the same Golang version as the host TiDB
- Embedded Lua: Load Lua scripts at runtime or during startup. A script reads system table data from TiDB, evaluates it against the diagnostic rules, and provides feedback
- Advantages: Lua is a fully host-dependent language with simple syntax; easy to integrate with the host
- Disadvantages: Relies on another scripting language
- Shell Script: Shell supports flow control, so diagnostic rules can be defined as shell scripts
- Advantages: easy to write, load, and execute
- Disadvantages: needs to run on a machine where the MySQL client is installed
This proposal temporarily adopts the third option and writes diagnostic rules as Shell scripts. This approach requires no intrusion into TiDB and leaves room for better solutions later; a minimal sketch of such a rule is shown below.
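As an illustration, here is a minimal sketch of what such a shell-based diagnostic rule might look like. The endpoint, the threshold, and the `information_schema.cluster_disk_usage` table with its columns are assumptions made for this sketch, not part of the actual implementation.
```shell
#!/usr/bin/env bash
# Minimal sketch of a shell-based diagnostic rule (illustrative only).
# Assumption: a system table exposing per-node disk usage exists;
# the real table and column names may differ.
TIDB_HOST=127.0.0.1
TIDB_PORT=4000
THRESHOLD=80  # flag nodes whose disk usage exceeds 80%

mysql -h "$TIDB_HOST" -P "$TIDB_PORT" -u root --batch --skip-column-names -e "
  SELECT instance, usage_percent
  FROM information_schema.cluster_disk_usage
  WHERE usage_percent > $THRESHOLD;" |
while read -r instance usage; do
  echo "WARNING: disk usage on ${instance} is ${usage}%, exceeding ${THRESHOLD}%"
done
```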
| docs/design/2019-11-14-tidb-builtin-diagnostics.md | 0 | https://github.com/pingcap/tidb/commit/788dc28a7423d4b85ebe11bbce632733b056d63a | [
0.024484766647219658,
0.0007097179186530411,
0.00016371443052776158,
0.00016784644685685635,
0.0031453503761440516
] |
{
"id": 3,
"code_window": [
"\t\treturn ver, errors.Trace(err)\n",
"\t}\n",
"\n",
"\tdbInfo, err := t.GetDatabase(job.SchemaID)\n",
"\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tdbInfo, err := checkSchemaExistAndCancelNotExistJob(t, job)\n"
],
"file_path": "ddl/schema.go",
"type": "replace",
"edit_start_line_idx": 118
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl_test
import (
"context"
"fmt"
"strings"
"sync"
"sync/atomic"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/log"
"github.com/pingcap/parser"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/util/admin"
"github.com/pingcap/tidb/util/gcutil"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/pingcap/tidb/util/testkit"
"go.uber.org/zap"
)
var _ = Suite(&testStateChangeSuite{})
var _ = SerialSuites(&serialTestStateChangeSuite{})
type serialTestStateChangeSuite struct {
testStateChangeSuiteBase
}
type testStateChangeSuite struct {
testStateChangeSuiteBase
}
type testStateChangeSuiteBase struct {
lease time.Duration
store kv.Storage
dom *domain.Domain
se session.Session
p *parser.Parser
preSQL string
}
func (s *testStateChangeSuiteBase) SetUpSuite(c *C) {
s.lease = 200 * time.Millisecond
ddl.SetWaitTimeWhenErrorOccurred(1 * time.Microsecond)
var err error
s.store, err = mockstore.NewMockStore()
c.Assert(err, IsNil)
session.SetSchemaLease(s.lease)
s.dom, err = session.BootstrapSession(s.store)
c.Assert(err, IsNil)
s.se, err = session.CreateSession4Test(s.store)
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "create database test_db_state default charset utf8 default collate utf8_bin")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
s.p = parser.New()
}
func (s *testStateChangeSuiteBase) TearDownSuite(c *C) {
s.se.Execute(context.Background(), "drop database if exists test_db_state")
s.se.Close()
s.dom.Close()
s.store.Close()
}
// TestShowCreateTable tests the result of "show create table" when we are running "add index" or "add column".
func (s *serialTestStateChangeSuite) TestShowCreateTable(c *C) {
config.GetGlobalConfig().Experimental.AllowsExpressionIndex = true
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (id int)")
tk.MustExec("create table t2 (a int, b varchar(10)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci")
// tkInternal is used to execute additional SQL (here, `show create table`) in the DDL change callback.
// Using the same `tk` in different goroutines may lead to a data race.
tkInternal := testkit.NewTestKit(c, s.store)
tkInternal.MustExec("use test")
var checkErr error
testCases := []struct {
sql string
expectedRet string
}{
{"alter table t add index idx(id)",
"CREATE TABLE `t` (\n `id` int(11) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"},
{"alter table t add index idx1(id)",
"CREATE TABLE `t` (\n `id` int(11) DEFAULT NULL,\n KEY `idx` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"},
{"alter table t add column c int",
"CREATE TABLE `t` (\n `id` int(11) DEFAULT NULL,\n KEY `idx` (`id`),\n KEY `idx1` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"},
{"alter table t2 add column c varchar(1)",
"CREATE TABLE `t2` (\n `a` int(11) DEFAULT NULL,\n `b` varchar(10) COLLATE utf8mb4_general_ci DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci"},
{"alter table t2 add column d varchar(1)",
"CREATE TABLE `t2` (\n `a` int(11) DEFAULT NULL,\n `b` varchar(10) COLLATE utf8mb4_general_ci DEFAULT NULL,\n `c` varchar(1) COLLATE utf8mb4_general_ci DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci"},
}
prevState := model.StateNone
callback := &ddl.TestDDLCallback{}
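// currTestCaseOffset tracks which test case's DDL is currently running, so the hook can pick the matching expected `show create table` output.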
currTestCaseOffset := 0
callback.OnJobUpdatedExported = func(job *model.Job) {
if job.SchemaState == prevState || checkErr != nil {
return
}
if job.State == model.JobStateDone {
currTestCaseOffset++
}
if job.SchemaState != model.StatePublic {
var result sqlexec.RecordSet
tbl2 := testGetTableByName(c, tkInternal.Se, "test", "t2")
if job.TableID == tbl2.Meta().ID {
// Avoid using mustQuery in the hook func, because an assertion failure in mustQuery would leave the DDL job hung.
result, checkErr = tkInternal.Exec("show create table t2")
if checkErr != nil {
return
}
} else {
result, checkErr = tkInternal.Exec("show create table t")
if checkErr != nil {
return
}
}
req := result.NewChunk()
checkErr = result.Next(context.Background(), req)
if checkErr != nil {
return
}
got := req.GetRow(0).GetString(1)
expected := testCases[currTestCaseOffset].expectedRet
if got != expected {
checkErr = errors.Errorf("got %s, expected %s", got, expected)
}
}
}
d := s.dom.DDL()
originalCallback := d.GetHook()
defer d.(ddl.DDLForTest).SetHook(originalCallback)
d.(ddl.DDLForTest).SetHook(callback)
for _, tc := range testCases {
tk.MustExec(tc.sql)
c.Assert(checkErr, IsNil)
}
}
// TestDropNotNullColumn is used to test issue #8654.
func (s *testStateChangeSuite) TestDropNotNullColumn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (id int, a int not null default 11)")
tk.MustExec("insert into t values(1, 1)")
tk.MustExec("create table t1 (id int, b varchar(255) not null)")
tk.MustExec("insert into t1 values(2, '')")
tk.MustExec("create table t2 (id int, c time not null)")
tk.MustExec("insert into t2 values(3, '11:22:33')")
tk.MustExec("create table t3 (id int, d json not null)")
tk.MustExec("insert into t3 values(4, d)")
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
var checkErr error
d := s.dom.DDL()
originalCallback := d.GetHook()
callback := &ddl.TestDDLCallback{}
sqlNum := 0
callback.OnJobUpdatedExported = func(job *model.Job) {
if checkErr != nil {
return
}
originalCallback.OnChanged(nil)
if job.SchemaState == model.StateWriteOnly {
switch sqlNum {
case 0:
_, checkErr = tk1.Exec("insert into t set id = 1")
case 1:
_, checkErr = tk1.Exec("insert into t1 set id = 2")
case 2:
_, checkErr = tk1.Exec("insert into t2 set id = 3")
case 3:
_, checkErr = tk1.Exec("insert into t3 set id = 4")
}
}
}
d.(ddl.DDLForTest).SetHook(callback)
tk.MustExec("alter table t drop column a")
c.Assert(checkErr, IsNil)
sqlNum++
tk.MustExec("alter table t1 drop column b")
c.Assert(checkErr, IsNil)
sqlNum++
tk.MustExec("alter table t2 drop column c")
c.Assert(checkErr, IsNil)
sqlNum++
tk.MustExec("alter table t3 drop column d")
c.Assert(checkErr, IsNil)
d.(ddl.DDLForTest).SetHook(originalCallback)
tk.MustExec("drop table t, t1, t2, t3")
}
func (s *testStateChangeSuite) TestTwoStates(c *C) {
cnt := 5
// New the testExecInfo.
testInfo := &testExecInfo{
execCases: cnt,
sqlInfos: make([]*sqlInfo, 4),
}
for i := 0; i < len(testInfo.sqlInfos); i++ {
sqlInfo := &sqlInfo{cases: make([]*stateCase, cnt)}
for j := 0; j < cnt; j++ {
sqlInfo.cases[j] = new(stateCase)
}
testInfo.sqlInfos[i] = sqlInfo
}
err := testInfo.createSessions(s.store, "test_db_state")
c.Assert(err, IsNil)
// Fill the SQLs and expected error messages.
testInfo.sqlInfos[0].sql = "insert into t (c1, c2, c3, c4) value(2, 'b', 'N', '2017-07-02')"
testInfo.sqlInfos[1].sql = "insert into t (c1, c2, c3, d3, c4) value(3, 'b', 'N', 'a', '2017-07-03')"
unknownColErr := "[planner:1054]Unknown column 'd3' in 'field list'"
testInfo.sqlInfos[1].cases[0].expectedCompileErr = unknownColErr
testInfo.sqlInfos[1].cases[1].expectedCompileErr = unknownColErr
testInfo.sqlInfos[1].cases[2].expectedCompileErr = unknownColErr
testInfo.sqlInfos[1].cases[3].expectedCompileErr = unknownColErr
testInfo.sqlInfos[2].sql = "update t set c2 = 'c2_update'"
testInfo.sqlInfos[3].sql = "replace into t values(5, 'e', 'N', '2017-07-05')"
testInfo.sqlInfos[3].cases[4].expectedCompileErr = "[planner:1136]Column count doesn't match value count at row 1"
alterTableSQL := "alter table t add column d3 enum('a', 'b') not null default 'a' after c3"
s.test(c, "", alterTableSQL, testInfo)
// TODO: Add more DDL statements.
}
func (s *testStateChangeSuite) test(c *C, tableName, alterTableSQL string, testInfo *testExecInfo) {
_, err := s.se.Execute(context.Background(), `create table t (
c1 int,
c2 varchar(64),
c3 enum('N','Y') not null default 'N',
c4 timestamp on update current_timestamp,
key(c1, c2))`)
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table t")
_, err = s.se.Execute(context.Background(), "insert into t values(1, 'a', 'N', '2017-07-01')")
c.Assert(err, IsNil)
callback := &ddl.TestDDLCallback{}
prevState := model.StateNone
var checkErr error
err = testInfo.parseSQLs(s.p)
c.Assert(err, IsNil, Commentf("error stack %v", errors.ErrorStack(err)))
times := 0
callback.OnJobUpdatedExported = func(job *model.Job) {
if job.SchemaState == prevState || checkErr != nil || times >= 3 {
return
}
times++
switch job.SchemaState {
case model.StateDeleteOnly:
// In this state we execute every sqlInfo once, using the first session and its related information.
err = testInfo.compileSQL(0)
if err != nil {
checkErr = err
break
}
err = testInfo.execSQL(0)
if err != nil {
checkErr = err
}
case model.StateWriteOnly:
// In this state we record the schema information in the second case.
err = testInfo.compileSQL(1)
if err != nil {
checkErr = err
}
case model.StateWriteReorganization:
// In this state we execute every sqlInfo once, using the third session and its related information.
err = testInfo.compileSQL(2)
if err != nil {
checkErr = err
break
}
err = testInfo.execSQL(2)
if err != nil {
checkErr = err
break
}
// Mock the server is in `write only` state.
err = testInfo.execSQL(1)
if err != nil {
checkErr = err
break
}
// In this state we record the schema information in the fourth case.
err = testInfo.compileSQL(3)
if err != nil {
checkErr = err
}
}
}
d := s.dom.DDL()
originalCallback := d.GetHook()
defer d.(ddl.DDLForTest).SetHook(originalCallback)
d.(ddl.DDLForTest).SetHook(callback)
_, err = s.se.Execute(context.Background(), alterTableSQL)
c.Assert(err, IsNil)
err = testInfo.compileSQL(4)
c.Assert(err, IsNil)
err = testInfo.execSQL(4)
c.Assert(err, IsNil)
// Mock the server is in `write reorg` state.
err = testInfo.execSQL(3)
c.Assert(err, IsNil)
c.Assert(errors.ErrorStack(checkErr), Equals, "")
}
type stateCase struct {
session session.Session
rawStmt ast.StmtNode
stmt sqlexec.Statement
expectedExecErr string
expectedCompileErr string
}
type sqlInfo struct {
sql string
// cases is multiple stateCases.
// Every case needs to be executed with a different schema state.
cases []*stateCase
}
// testExecInfo contains some SQL information and the number of times each SQL is executed
// in a DDL statement.
type testExecInfo struct {
// execCases indicates that every SQL statement needs to be executed execCases times.
// And the schema state is different at each execution.
execCases int
// sqlInfos represents this test information has multiple SQLs to test.
sqlInfos []*sqlInfo
}
func (t *testExecInfo) createSessions(store kv.Storage, useDB string) error {
var err error
for i, info := range t.sqlInfos {
for j, c := range info.cases {
c.session, err = session.CreateSession4Test(store)
if err != nil {
return errors.Trace(err)
}
_, err = c.session.Execute(context.Background(), "use "+useDB)
if err != nil {
return errors.Trace(err)
}
// The connection ID is set to aid debugging.
c.session.SetConnectionID(uint64(i*10 + j))
}
}
return nil
}
func (t *testExecInfo) parseSQLs(p *parser.Parser) error {
if t.execCases <= 0 {
return nil
}
var err error
for _, sqlInfo := range t.sqlInfos {
seVars := sqlInfo.cases[0].session.GetSessionVars()
charset, collation := seVars.GetCharsetInfo()
for j := 0; j < t.execCases; j++ {
sqlInfo.cases[j].rawStmt, err = p.ParseOneStmt(sqlInfo.sql, charset, collation)
if err != nil {
return errors.Trace(err)
}
}
}
return nil
}
func (t *testExecInfo) compileSQL(idx int) (err error) {
for _, info := range t.sqlInfos {
c := info.cases[idx]
compiler := executor.Compiler{Ctx: c.session}
se := c.session
ctx := context.TODO()
se.PrepareTxnCtx(ctx)
sctx := se.(sessionctx.Context)
if err = executor.ResetContextOfStmt(sctx, c.rawStmt); err != nil {
return errors.Trace(err)
}
c.stmt, err = compiler.Compile(ctx, c.rawStmt)
if c.expectedCompileErr != "" {
if err == nil {
err = errors.Errorf("expected error %s but got nil", c.expectedCompileErr)
} else if err.Error() == c.expectedCompileErr {
err = nil
}
}
if err != nil {
return errors.Trace(err)
}
}
return nil
}
func (t *testExecInfo) execSQL(idx int) error {
for _, sqlInfo := range t.sqlInfos {
c := sqlInfo.cases[idx]
if c.expectedCompileErr != "" {
continue
}
_, err := c.stmt.Exec(context.TODO())
if c.expectedExecErr != "" {
if err == nil {
err = errors.Errorf("expected error %s but got nil", c.expectedExecErr)
} else if err.Error() == c.expectedExecErr {
err = nil
}
}
if err != nil {
return errors.Trace(err)
}
err = c.session.CommitTxn(context.TODO())
if err != nil {
return errors.Trace(err)
}
}
return nil
}
type sqlWithErr struct {
sql string
expectErr error
}
type expectQuery struct {
sql string
rows []string
}
func (s *testStateChangeSuite) TestAppendEnum(c *C) {
_, err := s.se.Execute(context.Background(), `create table t (
c1 varchar(64),
c2 enum('N','Y') not null default 'N',
c3 timestamp on update current_timestamp,
c4 int primary key,
unique key idx2 (c2, c3))`)
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table t")
_, err = s.se.Execute(context.Background(), "insert into t values('a', 'N', '2017-07-01', 8)")
c.Assert(err, IsNil)
// Make sure these SQLs use the plan of index scan.
_, err = s.se.Execute(context.Background(), "drop stats t")
c.Assert(err, IsNil)
se, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into t values('a', 'A', '2018-09-19', 9)")
c.Assert(err.Error(), Equals, "[table:1366]Incorrect enum value: 'A' for column 'c2' at row 1")
failAlterTableSQL1 := "alter table t change c2 c2 enum('N') DEFAULT 'N'"
_, err = s.se.Execute(context.Background(), failAlterTableSQL1)
c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported modify column: the number of enum column's elements is less than the original: 2")
failAlterTableSQL2 := "alter table t change c2 c2 int default 0"
_, err = s.se.Execute(context.Background(), failAlterTableSQL2)
c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported modify column: cannot modify enum type column's to type int(11)")
alterTableSQL := "alter table t change c2 c2 enum('N','Y','A') DEFAULT 'A'"
_, err = s.se.Execute(context.Background(), alterTableSQL)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "insert into t values('a', 'A', '2018-09-20', 10)")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "insert into t (c1, c3, c4) values('a', '2018-09-21', 11)")
c.Assert(err, IsNil)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test_db_state")
result, err := s.execQuery(tk, "select c4, c2 from t order by c4 asc")
c.Assert(err, IsNil)
expected := []string{"8 N", "10 A", "11 A"}
err = checkResult(result, testkit.Rows(expected...))
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "update t set c2='N' where c4 = 10")
c.Assert(err, IsNil)
result, err = s.execQuery(tk, "select c2 from t where c4 = 10")
c.Assert(err, IsNil)
expected = []string{"N"}
err = checkResult(result, testkit.Rows(expected...))
c.Assert(err, IsNil)
}
// https://github.com/pingcap/tidb/pull/6249 fixes the following two test cases.
func (s *testStateChangeSuite) TestWriteOnlyWriteNULL(c *C) {
sqls := make([]sqlWithErr, 1)
sqls[0] = sqlWithErr{"insert t set c1 = 'c1_new', c3 = '2019-02-12', c4 = 8 on duplicate key update c1 = values(c1)", nil}
addColumnSQL := "alter table t add column c5 int not null default 1 after c4"
expectQuery := &expectQuery{"select c4, c5 from t", []string{"8 1"}}
// TODO: This case should always fail in the write-only state, but it doesn't. We use the write-reorganization state here to keep it running stably. It needs a double check.
s.runTestInSchemaState(c, model.StateWriteReorganization, true, addColumnSQL, sqls, expectQuery)
}
func (s *testStateChangeSuite) TestWriteOnlyOnDupUpdate(c *C) {
sqls := make([]sqlWithErr, 3)
sqls[0] = sqlWithErr{"delete from t", nil}
sqls[1] = sqlWithErr{"insert t set c1 = 'c1_dup', c3 = '2018-02-12', c4 = 2 on duplicate key update c1 = values(c1)", nil}
sqls[2] = sqlWithErr{"insert t set c1 = 'c1_new', c3 = '2019-02-12', c4 = 2 on duplicate key update c1 = values(c1)", nil}
addColumnSQL := "alter table t add column c5 int not null default 1 after c4"
expectQuery := &expectQuery{"select c4, c5 from t", []string{"2 1"}}
// TODO: This case should always fail in the write-only state, but it doesn't. We use the write-reorganization state here to keep it running stably. It needs a double check.
s.runTestInSchemaState(c, model.StateWriteReorganization, true, addColumnSQL, sqls, expectQuery)
}
func (s *testStateChangeSuite) TestWriteOnlyOnDupUpdateForAddColumns(c *C) {
sqls := make([]sqlWithErr, 3)
sqls[0] = sqlWithErr{"delete from t", nil}
sqls[1] = sqlWithErr{"insert t set c1 = 'c1_dup', c3 = '2018-02-12', c4 = 2 on duplicate key update c1 = values(c1)", nil}
sqls[2] = sqlWithErr{"insert t set c1 = 'c1_new', c3 = '2019-02-12', c4 = 2 on duplicate key update c1 = values(c1)", nil}
addColumnsSQL := "alter table t add column c5 int not null default 1 after c4, add column c44 int not null default 1"
expectQuery := &expectQuery{"select c4, c5, c44 from t", []string{"2 1 1"}}
// TODO: This case should always fail in the write-only state, but it doesn't. We use the write-reorganization state here to keep it running stably. It needs a double check.
s.runTestInSchemaState(c, model.StateWriteReorganization, true, addColumnsSQL, sqls, expectQuery)
}
// TestWriteOnly tests whether the correct columns are used in PhysicalIndexScan's ToPB function.
func (s *testStateChangeSuite) TestWriteOnly(c *C) {
sqls := make([]sqlWithErr, 3)
sqls[0] = sqlWithErr{"delete from t where c1 = 'a'", nil}
sqls[1] = sqlWithErr{"update t use index(idx2) set c1 = 'c1_update' where c1 = 'a'", nil}
sqls[2] = sqlWithErr{"insert t set c1 = 'c1_insert', c3 = '2018-02-12', c4 = 1", nil}
addColumnSQL := "alter table t add column c5 int not null default 1 first"
s.runTestInSchemaState(c, model.StateWriteOnly, true, addColumnSQL, sqls, nil)
}
// TestWriteOnlyForAddColumns tests whether the correct columns are used in PhysicalIndexScan's ToPB function.
func (s *testStateChangeSuite) TestWriteOnlyForAddColumns(c *C) {
sqls := make([]sqlWithErr, 3)
sqls[0] = sqlWithErr{"delete from t where c1 = 'a'", nil}
sqls[1] = sqlWithErr{"update t use index(idx2) set c1 = 'c1_update' where c1 = 'a'", nil}
sqls[2] = sqlWithErr{"insert t set c1 = 'c1_insert', c3 = '2018-02-12', c4 = 1", nil}
addColumnsSQL := "alter table t add column c5 int not null default 1 first, add column c6 int not null default 1"
s.runTestInSchemaState(c, model.StateWriteOnly, true, addColumnsSQL, sqls, nil)
}
// TestDeleteOnly tests whether the correct columns are used in PhysicalIndexScan's ToPB function.
func (s *testStateChangeSuite) TestDeleteOnly(c *C) {
sqls := make([]sqlWithErr, 1)
sqls[0] = sqlWithErr{"insert t set c1 = 'c1_insert', c3 = '2018-02-12', c4 = 1",
errors.Errorf("Can't find column c1")}
dropColumnSQL := "alter table t drop column c1"
s.runTestInSchemaState(c, model.StateDeleteOnly, true, dropColumnSQL, sqls, nil)
}
// TestDeleteOnlyForDropColumns tests whether the correct columns are used in PhysicalIndexScan's ToPB function.
func (s *testStateChangeSuite) TestDeleteOnlyForDropColumns(c *C) {
sqls := make([]sqlWithErr, 1)
sqls[0] = sqlWithErr{"insert t set c1 = 'c1_insert', c3 = '2018-02-12', c4 = 1",
errors.Errorf("Can't find column c1")}
dropColumnsSQL := "alter table t drop column c1, drop column c3"
s.runTestInSchemaState(c, model.StateDeleteOnly, true, dropColumnsSQL, sqls, nil)
}
func (s *testStateChangeSuite) TestWriteOnlyForDropColumn(c *C) {
_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), `create table tt (c1 int, c4 int)`)
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into tt (c1, c4) values(8, 8)")
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table tt")
sqls := make([]sqlWithErr, 2)
sqls[0] = sqlWithErr{"update t set c1='5', c3='2020-03-01';", errors.New("[planner:1054]Unknown column 'c3' in 'field list'")}
sqls[1] = sqlWithErr{"update t t1, tt t2 set t1.c1='5', t1.c3='2020-03-01', t2.c1='10' where t1.c4=t2.c4",
errors.New("[planner:1054]Unknown column 'c3' in 'field list'")}
// TODO: Fix the case of sqls[2].
// sqls[2] = sqlWithErr{"update t set c1='5' where c3='2017-07-01';", errors.New("[planner:1054]Unknown column 'c3' in 'field list'")}
dropColumnSQL := "alter table t drop column c3"
query := &expectQuery{sql: "select * from t;", rows: []string{"a N 8"}}
s.runTestInSchemaState(c, model.StateWriteOnly, false, dropColumnSQL, sqls, query)
}
func (s *testStateChangeSuite) TestWriteOnlyForDropColumns(c *C) {
_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), `create table t_drop_columns (c1 int, c4 int)`)
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into t_drop_columns (c1, c4) values(8, 8)")
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table t_drop_columns")
sqls := make([]sqlWithErr, 2)
sqls[0] = sqlWithErr{"update t set c1='5', c3='2020-03-01';", errors.New("[planner:1054]Unknown column 'c3' in 'field list'")}
sqls[1] = sqlWithErr{"update t t1, t_drop_columns t2 set t1.c1='5', t1.c3='2020-03-01', t2.c1='10' where t1.c4=t2.c4",
errors.New("[planner:1054]Unknown column 'c3' in 'field list'")}
// TODO: Fix the case of sqls[2].
// sqls[2] = sqlWithErr{"update t set c1='5' where c3='2017-07-01';", errors.New("[planner:1054]Unknown column 'c3' in 'field list'")}
dropColumnsSQL := "alter table t drop column c3, drop column c1"
query := &expectQuery{sql: "select * from t;", rows: []string{"N 8"}}
s.runTestInSchemaState(c, model.StateWriteOnly, false, dropColumnsSQL, sqls, query)
}
func (s *testStateChangeSuiteBase) runTestInSchemaState(c *C, state model.SchemaState, isOnJobUpdated bool, alterTableSQL string,
sqlWithErrs []sqlWithErr, expectQuery *expectQuery) {
_, err := s.se.Execute(context.Background(), `create table t (
c1 varchar(64),
c2 enum('N','Y') not null default 'N',
c3 timestamp on update current_timestamp,
c4 int primary key,
unique key idx2 (c2))`)
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table t")
_, err = s.se.Execute(context.Background(), "insert into t values('a', 'N', '2017-07-01', 8)")
c.Assert(err, IsNil)
// Make sure these SQLs use the plan of index scan.
_, err = s.se.Execute(context.Background(), "drop stats t")
c.Assert(err, IsNil)
callback := &ddl.TestDDLCallback{}
prevState := model.StateNone
var checkErr error
times := 0
se, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
cbFunc := func(job *model.Job) {
if job.SchemaState == prevState || checkErr != nil || times >= 3 {
return
}
times++
if job.SchemaState != state {
return
}
for _, sqlWithErr := range sqlWithErrs {
_, err = se.Execute(context.Background(), sqlWithErr.sql)
if !terror.ErrorEqual(err, sqlWithErr.expectErr) {
checkErr = err
if checkErr == nil {
checkErr = errors.New("err can't be nil")
}
break
}
}
}
if isOnJobUpdated {
callback.OnJobUpdatedExported = cbFunc
} else {
callback.OnJobRunBeforeExported = cbFunc
}
d := s.dom.DDL()
originalCallback := d.GetHook()
d.(ddl.DDLForTest).SetHook(callback)
_, err = s.se.Execute(context.Background(), alterTableSQL)
c.Assert(err, IsNil)
c.Assert(errors.ErrorStack(checkErr), Equals, "")
d.(ddl.DDLForTest).SetHook(originalCallback)
if expectQuery != nil {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test_db_state")
result, err := s.execQuery(tk, expectQuery.sql)
c.Assert(err, IsNil)
err = checkResult(result, testkit.Rows(expectQuery.rows...))
c.Assert(err, IsNil)
}
}
func (s *testStateChangeSuiteBase) execQuery(tk *testkit.TestKit, sql string, args ...interface{}) (*testkit.Result, error) {
comment := Commentf("sql:%s, args:%v", sql, args)
rs, err := tk.Exec(sql, args...)
if err != nil {
return nil, err
}
result := tk.ResultSetToResult(rs, comment)
return result, nil
}
func checkResult(result *testkit.Result, expected [][]interface{}) error {
got := fmt.Sprintf("%s", result.Rows())
need := fmt.Sprintf("%s", expected)
if got != need {
return fmt.Errorf("need %v, but got %v", need, got)
}
return nil
}
func (s *testStateChangeSuiteBase) CheckResult(tk *testkit.TestKit, sql string, args ...interface{}) (*testkit.Result, error) {
comment := Commentf("sql:%s, args:%v", sql, args)
rs, err := tk.Exec(sql, args...)
if err != nil {
return nil, err
}
result := tk.ResultSetToResult(rs, comment)
return result, nil
}
func (s *testStateChangeSuite) TestShowIndex(c *C) {
_, err := s.se.Execute(context.Background(), `create table t(c1 int primary key, c2 int)`)
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table t")
callback := &ddl.TestDDLCallback{}
prevState := model.StateNone
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test_db_state")
showIndexSQL := `show index from t`
var checkErr error
callback.OnJobUpdatedExported = func(job *model.Job) {
if job.SchemaState == prevState || checkErr != nil {
return
}
switch job.SchemaState {
case model.StateDeleteOnly, model.StateWriteOnly, model.StateWriteReorganization:
result, err1 := s.execQuery(tk, showIndexSQL)
if err1 != nil {
checkErr = err1
break
}
checkErr = checkResult(result, testkit.Rows("t 0 PRIMARY 1 c1 A 0 <nil> <nil> BTREE YES NULL"))
}
}
d := s.dom.DDL()
originalCallback := d.GetHook()
d.(ddl.DDLForTest).SetHook(callback)
alterTableSQL := `alter table t add index c2(c2)`
_, err = s.se.Execute(context.Background(), alterTableSQL)
c.Assert(err, IsNil)
c.Assert(errors.ErrorStack(checkErr), Equals, "")
result, err := s.execQuery(tk, showIndexSQL)
c.Assert(err, IsNil)
err = checkResult(result, testkit.Rows(
"t 0 PRIMARY 1 c1 A 0 <nil> <nil> BTREE YES NULL",
"t 1 c2 1 c2 A 0 <nil> <nil> YES BTREE YES NULL",
))
c.Assert(err, IsNil)
d.(ddl.DDLForTest).SetHook(originalCallback)
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), `create table tr(
id int, name varchar(50),
purchased date
)
partition by range( year(purchased) ) (
partition p0 values less than (1990),
partition p1 values less than (1995),
partition p2 values less than (2000),
partition p3 values less than (2005),
partition p4 values less than (2010),
partition p5 values less than (2015)
);`)
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table tr")
_, err = s.se.Execute(context.Background(), "create index idx1 on tr (purchased);")
c.Assert(err, IsNil)
result, err = s.execQuery(tk, "show index from tr;")
c.Assert(err, IsNil)
err = checkResult(result, testkit.Rows("tr 1 idx1 1 purchased A 0 <nil> <nil> YES BTREE YES NULL"))
c.Assert(err, IsNil)
}
func (s *testStateChangeSuite) TestParallelAlterModifyColumn(c *C) {
sql := "ALTER TABLE t MODIFY COLUMN b int FIRST;"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, IsNil)
_, err := s.se.Execute(context.Background(), "select * from t")
c.Assert(err, IsNil)
}
s.testControlParallelExecSQL(c, sql, sql, f)
}
// TODO: This test does not actually perform two DDLs in parallel,
// so it should not use testControlParallelExecSQL. We will handle this test in the next PR.
// func (s *testStateChangeSuite) TestParallelColumnModifyingDefinition(c *C) {
// sql1 := "insert into t(b) values (null);"
// sql2 := "alter table t change b b2 bigint not null;"
// f := func(c *C, err1, err2 error) {
// c.Assert(err1, IsNil)
// if err2 != nil {
// c.Assert(err2.Error(), Equals, "[ddl:1265]Data truncated for column 'b2' at row 1")
// }
// }
// s.testControlParallelExecSQL(c, sql1, sql2, f)
// }
func (s *testStateChangeSuite) TestParallelAddColumAndSetDefaultValue(c *C) {
_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), `create table tx (
c1 varchar(64),
c2 enum('N','Y') not null default 'N',
primary key idx2 (c2, c1))`)
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into tx values('a', 'N')")
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table tx")
sql1 := "alter table tx add column cx int after c1"
sql2 := "alter table tx alter c2 set default 'N'"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, IsNil)
_, err := s.se.Execute(context.Background(), "delete from tx where c1='a'")
c.Assert(err, IsNil)
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *testStateChangeSuite) TestParallelChangeColumnName(c *C) {
sql1 := "ALTER TABLE t CHANGE a aa int;"
sql2 := "ALTER TABLE t CHANGE b aa int;"
f := func(c *C, err1, err2 error) {
// Make sure only one DDL encounters the 'duplicate column name' error.
var oneErr error
if (err1 != nil && err2 == nil) || (err1 == nil && err2 != nil) {
if err1 != nil {
oneErr = err1
} else {
oneErr = err2
}
}
c.Assert(oneErr.Error(), Equals, "[schema:1060]Duplicate column name 'aa'")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *testStateChangeSuite) TestParallelAlterAddIndex(c *C) {
sql1 := "ALTER TABLE t add index index_b(b);"
sql2 := "CREATE INDEX index_b ON t (c);"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:1061]index already exist index_b")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *serialTestStateChangeSuite) TestParallelAlterAddExpressionIndex(c *C) {
config.GetGlobalConfig().Experimental.AllowsExpressionIndex = true
sql1 := "ALTER TABLE t add index expr_index_b((b+1));"
sql2 := "CREATE INDEX expr_index_b ON t ((c+1));"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:1061]index already exist expr_index_b")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *testStateChangeSuite) TestParallelAddPrimaryKey(c *C) {
sql1 := "ALTER TABLE t add primary key index_b(b);"
sql2 := "ALTER TABLE t add primary key index_b(c);"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[schema:1068]Multiple primary key defined")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *testStateChangeSuite) TestParallelAlterAddPartition(c *C) {
sql1 := `alter table t_part add partition (
partition p2 values less than (30)
);`
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:1493]VALUES LESS THAN value must be strictly increasing for each partition")
}
s.testControlParallelExecSQL(c, sql1, sql1, f)
}
func (s *testStateChangeSuite) TestParallelDropColumn(c *C) {
sql := "ALTER TABLE t drop COLUMN c ;"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:1091]column c doesn't exist")
}
s.testControlParallelExecSQL(c, sql, sql, f)
}
func (s *testStateChangeSuite) TestParallelDropColumns(c *C) {
sql := "ALTER TABLE t drop COLUMN b, drop COLUMN c;"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:1091]column b doesn't exist")
}
s.testControlParallelExecSQL(c, sql, sql, f)
}
func (s *testStateChangeSuite) TestParallelDropIfExistsColumns(c *C) {
sql := "ALTER TABLE t drop COLUMN if exists b, drop COLUMN if exists c;"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, IsNil)
}
s.testControlParallelExecSQL(c, sql, sql, f)
}
func (s *testStateChangeSuite) TestParallelDropIndex(c *C) {
sql1 := "alter table t drop index idx1 ;"
sql2 := "alter table t drop index idx2 ;"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[autoid:1075]Incorrect table definition; there can be only one auto column and it must be defined as a key")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *testStateChangeSuite) TestParallelDropPrimaryKey(c *C) {
s.preSQL = "ALTER TABLE t add primary key index_b(c);"
defer func() {
s.preSQL = ""
}()
sql1 := "alter table t drop primary key;"
sql2 := "alter table t drop primary key;"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:1091]index PRIMARY doesn't exist")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
func (s *testStateChangeSuite) TestParallelCreateAndRename(c *C) {
sql1 := "create table t_exists(c int);"
sql2 := "alter table t rename to t_exists;"
defer s.se.Execute(context.Background(), "drop table t_exists")
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[schema:1050]Table 't_exists' already exists")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
type checkRet func(c *C, err1, err2 error)
func (s *testStateChangeSuiteBase) prepareTestControlParallelExecSQL(c *C) (session.Session, session.Session, chan struct{}, ddl.Callback) {
callback := &ddl.TestDDLCallback{}
times := 0
callback.OnJobUpdatedExported = func(job *model.Job) {
if times != 0 {
return
}
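// Block the first job in its hook until both DDL jobs are visible in the DDL job queue, so the two statements are guaranteed to overlap.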
var qLen int
for {
kv.RunInNewTxn(s.store, false, func(txn kv.Transaction) error {
jobs, err1 := admin.GetDDLJobs(txn)
if err1 != nil {
return err1
}
qLen = len(jobs)
return nil
})
if qLen == 2 {
break
}
time.Sleep(5 * time.Millisecond)
}
times++
}
d := s.dom.DDL()
originalCallback := d.GetHook()
d.(ddl.DDLForTest).SetHook(callback)
se, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
se1, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se1.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
ch := make(chan struct{})
// Make sure sql1 is put into the DDLJobQueue.
go func() {
var qLen int
for {
kv.RunInNewTxn(s.store, false, func(txn kv.Transaction) error {
jobs, err3 := admin.GetDDLJobs(txn)
if err3 != nil {
return err3
}
qLen = len(jobs)
return nil
})
if qLen == 1 {
// Make sure sql2 is executed after sql1.
close(ch)
break
}
time.Sleep(5 * time.Millisecond)
}
}()
return se, se1, ch, originalCallback
}
func (s *testStateChangeSuiteBase) testControlParallelExecSQL(c *C, sql1, sql2 string, f checkRet) {
_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "create table t(a int, b int, c int, d int auto_increment,e int, index idx1(d), index idx2(d,e))")
c.Assert(err, IsNil)
if len(s.preSQL) != 0 {
_, err := s.se.Execute(context.Background(), s.preSQL)
c.Assert(err, IsNil)
}
defer s.se.Execute(context.Background(), "drop table t")
_, err = s.se.Execute(context.Background(), "drop database if exists t_part")
c.Assert(err, IsNil)
s.se.Execute(context.Background(), `create table t_part (a int key)
partition by range(a) (
partition p0 values less than (10),
partition p1 values less than (20)
);`)
se, se1, ch, originalCallback := s.prepareTestControlParallelExecSQL(c)
defer s.dom.DDL().(ddl.DDLForTest).SetHook(originalCallback)
var err1 error
var err2 error
wg := sync.WaitGroup{}
wg.Add(2)
go func() {
defer wg.Done()
_, err1 = se.Execute(context.Background(), sql1)
}()
go func() {
defer wg.Done()
<-ch
_, err2 = se1.Execute(context.Background(), sql2)
}()
wg.Wait()
f(c, err1, err2)
}
func (s *testStateChangeSuite) TestParallelUpdateTableReplica(c *C) {
ctx := context.Background()
_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(ctx, "drop table if exists t1;")
c.Assert(err, IsNil)
_, err = s.se.Execute(ctx, "create table t1 (a int);")
c.Assert(err, IsNil)
_, err = s.se.Execute(ctx, "alter table t1 set tiflash replica 3 location labels 'a','b';")
c.Assert(err, IsNil)
se, se1, ch, originalCallback := s.prepareTestControlParallelExecSQL(c)
defer s.dom.DDL().(ddl.DDLForTest).SetHook(originalCallback)
t1 := testGetTableByName(c, se, "test_db_state", "t1")
var err1 error
var err2 error
wg := sync.WaitGroup{}
wg.Add(2)
go func() {
defer wg.Done()
// Mock that the table's TiFlash replica is available.
err1 = domain.GetDomain(se).DDL().UpdateTableReplicaInfo(se, t1.Meta().ID, true)
}()
go func() {
defer wg.Done()
<-ch
// Mock that the table's TiFlash replica is available.
err2 = domain.GetDomain(se1).DDL().UpdateTableReplicaInfo(se1, t1.Meta().ID, true)
}()
wg.Wait()
c.Assert(err1, IsNil)
c.Assert(err2.Error(), Equals, "[ddl:-1]the replica available status of table t1 is already updated")
}
func (s *testStateChangeSuite) testParallelExecSQL(c *C, sql string) {
se, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
se1, err1 := session.CreateSession(s.store)
c.Assert(err1, IsNil)
_, err = se1.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
var err2, err3 error
wg := sync.WaitGroup{}
callback := &ddl.TestDDLCallback{}
once := sync.Once{}
callback.OnJobUpdatedExported = func(job *model.Job) {
// Sleep a while to let the other job enqueue.
once.Do(func() {
time.Sleep(time.Millisecond * 10)
})
}
d := s.dom.DDL()
originalCallback := d.GetHook()
defer d.(ddl.DDLForTest).SetHook(originalCallback)
d.(ddl.DDLForTest).SetHook(callback)
wg.Add(2)
go func() {
defer wg.Done()
_, err2 = se.Execute(context.Background(), sql)
}()
go func() {
defer wg.Done()
_, err3 = se1.Execute(context.Background(), sql)
}()
wg.Wait()
c.Assert(err2, IsNil)
c.Assert(err3, IsNil)
}
// TestCreateTableIfNotExists executes `create table if not exists xxx` in parallel. No error is expected.
func (s *testStateChangeSuite) TestCreateTableIfNotExists(c *C) {
defer s.se.Execute(context.Background(), "drop table test_not_exists")
s.testParallelExecSQL(c, "create table if not exists test_not_exists(a int);")
}
// TestCreateDBIfNotExists executes `create database if not exists xxx` in parallel. No error is expected.
func (s *testStateChangeSuite) TestCreateDBIfNotExists(c *C) {
defer s.se.Execute(context.Background(), "drop database test_not_exists")
s.testParallelExecSQL(c, "create database if not exists test_not_exists;")
}
// TestDDLIfNotExists executes some DDLs with the `if not exists` clause in parallel. No error is expected.
func (s *testStateChangeSuite) TestDDLIfNotExists(c *C) {
defer s.se.Execute(context.Background(), "drop table test_not_exists")
_, err := s.se.Execute(context.Background(), "create table if not exists test_not_exists(a int)")
c.Assert(err, IsNil)
// ADD COLUMN
s.testParallelExecSQL(c, "alter table test_not_exists add column if not exists b int")
// ADD COLUMNS
s.testParallelExecSQL(c, "alter table test_not_exists add column if not exists (c11 int, d11 int)")
// ADD INDEX
s.testParallelExecSQL(c, "alter table test_not_exists add index if not exists idx_b (b)")
// CREATE INDEX
s.testParallelExecSQL(c, "create index if not exists idx_b on test_not_exists (b)")
}
// TestDDLIfExists executes some DDLs with the `if exists` clause in parallel. No error is expected.
func (s *testStateChangeSuite) TestDDLIfExists(c *C) {
defer func() {
s.se.Execute(context.Background(), "drop table test_exists")
s.se.Execute(context.Background(), "drop table test_exists_2")
}()
_, err := s.se.Execute(context.Background(), "create table if not exists test_exists (a int key, b int)")
c.Assert(err, IsNil)
// DROP COLUMNS
s.testParallelExecSQL(c, "alter table test_exists drop column if exists c, drop column if exists d")
// DROP COLUMN
s.testParallelExecSQL(c, "alter table test_exists drop column if exists b") // only `a` exists now
// CHANGE COLUMN
s.testParallelExecSQL(c, "alter table test_exists change column if exists a c int") // only, `c` exists now
// MODIFY COLUMN
s.testParallelExecSQL(c, "alter table test_exists modify column if exists a bigint")
// DROP INDEX
_, err = s.se.Execute(context.Background(), "alter table test_exists add index idx_c (c)")
c.Assert(err, IsNil)
s.testParallelExecSQL(c, "alter table test_exists drop index if exists idx_c")
// DROP PARTITION (ADD PARTITION tested in TestParallelAlterAddPartition)
_, err = s.se.Execute(context.Background(), "create table test_exists_2 (a int key) partition by range(a) (partition p0 values less than (10), partition p1 values less than (20))")
c.Assert(err, IsNil)
s.testParallelExecSQL(c, "alter table test_exists_2 drop partition if exists p1")
}
// TestParallelDDLBeforeRunDDLJob tests a session that executes a DDL statement with an outdated information schema.
// This test simulates the following conditions:
// in a cluster, TiDB "a" executes a DDL statement; TiDB "b" fails to load the new schema,
// and then executes a DDL statement that conflicts with the one executed by "a".
func (s *testStateChangeSuite) TestParallelDDLBeforeRunDDLJob(c *C) {
defer s.se.Execute(context.Background(), "drop table test_table")
_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "create table test_table (c1 int, c2 int default 1, index (c1))")
c.Assert(err, IsNil)
// Create two sessions.
se, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
se1, err := session.CreateSession(s.store)
c.Assert(err, IsNil)
_, err = se1.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
intercept := &ddl.TestInterceptor{}
firstConnID := uint64(1)
finishedCnt := int32(0)
interval := 5 * time.Millisecond
var sessionCnt int32 // sessionCnt is the number of sessions that enter the OnGetInfoSchema function.
intercept.OnGetInfoSchemaExported = func(ctx sessionctx.Context, is infoschema.InfoSchema) infoschema.InfoSchema {
// The following code is for testing.
// Make sure the two sessions get the same information schema before executing DDL.
// After the first session executes its DDL, the second session executes its own.
var info infoschema.InfoSchema
atomic.AddInt32(&sessionCnt, 1)
for {
// Make sure there are two sessions running here.
if atomic.LoadInt32(&sessionCnt) == 2 {
info = is
break
}
// Print a log to notify us if TestParallelDDLBeforeRunDDLJob hangs.
log.Info("sleep in TestParallelDDLBeforeRunDDLJob", zap.String("interval", interval.String()))
time.Sleep(interval)
}
currID := ctx.GetSessionVars().ConnectionID
for {
seCnt := atomic.LoadInt32(&sessionCnt)
// Make sure the two sessions have got the same information schema, and let the first session continue,
// or, once the first session has finished this SQL (seCnt == finishedCnt), let the other sessions continue.
if currID == firstConnID || seCnt == finishedCnt {
break
}
// Print a log to notify us if TestParallelDDLBeforeRunDDLJob hangs.
log.Info("sleep in TestParallelDDLBeforeRunDDLJob", zap.String("interval", interval.String()))
time.Sleep(interval)
}
return info
}
d := s.dom.DDL()
d.(ddl.DDLForTest).SetInterceptoror(intercept)
// Make sure connection 1 executes a SQL statement before connection 2,
// and connection 2 executes its statement with an outdated information schema.
wg := sync.WaitGroup{}
wg.Add(2)
go func() {
defer wg.Done()
se.SetConnectionID(firstConnID)
_, err1 := se.Execute(context.Background(), "alter table test_table drop column c2")
c.Assert(err1, IsNil)
atomic.StoreInt32(&sessionCnt, finishedCnt)
}()
go func() {
defer wg.Done()
se1.SetConnectionID(2)
_, err2 := se1.Execute(context.Background(), "alter table test_table add column c2 int")
c.Assert(err2, NotNil)
c.Assert(strings.Contains(err2.Error(), "Information schema is changed"), IsTrue)
}()
wg.Wait()
intercept = &ddl.TestInterceptor{}
d.(ddl.DDLForTest).SetInterceptoror(intercept)
}
func (s *testStateChangeSuite) TestParallelAlterSchemaCharsetAndCollate(c *C) {
sql := "ALTER SCHEMA test_db_state CHARSET utf8mb4 COLLATE utf8mb4_general_ci"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, IsNil)
}
s.testControlParallelExecSQL(c, sql, sql, f)
sql = `SELECT default_character_set_name, default_collation_name
FROM information_schema.schemata
WHERE schema_name='test_db_state'`
tk := testkit.NewTestKit(c, s.store)
tk.MustQuery(sql).Check(testkit.Rows("utf8mb4 utf8mb4_general_ci"))
}
// TestParallelTruncateTableAndAddColumn tests adding a column while the table is being truncated.
func (s *testStateChangeSuite) TestParallelTruncateTableAndAddColumn(c *C) {
sql1 := "truncate table t"
sql2 := "alter table t add column c3 int"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, NotNil)
c.Assert(err2.Error(), Equals, "[domain:8028]Information schema is changed during the execution of the statement(for example, table definition may be updated by other DDL ran in parallel). If you see this error often, try increasing `tidb_max_delta_schema_count`. [try again later]")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
// TestParallelTruncateTableAndAddColumns tests adding columns while the table is being truncated.
func (s *testStateChangeSuite) TestParallelTruncateTableAndAddColumns(c *C) {
sql1 := "truncate table t"
sql2 := "alter table t add column c3 int, add column c4 int"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, NotNil)
c.Assert(err2.Error(), Equals, "[domain:8028]Information schema is changed during the execution of the statement(for example, table definition may be updated by other DDL ran in parallel). If you see this error often, try increasing `tidb_max_delta_schema_count`. [try again later]")
}
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
// TestParallelFlashbackTable tests parallel flashback table.
func (s *serialTestStateChangeSuite) TestParallelFlashbackTable(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil)
defer func(originGC bool) {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange"), IsNil)
if originGC {
ddl.EmulatorGCEnable()
} else {
ddl.EmulatorGCDisable()
}
}(ddl.IsEmulatorGCEnable())
// Disable emulator GC; otherwise, the emulator GC deletes table records as soon as possible after the drop table DDL executes.
ddl.EmulatorGCDisable()
gcTimeFormat := "20060102-15:04:05 -0700 MST"
timeBeforeDrop := time.Now().Add(0 - 48*60*60*time.Second).Format(gcTimeFormat)
safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '')
ON DUPLICATE KEY
UPDATE variable_value = '%[1]s'`
tk := testkit.NewTestKit(c, s.store)
// clear GC variables first.
tk.MustExec("delete from mysql.tidb where variable_name in ( 'tikv_gc_safe_point','tikv_gc_enable' )")
// set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// set GC enable.
err := gcutil.EnableGC(tk.Se)
c.Assert(err, IsNil)
// prepare dropped table.
tk.MustExec("use test_db_state")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int);")
tk.MustExec("drop table if exists t")
// Test parallel flashback table.
sql1 := "flashback table t to t_flashback"
f := func(c *C, err1, err2 error) {
c.Assert(err1, IsNil)
c.Assert(err2, NotNil)
c.Assert(err2.Error(), Equals, "[schema:1050]Table 't_flashback' already exists")
}
s.testControlParallelExecSQL(c, sql1, sql1, f)
// Test parallel flashback table with different name
tk.MustExec("drop table t_flashback")
sql1 = "flashback table t_flashback"
sql2 := "flashback table t_flashback to t_flashback2"
s.testControlParallelExecSQL(c, sql1, sql2, f)
}
| ddl/db_change_test.go | 1 | https://github.com/pingcap/tidb/commit/788dc28a7423d4b85ebe11bbce632733b056d63a | [
0.9607949256896973,
0.0323009267449379,
0.0001662022405071184,
0.00017774762818589807,
0.16180121898651123
] |
{
"id": 3,
"code_window": [
"\t\treturn ver, errors.Trace(err)\n",
"\t}\n",
"\n",
"\tdbInfo, err := t.GetDatabase(job.SchemaID)\n",
"\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tdbInfo, err := checkSchemaExistAndCancelNotExistJob(t, job)\n"
],
"file_path": "ddl/schema.go",
"type": "replace",
"edit_start_line_idx": 118
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package memdb
import (
"math"
"unsafe"
)
type arenaAddr struct {
blockIdx uint32
blockOffset uint32
}
func (addr arenaAddr) isNull() bool {
return addr.blockIdx == 0 && addr.blockOffset == 0
}
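// newArenaAddr stores blockIdx off by one so that the zero value of arenaAddr represents a null address.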
func newArenaAddr(idx int, offset uint32) arenaAddr {
return arenaAddr{
blockIdx: uint32(idx) + 1,
blockOffset: offset,
}
}
const (
alignMask = 1<<32 - 8 // 29 one bits followed by 3 zero bits, for aligning offsets to 8 bytes.
nullBlockOffset = math.MaxUint32
maxBlockSize = 128 << 20
)
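// For illustration (a sketch, not part of the original source): arenaBlock.alloc
// below rounds offsets up to a multiple of 8 via (length+7)&alignMask, e.g.
//   length = 5  -> offset = (5+7)&alignMask  = 8
//   length = 16 -> offset = (16+7)&alignMask = 16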
type arena struct {
blockSize int
blocks []arenaBlock
}
type arenaSnapshot struct {
blockSize int
blocks int
offsetInBlock int
}
func newArenaLocator() *arena {
return new(arena)
}
func (a *arena) snapshot() arenaSnapshot {
snap := arenaSnapshot{
blockSize: a.blockSize,
blocks: len(a.blocks),
}
if len(a.blocks) > 0 {
snap.offsetInBlock = a.blocks[len(a.blocks)-1].length
}
return snap
}
func (a *arena) revert(snap arenaSnapshot) {
for i := snap.blocks; i < len(a.blocks); i++ {
a.blocks[i] = arenaBlock{}
}
a.blocks = a.blocks[:snap.blocks]
if len(a.blocks) > 0 {
a.blocks[len(a.blocks)-1].length = snap.offsetInBlock
}
a.blockSize = snap.blockSize
}
func (a *arena) newNode(key []byte, v []byte, height int) (*node, arenaAddr) {
// The base level is already allocated in the node struct.
nodeSize := nodeHeaderSize + height*8 + 8 + len(key) + len(v)
addr, data := a.alloc(nodeSize)
node := (*node)(unsafe.Pointer(&data[0]))
node.keyLen = uint16(len(key))
node.height = uint16(height)
node.valLen = uint32(len(v))
copy(data[node.nodeLen():], key)
copy(data[node.nodeLen()+int(node.keyLen):], v)
return node, addr
}
func (a *arena) getFrom(addr arenaAddr) []byte {
return a.blocks[addr.blockIdx-1].getFrom(addr.blockOffset)
}
func (a *arena) alloc(size int) (arenaAddr, []byte) {
if size > maxBlockSize {
panic("alloc size is larger than max block size")
}
if len(a.blocks) == 0 {
a.enlarge(size, initBlockSize)
}
addr, data := a.allocInLastBlock(size)
if !addr.isNull() {
return addr, data
}
a.enlarge(size, a.blockSize<<1)
return a.allocInLastBlock(size)
}
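// A usage sketch (assumed, not from the original source): repeated arena.alloc
// calls fill the last block; once it overflows, enlarge doubles blockSize (capped
// at maxBlockSize) and appends a fresh block, so allocation stays amortized O(1).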
func (a *arena) enlarge(allocSize, blockSize int) {
a.blockSize = blockSize
for a.blockSize <= allocSize {
a.blockSize <<= 1
}
// The size will never be larger than maxBlockSize.
if a.blockSize > maxBlockSize {
a.blockSize = maxBlockSize
}
a.blocks = append(a.blocks, newArenaBlock(a.blockSize))
}
func (a *arena) allocInLastBlock(size int) (arenaAddr, []byte) {
idx := len(a.blocks) - 1
offset, data := a.blocks[idx].alloc(size)
if offset == nullBlockOffset {
return arenaAddr{}, nil
}
return newArenaAddr(idx, offset), data
}
type arenaBlock struct {
buf []byte
length int
}
func newArenaBlock(blockSize int) arenaBlock {
return arenaBlock{
buf: make([]byte, blockSize),
}
}
func (a *arenaBlock) getFrom(offset uint32) []byte {
return a.buf[offset:]
}
func (a *arenaBlock) alloc(size int) (uint32, []byte) {
// We must align the allocated address for node
// to make runtime.checkptrAlignment happy.
offset := (a.length + 7) & alignMask
newLen := offset + size
if newLen > len(a.buf) {
return nullBlockOffset, nil
}
a.length = newLen
return uint32(offset), a.buf[offset : offset+size]
}
| kv/memdb/arena.go | 0 | https://github.com/pingcap/tidb/commit/788dc28a7423d4b85ebe11bbce632733b056d63a | [
0.0001777120924089104,
0.00017116253729909658,
0.00016649298777338117,
0.00017118474352173507,
0.0000025734680093592033
] |
{
"id": 3,
"code_window": [
"\t\treturn ver, errors.Trace(err)\n",
"\t}\n",
"\n",
"\tdbInfo, err := t.GetDatabase(job.SchemaID)\n",
"\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tdbInfo, err := checkSchemaExistAndCancelNotExistJob(t, job)\n"
],
"file_path": "ddl/schema.go",
"type": "replace",
"edit_start_line_idx": 118
} | ---
name: "\U0001F947 Propose a Challenge Program task"
about: As a developer, I want to propose a Challenge Program task.
labels: challenge-program-2
---
## Description
## Score
* score number
## Mentor(s)
* [@xxxx](github url)
Contact the mentors: **#tidb-challenge-program** channel in [TiDB Community](https://join.slack.com/t/tidbcommunity/shared_invite/enQtNzc0MzI4ODExMDc4LWYwYmIzMjZkYzJiNDUxMmZlN2FiMGJkZjAyMzQ5NGU0NGY0NzI3NTYwMjAyNGQ1N2I2ZjAxNzc1OGUwYWM0NzE) Slack Workspace
## Recommended Skills
* skills 1
* skills 1
## Learning Materials
* Chinese: [TiDB 精选技术讲解文章](https://github.com/pingcap/presentations/blob/master/hackathon-2019/reference-document-of-hackathon-2019.md)
* English: [Awesome-Database-Learning](https://github.com/pingcap/awesome-database-learning)
| .github/ISSUE_TEMPLATE/challenge-program.md | 0 | https://github.com/pingcap/tidb/commit/788dc28a7423d4b85ebe11bbce632733b056d63a | [
0.0001711948571028188,
0.0001700549037195742,
0.00016923782823141664,
0.00016973201127257198,
8.309363579428464e-7
] |
{
"id": 3,
"code_window": [
"\t\treturn ver, errors.Trace(err)\n",
"\t}\n",
"\n",
"\tdbInfo, err := t.GetDatabase(job.SchemaID)\n",
"\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tdbInfo, err := checkSchemaExistAndCancelNotExistJob(t, job)\n"
],
"file_path": "ddl/schema.go",
"type": "replace",
"edit_start_line_idx": 118
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
// Package tikv provides tcp connection to kvserver.
package tikv
import (
"context"
"io"
"math"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
"github.com/opentracing/opentracing-go"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/coprocessor"
"github.com/pingcap/kvproto/pkg/debugpb"
"github.com/pingcap/kvproto/pkg/tikvpb"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/util/logutil"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
"google.golang.org/grpc/backoff"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
)
// MaxRecvMsgSize set max gRPC receive message size received from server. If any message size is larger than
// current value, an error will be reported from gRPC.
var MaxRecvMsgSize = math.MaxInt64
// Timeout durations.
var (
dialTimeout = 5 * time.Second
readTimeoutShort = 20 * time.Second // For requests that read/write several key-values.
ReadTimeoutMedium = 60 * time.Second // For requests that may need scan region.
ReadTimeoutLong = 150 * time.Second // For requests that may need scan region multiple times.
ReadTimeoutUltraLong = 3600 * time.Second // For requests that may scan many regions for tiflash.
GCTimeout = 5 * time.Minute
UnsafeDestroyRangeTimeout = 5 * time.Minute
AccessLockObserverTimeout = 10 * time.Second
)
const (
grpcInitialWindowSize = 1 << 30
grpcInitialConnWindowSize = 1 << 30
)
// Client is a client that sends RPC.
// It should not be used after calling Close().
type Client interface {
// Close should release all data.
Close() error
// SendRequest sends Request.
SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error)
}
type connArray struct {
// The target host.
target string
index uint32
v []*grpc.ClientConn
// streamTimeout is bound to a background goroutine that processes coprocessor streaming timeouts.
streamTimeout chan *tikvrpc.Lease
// batchConn is not nil when batching is enabled.
*batchConn
done chan struct{}
}
func newConnArray(maxSize uint, addr string, security config.Security, idleNotify *uint32, enableBatch bool) (*connArray, error) {
a := &connArray{
index: 0,
v: make([]*grpc.ClientConn, maxSize),
streamTimeout: make(chan *tikvrpc.Lease, 1024),
done: make(chan struct{}),
}
if err := a.Init(addr, security, idleNotify, enableBatch); err != nil {
return nil, err
}
return a, nil
}
func (a *connArray) Init(addr string, security config.Security, idleNotify *uint32, enableBatch bool) error {
a.target = addr
opt := grpc.WithInsecure()
if len(security.ClusterSSLCA) != 0 {
tlsConfig, err := security.ToTLSConfig()
if err != nil {
return errors.Trace(err)
}
opt = grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))
}
cfg := config.GetGlobalConfig()
var (
unaryInterceptor grpc.UnaryClientInterceptor
streamInterceptor grpc.StreamClientInterceptor
)
if cfg.OpenTracing.Enable {
unaryInterceptor = grpc_opentracing.UnaryClientInterceptor()
streamInterceptor = grpc_opentracing.StreamClientInterceptor()
}
allowBatch := (cfg.TiKVClient.MaxBatchSize > 0) && enableBatch
if allowBatch {
a.batchConn = newBatchConn(uint(len(a.v)), cfg.TiKVClient.MaxBatchSize, idleNotify)
a.pendingRequests = metrics.TiKVPendingBatchRequests.WithLabelValues(a.target)
}
keepAlive := cfg.TiKVClient.GrpcKeepAliveTime
keepAliveTimeout := cfg.TiKVClient.GrpcKeepAliveTimeout
for i := range a.v {
ctx, cancel := context.WithTimeout(context.Background(), dialTimeout)
conn, err := grpc.DialContext(
ctx,
addr,
opt,
grpc.WithInitialWindowSize(grpcInitialWindowSize),
grpc.WithInitialConnWindowSize(grpcInitialConnWindowSize),
grpc.WithUnaryInterceptor(unaryInterceptor),
grpc.WithStreamInterceptor(streamInterceptor),
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(MaxRecvMsgSize)),
grpc.WithConnectParams(grpc.ConnectParams{
Backoff: backoff.Config{
BaseDelay: 100 * time.Millisecond, // Default was 1s.
Multiplier: 1.6, // Default
Jitter: 0.2, // Default
MaxDelay: 3 * time.Second, // Default was 120s.
},
MinConnectTimeout: dialTimeout,
}),
grpc.WithKeepaliveParams(keepalive.ClientParameters{
Time: time.Duration(keepAlive) * time.Second,
Timeout: time.Duration(keepAliveTimeout) * time.Second,
PermitWithoutStream: true,
}),
)
cancel()
if err != nil {
// Cleanup if the initialization fails.
a.Close()
return errors.Trace(err)
}
a.v[i] = conn
if allowBatch {
batchClient := &batchCommandsClient{
target: a.target,
conn: conn,
batched: sync.Map{},
idAlloc: 0,
closed: 0,
tikvClientCfg: cfg.TiKVClient,
tikvLoad: &a.tikvTransportLayerLoad,
}
a.batchCommandsClients = append(a.batchCommandsClients, batchClient)
}
}
go tikvrpc.CheckStreamTimeoutLoop(a.streamTimeout, a.done)
if allowBatch {
go a.batchSendLoop(cfg.TiKVClient)
}
return nil
}
func (a *connArray) Get() *grpc.ClientConn {
next := atomic.AddUint32(&a.index, 1) % uint32(len(a.v))
return a.v[next]
}
func (a *connArray) Close() {
if a.batchConn != nil {
a.batchConn.Close()
}
for i, c := range a.v {
if c != nil {
err := c.Close()
terror.Log(errors.Trace(err))
a.v[i] = nil
}
}
close(a.done)
}
// rpcClient is RPC client struct.
// TODO: Add flow control between RPC clients in TiDB and RPC servers in TiKV.
// Since we use shared client connection to communicate to the same TiKV, it's possible
// that there are too many concurrent requests which overload the service of TiKV.
type rpcClient struct {
sync.RWMutex
conns map[string]*connArray
security config.Security
idleNotify uint32
// Periodically check for idle connections, then close and remove them.
// This implements background cleanup.
isClosed bool
}
func newRPCClient(security config.Security) *rpcClient {
return &rpcClient{
conns: make(map[string]*connArray),
security: security,
}
}
// NewTestRPCClient is for some external tests.
func NewTestRPCClient(security config.Security) Client {
return newRPCClient(security)
}
func (c *rpcClient) getConnArray(addr string, enableBatch bool) (*connArray, error) {
c.RLock()
if c.isClosed {
c.RUnlock()
return nil, errors.Errorf("rpcClient is closed")
}
array, ok := c.conns[addr]
c.RUnlock()
if !ok {
var err error
array, err = c.createConnArray(addr, enableBatch)
if err != nil {
return nil, err
}
}
return array, nil
}
func (c *rpcClient) createConnArray(addr string, enableBatch bool) (*connArray, error) {
c.Lock()
defer c.Unlock()
array, ok := c.conns[addr]
if !ok {
var err error
connCount := config.GetGlobalConfig().TiKVClient.GrpcConnectionCount
array, err = newConnArray(connCount, addr, c.security, &c.idleNotify, enableBatch)
if err != nil {
return nil, err
}
c.conns[addr] = array
}
return array, nil
}
func (c *rpcClient) closeConns() {
c.Lock()
if !c.isClosed {
c.isClosed = true
// close all connections
for _, array := range c.conns {
array.Close()
}
}
c.Unlock()
}
var sendReqHistCache sync.Map
type sendReqHistCacheKey struct {
tp tikvrpc.CmdType
id uint64
}
func (c *rpcClient) updateTiKVSendReqHistogram(req *tikvrpc.Request, start time.Time) {
key := sendReqHistCacheKey{
req.Type,
req.Context.GetPeer().GetStoreId(),
}
v, ok := sendReqHistCache.Load(key)
if !ok {
reqType := req.Type.String()
storeID := strconv.FormatUint(req.Context.GetPeer().GetStoreId(), 10)
v = metrics.TiKVSendReqHistogram.WithLabelValues(reqType, storeID)
sendReqHistCache.Store(key, v)
}
v.(prometheus.Observer).Observe(time.Since(start).Seconds())
}
// SendRequest sends a Request to server and receives Response.
func (c *rpcClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span1 := span.Tracer().StartSpan("rpcClient.SendRequest", opentracing.ChildOf(span.Context()))
defer span1.Finish()
ctx = opentracing.ContextWithSpan(ctx, span1)
}
start := time.Now()
defer c.updateTiKVSendReqHistogram(req, start)
if atomic.CompareAndSwapUint32(&c.idleNotify, 1, 0) {
c.recycleIdleConnArray()
}
// TiDB will not send batch commands to TiFlash, to resolve the conflict with Batch Cop Request.
enableBatch := req.StoreTp != kv.TiDB && req.StoreTp != kv.TiFlash
connArray, err := c.getConnArray(addr, enableBatch)
if err != nil {
return nil, errors.Trace(err)
}
// The TiDB RPC server supports batch RPC, but a batch connection sends heartbeats, which is unnecessary since
// requests to TiDB are not high frequency.
if config.GetGlobalConfig().TiKVClient.MaxBatchSize > 0 && enableBatch {
if batchReq := req.ToBatchCommandsRequest(); batchReq != nil {
return sendBatchRequest(ctx, addr, connArray.batchConn, batchReq, timeout)
}
}
clientConn := connArray.Get()
if state := clientConn.GetState(); state == connectivity.TransientFailure {
storeID := strconv.FormatUint(req.Context.GetPeer().GetStoreId(), 10)
metrics.GRPCConnTransientFailureCounter.WithLabelValues(addr, storeID).Inc()
}
if req.IsDebugReq() {
client := debugpb.NewDebugClient(clientConn)
ctx1, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return tikvrpc.CallDebugRPC(ctx1, client, req)
}
client := tikvpb.NewTikvClient(clientConn)
if req.Type == tikvrpc.CmdBatchCop {
return c.getBatchCopStreamResponse(ctx, client, req, timeout, connArray)
}
if req.Type == tikvrpc.CmdCopStream {
return c.getCopStreamResponse(ctx, client, req, timeout, connArray)
}
ctx1, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return tikvrpc.CallRPC(ctx1, client, req)
}
func (c *rpcClient) getCopStreamResponse(ctx context.Context, client tikvpb.TikvClient, req *tikvrpc.Request, timeout time.Duration, connArray *connArray) (*tikvrpc.Response, error) {
// Coprocessor streaming request.
// Use context to support timeout for grpc streaming client.
ctx1, cancel := context.WithCancel(ctx)
// Should NOT call defer cancel() here because it would cancel further stream.Recv() calls.
// We store it in copStream.Lease.Cancel and call it at copStream.Close.
// TODO: add unit test for SendRequest.
resp, err := tikvrpc.CallRPC(ctx1, client, req)
if err != nil {
cancel()
return nil, errors.Trace(err)
}
// Put the lease object into the timeout channel so it will be checked periodically.
copStream := resp.Resp.(*tikvrpc.CopStreamResponse)
copStream.Timeout = timeout
copStream.Lease.Cancel = cancel
connArray.streamTimeout <- &copStream.Lease
// Read the first streaming response to get CopStreamResponse.
// This can make error handling much easier, because SendReq() retries on
// region errors automatically.
var first *coprocessor.Response
first, err = copStream.Recv()
if err != nil {
if errors.Cause(err) != io.EOF {
return nil, errors.Trace(err)
}
logutil.BgLogger().Debug("copstream returns nothing for the request.")
}
copStream.Response = first
return resp, nil
}
func (c *rpcClient) getBatchCopStreamResponse(ctx context.Context, client tikvpb.TikvClient, req *tikvrpc.Request, timeout time.Duration, connArray *connArray) (*tikvrpc.Response, error) {
// Coprocessor streaming request.
// Use context to support timeout for grpc streaming client.
ctx1, cancel := context.WithCancel(ctx)
// Should NOT call defer cancel() here because it would cancel further stream.Recv() calls.
// We store it in copStream.Lease.Cancel and call it at copStream.Close.
// TODO: add unit test for SendRequest.
resp, err := tikvrpc.CallRPC(ctx1, client, req)
if err != nil {
cancel()
return nil, errors.Trace(err)
}
// Put the lease object into the timeout channel so it will be checked periodically.
copStream := resp.Resp.(*tikvrpc.BatchCopStreamResponse)
copStream.Timeout = timeout
copStream.Lease.Cancel = cancel
connArray.streamTimeout <- &copStream.Lease
// Read the first streaming response to get BatchCopStreamResponse.
// This can make error handling much easier, because SendReq() retries on
// region errors automatically.
var first *coprocessor.BatchResponse
first, err = copStream.Recv()
if err != nil {
if errors.Cause(err) != io.EOF {
return nil, errors.Trace(err)
}
logutil.BgLogger().Debug("batch copstream returns nothing for the request.")
}
copStream.BatchResponse = first
return resp, nil
}
func (c *rpcClient) Close() error {
// TODO: add a unit test for SendRequest After Closed
c.closeConns()
return nil
}
| store/tikv/client.go | 0 | https://github.com/pingcap/tidb/commit/788dc28a7423d4b85ebe11bbce632733b056d63a | [
0.004686517640948296,
0.000455229077488184,
0.00016554429021198303,
0.0001728954230202362,
0.0008447467116639018
] |
{
"id": 4,
"code_window": [
"\tif err != nil {\n",
"\t\tjob.State = model.JobStateCancelled\n",
"\t\treturn ver, errors.Trace(err)\n",
"\t}\n",
"\n",
"\tif dbInfo.Charset == toCharset && dbInfo.Collate == toCollate {\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "ddl/schema.go",
"type": "replace",
"edit_start_line_idx": 120
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"fmt"
"strings"
"sync/atomic"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/ddl/util"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/sqlexec"
"go.uber.org/zap"
)
// adjustColumnInfoInAddColumn is used to set the correct position of column info when adding a column.
// 1. The added column was appended at the end of tblInfo.Columns because the DDL state was not public then.
// It should be moved to the correct position when the DDL state changes to public.
// 2. The offset of the column should also be set to the right value.
func adjustColumnInfoInAddColumn(tblInfo *model.TableInfo, offset int) {
oldCols := tblInfo.Columns
newCols := make([]*model.ColumnInfo, 0, len(oldCols))
newCols = append(newCols, oldCols[:offset]...)
newCols = append(newCols, oldCols[len(oldCols)-1])
newCols = append(newCols, oldCols[offset:len(oldCols)-1]...)
// Adjust column offset.
offsetChanged := make(map[int]int, len(newCols)-offset-1)
for i := offset + 1; i < len(newCols); i++ {
offsetChanged[newCols[i].Offset] = i
newCols[i].Offset = i
}
newCols[offset].Offset = offset
// Update index column offset info.
// TODO: There may be some corner cases for index column offsets, we may check this later.
for _, idx := range tblInfo.Indices {
for _, col := range idx.Columns {
newOffset, ok := offsetChanged[col.Offset]
if ok {
col.Offset = newOffset
}
}
}
tblInfo.Columns = newCols
}
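// For example (a hypothetical table, not from the original source): given columns
// [a, b, c, new] where "new" was appended during the DDL and offset=1 (i.e. ADD
// COLUMN new AFTER a), adjustColumnInfoInAddColumn reorders them to [a, new, b, c]
// and rewrites the offsets of b and c, plus any index columns referring to them.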
// adjustColumnInfoInDropColumn is used to set the correct position of column info when dropping a column.
// 1. The offset of the column should be set to the last of the columns.
// 2. The dropped column is moved to the end of tblInfo.Columns because it is no longer public.
func adjustColumnInfoInDropColumn(tblInfo *model.TableInfo, offset int) {
oldCols := tblInfo.Columns
// Adjust column offset.
offsetChanged := make(map[int]int, len(oldCols)-offset-1)
for i := offset + 1; i < len(oldCols); i++ {
offsetChanged[oldCols[i].Offset] = i - 1
oldCols[i].Offset = i - 1
}
oldCols[offset].Offset = len(oldCols) - 1
// For expression index, we drop hidden columns and index simultaneously.
// So we need to change the offset of expression index.
offsetChanged[offset] = len(oldCols) - 1
// Update index column offset info.
// TODO: There may be some corner cases for index column offsets, we may check this later.
for _, idx := range tblInfo.Indices {
for _, col := range idx.Columns {
newOffset, ok := offsetChanged[col.Offset]
if ok {
col.Offset = newOffset
}
}
}
newCols := make([]*model.ColumnInfo, 0, len(oldCols))
newCols = append(newCols, oldCols[:offset]...)
newCols = append(newCols, oldCols[offset+1:]...)
newCols = append(newCols, oldCols[offset])
tblInfo.Columns = newCols
}
func createColumnInfo(tblInfo *model.TableInfo, colInfo *model.ColumnInfo, pos *ast.ColumnPosition) (*model.ColumnInfo, *ast.ColumnPosition, int, error) {
// Check column name duplicate.
cols := tblInfo.Columns
offset := len(cols)
// Should initialize pos when it is nil.
if pos == nil {
pos = &ast.ColumnPosition{}
}
// Get column offset.
if pos.Tp == ast.ColumnPositionFirst {
offset = 0
} else if pos.Tp == ast.ColumnPositionAfter {
c := model.FindColumnInfo(cols, pos.RelativeColumn.Name.L)
if c == nil {
return nil, pos, 0, infoschema.ErrColumnNotExists.GenWithStackByArgs(pos.RelativeColumn, tblInfo.Name)
}
// Insert offset is after the mentioned column.
offset = c.Offset + 1
}
colInfo.ID = allocateColumnID(tblInfo)
colInfo.State = model.StateNone
// To support adding columns asynchronously, we should mark its offset as the last column.
// So that we can use the original column offset to get the value from the row.
colInfo.Offset = len(cols)
// Append the column info to the end of tblInfo.Columns.
// It will be reordered to the right offset in "Columns" when its state changes to public.
tblInfo.Columns = append(cols, colInfo)
return colInfo, pos, offset, nil
}
func checkAddColumn(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.ColumnInfo, *model.ColumnInfo, *ast.ColumnPosition, int, error) {
schemaID := job.SchemaID
tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID)
if err != nil {
return nil, nil, nil, nil, 0, errors.Trace(err)
}
col := &model.ColumnInfo{}
pos := &ast.ColumnPosition{}
offset := 0
err = job.DecodeArgs(col, pos, &offset)
if err != nil {
job.State = model.JobStateCancelled
return nil, nil, nil, nil, 0, errors.Trace(err)
}
columnInfo := model.FindColumnInfo(tblInfo.Columns, col.Name.L)
if columnInfo != nil {
if columnInfo.State == model.StatePublic {
// We already have a column with the same column name.
job.State = model.JobStateCancelled
return nil, nil, nil, nil, 0, infoschema.ErrColumnExists.GenWithStackByArgs(col.Name)
}
}
return tblInfo, columnInfo, col, pos, offset, nil
}
func onAddColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) {
// Handle the rolling back job.
if job.IsRollingback() {
ver, err = onDropColumn(t, job)
if err != nil {
return ver, errors.Trace(err)
}
return ver, nil
}
failpoint.Inject("errorBeforeDecodeArgs", func(val failpoint.Value) {
if val.(bool) {
failpoint.Return(ver, errors.New("occur an error before decode args"))
}
})
tblInfo, columnInfo, col, pos, offset, err := checkAddColumn(t, job)
if err != nil {
return ver, errors.Trace(err)
}
if columnInfo == nil {
columnInfo, _, offset, err = createColumnInfo(tblInfo, col, pos)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
logutil.BgLogger().Info("[ddl] run add column job", zap.String("job", job.String()), zap.Reflect("columnInfo", *columnInfo), zap.Int("offset", offset))
// Set offset arg to job.
if offset != 0 {
job.Args = []interface{}{columnInfo, pos, offset}
}
if err = checkAddColumnTooManyColumns(len(tblInfo.Columns)); err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
}
originalState := columnInfo.State
switch columnInfo.State {
case model.StateNone:
// none -> delete only
job.SchemaState = model.StateDeleteOnly
columnInfo.State = model.StateDeleteOnly
ver, err = updateVersionAndTableInfoWithCheck(t, job, tblInfo, originalState != columnInfo.State)
case model.StateDeleteOnly:
// delete only -> write only
job.SchemaState = model.StateWriteOnly
columnInfo.State = model.StateWriteOnly
ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfo.State)
case model.StateWriteOnly:
// write only -> reorganization
job.SchemaState = model.StateWriteReorganization
columnInfo.State = model.StateWriteReorganization
ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfo.State)
case model.StateWriteReorganization:
// reorganization -> public
// Adjust table column offset.
adjustColumnInfoInAddColumn(tblInfo, offset)
columnInfo.State = model.StatePublic
ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfo.State)
if err != nil {
return ver, errors.Trace(err)
}
// Finish this job.
job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
asyncNotifyEvent(d, &util.Event{Tp: model.ActionAddColumn, TableInfo: tblInfo, ColumnInfos: []*model.ColumnInfo{columnInfo}})
default:
err = ErrInvalidDDLState.GenWithStackByArgs("column", columnInfo.State)
}
return ver, errors.Trace(err)
}
func checkAddColumns(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model.ColumnInfo, []*model.ColumnInfo, []*ast.ColumnPosition, []int, []bool, error) {
schemaID := job.SchemaID
tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID)
if err != nil {
return nil, nil, nil, nil, nil, nil, errors.Trace(err)
}
columns := []*model.ColumnInfo{}
positions := []*ast.ColumnPosition{}
offsets := []int{}
ifNotExists := []bool{}
err = job.DecodeArgs(&columns, &positions, &offsets, &ifNotExists)
if err != nil {
job.State = model.JobStateCancelled
return nil, nil, nil, nil, nil, nil, errors.Trace(err)
}
columnInfos := make([]*model.ColumnInfo, 0, len(columns))
newColumns := make([]*model.ColumnInfo, 0, len(columns))
newPositions := make([]*ast.ColumnPosition, 0, len(columns))
newOffsets := make([]int, 0, len(columns))
newIfNotExists := make([]bool, 0, len(columns))
for i, col := range columns {
columnInfo := model.FindColumnInfo(tblInfo.Columns, col.Name.L)
if columnInfo != nil {
if columnInfo.State == model.StatePublic {
// We already have a column with the same column name.
if ifNotExists[i] {
// TODO: Should return a warning.
logutil.BgLogger().Warn("[ddl] check add columns, duplicate column", zap.Stringer("col", col.Name))
continue
}
job.State = model.JobStateCancelled
return nil, nil, nil, nil, nil, nil, infoschema.ErrColumnExists.GenWithStackByArgs(col.Name)
}
columnInfos = append(columnInfos, columnInfo)
}
newColumns = append(newColumns, columns[i])
newPositions = append(newPositions, positions[i])
newOffsets = append(newOffsets, offsets[i])
newIfNotExists = append(newIfNotExists, ifNotExists[i])
}
return tblInfo, columnInfos, newColumns, newPositions, newOffsets, newIfNotExists, nil
}
func setColumnsState(columnInfos []*model.ColumnInfo, state model.SchemaState) {
for i := range columnInfos {
columnInfos[i].State = state
}
}
func onAddColumns(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) {
// Handle the rolling back job.
if job.IsRollingback() {
ver, err = onDropColumns(t, job)
if err != nil {
return ver, errors.Trace(err)
}
return ver, nil
}
failpoint.Inject("errorBeforeDecodeArgs", func(val failpoint.Value) {
if val.(bool) {
failpoint.Return(ver, errors.New("occur an error before decode args"))
}
})
tblInfo, columnInfos, columns, positions, offsets, ifNotExists, err := checkAddColumns(t, job)
if err != nil {
return ver, errors.Trace(err)
}
if len(columnInfos) == 0 {
if len(columns) == 0 {
job.State = model.JobStateCancelled
return ver, nil
}
for i := range columns {
columnInfo, pos, offset, err := createColumnInfo(tblInfo, columns[i], positions[i])
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
logutil.BgLogger().Info("[ddl] run add columns job", zap.String("job", job.String()), zap.Reflect("columnInfo", *columnInfo), zap.Int("offset", offset))
positions[i] = pos
offsets[i] = offset
if err = checkAddColumnTooManyColumns(len(tblInfo.Columns)); err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
columnInfos = append(columnInfos, columnInfo)
}
// Set arg to job.
job.Args = []interface{}{columnInfos, positions, offsets, ifNotExists}
}
originalState := columnInfos[0].State
switch columnInfos[0].State {
case model.StateNone:
// none -> delete only
job.SchemaState = model.StateDeleteOnly
setColumnsState(columnInfos, model.StateDeleteOnly)
ver, err = updateVersionAndTableInfoWithCheck(t, job, tblInfo, originalState != columnInfos[0].State)
case model.StateDeleteOnly:
// delete only -> write only
job.SchemaState = model.StateWriteOnly
setColumnsState(columnInfos, model.StateWriteOnly)
ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfos[0].State)
case model.StateWriteOnly:
// write only -> reorganization
job.SchemaState = model.StateWriteReorganization
setColumnsState(columnInfos, model.StateWriteReorganization)
ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfos[0].State)
case model.StateWriteReorganization:
// reorganization -> public
// Adjust table column offsets.
oldCols := tblInfo.Columns[:len(tblInfo.Columns)-len(offsets)]
newCols := tblInfo.Columns[len(tblInfo.Columns)-len(offsets):]
tblInfo.Columns = oldCols
for i := range offsets {
// For multiple columns with after position, should adjust offsets.
// e.g. create table t(a int);
// alter table t add column b int after a, add column c int after a;
// alter table t add column a1 int after a, add column b1 int after b, add column c1 int after c;
// alter table t add column a1 int after a, add column b1 int first;
if positions[i].Tp == ast.ColumnPositionAfter {
for j := 0; j < i; j++ {
if (positions[j].Tp == ast.ColumnPositionAfter && offsets[j] < offsets[i]) || positions[j].Tp == ast.ColumnPositionFirst {
offsets[i]++
}
}
}
tblInfo.Columns = append(tblInfo.Columns, newCols[i])
adjustColumnInfoInAddColumn(tblInfo, offsets[i])
}
setColumnsState(columnInfos, model.StatePublic)
ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfos[0].State)
if err != nil {
return ver, errors.Trace(err)
}
// Finish this job.
job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
asyncNotifyEvent(d, &util.Event{Tp: model.ActionAddColumns, TableInfo: tblInfo, ColumnInfos: columnInfos})
default:
err = ErrInvalidDDLState.GenWithStackByArgs("column", columnInfos[0].State)
}
return ver, errors.Trace(err)
}
func onDropColumns(t *meta.Meta, job *model.Job) (ver int64, _ error) {
tblInfo, colInfos, delCount, err := checkDropColumns(t, job)
if err != nil {
return ver, errors.Trace(err)
}
if len(colInfos) == 0 {
job.State = model.JobStateCancelled
return ver, nil
}
originalState := colInfos[0].State
switch colInfos[0].State {
case model.StatePublic:
// public -> write only
job.SchemaState = model.StateWriteOnly
setColumnsState(colInfos, model.StateWriteOnly)
for _, colInfo := range colInfos {
err = checkDropColumnForStatePublic(tblInfo, colInfo)
if err != nil {
return ver, errors.Trace(err)
}
}
ver, err = updateVersionAndTableInfoWithCheck(t, job, tblInfo, originalState != colInfos[0].State)
case model.StateWriteOnly:
// write only -> delete only
job.SchemaState = model.StateDeleteOnly
setColumnsState(colInfos, model.StateDeleteOnly)
ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != colInfos[0].State)
case model.StateDeleteOnly:
// delete only -> reorganization
job.SchemaState = model.StateDeleteReorganization
setColumnsState(colInfos, model.StateDeleteReorganization)
ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != colInfos[0].State)
case model.StateDeleteReorganization:
// reorganization -> absent
// All reorganization jobs are done, drop this column.
tblInfo.Columns = tblInfo.Columns[:len(tblInfo.Columns)-delCount]
setColumnsState(colInfos, model.StateNone)
ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != colInfos[0].State)
if err != nil {
return ver, errors.Trace(err)
}
// Finish this job.
if job.IsRollingback() {
job.FinishTableJob(model.JobStateRollbackDone, model.StateNone, ver, tblInfo)
} else {
job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo)
}
default:
err = errInvalidDDLJob.GenWithStackByArgs("table", tblInfo.State)
}
return ver, errors.Trace(err)
}
func checkDropColumns(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model.ColumnInfo, int, error) {
schemaID := job.SchemaID
tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID)
if err != nil {
return nil, nil, 0, errors.Trace(err)
}
var colNames []model.CIStr
var ifExists []bool
err = job.DecodeArgs(&colNames, &ifExists)
if err != nil {
job.State = model.JobStateCancelled
return nil, nil, 0, errors.Trace(err)
}
newColNames := make([]model.CIStr, 0, len(colNames))
colInfos := make([]*model.ColumnInfo, 0, len(colNames))
newIfExists := make([]bool, 0, len(colNames))
for i, colName := range colNames {
colInfo := model.FindColumnInfo(tblInfo.Columns, colName.L)
if colInfo == nil || colInfo.Hidden {
if ifExists[i] {
// TODO: Should return a warning.
logutil.BgLogger().Warn(fmt.Sprintf("column %s doesn't exist", colName))
continue
}
job.State = model.JobStateCancelled
return nil, nil, 0, ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", colName)
}
if err = isDroppableColumn(tblInfo, colName); err != nil {
job.State = model.JobStateCancelled
return nil, nil, 0, errors.Trace(err)
}
newColNames = append(newColNames, colName)
newIfExists = append(newIfExists, ifExists[i])
colInfos = append(colInfos, colInfo)
}
job.Args = []interface{}{newColNames, newIfExists}
return tblInfo, colInfos, len(colInfos), nil
}
func checkDropColumnForStatePublic(tblInfo *model.TableInfo, colInfo *model.ColumnInfo) (err error) {
// Set this column's offset to the last and reset all following columns' offsets.
adjustColumnInfoInDropColumn(tblInfo, colInfo.Offset)
// When the dropped column has the not-null flag and no default value, we can backfill the column value like "add column".
// NOTE: If the state of StateWriteOnly can be rolled back, we'd better reconsider the original default value.
// And we need to consider columns without the not-null flag.
if colInfo.OriginDefaultValue == nil && mysql.HasNotNullFlag(colInfo.Flag) {
// If the column is a timestamp defaulting to current_timestamp, and the DDL owner is a new-version TiDB that sets column.Version to 1,
// then an old-version TiDB updating a record in the column's write-only stage will use the wrong default value of the dropped column.
// That is because the new version stores the column default value as UTC time, while old-version TiDB interprets the default value as time in the system timezone.
// But currently this is OK, because we can't cancel the drop column job while it is running,
// so the column will be dropped successfully and the client will never see the wrong default value of the dropped column.
// More info about this problem, see PR#9115.
colInfo.OriginDefaultValue, err = generateOriginDefaultValue(colInfo)
if err != nil {
return err
}
}
return nil
}
func onDropColumn(t *meta.Meta, job *model.Job) (ver int64, _ error) {
tblInfo, colInfo, err := checkDropColumn(t, job)
if err != nil {
return ver, errors.Trace(err)
}
originalState := colInfo.State
switch colInfo.State {
case model.StatePublic:
// public -> write only
job.SchemaState = model.StateWriteOnly
colInfo.State = model.StateWriteOnly
err = checkDropColumnForStatePublic(tblInfo, colInfo)
if err != nil {
return ver, errors.Trace(err)
}
ver, err = updateVersionAndTableInfoWithCheck(t, job, tblInfo, originalState != colInfo.State)
case model.StateWriteOnly:
// write only -> delete only
job.SchemaState = model.StateDeleteOnly
colInfo.State = model.StateDeleteOnly
ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != colInfo.State)
case model.StateDeleteOnly:
// delete only -> reorganization
job.SchemaState = model.StateDeleteReorganization
colInfo.State = model.StateDeleteReorganization
ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != colInfo.State)
case model.StateDeleteReorganization:
// reorganization -> absent
// All reorganization jobs are done, drop this column.
tblInfo.Columns = tblInfo.Columns[:len(tblInfo.Columns)-1]
colInfo.State = model.StateNone
ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != colInfo.State)
if err != nil {
return ver, errors.Trace(err)
}
// Finish this job.
if job.IsRollingback() {
job.FinishTableJob(model.JobStateRollbackDone, model.StateNone, ver, tblInfo)
} else {
job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo)
}
default:
err = errInvalidDDLJob.GenWithStackByArgs("table", tblInfo.State)
}
return ver, errors.Trace(err)
}
func checkDropColumn(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.ColumnInfo, error) {
schemaID := job.SchemaID
tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID)
if err != nil {
return nil, nil, errors.Trace(err)
}
var colName model.CIStr
err = job.DecodeArgs(&colName)
if err != nil {
job.State = model.JobStateCancelled
return nil, nil, errors.Trace(err)
}
colInfo := model.FindColumnInfo(tblInfo.Columns, colName.L)
if colInfo == nil || colInfo.Hidden {
job.State = model.JobStateCancelled
return nil, nil, ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", colName)
}
if err = isDroppableColumn(tblInfo, colName); err != nil {
job.State = model.JobStateCancelled
return nil, nil, errors.Trace(err)
}
return tblInfo, colInfo, nil
}
func onSetDefaultValue(t *meta.Meta, job *model.Job) (ver int64, _ error) {
newCol := &model.ColumnInfo{}
err := job.DecodeArgs(newCol)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
return updateColumnDefaultValue(t, job, newCol, &newCol.Name)
}
func (w *worker) onModifyColumn(t *meta.Meta, job *model.Job) (ver int64, _ error) {
newCol := &model.ColumnInfo{}
oldColName := &model.CIStr{}
pos := &ast.ColumnPosition{}
var modifyColumnTp byte
err := job.DecodeArgs(newCol, oldColName, pos, &modifyColumnTp)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
return w.doModifyColumn(t, job, newCol, oldColName, pos, modifyColumnTp)
}
// doModifyColumn updates the column information and reorders all columns.
func (w *worker) doModifyColumn(t *meta.Meta, job *model.Job, newCol *model.ColumnInfo, oldName *model.CIStr, pos *ast.ColumnPosition, modifyColumnTp byte) (ver int64, _ error) {
dbInfo, err := t.GetDatabase(job.SchemaID)
if err != nil {
return ver, errors.Trace(err)
}
tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID)
if err != nil {
return ver, errors.Trace(err)
}
oldCol := model.FindColumnInfo(tblInfo.Columns, oldName.L)
if job.IsRollingback() {
ver, err = rollbackModifyColumnJob(t, tblInfo, job, oldCol, modifyColumnTp)
if err != nil {
return ver, errors.Trace(err)
}
job.FinishTableJob(model.JobStateRollbackDone, model.StateNone, ver, tblInfo)
return ver, nil
}
if oldCol == nil || oldCol.State != model.StatePublic {
job.State = model.JobStateCancelled
return ver, infoschema.ErrColumnNotExists.GenWithStackByArgs(oldName, tblInfo.Name)
}
// If we want to rename the column name, we need to check whether it already exists.
if newCol.Name.L != oldName.L {
c := model.FindColumnInfo(tblInfo.Columns, newCol.Name.L)
if c != nil {
job.State = model.JobStateCancelled
return ver, infoschema.ErrColumnExists.GenWithStackByArgs(newCol.Name)
}
}
failpoint.Inject("uninitializedOffsetAndState", func(val failpoint.Value) {
if val.(bool) {
if newCol.State != model.StatePublic {
failpoint.Return(ver, errors.New("the column state is wrong"))
}
}
})
// Change the column from null to not null.
if !mysql.HasNotNullFlag(oldCol.Flag) && mysql.HasNotNullFlag(newCol.Flag) {
noPreventNullFlag := !mysql.HasPreventNullInsertFlag(oldCol.Flag)
// Introduce the `mysql.PreventNullInsertFlag` flag to prevent users from inserting or updating null values.
err = modifyColsFromNull2NotNull(w, dbInfo, tblInfo, []*model.ColumnInfo{oldCol}, newCol.Name, oldCol.Tp != newCol.Tp)
if err != nil {
if ErrWarnDataTruncated.Equal(err) || errInvalidUseOfNull.Equal(err) {
job.State = model.JobStateRollingback
}
return ver, err
}
// The column should get into prevent null status first.
if noPreventNullFlag {
return updateVersionAndTableInfoWithCheck(t, job, tblInfo, true)
}
}
// We need the latest column's offset and state. This information can be obtained from the store.
newCol.Offset = oldCol.Offset
newCol.State = oldCol.State
// Calculate column's new position.
oldPos, newPos := oldCol.Offset, oldCol.Offset
if pos.Tp == ast.ColumnPositionAfter {
if oldName.L == pos.RelativeColumn.Name.L {
// `alter table tableName modify column b int after b` will return ver, ErrColumnNotExists.
// The type definition may already have been changed from 'null' to 'not null' before this point, so roll back the job when an error occurs.
job.State = model.JobStateRollingback
return ver, infoschema.ErrColumnNotExists.GenWithStackByArgs(oldName, tblInfo.Name)
}
relative := model.FindColumnInfo(tblInfo.Columns, pos.RelativeColumn.Name.L)
if relative == nil || relative.State != model.StatePublic {
job.State = model.JobStateRollingback
return ver, infoschema.ErrColumnNotExists.GenWithStackByArgs(pos.RelativeColumn, tblInfo.Name)
}
if relative.Offset < oldPos {
newPos = relative.Offset + 1
} else {
newPos = relative.Offset
}
} else if pos.Tp == ast.ColumnPositionFirst {
newPos = 0
}
columnChanged := make(map[string]*model.ColumnInfo)
columnChanged[oldName.L] = newCol
if newPos == oldPos {
tblInfo.Columns[newPos] = newCol
} else {
cols := tblInfo.Columns
// Reorder columns in place.
if newPos < oldPos {
copy(cols[newPos+1:], cols[newPos:oldPos])
} else {
copy(cols[oldPos:], cols[oldPos+1:newPos+1])
}
cols[newPos] = newCol
for i, col := range tblInfo.Columns {
if col.Offset != i {
columnChanged[col.Name.L] = col
col.Offset = i
}
}
}
// Change offset and name in indices.
for _, idx := range tblInfo.Indices {
for _, c := range idx.Columns {
if newCol, ok := columnChanged[c.Name.L]; ok {
c.Name = newCol.Name
c.Offset = newCol.Offset
}
}
}
ver, err = updateVersionAndTableInfoWithCheck(t, job, tblInfo, true)
if err != nil {
// The type definition may already have been changed from 'null' to 'not null' before this point, so roll back the job when an error occurs.
job.State = model.JobStateRollingback
return ver, errors.Trace(err)
}
job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
return ver, nil
}
// checkForNullValue ensures there are no null values in the given columns of this table.
// `isDataTruncated` indicates whether the new field and the old field type are the same, in order to be compatible with mysql.
func checkForNullValue(ctx sessionctx.Context, isDataTruncated bool, schema, table, newCol model.CIStr, oldCols ...*model.ColumnInfo) error {
colsStr := ""
for i, col := range oldCols {
if i == 0 {
colsStr += "`" + col.Name.L + "` is null"
} else {
colsStr += " or `" + col.Name.L + "` is null"
}
}
sql := fmt.Sprintf("select 1 from `%s`.`%s` where %s limit 1;", schema.L, table.L, colsStr)
rows, _, err := ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(sql)
if err != nil {
return errors.Trace(err)
}
rowCount := len(rows)
if rowCount != 0 {
if isDataTruncated {
return ErrWarnDataTruncated.GenWithStackByArgs(newCol.L, rowCount)
}
return errInvalidUseOfNull
}
return nil
}
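// As a sketch, for columns (a, b) on table test.t the generated query would be:
//   select 1 from `test`.`t` where `a` is null or `b` is null limit 1;
// (the table and column names here are illustrative).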
func updateColumnDefaultValue(t *meta.Meta, job *model.Job, newCol *model.ColumnInfo, oldColName *model.CIStr) (ver int64, _ error) {
tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID)
if err != nil {
return ver, errors.Trace(err)
}
oldCol := model.FindColumnInfo(tblInfo.Columns, oldColName.L)
if oldCol == nil || oldCol.State != model.StatePublic {
job.State = model.JobStateCancelled
return ver, infoschema.ErrColumnNotExists.GenWithStackByArgs(newCol.Name, tblInfo.Name)
}
// The newCol's offset may be the value of the old schema version, so we can't use newCol directly.
oldCol.DefaultValue = newCol.DefaultValue
oldCol.DefaultValueBit = newCol.DefaultValueBit
oldCol.Flag = newCol.Flag
ver, err = updateVersionAndTableInfo(t, job, tblInfo, true)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
return ver, nil
}
func isColumnWithIndex(colName string, indices []*model.IndexInfo) bool {
for _, indexInfo := range indices {
for _, col := range indexInfo.Columns {
if col.Name.L == colName {
return true
}
}
}
return false
}
func getColumnForeignKeyInfo(colName string, fkInfos []*model.FKInfo) *model.FKInfo {
for _, fkInfo := range fkInfos {
for _, col := range fkInfo.Cols {
if col.L == colName {
return fkInfo
}
}
}
return nil
}
func allocateColumnID(tblInfo *model.TableInfo) int64 {
tblInfo.MaxColumnID++
return tblInfo.MaxColumnID
}
func checkAddColumnTooManyColumns(colNum int) error {
if uint32(colNum) > atomic.LoadUint32(&TableColumnCountLimit) {
return errTooManyFields
}
return nil
}
// rollbackModifyColumnJob rollbacks the job when an error occurs.
func rollbackModifyColumnJob(t *meta.Meta, tblInfo *model.TableInfo, job *model.Job, oldCol *model.ColumnInfo, modifyColumnTp byte) (ver int64, _ error) {
var err error
if modifyColumnTp == mysql.TypeNull {
// Reset the field's NotNullFlag.
tblInfo.Columns[oldCol.Offset].Flag = oldCol.Flag &^ mysql.NotNullFlag
// Reset the field's PreventNullInsertFlag.
tblInfo.Columns[oldCol.Offset].Flag = oldCol.Flag &^ mysql.PreventNullInsertFlag
ver, err = updateVersionAndTableInfo(t, job, tblInfo, true)
if err != nil {
return ver, errors.Trace(err)
}
}
return ver, nil
}
// modifyColsFromNull2NotNull modifies the type definitions of 'null' to 'not null'.
// Introduce the `mysql.PreventNullInsertFlag` flag to prevent users from inserting or updating null values.
func modifyColsFromNull2NotNull(w *worker, dbInfo *model.DBInfo, tblInfo *model.TableInfo, cols []*model.ColumnInfo,
newColName model.CIStr, isModifiedType bool) error {
// Get sessionctx from context resource pool.
var ctx sessionctx.Context
ctx, err := w.sessPool.get()
if err != nil {
return errors.Trace(err)
}
defer w.sessPool.put(ctx)
// If a null value has been inserted, the column cannot be modified and the job needs to be rolled back.
err = checkForNullValue(ctx, isModifiedType, dbInfo.Name, tblInfo.Name, newColName, cols...)
if err != nil {
return errors.Trace(err)
}
// Prevent null values from being inserted into these fields.
for _, col := range cols {
col.Flag |= mysql.PreventNullInsertFlag
}
return nil
}
func generateOriginDefaultValue(col *model.ColumnInfo) (interface{}, error) {
var err error
odValue := col.GetDefaultValue()
if odValue == nil && mysql.HasNotNullFlag(col.Flag) {
zeroVal := table.GetZeroValue(col)
odValue, err = zeroVal.ToString()
if err != nil {
return nil, errors.Trace(err)
}
}
if odValue == strings.ToUpper(ast.CurrentTimestamp) {
if col.Tp == mysql.TypeTimestamp {
odValue = time.Now().UTC().Format(types.TimeFormat)
} else if col.Tp == mysql.TypeDatetime {
odValue = time.Now().Format(types.TimeFormat)
}
}
return odValue, nil
}
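// For illustration (hypothetical columns): an `int not null` column without a
// default yields "0" (the zero value's string form), while a timestamp column
// defaulting to current_timestamp yields the current UTC time in types.TimeFormat.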
func findColumnInIndexCols(c string, cols []*model.IndexColumn) *model.IndexColumn {
for _, c1 := range cols {
if c == c1.Name.L {
return c1
}
}
return nil
}
func getColumnInfoByName(tbInfo *model.TableInfo, column string) *model.ColumnInfo {
for _, colInfo := range tbInfo.Cols() {
if colInfo.Name.L == column {
return colInfo
}
}
return nil
}
| ddl/column.go | 1 | https://github.com/pingcap/tidb/commit/788dc28a7423d4b85ebe11bbce632733b056d63a | [
0.989463210105896,
0.06201319023966789,
0.00016215122013818473,
0.0012811175547540188,
0.21420034766197205
] |
{
"id": 4,
"code_window": [
"\tif err != nil {\n",
"\t\tjob.State = model.JobStateCancelled\n",
"\t\treturn ver, errors.Trace(err)\n",
"\t}\n",
"\n",
"\tif dbInfo.Charset == toCharset && dbInfo.Collate == toCollate {\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "ddl/schema.go",
"type": "replace",
"edit_start_line_idx": 120
} | [
{
"name": "TestPushLimitDownIndexLookUpReader",
"cases": [
// Limit should be pushed down into IndexLookUpReader, row count of IndexLookUpReader and TableScan should be 1.00.
"explain select * from tbl use index(idx_b_c) where b > 1 limit 2,1",
// Projection atop IndexLookUpReader, Limit should be pushed down into IndexLookUpReader, and Projection should have row count 1.00 as well.
"explain select * from tbl use index(idx_b_c) where b > 1 order by b desc limit 2,1",
// Limit should be pushed down into IndexLookUpReader when Selection on top of IndexScan.
"explain select * from tbl use index(idx_b_c) where b > 1 and c > 1 limit 2,1",
// Limit should NOT be pushed down into IndexLookUpReader when Selection on top of TableScan.
"explain select * from tbl use index(idx_b_c) where b > 1 and a > 1 limit 2,1"
]
},
{
"name": "TestIsFromUnixtimeNullRejective",
"cases": [
// fix #12385
"explain select * from t t1 left join t t2 on t1.a=t2.a where from_unixtime(t2.b);"
]
},
{
"name": "TestSimplifyOuterJoinWithCast",
"cases": [
// LeftOuterJoin should no be simplified to InnerJoin.
"explain select * from t t1 left join t t2 on t1.a = t2.a where cast(t1.b as date) >= '2019-01-01'"
]
},
{
"name": "TestMaxMinEliminate",
"cases": [
"explain (select max(a) from t) union (select min(a) from t)"
]
},
{
"name": "TestIndexJoinUniqueCompositeIndex",
"cases": [
// Row count of IndexScan should be 2.
"explain select /*+ TIDB_INLJ(t2) */ * from t1 join t2 on t1.a = t2.a and t1.c = t2.c",
// Row count of IndexScan should be 2.
"explain select /*+ TIDB_INLJ(t2) */ * from t1 join t2 on t1.a = t2.a and t1.c <= t2.b",
// Row count of IndexScan should be 1.
"explain select /*+ TIDB_INLJ(t2) */ * from t1 join t2 on t1.a = t2.a and t2.b = 1"
]
},
{
"name": "TestPartitionTableStats",
"cases": [
"explain select * from t order by a",
"select * from t order by a",
"explain select * from t order by a limit 3",
"select * from t order by a limit 3"
]
},
{
"name": "TestIndexMerge",
"cases": [
"explain select /*+ USE_INDEX_MERGE(t, a, b) */ * from t where a = 1 or b = 2",
"explain select /*+ USE_INDEX_MERGE(t, primary) */ * from t where 1 or t.c",
"explain select /*+ USE_INDEX_MERGE(t, a, b, c) */ * from t where 1 or t.a = 1 or t.b = 2"
]
},
{
"name": "TestSubqueryWithTopN",
"cases": [
"desc select t1.b from t t1 where t1.b in (select t2.a from t t2 order by t1.a+t2.a limit 1)"
]
},
{
"name": "TestIndexJoinTableRange",
"cases": [
"desc select /*+ TIDB_INLJ(t2)*/ * from t1, t2 where t1.a = t2.a and t1.b = t2.b",
"desc select /*+ TIDB_INLJ(t2)*/ * from t1, t2 where t1.a = t2.a and t1.b = t2.a and t1.b = t2.b"
]
},
{
"name": "TestHintWithRequiredProperty",
"cases": [
"desc select /*+ INL_JOIN(t2) */ * from t t1, t t2 where t1.a = t2.b order by t2.a",
"desc select /*+ INL_HASH_JOIN(t2) */ * from t t1, t t2 where t1.a = t2.b order by t2.a",
"desc select /*+ INL_MERGE_JOIN(t2)*/ t1.a, t2.a from t t1, t t2 ,t t3 where t1.a = t2.a and t3.a=t2.a",
"desc select * from t t1, (select /*+ HASH_AGG() */ b, max(a) from t t2 group by b) t2 where t1.b = t2.b order by t1.b",
"desc select /*+ INL_HASH_JOIN(t2) */ distinct t2.a from t t1 join t t2 on t1.a = t2.a",
// This hint cannot work, so choose another plan.
"desc select /*+ INL_JOIN(t2) */ * from t t1, t t2 where t1.a = t2.c order by t1.a"
]
},
{
"name": "TestIndexHintWarning",
"cases": [
"select /*+ USE_INDEX(t1, j) */ * from t1",
"select /*+ IGNORE_INDEX(t1, j) */ * from t1",
"select /*+ USE_INDEX(t2, a, b, c) */ * from t1",
"select /*+ USE_INDEX(t2) */ * from t1",
"select /*+ USE_INDEX(t1, a), USE_INDEX(t2, a), USE_INDEX(t3, a) */ * from t1, t2 where t1.a=t2.a",
"select /*+ USE_INDEX(t3, a), USE_INDEX(t4, b), IGNORE_INDEX(t3, a) */ * from t1, t2 where t1.a=t2.a",
"select /*+ USE_INDEX_MERGE(t3, a, b, d) */ * from t1",
"select /*+ USE_INDEX_MERGE(t1, a, b, c, d) */ * from t1",
"select /*+ USE_INDEX_MERGE(t1, a, b), USE_INDEX(t1, a) */ * from t1",
"select /*+ USE_INDEX_MERGE(t1, a, b), IGNORE_INDEX(t1, a) */ * from t1",
"select /*+ USE_INDEX_MERGE(t1, primary, a, b, c) */ * from t1"
]
},
{
"name": "TestHintWithoutTableWarning",
"cases": [
"select /*+ TIDB_SMJ() */ * from t1, t2 where t1.a=t2.a",
"select /*+ MERGE_JOIN() */ * from t1, t2 where t1.a=t2.a",
"select /*+ INL_JOIN() */ * from t1, t2 where t1.a=t2.a",
"select /*+ TIDB_INLJ() */ * from t1, t2 where t1.a=t2.a",
"select /*+ INL_HASH_JOIN() */ * from t1, t2 where t1.a=t2.a",
"select /*+ INL_MERGE_JOIN() */ * from t1, t2 where t1.a=t2.a",
"select /*+ HASH_JOIN() */ * from t1, t2 where t1.a=t2.a",
"select /*+ USE_INDEX() */ * from t1, t2 where t1.a=t2.a",
"select /*+ IGNORE_INDEX() */ * from t1, t2 where t1.a=t2.a",
"select /*+ USE_INDEX_MERGE() */ * from t1, t2 where t1.a=t2.a"
]
},
{
"name": "TestPartitionPruningForInExpr",
"cases": [
"explain select * from t where a in (1, 2,'11')",
"explain select * from t where a in (17, null)",
"explain select * from t where a in (16, 'abc')",
"explain select * from t where a in (15, 0.12, 3.47)",
"explain select * from t where a in (0.12, 3.47)",
"explain select * from t where a in (14, floor(3.47))",
"explain select * from t where b in (3, 4)"
]
},
{
"name": "TestStreamAggProp",
"cases": [
"select /*+ stream_agg() */ count(*) c from t group by a order by c limit 1",
"select /*+ stream_agg() */ count(*) c from t group by a order by c",
"select /*+ stream_agg() */ count(*) c from t group by a order by a limit 1",
"select /*+ stream_agg() */ count(*) c from t group by a order by a"
]
}
]
| planner/core/testdata/integration_suite_in.json | 0 | https://github.com/pingcap/tidb/commit/788dc28a7423d4b85ebe11bbce632733b056d63a | [
0.00017910735914483666,
0.0001737530983518809,
0.00016692043573129922,
0.00017389531421940774,
0.000002530334541006596
] |
{
"id": 4,
"code_window": [
"\tif err != nil {\n",
"\t\tjob.State = model.JobStateCancelled\n",
"\t\treturn ver, errors.Trace(err)\n",
"\t}\n",
"\n",
"\tif dbInfo.Charset == toCharset && dbInfo.Collate == toCollate {\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "ddl/schema.go",
"type": "replace",
"edit_start_line_idx": 120
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"bytes"
"context"
"fmt"
"io"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"unsafe"
"github.com/cznic/mathutil"
"github.com/gogo/protobuf/proto"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/coprocessor"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/domain/infosync"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/util/execdetails"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tipb/go-tipb"
"go.uber.org/zap"
)
var tikvTxnRegionsNumHistogramWithCoprocessor = metrics.TiKVTxnRegionsNumHistogram.WithLabelValues("coprocessor")
var tikvTxnRegionsNumHistogramWithBatchCoprocessor = metrics.TiKVTxnRegionsNumHistogram.WithLabelValues("batch_coprocessor")
// CopClient is coprocessor client.
type CopClient struct {
kv.RequestTypeSupportedChecker
store *tikvStore
replicaReadSeed uint32
}
// Send builds the request and gets the coprocessor iterator response.
func (c *CopClient) Send(ctx context.Context, req *kv.Request, vars *kv.Variables) kv.Response {
if req.StoreType == kv.TiFlash && req.BatchCop {
logutil.BgLogger().Debug("send batch requests")
return c.sendBatch(ctx, req, vars)
}
ctx = context.WithValue(ctx, txnStartKey, req.StartTs)
bo := NewBackoffer(ctx, copBuildTaskMaxBackoff).WithVars(vars)
tasks, err := buildCopTasks(bo, c.store.regionCache, &copRanges{mid: req.KeyRanges}, req)
if err != nil {
return copErrorResponse{err}
}
it := &copIterator{
store: c.store,
req: req,
concurrency: req.Concurrency,
finishCh: make(chan struct{}),
vars: vars,
memTracker: req.MemTracker,
replicaReadSeed: c.replicaReadSeed,
}
it.minCommitTSPushed.data = make(map[uint64]struct{}, 5)
it.tasks = tasks
if it.concurrency > len(tasks) {
it.concurrency = len(tasks)
}
if it.concurrency < 1 {
// Make sure that there is at least one worker.
it.concurrency = 1
}
if it.req.KeepOrder {
it.sendRate = newRateLimit(2 * it.concurrency)
} else {
it.respChan = make(chan *copResponse, it.concurrency)
}
it.open(ctx)
return it
}
// copTask contains a related Region and KeyRange for a kv.Request.
type copTask struct {
region RegionVerID
ranges *copRanges
respChan chan *copResponse
storeAddr string
cmdType tikvrpc.CmdType
storeType kv.StoreType
}
func (r *copTask) String() string {
return fmt.Sprintf("region(%d %d %d) ranges(%d) store(%s)",
r.region.id, r.region.confVer, r.region.ver, r.ranges.len(), r.storeAddr)
}
// copRanges is like []kv.KeyRange, but may have extra elements at head/tail.
// It avoids allocating a big slice when building copTask.
type copRanges struct {
first *kv.KeyRange
mid []kv.KeyRange
last *kv.KeyRange
}
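// An added illustration (not from the upstream file): after a region split,
// the partially-covered head/tail ranges live in first/last, so mid is never
// reallocated. For example:
//
//	r := &copRanges{
//		first: &kv.KeyRange{StartKey: []byte("a"), EndKey: []byte("b")},
//		mid:   []kv.KeyRange{{StartKey: []byte("b"), EndKey: []byte("d")}},
//		last:  &kv.KeyRange{StartKey: []byte("d"), EndKey: []byte("e")},
//	}
//	// r.len() == 3; r.at(0) is [a, b), r.at(1) is [b, d), r.at(2) is [d, e).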
func (r *copRanges) String() string {
var s string
r.do(func(ran *kv.KeyRange) {
s += fmt.Sprintf("[%q, %q]", ran.StartKey, ran.EndKey)
})
return s
}
func (r *copRanges) len() int {
var l int
if r.first != nil {
l++
}
l += len(r.mid)
if r.last != nil {
l++
}
return l
}
func (r *copRanges) at(i int) kv.KeyRange {
if r.first != nil {
if i == 0 {
return *r.first
}
i--
}
if i < len(r.mid) {
return r.mid[i]
}
return *r.last
}
func (r *copRanges) slice(from, to int) *copRanges {
var ran copRanges
if r.first != nil {
if from == 0 && to > 0 {
ran.first = r.first
}
if from > 0 {
from--
}
if to > 0 {
to--
}
}
if to <= len(r.mid) {
ran.mid = r.mid[from:to]
} else {
if from <= len(r.mid) {
ran.mid = r.mid[from:]
}
if from < to {
ran.last = r.last
}
}
return &ran
}
func (r *copRanges) do(f func(ran *kv.KeyRange)) {
if r.first != nil {
f(r.first)
}
for _, ran := range r.mid {
f(&ran)
}
if r.last != nil {
f(r.last)
}
}
func (r *copRanges) toPBRanges() []*coprocessor.KeyRange {
ranges := make([]*coprocessor.KeyRange, 0, r.len())
r.do(func(ran *kv.KeyRange) {
ranges = append(ranges, &coprocessor.KeyRange{
Start: ran.StartKey,
End: ran.EndKey,
})
})
return ranges
}
// split ranges into (left, right) by key.
func (r *copRanges) split(key []byte) (*copRanges, *copRanges) {
n := sort.Search(r.len(), func(i int) bool {
cur := r.at(i)
return len(cur.EndKey) == 0 || bytes.Compare(cur.EndKey, key) > 0
})
// If a range p contains the key, it will split to 2 parts.
if n < r.len() {
p := r.at(n)
if bytes.Compare(key, p.StartKey) > 0 {
left := r.slice(0, n)
left.last = &kv.KeyRange{StartKey: p.StartKey, EndKey: key}
right := r.slice(n+1, r.len())
right.first = &kv.KeyRange{StartKey: key, EndKey: p.EndKey}
return left, right
}
}
return r.slice(0, n), r.slice(n, r.len())
}
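// An added worked example (not from the upstream file): splitting the ranges
// [a, c) [d, f) at key "b" yields left = [a, b) and right = [b, c) [d, f);
// the range containing the key is divided via the last/first fields of the
// two returned copRanges, so the shared mid slice is never copied.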
// rangesPerTask limits the length of the ranges slice sent in one copTask.
const rangesPerTask = 25000
func buildCopTasks(bo *Backoffer, cache *RegionCache, ranges *copRanges, req *kv.Request) ([]*copTask, error) {
start := time.Now()
cmdType := tikvrpc.CmdCop
if req.Streaming {
cmdType = tikvrpc.CmdCopStream
}
if req.StoreType == kv.TiDB {
return buildTiDBMemCopTasks(ranges, req)
}
rangesLen := ranges.len()
var tasks []*copTask
appendTask := func(regionWithRangeInfo *KeyLocation, ranges *copRanges) {
// TiKV will return a gRPC error if the message is too large, so we need to limit the length of the ranges slice
// to make sure the message can be sent successfully.
rLen := ranges.len()
for i := 0; i < rLen; {
nextI := mathutil.Min(i+rangesPerTask, rLen)
tasks = append(tasks, &copTask{
region: regionWithRangeInfo.Region,
ranges: ranges.slice(i, nextI),
// Channel buffer is 2 for handling region split.
// In the common case, two region split tasks will not be blocked.
respChan: make(chan *copResponse, 2),
cmdType: cmdType,
storeType: req.StoreType,
})
i = nextI
}
}
err := splitRanges(bo, cache, ranges, appendTask)
if err != nil {
return nil, errors.Trace(err)
}
if req.Desc {
reverseTasks(tasks)
}
if elapsed := time.Since(start); elapsed > time.Millisecond*500 {
logutil.BgLogger().Warn("buildCopTasks takes too much time",
zap.Duration("elapsed", elapsed),
zap.Int("range len", rangesLen),
zap.Int("task len", len(tasks)))
}
tikvTxnRegionsNumHistogramWithCoprocessor.Observe(float64(len(tasks)))
return tasks, nil
}
func buildTiDBMemCopTasks(ranges *copRanges, req *kv.Request) ([]*copTask, error) {
servers, err := infosync.GetAllServerInfo(context.Background())
if err != nil {
return nil, err
}
cmdType := tikvrpc.CmdCop
if req.Streaming {
cmdType = tikvrpc.CmdCopStream
}
tasks := make([]*copTask, 0, len(servers))
for _, ser := range servers {
addr := ser.IP + ":" + strconv.FormatUint(uint64(ser.StatusPort), 10)
tasks = append(tasks, &copTask{
ranges: ranges,
respChan: make(chan *copResponse, 2),
cmdType: cmdType,
storeType: req.StoreType,
storeAddr: addr,
})
}
return tasks, nil
}
func splitRanges(bo *Backoffer, cache *RegionCache, ranges *copRanges, fn func(regionWithRangeInfo *KeyLocation, ranges *copRanges)) error {
for ranges.len() > 0 {
loc, err := cache.LocateKey(bo, ranges.at(0).StartKey)
if err != nil {
return errors.Trace(err)
}
// Iterate to the first range that is not complete in the region.
var i int
for ; i < ranges.len(); i++ {
r := ranges.at(i)
if !(loc.Contains(r.EndKey) || bytes.Equal(loc.EndKey, r.EndKey)) {
break
}
}
// All rest ranges belong to the same region.
if i == ranges.len() {
fn(loc, ranges)
break
}
r := ranges.at(i)
if loc.Contains(r.StartKey) {
// Part of r is not in the region. We need to split it.
taskRanges := ranges.slice(0, i)
taskRanges.last = &kv.KeyRange{
StartKey: r.StartKey,
EndKey: loc.EndKey,
}
fn(loc, taskRanges)
ranges = ranges.slice(i+1, ranges.len())
ranges.first = &kv.KeyRange{
StartKey: loc.EndKey,
EndKey: r.EndKey,
}
} else {
// rs[i] is not in the region.
taskRanges := ranges.slice(0, i)
fn(loc, taskRanges)
ranges = ranges.slice(i, ranges.len())
}
}
return nil
}
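// An added worked example (not from the upstream file): with regions
// [a, m) and [m, z) and ranges [b, c) [k, n), splitRanges first calls fn with
// [b, c) [k, m) for the region owning "b", then with [m, n) for the next
// region, so every emitted copRanges falls entirely within one region.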
// SplitRegionRanges gets the split ranges from the PD region.
func SplitRegionRanges(bo *Backoffer, cache *RegionCache, keyRanges []kv.KeyRange) ([]kv.KeyRange, error) {
ranges := copRanges{mid: keyRanges}
var ret []kv.KeyRange
appendRange := func(regionWithRangeInfo *KeyLocation, ranges *copRanges) {
for i := 0; i < ranges.len(); i++ {
ret = append(ret, ranges.at(i))
}
}
err := splitRanges(bo, cache, &ranges, appendRange)
if err != nil {
return nil, errors.Trace(err)
}
return ret, nil
}
func reverseTasks(tasks []*copTask) {
for i := 0; i < len(tasks)/2; i++ {
j := len(tasks) - i - 1
tasks[i], tasks[j] = tasks[j], tasks[i]
}
}
type copIterator struct {
store *tikvStore
req *kv.Request
concurrency int
finishCh chan struct{}
// If keepOrder, results are stored in copTask.respChan, read them out one by one.
tasks []*copTask
curr int
// sendRate controls the sending rate of copIteratorTaskSender when keepOrder is set,
// to prevent all tasks from being finished at once (i.e. all of the responses buffered).
sendRate *rateLimit
// Otherwise, results are stored in respChan.
respChan chan *copResponse
vars *kv.Variables
memTracker *memory.Tracker
replicaReadSeed uint32
wg sync.WaitGroup
// closed represents when the Close is called.
// There are two cases we need to close the `finishCh` channel: one is when the context is done, the other is
// when Close is called. We use atomic.CompareAndSwap on `closed` to make sure the channel is not closed twice.
closed uint32
minCommitTSPushed
}
// copIteratorWorker receives tasks from copIteratorTaskSender, handles tasks and sends the copResponse to respChan.
type copIteratorWorker struct {
taskCh <-chan *copTask
wg *sync.WaitGroup
store *tikvStore
req *kv.Request
respChan chan<- *copResponse
finishCh <-chan struct{}
vars *kv.Variables
clientHelper
memTracker *memory.Tracker
replicaReadSeed uint32
}
// copIteratorTaskSender sends tasks to taskCh then wait for the workers to exit.
type copIteratorTaskSender struct {
taskCh chan<- *copTask
wg *sync.WaitGroup
tasks []*copTask
finishCh <-chan struct{}
respChan chan<- *copResponse
sendRate *rateLimit
}
type copResponse struct {
pbResp *coprocessor.Response
detail *execdetails.ExecDetails
startKey kv.Key
err error
respSize int64
respTime time.Duration
}
const (
sizeofExecDetails = int(unsafe.Sizeof(execdetails.ExecDetails{}))
sizeofCommitDetails = int(unsafe.Sizeof(execdetails.CommitDetails{}))
)
// GetData implements the kv.ResultSubset GetData interface.
func (rs *copResponse) GetData() []byte {
return rs.pbResp.Data
}
// GetStartKey implements the kv.ResultSubset GetStartKey interface.
func (rs *copResponse) GetStartKey() kv.Key {
return rs.startKey
}
func (rs *copResponse) GetExecDetails() *execdetails.ExecDetails {
return rs.detail
}
// MemSize returns how many bytes of memory this response uses
func (rs *copResponse) MemSize() int64 {
if rs.respSize != 0 {
return rs.respSize
}
// ignore rs.err
rs.respSize += int64(cap(rs.startKey))
if rs.detail != nil {
rs.respSize += int64(sizeofExecDetails)
if rs.detail.CommitDetail != nil {
rs.respSize += int64(sizeofCommitDetails)
}
}
if rs.pbResp != nil {
// Use an approximate size since it's hard to get an accurate value.
rs.respSize += int64(rs.pbResp.Size())
}
return rs.respSize
}
func (rs *copResponse) RespTime() time.Duration {
return rs.respTime
}
const minLogCopTaskTime = 300 * time.Millisecond
// run is a worker function that gets a copTask from the channel, handles it and
// sends the result back.
func (worker *copIteratorWorker) run(ctx context.Context) {
defer worker.wg.Done()
for task := range worker.taskCh {
respCh := worker.respChan
if respCh == nil {
respCh = task.respChan
}
worker.handleTask(ctx, task, respCh)
close(task.respChan)
select {
case <-worker.finishCh:
return
default:
}
}
}
// open starts workers and sender goroutines.
func (it *copIterator) open(ctx context.Context) {
taskCh := make(chan *copTask, 1)
it.wg.Add(it.concurrency)
// Start it.concurrency number of workers to handle cop requests.
for i := 0; i < it.concurrency; i++ {
worker := &copIteratorWorker{
taskCh: taskCh,
wg: &it.wg,
store: it.store,
req: it.req,
respChan: it.respChan,
finishCh: it.finishCh,
vars: it.vars,
clientHelper: clientHelper{
LockResolver: it.store.lockResolver,
RegionCache: it.store.regionCache,
minCommitTSPushed: &it.minCommitTSPushed,
Client: it.store.client,
},
memTracker: it.memTracker,
replicaReadSeed: it.replicaReadSeed,
}
go worker.run(ctx)
}
taskSender := &copIteratorTaskSender{
taskCh: taskCh,
wg: &it.wg,
tasks: it.tasks,
finishCh: it.finishCh,
sendRate: it.sendRate,
}
taskSender.respChan = it.respChan
go taskSender.run()
}
func (sender *copIteratorTaskSender) run() {
// Send tasks to feed the worker goroutines.
for _, t := range sender.tasks {
// If keepOrder, we must control the sending rate to prevent all tasks
// being done (aka. all of the responses are buffered) by copIteratorWorker.
// We keep the number of inflight tasks within the number of concurrency * 2.
// It sends one more task if a task has been finished in copIterator.Next.
if sender.sendRate != nil {
exit := sender.sendRate.getToken(sender.finishCh)
if exit {
break
}
}
exit := sender.sendToTaskCh(t)
if exit {
break
}
}
close(sender.taskCh)
// Wait for worker goroutines to exit.
sender.wg.Wait()
if sender.respChan != nil {
close(sender.respChan)
}
}
func (it *copIterator) recvFromRespCh(ctx context.Context, respCh <-chan *copResponse) (resp *copResponse, ok bool, exit bool) {
select {
case resp, ok = <-respCh:
if it.memTracker != nil && resp != nil {
it.memTracker.Consume(-resp.MemSize())
}
case <-it.finishCh:
exit = true
case <-ctx.Done():
// We select the ctx.Done() in the thread of `Next` instead of in the worker to avoid the cost of `WithCancel`.
if atomic.CompareAndSwapUint32(&it.closed, 0, 1) {
close(it.finishCh)
}
exit = true
}
return
}
func (sender *copIteratorTaskSender) sendToTaskCh(t *copTask) (exit bool) {
select {
case sender.taskCh <- t:
case <-sender.finishCh:
exit = true
}
return
}
func (worker *copIteratorWorker) sendToRespCh(resp *copResponse, respCh chan<- *copResponse, checkOOM bool) (exit bool) {
if worker.memTracker != nil && checkOOM {
worker.memTracker.Consume(resp.MemSize())
}
select {
case respCh <- resp:
case <-worker.finishCh:
exit = true
}
return
}
// Next returns next coprocessor result.
// NOTE: Use nil to indicate finish, so if the returned ResultSubset is not nil, reader should continue to call Next().
func (it *copIterator) Next(ctx context.Context) (kv.ResultSubset, error) {
var (
resp *copResponse
ok bool
closed bool
)
// If data order matters, response should be returned in the same order as copTask slice.
// Otherwise all responses are returned from a single channel.
if it.respChan != nil {
// Get next fetched resp from chan
resp, ok, closed = it.recvFromRespCh(ctx, it.respChan)
if !ok || closed {
return nil, nil
}
} else {
for {
if it.curr >= len(it.tasks) {
// Resp will be nil if the iterator is finished.
return nil, nil
}
task := it.tasks[it.curr]
resp, ok, closed = it.recvFromRespCh(ctx, task.respChan)
if closed {
// Close() is already called, so Next() is invalid.
return nil, nil
}
if ok {
break
}
// Switch to next task.
it.tasks[it.curr] = nil
it.curr++
it.sendRate.putToken()
}
}
if resp.err != nil {
return nil, errors.Trace(resp.err)
}
err := it.store.CheckVisibility(it.req.StartTs)
if err != nil {
return nil, errors.Trace(err)
}
return resp, nil
}
// Associate each region with an independent backoffer. In this way, when multiple regions are
// unavailable, TiDB can execute very quickly without blocking.
func chooseBackoffer(ctx context.Context, backoffermap map[uint64]*Backoffer, task *copTask, worker *copIteratorWorker) *Backoffer {
bo, ok := backoffermap[task.region.id]
if ok {
return bo
}
newbo := NewBackoffer(ctx, copNextMaxBackoff).WithVars(worker.vars)
backoffermap[task.region.id] = newbo
return newbo
}
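// An added note (not from the upstream file): for example, tasks targeting
// regions {7, 7, 9} share one backoffer for region 7 and get a fresh one for
// region 9, so backoff sleep accumulates per region rather than globally.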
// handleTask handles a single copTask, sends the result to the channel, and retries automatically on error.
func (worker *copIteratorWorker) handleTask(ctx context.Context, task *copTask, respCh chan<- *copResponse) {
defer func() {
r := recover()
if r != nil {
logutil.BgLogger().Error("copIteratorWork meet panic",
zap.Reflect("r", r),
zap.Stack("stack trace"))
resp := &copResponse{err: errors.Errorf("%v", r)}
// if panic has happened, set checkOOM to false to avoid another panic.
worker.sendToRespCh(resp, respCh, false)
}
}()
remainTasks := []*copTask{task}
backoffermap := make(map[uint64]*Backoffer)
for len(remainTasks) > 0 {
curTask := remainTasks[0]
bo := chooseBackoffer(ctx, backoffermap, curTask, worker)
tasks, err := worker.handleTaskOnce(bo, curTask, respCh)
if err != nil {
resp := &copResponse{err: errors.Trace(err)}
worker.sendToRespCh(resp, respCh, true)
return
}
if len(tasks) > 0 {
remainTasks = append(tasks, remainTasks[1:]...)
} else {
remainTasks = remainTasks[1:]
}
}
}
// handleTaskOnce handles a single copTask; successful results are sent to the channel.
// If an error happens, it returns the error. If the region splits or a lock is met, it returns the remaining tasks.
func (worker *copIteratorWorker) handleTaskOnce(bo *Backoffer, task *copTask, ch chan<- *copResponse) ([]*copTask, error) {
failpoint.Inject("handleTaskOnceError", func(val failpoint.Value) {
if val.(bool) {
failpoint.Return(nil, errors.New("mock handleTaskOnce error"))
}
})
copReq := coprocessor.Request{
Tp: worker.req.Tp,
StartTs: worker.req.StartTs,
Data: worker.req.Data,
Ranges: task.ranges.toPBRanges(),
SchemaVer: worker.req.SchemaVar,
}
var cacheKey []byte = nil
var cacheValue *coprCacheValue = nil
// If there are many ranges, it is very likely to be a TableLookupRequest. Such requests are not worth caching since
// computation is not the main cost. Ignore them directly to avoid slowly building the cache key.
if task.cmdType == tikvrpc.CmdCop && worker.store.coprCache != nil && worker.req.Cacheable && len(copReq.Ranges) < 10 {
cKey, err := coprCacheBuildKey(&copReq)
if err == nil {
cacheKey = cKey
cValue := worker.store.coprCache.Get(cKey)
copReq.IsCacheEnabled = true
if cValue != nil && cValue.RegionID == task.region.id && cValue.TimeStamp <= worker.req.StartTs {
// Append cache version to the request to skip Coprocessor computation if possible
// when request result is cached
copReq.CacheIfMatchVersion = cValue.RegionDataVersion
cacheValue = cValue
} else {
copReq.CacheIfMatchVersion = 0
}
} else {
logutil.BgLogger().Warn("Failed to build copr cache key", zap.Error(err))
}
}
req := tikvrpc.NewReplicaReadRequest(task.cmdType, &copReq, worker.req.ReplicaRead, &worker.replicaReadSeed, kvrpcpb.Context{
IsolationLevel: pbIsolationLevel(worker.req.IsolationLevel),
Priority: kvPriorityToCommandPri(worker.req.Priority),
NotFillCache: worker.req.NotFillCache,
HandleTime: true,
ScanDetail: true,
TaskId: worker.req.TaskID,
})
req.StoreTp = task.storeType
startTime := time.Now()
resp, rpcCtx, storeAddr, err := worker.SendReqCtx(bo, req, task.region, ReadTimeoutMedium, task.storeType, task.storeAddr)
if err != nil {
if task.storeType == kv.TiDB {
err = worker.handleTiDBSendReqErr(err, task, ch)
return nil, err
}
return nil, errors.Trace(err)
}
// Set the task.storeAddr field so the task.String() method has the store address information.
task.storeAddr = storeAddr
costTime := time.Since(startTime)
if costTime > minLogCopTaskTime {
worker.logTimeCopTask(costTime, task, bo, resp)
}
metrics.TiKVCoprocessorHistogram.Observe(costTime.Seconds())
if task.cmdType == tikvrpc.CmdCopStream {
return worker.handleCopStreamResult(bo, rpcCtx, resp.Resp.(*tikvrpc.CopStreamResponse), task, ch, costTime)
}
// Handles the response for non-streaming copTask.
return worker.handleCopResponse(bo, rpcCtx, &copResponse{pbResp: resp.Resp.(*coprocessor.Response)}, cacheKey, cacheValue, task, ch, nil, costTime)
}
type minCommitTSPushed struct {
data map[uint64]struct{}
sync.RWMutex
}
func (m *minCommitTSPushed) Update(from []uint64) {
m.Lock()
for _, v := range from {
m.data[v] = struct{}{}
}
m.Unlock()
}
func (m *minCommitTSPushed) Get() []uint64 {
m.RLock()
defer m.RUnlock()
if len(m.data) == 0 {
return nil
}
ret := make([]uint64, 0, len(m.data))
for k := range m.data {
ret = append(ret, k)
}
return ret
}
// clientHelper wraps LockResolver and RegionRequestSender.
// It's introduced to support the new lock resolving pattern in the large transaction.
// In the large transaction protocol, sending requests and resolving locks are
// context-dependent. For example, when a request we send meets a secondary lock, we'll
// call ResolveLock, and if the lock belongs to a large transaction, we may retry
// the request. If there is no context information about the resolved locks, we'll
// meet the secondary lock again and run into an endless loop.
type clientHelper struct {
*LockResolver
*RegionCache
*minCommitTSPushed
Client
resolveLite bool
}
// ResolveLocks wraps the ResolveLocks function and stores the resolved result.
func (ch *clientHelper) ResolveLocks(bo *Backoffer, callerStartTS uint64, locks []*Lock) (int64, error) {
var err error
var resolvedLocks []uint64
var msBeforeTxnExpired int64
if ch.resolveLite {
msBeforeTxnExpired, resolvedLocks, err = ch.LockResolver.resolveLocksLite(bo, callerStartTS, locks)
} else {
msBeforeTxnExpired, resolvedLocks, err = ch.LockResolver.ResolveLocks(bo, callerStartTS, locks)
}
if err != nil {
return msBeforeTxnExpired, err
}
if len(resolvedLocks) > 0 {
ch.minCommitTSPushed.Update(resolvedLocks)
return 0, nil
}
return msBeforeTxnExpired, nil
}
// SendReqCtx wraps the SendReqCtx function and uses the resolved lock result in the kvrpcpb.Context.
func (ch *clientHelper) SendReqCtx(bo *Backoffer, req *tikvrpc.Request, regionID RegionVerID, timeout time.Duration, sType kv.StoreType, directStoreAddr string) (*tikvrpc.Response, *RPCContext, string, error) {
sender := NewRegionRequestSender(ch.RegionCache, ch.Client)
if len(directStoreAddr) > 0 {
sender.storeAddr = directStoreAddr
}
req.Context.ResolvedLocks = ch.minCommitTSPushed.Get()
resp, ctx, err := sender.SendReqCtx(bo, req, regionID, timeout, sType)
return resp, ctx, sender.storeAddr, err
}
const (
minLogBackoffTime = 100
minLogKVProcessTime = 100
minLogKVWaitTime = 200
)
func (worker *copIteratorWorker) logTimeCopTask(costTime time.Duration, task *copTask, bo *Backoffer, resp *tikvrpc.Response) {
logStr := fmt.Sprintf("[TIME_COP_PROCESS] resp_time:%s txnStartTS:%d region_id:%d store_addr:%s", costTime, worker.req.StartTs, task.region.id, task.storeAddr)
if bo.totalSleep > minLogBackoffTime {
backoffTypes := strings.Replace(fmt.Sprintf("%v", bo.types), " ", ",", -1)
logStr += fmt.Sprintf(" backoff_ms:%d backoff_types:%s", bo.totalSleep, backoffTypes)
}
var detail *kvrpcpb.ExecDetails
if resp.Resp != nil {
switch r := resp.Resp.(type) {
case *coprocessor.Response:
detail = r.ExecDetails
case *tikvrpc.CopStreamResponse:
// A streaming request returns io.EOF, so the first CopStreamResponse.Response may be nil.
if r.Response != nil {
detail = r.Response.ExecDetails
}
default:
panic("unreachable")
}
}
if detail != nil && detail.HandleTime != nil {
processMs := detail.HandleTime.ProcessMs
waitMs := detail.HandleTime.WaitMs
if processMs > minLogKVProcessTime {
logStr += fmt.Sprintf(" kv_process_ms:%d", processMs)
if detail.ScanDetail != nil {
logStr = appendScanDetail(logStr, "write", detail.ScanDetail.Write)
logStr = appendScanDetail(logStr, "data", detail.ScanDetail.Data)
logStr = appendScanDetail(logStr, "lock", detail.ScanDetail.Lock)
}
}
if waitMs > minLogKVWaitTime {
logStr += fmt.Sprintf(" kv_wait_ms:%d", waitMs)
if processMs <= minLogKVProcessTime {
logStr = strings.Replace(logStr, "TIME_COP_PROCESS", "TIME_COP_WAIT", 1)
}
}
}
logutil.Logger(bo.ctx).Info(logStr)
}
func appendScanDetail(logStr string, columnFamily string, scanInfo *kvrpcpb.ScanInfo) string {
if scanInfo != nil {
logStr += fmt.Sprintf(" scan_total_%s:%d", columnFamily, scanInfo.Total)
logStr += fmt.Sprintf(" scan_processed_%s:%d", columnFamily, scanInfo.Processed)
}
return logStr
}
func (worker *copIteratorWorker) handleCopStreamResult(bo *Backoffer, rpcCtx *RPCContext, stream *tikvrpc.CopStreamResponse, task *copTask, ch chan<- *copResponse, costTime time.Duration) ([]*copTask, error) {
defer stream.Close()
var resp *coprocessor.Response
var lastRange *coprocessor.KeyRange
resp = stream.Response
if resp == nil {
// streaming request returns io.EOF, so the first Response is nil.
return nil, nil
}
for {
remainedTasks, err := worker.handleCopResponse(bo, rpcCtx, &copResponse{pbResp: resp}, nil, nil, task, ch, lastRange, costTime)
if err != nil || len(remainedTasks) != 0 {
return remainedTasks, errors.Trace(err)
}
resp, err = stream.Recv()
if err != nil {
if errors.Cause(err) == io.EOF {
return nil, nil
}
if err1 := bo.Backoff(boTiKVRPC, errors.Errorf("recv stream response error: %v, task: %s", err, task)); err1 != nil {
return nil, errors.Trace(err)
}
// There is no coprocessor.Response for a network error; rebuild the task based on the last successful one.
if errors.Cause(err) == context.Canceled {
logutil.BgLogger().Info("stream recv timeout", zap.Error(err))
} else {
logutil.BgLogger().Info("stream unknown error", zap.Error(err))
}
return worker.buildCopTasksFromRemain(bo, lastRange, task)
}
if resp.Range != nil {
lastRange = resp.Range
}
}
}
// handleCopResponse checks coprocessor Response for region split and lock,
// returns more tasks when that happens, or handles the response if no error.
// If we're handling a streaming coprocessor response, lastRange is the range of the last
// successful response; otherwise it's nil.
func (worker *copIteratorWorker) handleCopResponse(bo *Backoffer, rpcCtx *RPCContext, resp *copResponse, cacheKey []byte, cacheValue *coprCacheValue, task *copTask, ch chan<- *copResponse, lastRange *coprocessor.KeyRange, costTime time.Duration) ([]*copTask, error) {
if regionErr := resp.pbResp.GetRegionError(); regionErr != nil {
if rpcCtx != nil && task.storeType == kv.TiDB {
resp.err = errors.Errorf("error: %v", regionErr)
worker.sendToRespCh(resp, ch, true)
return nil, nil
}
errStr := fmt.Sprintf("region_id:%v, region_ver:%v, store_type:%s, peer_addr:%s, error:%s",
task.region.id, task.region.ver, task.storeType.Name(), task.storeAddr, regionErr.String())
if err := bo.Backoff(BoRegionMiss, errors.New(errStr)); err != nil {
return nil, errors.Trace(err)
}
// We may meet a RegionError at the first packet, but not while visiting the stream.
return buildCopTasks(bo, worker.store.regionCache, task.ranges, worker.req)
}
if lockErr := resp.pbResp.GetLocked(); lockErr != nil {
logutil.BgLogger().Debug("coprocessor encounters",
zap.Stringer("lock", lockErr))
msBeforeExpired, err1 := worker.ResolveLocks(bo, worker.req.StartTs, []*Lock{NewLock(lockErr)})
if err1 != nil {
return nil, errors.Trace(err1)
}
if msBeforeExpired > 0 {
if err := bo.BackoffWithMaxSleep(boTxnLockFast, int(msBeforeExpired), errors.New(lockErr.String())); err != nil {
return nil, errors.Trace(err)
}
}
return worker.buildCopTasksFromRemain(bo, lastRange, task)
}
if otherErr := resp.pbResp.GetOtherError(); otherErr != "" {
err := errors.Errorf("other error: %s", otherErr)
logutil.BgLogger().Warn("other error",
zap.Uint64("txnStartTS", worker.req.StartTs),
zap.Uint64("regionID", task.region.id),
zap.String("storeAddr", task.storeAddr),
zap.Error(err))
return nil, errors.Trace(err)
}
// When the request is using streaming API, the `Range` is not nil.
if resp.pbResp.Range != nil {
resp.startKey = resp.pbResp.Range.Start
} else if task.ranges != nil && task.ranges.len() > 0 {
resp.startKey = task.ranges.at(0).StartKey
}
if resp.detail == nil {
resp.detail = new(execdetails.ExecDetails)
}
resp.detail.BackoffTime = time.Duration(bo.totalSleep) * time.Millisecond
resp.detail.BackoffSleep = make(map[string]time.Duration, len(bo.backoffTimes))
resp.detail.BackoffTimes = make(map[string]int, len(bo.backoffTimes))
for backoff := range bo.backoffTimes {
backoffName := backoff.String()
resp.detail.BackoffTimes[backoffName] = bo.backoffTimes[backoff]
resp.detail.BackoffSleep[backoffName] = time.Duration(bo.backoffSleepMS[backoff]) * time.Millisecond
}
if rpcCtx != nil {
resp.detail.CalleeAddress = rpcCtx.Addr
}
resp.respTime = costTime
if pbDetails := resp.pbResp.ExecDetails; pbDetails != nil {
if handleTime := pbDetails.HandleTime; handleTime != nil {
resp.detail.WaitTime = time.Duration(handleTime.WaitMs) * time.Millisecond
resp.detail.ProcessTime = time.Duration(handleTime.ProcessMs) * time.Millisecond
}
if scanDetail := pbDetails.ScanDetail; scanDetail != nil {
if scanDetail.Write != nil {
resp.detail.TotalKeys += scanDetail.Write.Total
resp.detail.ProcessedKeys += scanDetail.Write.Processed
}
}
}
if resp.pbResp.IsCacheHit {
if cacheValue == nil {
return nil, errors.New("Internal error: received illegal TiKV response")
}
// Cache hit and valid: use the cached data as the response data; don't update the cache.
data := make([]byte, len(cacheValue.Data))
copy(data, cacheValue.Data)
resp.pbResp.Data = data
} else {
// Cache not hit or cache hit but not valid: update the cache if the response can be cached.
if cacheKey != nil && resp.pbResp.CanBeCached && resp.pbResp.CacheLastVersion > 0 {
if worker.store.coprCache.CheckAdmission(resp.pbResp.Data.Size(), resp.detail.ProcessTime) {
data := make([]byte, len(resp.pbResp.Data))
copy(data, resp.pbResp.Data)
newCacheValue := coprCacheValue{
Data: data,
TimeStamp: worker.req.StartTs,
RegionID: task.region.id,
RegionDataVersion: resp.pbResp.CacheLastVersion,
}
worker.store.coprCache.Set(cacheKey, &newCacheValue)
}
}
}
worker.sendToRespCh(resp, ch, true)
return nil, nil
}
func (worker *copIteratorWorker) handleTiDBSendReqErr(err error, task *copTask, ch chan<- *copResponse) error {
errCode := errno.ErrUnknown
errMsg := err.Error()
if terror.ErrorEqual(err, ErrTiKVServerTimeout) {
errCode = errno.ErrTiKVServerTimeout
errMsg = "TiDB server timeout, address is " + task.storeAddr
}
selResp := tipb.SelectResponse{
Warnings: []*tipb.Error{
{
Code: int32(errCode),
Msg: errMsg,
},
},
}
data, err := proto.Marshal(&selResp)
if err != nil {
return errors.Trace(err)
}
resp := &copResponse{
pbResp: &coprocessor.Response{
Data: data,
},
detail: &execdetails.ExecDetails{},
}
worker.sendToRespCh(resp, ch, true)
return nil
}
func (worker *copIteratorWorker) buildCopTasksFromRemain(bo *Backoffer, lastRange *coprocessor.KeyRange, task *copTask) ([]*copTask, error) {
remainedRanges := task.ranges
if worker.req.Streaming && lastRange != nil {
remainedRanges = worker.calculateRemain(task.ranges, lastRange, worker.req.Desc)
}
return buildCopTasks(bo, worker.store.regionCache, remainedRanges, worker.req)
}
// calculateRemain splits the input ranges into two and takes one of them according to the desc flag.
// It's used in the streaming API to calculate which ranges are consumed and which need to be retried.
// For example:
// ranges: [r1 --> r2) [r3 --> r4)
// split: [s1 --> s2)
// In normal scan order, all data before s1 is consumed, so the remaining ranges should be [s1 --> r2) [r3 --> r4)
// In reverse scan order, all data after s2 is consumed, so the remaining ranges should be [r1 --> r2) [r3 --> s2)
func (worker *copIteratorWorker) calculateRemain(ranges *copRanges, split *coprocessor.KeyRange, desc bool) *copRanges {
if desc {
left, _ := ranges.split(split.End)
return left
}
_, right := ranges.split(split.Start)
return right
}
func (it *copIterator) Close() error {
if atomic.CompareAndSwapUint32(&it.closed, 0, 1) {
close(it.finishCh)
}
it.wg.Wait()
return nil
}
type rateLimit struct {
token chan struct{}
}
func newRateLimit(n int) *rateLimit {
return &rateLimit{
token: make(chan struct{}, n),
}
}
func (r *rateLimit) getToken(done <-chan struct{}) (exit bool) {
select {
case <-done:
return true
case r.token <- struct{}{}:
return false
}
}
func (r *rateLimit) putToken() {
select {
case <-r.token:
default:
panic("put a redundant token")
}
}
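// An added usage sketch (hypothetical caller, not from the upstream file):
// getToken blocks once n tokens are outstanding, and each token must later be
// returned via putToken (as copIterator.Next does after consuming a response).
//
//	rl := newRateLimit(2)
//	finish := make(chan struct{})
//	if exit := rl.getToken(finish); !exit {
//		// ... dispatch one task to a worker ...
//		rl.putToken() // release after the task's response is read
//	}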
// copErrorResponse returns an error when calling Next().
type copErrorResponse struct{ error }
func (it copErrorResponse) Next(ctx context.Context) (kv.ResultSubset, error) {
return nil, it.error
}
func (it copErrorResponse) Close() error {
return nil
}
| store/tikv/coprocessor.go | 0 | https://github.com/pingcap/tidb/commit/788dc28a7423d4b85ebe11bbce632733b056d63a | [
0.0004839011817239225,
0.00017573611694388092,
0.00015903635357972234,
0.0001707045448711142,
0.00003438277053646743
] |
{
"id": 4,
"code_window": [
"\tif err != nil {\n",
"\t\tjob.State = model.JobStateCancelled\n",
"\t\treturn ver, errors.Trace(err)\n",
"\t}\n",
"\n",
"\tif dbInfo.Charset == toCharset && dbInfo.Collate == toCollate {\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "ddl/schema.go",
"type": "replace",
"edit_start_line_idx": 120
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package core_test
import (
. "github.com/pingcap/check"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/util/testkit"
"github.com/pingcap/tidb/util/testleak"
"github.com/pingcap/tidb/util/testutil"
)
var _ = Suite(&testExpressionRewriterSuite{})
type testExpressionRewriterSuite struct {
}
func (s *testExpressionRewriterSuite) TestIfNullEliminateColName(c *C) {
defer testleak.AfterTest(c)()
store, dom, err := newStoreWithBootstrap()
c.Assert(err, IsNil)
tk := testkit.NewTestKit(c, store)
defer func() {
dom.Close()
store.Close()
}()
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int not null, b int not null)")
rs, err := tk.Exec("select ifnull(a,b) from t")
c.Assert(err, IsNil)
fields := rs.Fields()
c.Assert(fields[0].Column.Name.L, Equals, "ifnull(a,b)")
}
func (s *testExpressionRewriterSuite) TestBinaryOpFunction(c *C) {
defer testleak.AfterTest(c)()
store, dom, err := newStoreWithBootstrap()
c.Assert(err, IsNil)
tk := testkit.NewTestKit(c, store)
defer func() {
dom.Close()
store.Close()
}()
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t(a int, b int, c int);")
tk.MustExec("INSERT INTO t VALUES (1, 2, 3), (NULL, 2, 3 ), (1, NULL, 3),(1, 2, NULL),(NULL, 2, 3+1), (1, NULL, 3+1), (1, 2+1, NULL),(NULL, 2, 3-1), (1, NULL, 3-1), (1, 2-1, NULL)")
tk.MustQuery("SELECT * FROM t WHERE (a,b,c) <= (1,2,3) order by b").Check(testkit.Rows("1 1 <nil>", "1 2 3"))
tk.MustQuery("SELECT * FROM t WHERE (a,b,c) > (1,2,3) order by b").Check(testkit.Rows("1 3 <nil>"))
}
func (s *testExpressionRewriterSuite) TestDefaultFunction(c *C) {
defer testleak.AfterTest(c)()
store, dom, err := newStoreWithBootstrap()
c.Assert(err, IsNil)
tk := testkit.NewTestKit(c, store)
defer func() {
dom.Close()
store.Close()
}()
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec(`create table t1(
a varchar(10) default 'def',
b varchar(10),
c int default '10',
d double default '3.14',
e datetime default '20180101',
f datetime default current_timestamp);`)
tk.MustExec("insert into t1(a, b, c, d) values ('1', '1', 1, 1)")
tk.MustQuery(`select
default(a) as defa,
default(b) as defb,
default(c) as defc,
default(d) as defd,
default(e) as defe,
default(f) as deff
from t1`).Check(testutil.RowsWithSep("|", "def|<nil>|10|3.14|2018-01-01 00:00:00|<nil>"))
err = tk.ExecToErr("select default(x) from t1")
c.Assert(err.Error(), Equals, "[planner:1054]Unknown column 'x' in 'field list'")
tk.MustQuery("select default(a0) from (select a as a0 from t1) as t0").Check(testkit.Rows("def"))
err = tk.ExecToErr("select default(a0) from (select a+1 as a0 from t1) as t0")
c.Assert(err.Error(), Equals, "[table:1364]Field 'a0' doesn't have a default value")
tk.MustExec("create table t2(a varchar(10), b varchar(10))")
tk.MustExec("insert into t2 values ('1', '1')")
err = tk.ExecToErr("select default(a) from t1, t2")
c.Assert(err.Error(), Equals, "[planner:1052]Column 'a' in field list is ambiguous")
tk.MustQuery("select default(t1.a) from t1, t2").Check(testkit.Rows("def"))
tk.MustExec(`create table t3(
a datetime default current_timestamp,
b timestamp default current_timestamp,
c timestamp(6) default current_timestamp(6),
d varchar(20) default 'current_timestamp')`)
tk.MustExec("insert into t3 values ()")
tk.MustQuery(`select
default(a) as defa,
default(b) as defb,
default(c) as defc,
default(d) as defd
from t3`).Check(testutil.RowsWithSep("|", "<nil>|0000-00-00 00:00:00|0000-00-00 00:00:00.000000|current_timestamp"))
tk.MustExec(`create table t4(a int default 1, b varchar(5))`)
tk.MustExec(`insert into t4 values (0, 'B'), (1, 'B'), (2, 'B')`)
tk.MustExec(`create table t5(d int default 0, e varchar(5))`)
tk.MustExec(`insert into t5 values (5, 'B')`)
tk.MustQuery(`select a from t4 where a > (select default(d) from t5 where t4.b = t5.e)`).Check(testkit.Rows("1", "2"))
tk.MustQuery(`select a from t4 where a > (select default(a) from t5 where t4.b = t5.e)`).Check(testkit.Rows("2"))
tk.MustExec("prepare stmt from 'select default(a) from t1';")
tk.MustQuery("execute stmt").Check(testkit.Rows("def"))
tk.MustExec("alter table t1 modify a varchar(10) default 'DEF'")
tk.MustQuery("execute stmt").Check(testkit.Rows("DEF"))
tk.MustExec("update t1 set c = c + default(c)")
tk.MustQuery("select c from t1").Check(testkit.Rows("11"))
}
func (s *testExpressionRewriterSuite) TestCompareSubquery(c *C) {
defer testleak.AfterTest(c)()
store, dom, err := newStoreWithBootstrap()
c.Assert(err, IsNil)
tk := testkit.NewTestKit(c, store)
defer func() {
dom.Close()
store.Close()
}()
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("drop table if exists s")
tk.MustExec("create table t(a int, b int)")
tk.MustExec("create table s(a int, b int)")
tk.MustExec("insert into t values(1, null), (2, null)")
// Test empty checker.
tk.MustQuery("select a != any (select a from s) from t").Check(testkit.Rows(
"0",
"0",
))
tk.MustQuery("select b != any (select a from s) from t").Check(testkit.Rows(
"0",
"0",
))
tk.MustQuery("select a = all (select a from s) from t").Check(testkit.Rows(
"1",
"1",
))
tk.MustQuery("select b = all (select a from s) from t").Check(testkit.Rows(
"1",
"1",
))
tk.MustQuery("select * from t where a != any (select a from s)").Check(testkit.Rows())
tk.MustQuery("select * from t where b != any (select a from s)").Check(testkit.Rows())
tk.MustQuery("select * from t where a = all (select a from s)").Check(testkit.Rows(
"1 <nil>",
"2 <nil>",
))
tk.MustQuery("select * from t where b = all (select a from s)").Check(testkit.Rows(
"1 <nil>",
"2 <nil>",
))
// Test outer null checker.
tk.MustQuery("select b != any (select a from t t2) from t t1").Check(testkit.Rows(
"<nil>",
"<nil>",
))
tk.MustQuery("select b = all (select a from t t2) from t t1").Check(testkit.Rows(
"<nil>",
"<nil>",
))
tk.MustQuery("select * from t t1 where b != any (select a from t t2)").Check(testkit.Rows())
tk.MustQuery("select * from t t1 where b = all (select a from t t2)").Check(testkit.Rows())
tk.MustExec("delete from t where a = 2")
tk.MustQuery("select b != any (select a from t t2) from t t1").Check(testkit.Rows(
"<nil>",
))
tk.MustQuery("select b = all (select a from t t2) from t t1").Check(testkit.Rows(
"<nil>",
))
tk.MustQuery("select * from t t1 where b != any (select a from t t2)").Check(testkit.Rows())
tk.MustQuery("select * from t t1 where b = all (select a from t t2)").Check(testkit.Rows())
// Test inner null checker.
tk.MustExec("insert into t values(null, 1)")
tk.MustQuery("select b != any (select a from t t2) from t t1").Check(testkit.Rows(
"<nil>",
"<nil>",
))
tk.MustQuery("select b = all (select a from t t2) from t t1").Check(testkit.Rows(
"<nil>",
"<nil>",
))
tk.MustQuery("select * from t t1 where b != any (select a from t t2)").Check(testkit.Rows())
tk.MustQuery("select * from t t1 where b = all (select a from t t2)").Check(testkit.Rows())
tk.MustExec("delete from t where b = 1")
tk.MustExec("insert into t values(null, 2)")
tk.MustQuery("select b != any (select a from t t2) from t t1").Check(testkit.Rows(
"<nil>",
"1",
))
tk.MustQuery("select b = all (select a from t t2) from t t1").Check(testkit.Rows(
"<nil>",
"0",
))
tk.MustQuery("select * from t t1 where b != any (select a from t t2)").Check(testkit.Rows(
"<nil> 2",
))
tk.MustQuery("select * from t t1 where b = all (select a from t t2)").Check(testkit.Rows())
}
func (s *testExpressionRewriterSuite) TestCheckFullGroupBy(c *C) {
defer testleak.AfterTest(c)()
store, dom, err := newStoreWithBootstrap()
c.Assert(err, IsNil)
tk := testkit.NewTestKit(c, store)
defer func() {
dom.Close()
store.Close()
}()
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int)")
tk.MustQuery("select t1.a, (select max(t2.b) from t t2) from t t1").Check(testkit.Rows())
err = tk.ExecToErr("select t1.a, (select t2.a, max(t2.b) from t t2) from t t1")
c.Assert(terror.ErrorEqual(err, core.ErrMixOfGroupFuncAndFields), IsTrue, Commentf("err %v", err))
}
func (s *testExpressionRewriterSuite) TestPatternLikeToExpression(c *C) {
defer testleak.AfterTest(c)()
store, dom, err := newStoreWithBootstrap()
c.Assert(err, IsNil)
tk := testkit.NewTestKit(c, store)
defer func() {
dom.Close()
store.Close()
}()
tk.MustQuery("select 0 like 'a string';").Check(testkit.Rows("0"))
tk.MustQuery("select 0.0 like 'a string';").Check(testkit.Rows("0"))
tk.MustQuery("select 0 like '0.00';").Check(testkit.Rows("0"))
tk.MustQuery("select cast(\"2011-5-3\" as datetime) like \"2011-05-03\";").Check(testkit.Rows("0"))
tk.MustQuery("select 1 like '1';").Check(testkit.Rows("1"))
tk.MustQuery("select 0 like '0';").Check(testkit.Rows("1"))
tk.MustQuery("select 0.00 like '0.00';").Check(testkit.Rows("1"))
}
| planner/core/expression_rewriter_test.go | 0 | https://github.com/pingcap/tidb/commit/788dc28a7423d4b85ebe11bbce632733b056d63a | [
0.0001791645772755146,
0.00017283431952819228,
0.00016044128278736025,
0.00017402849334757775,
0.000004325046575104352
] |
{
"id": 0,
"code_window": [
"then \"aws/deploy\" would generate access keys for the \"deploy\" policy.\n",
"\n",
"The access keys will have a lease associated with them. The access keys\n",
"can be revoked by using the Vault ID.\n",
"`"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"can be revoked by using the lease ID.\n"
],
"file_path": "builtin/logical/aws/path_user.go",
"type": "replace",
"edit_start_line_idx": 150
} | package command
import (
"fmt"
"strings"
)
// RevokeCommand is a Command that mounts a new mount.
type RevokeCommand struct {
Meta
}
func (c *RevokeCommand) Run(args []string) int {
var prefix bool
flags := c.Meta.FlagSet("revoke", FlagSetDefault)
flags.BoolVar(&prefix, "prefix", false, "")
flags.Usage = func() { c.Ui.Error(c.Help()) }
if err := flags.Parse(args); err != nil {
return 1
}
args = flags.Args()
if len(args) != 1 {
flags.Usage()
c.Ui.Error(fmt.Sprintf(
"\nRevoke expects one argument: the ID to revoke"))
return 1
}
vaultId := args[0]
client, err := c.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error initializing client: %s", err))
return 2
}
if prefix {
err = client.Sys().RevokePrefix(vaultId)
} else {
err = client.Sys().Revoke(vaultId)
}
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Revoke error: %s", err))
return 1
}
c.Ui.Output(fmt.Sprintf("Key revoked with ID '%s'.", vaultId))
return 0
}
func (c *RevokeCommand) Synopsis() string {
return "Revoke a secret."
}
func (c *RevokeCommand) Help() string {
helpText := `
Usage: vault revoke [options] id
Revoke a secret by its Vault ID.
This command revokes a secret by its Vault ID that was returned
with it. Once the key is revoked, it is no longer valid.
With the -prefix flag, the revoke is done by prefix: any secret prefixed
with the given partial ID is revoked. Vault IDs are structured in such
a way to make revocation of prefixes useful.
General Options:
-address=TODO The address of the Vault server.
-ca-cert=path Path to a PEM encoded CA cert file to use to
verify the Vault server SSL certificate.
-ca-path=path Path to a directory of PEM encoded CA cert files
to verify the Vault server SSL certificate. If both
-ca-cert and -ca-path are specified, -ca-path is used.
-insecure Do not verify TLS certificate. This is highly
not recommended. This is especially not recommended
for unsealing a vault.
Revoke Options:
-prefix=true Revoke all secrets with the matching prefix. This
defaults to false: an exact revocation.
`
return strings.TrimSpace(helpText)
}
| command/revoke.go | 1 | https://github.com/hashicorp/vault/commit/c30d877fa422c9425c5e81bd904f81642b9fae87 | [
0.048124127089977264,
0.008644278161227703,
0.00016890863480512053,
0.0018520643934607506,
0.01473152730613947
] |
{
"id": 0,
"code_window": [
"then \"aws/deploy\" would generate access keys for the \"deploy\" policy.\n",
"\n",
"The access keys will have a lease associated with them. The access keys\n",
"can be revoked by using the Vault ID.\n",
"`"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"can be revoked by using the lease ID.\n"
],
"file_path": "builtin/logical/aws/path_user.go",
"type": "replace",
"edit_start_line_idx": 150
} | (function(){
var Chainable = function(engine){
this.engine = engine;
this._chain = [];
this._updateTimer = this._updateTimer.bind(this);
this._cycle = this._cycle.bind(this);
};
Chainable.prototype._running = false;
Chainable.prototype._updateTimer = function(tick){
this._timer += tick;
if (this._timer >= this._timerMax) {
this.resetTimer();
this._cycle();
}
};
Chainable.prototype.resetTimer = function(){
this.engine.updateChainTimer = undefined;
this._timer = 0;
this._timerMax = 0;
return this;
};
Chainable.prototype.start = function(){
if (this._running || !this._chain.length) {
return this;
}
this._running = true;
return this._cycle();
};
Chainable.prototype.reset = function(){
if (!this._running) {
return this;
}
this.resetTimer();
this._timer = 0;
this._running = false;
return this;
};
Chainable.prototype._cycle = function(){
var current;
if (!this._chain.length) {
return this.reset();
}
current = this._chain.shift();
if (current.type === 'function') {
current.func.apply(current.scope, current.args);
current = null;
return this._cycle();
}
if (current.type === 'wait') {
this.resetTimer();
// Convert timer to seconds
this._timerMax = current.time / 1000;
this.engine.updateChainTimer = this._updateTimer;
current = null;
}
return this;
};
Chainable.prototype.then = Chainable.prototype.exec = function(func, scope, args){
this._chain.push({
type : 'function',
func : func,
scope : scope || window,
args : args || []
});
return this.start();
};
Chainable.prototype.wait = function(time){
this._chain.push({
type : 'wait',
time : time
});
return this.start();
};
window.Chainable = Chainable;
})();
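// An added usage sketch (engine and banner are hypothetical objects): queue a
// call, a delay, then a follow-up. The host engine is expected to invoke the
// assigned engine.updateChainTimer(tick) callback every frame while a wait is
// pending, which is how the queued wait eventually completes.
//
//   var chain = new Chainable(engine);
//   chain.exec(function(){ banner.show(); })
//        .wait(500)
//        .then(function(){ banner.hide(); });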
| website/source/assets/javascripts/lib/Chainable.js | 0 | https://github.com/hashicorp/vault/commit/c30d877fa422c9425c5e81bd904f81642b9fae87 | [
0.0001722211018204689,
0.00016823489568196237,
0.00016495845920871943,
0.00016843706544023007,
0.0000024401872451562667
] |
{
"id": 0,
"code_window": [
"then \"aws/deploy\" would generate access keys for the \"deploy\" policy.\n",
"\n",
"The access keys will have a lease associated with them. The access keys\n",
"can be revoked by using the Vault ID.\n",
"`"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"can be revoked by using the lease ID.\n"
],
"file_path": "builtin/logical/aws/path_user.go",
"type": "replace",
"edit_start_line_idx": 150
} | ---
layout: "docs"
page_title: "Secret Backends"
sidebar_current: "docs-secrets"
description: |-
Secret backends are mountable backends that store or generate secrets in Vault.
---
# Secret Backends
Secret backends are the components in Vault which store and generate
secrets.
Some secret backends, such as "generic", simply store and read
secrets verbatim. Other secret backends, such as "aws", create _dynamic
secrets_: secrets that are made on demand.
Secret backends are part of the
[mount system](#)
in Vault. They behave very similarly to a virtual filesystem:
any read/write/delete is sent to the secret backend, and the secret
backend can choose to react to that operation however it sees fit.
For example, the "generic" backend passes through any operation back
to the configured physical backend for Vault. A "read" turns into a
"read" of the physical backend at the same path, a "write" turns into
a write, etc. This is a lot like a normal filesystem.
The "aws" backend, on the otherhand, behaves differently. When you
write to `aws/root`, it expects a certain format and stores that
information as configuration. You can't read from this path. When you
read from `aws/<name>`, it looks up an IAM policy named `<name>` and
generates AWS access credentials on demand and returns them. It doesn't
behave at all like a typical filesystem: you're not simply storing and
retrieving values, you're interacting with an API.
## Mounting/Unmounting Secret Backends
Secret backends can be mounted/unmounted using the CLI or the API.
There are three operations that can be performed with a secret backend
with regards to mounting:
* **Mount** - This mounts a new secret backend. Multiple secret
backends of the same type can be mounted at the same time by
specifying different mount points. By default, secret backends are
mounted to the same path as their name. This is what you want most
of the time.
* **Unmount** - This unmounts an existing secret backend. When a secret
backend is unmounted, all of its secrets are revoked (if they support
it), and all of the data stored for that backend in the physical storage
layer is deleted.
* **Remount** - This moves the mount point for an existing secret backend.
This revokes all secrets, since secret leases are tied to the path they
were created at. The data stored for the backend won't be deleted.
Once a secret backend is mounted, you can interact with it directly
at its mount point according to its own API. You can use the `vault help`
system to determine the paths it responds to.
## Barrier View
An important concept around secret backends is that they receive a
_barrier view_ to the configured Vault physical storage. This is a lot
like a [chroot](http://en.wikipedia.org/wiki/Chroot).
Whenever a secret backend is mounted, a random UUID is generated. This
becomes the data root for that backend. Whenever that backend writes to
the physical storage layer, it is prefixed with that UUID folder. Since
the Vault storage layer doesn't support relative access (such as `..`),
this makes it impossible for a mounted backend to access any other data.
This is an important security feature in Vault: even a malicious backend
can't access the data from any other backend.
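To make the chroot analogy concrete, here is a minimal sketch of the
prefixing idea. This is an illustration only, not Vault's actual
implementation; the `BarrierView` type, its fields, and the UUID strings are
all invented for the example:
```
package main

import "fmt"

// BarrierView gives a backend a view of storage in which every key is
// transparently prefixed with the mount's random UUID.
type BarrierView struct {
	storage map[string][]byte // stand-in for the physical backend
	prefix  string            // the mount's UUID folder
}

func (v *BarrierView) Put(key string, val []byte) { v.storage[v.prefix+key] = val }
func (v *BarrierView) Get(key string) []byte      { return v.storage[v.prefix+key] }

func main() {
	phys := map[string][]byte{}
	a := &BarrierView{storage: phys, prefix: "uuid-a/"}
	b := &BarrierView{storage: phys, prefix: "uuid-b/"}
	a.Put("foo", []byte("secret"))
	fmt.Println(b.Get("foo") == nil) // true: b cannot address a's data
}
```
Because the prefix is applied unconditionally and the storage layer rejects
relative access, two views over the same physical store can never read each
other's keys.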
| website/source/docs/secrets/index.html.md | 0 | https://github.com/hashicorp/vault/commit/c30d877fa422c9425c5e81bd904f81642b9fae87 | [
0.00665369164198637,
0.0013281613355502486,
0.00016520197095815092,
0.0005692389677278697,
0.0020350110717117786
] |
{
"id": 0,
"code_window": [
"then \"aws/deploy\" would generate access keys for the \"deploy\" policy.\n",
"\n",
"The access keys will have a lease associated with them. The access keys\n",
"can be revoked by using the Vault ID.\n",
"`"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"can be revoked by using the lease ID.\n"
],
"file_path": "builtin/logical/aws/path_user.go",
"type": "replace",
"edit_start_line_idx": 150
} | ---
layout: "intro"
page_title: "Starting the Server"
sidebar_current: "gettingstarted-devserver"
description: |-
After installing Vault, the next step is to start the server.
---
# Starting the Vault Server
With Vault installed, the next step is to start a Vault server.
Vault operates as a client/server application. The Vault server is the
only piece of the Vault architecture that interacts with the data
storage and backends. All operations done via the Vault CLI interact
with the server over a TLS connection.
On this page, we'll start and interact with the Vault server to understand
how the server is started and to walk through the seal/unseal process.
## Starting the Dev Server
To start, we're going to start the Vault _dev server_. The dev server,
enabled by a built-in flag, is a pre-configured server that is not very
secure but is useful for playing with Vault locally. Later in the getting
started guide we'll configure and start a real server.
To start the Vault dev server, run `vault server -dev`:
```
$ vault server -dev
WARNING: Dev mode is enabled!
In this mode, Vault is completely in-memory and unsealed.
Vault is configured to only have a single unseal key. The root
token has already been authenticated with the CLI, so you can
immediately begin using the Vault CLI.
The only step you need to take is to set the following
environment variable since Vault will be talking without TLS:
export VAULT_ADDR='http://127.0.0.1:8200'
The unseal key and root token are reproduced below in case you
want to seal/unseal the Vault or play with authentication.
Unseal Key: 2252546b1a8551e8411502501719c4b3
Root Token: 79bd8011-af5a-f147-557e-c58be4fedf6c
==> Vault server configuration:
Log Level: info
Backend: inmem
Listener 1: tcp (addr: "127.0.0.1:8200", tls: "disabled")
...
```
You should see output similar to that above. As you can see, when you
start a dev server, Vault warns you loudly. The dev server stores all
its data in-memory (but still encrypted), listens on localhost without TLS, and
automatically unseals and shows you the unseal key and root access key.
We'll go over what all this means shortly.
The important thing about the dev server is that it is meant for
development only. **Do not run the dev server in production.** Even if it
was run in production, it wouldn't be very useful since it stores data in-memory
and every restart would clear all your secrets.
With the dev server running, do the following three things before anything
else:
1. Copy and run the `export VAULT_ADDR ...` command from your terminal
output. This will configure the Vault client to talk to our dev server.
2. Save the unseal key somewhere. Don't worry about _how_ to save this
securely. For now, just save it anywhere.
3. Do the same as step 2, but with the root token. We'll use this later.
## Verify the Server is Running
Verify the server is running by running `vault seal-status`. This should
succeed and exit with exit code 0. If you see an error about opening
a connection, make sure you copied and executed the `export VAULT_ADDR...`
command from above properly.
If it ran successfully, the output should look like the following:
```
$ vault seal-status
Sealed: false
Key Shares: 1
Key Threshold: 1
Unseal Progress: 0
```
If the output looks different, especially if the numbers are different
or the Vault is sealed, then restart the dev server and try again. The
only reason these would ever be different is if you're re-using a dev
server left over from going through this guide previously.
We'll cover what this output means later in the guide.
## Next
Congratulations! You've started your first Vault server. We haven't stored
any secrets yet, but we'll do that in the next section.
Next, we're going to
[read and write our first secrets](/intro/getting-started/first-secret.html).
| website/source/intro/getting-started/dev-server.html.md | 0 | https://github.com/hashicorp/vault/commit/c30d877fa422c9425c5e81bd904f81642b9fae87 | [
0.002242415677756071,
0.0006435330142267048,
0.00016685422451701015,
0.00036053155781701207,
0.0006812463980168104
] |
{
"id": 1,
"code_window": [
"\n",
"func (c *RevokeCommand) Help() string {\n",
"\thelpText := `\n",
"Usage: vault revoke [options] id\n",
"\n",
" Revoke a secret by its Vault ID.\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
" Revoke a secret by its lease ID.\n"
],
"file_path": "command/revoke.go",
"type": "replace",
"edit_start_line_idx": 60
} | ---
layout: "intro"
page_title: "Built-in Help"
sidebar_current: "gettingstarted-help"
description: |-
Vault has a built-in help system to learn about the available paths in Vault and how to use them.
---
# Built-in Help
You've now worked with `vault write` and `vault read` for multiple paths:
the generic secret backend at `secret/` and dynamic AWS credentials with the
AWS backend mounted at `aws/`. In both cases, the usage of read/write and
the paths to use differed. AWS in particular had special paths like
`aws/config`.
Instead of having to memorize or reference documentation constantly
to determine what paths to use, we built a help system directly into
Vault. This help system can be accessed via the API or the command-line and
generates human-readable help for any mounted backend.
On this page, we'll learn how to use this help system. It is an invaluable
tool as you continue to work with Vault.
## Backend Overview
For this, we'll assume you have the AWS backend mounted. If not, mount
it with `vault mount aws`. Even if you don't have an AWS account, you
can still mount the AWS backend.
With the backend mounted, let's learn about it with `vault help`:
```
$ vault help aws
## DESCRIPTION
The AWS backend dynamically generates AWS access keys for a set of
IAM policies. The AWS access keys have a configurable lease set and
are automatically revoked at the end of the lease.
After mounting this backend, credentials to generate IAM keys must
be configured with the "root" path and policies must be written using
the "policy/" endpoints before any access keys can be generated.
## PATHS
The following paths are supported by this backend. To view help for
any of the paths below, use the help command with any route matching
the path pattern. Note that depending on the policy of your auth token,
you may or may not be able to access certain paths.
^(?P<name>\w+)$
Generate an access key pair for a specific policy.
^policy/(?P<name>\w+)$
Read and write IAM policies that access keys can be made for.
^root$
Configure the root credentials that are used to manage IAM.
```
The `vault help` command takes a path. By specifying the root path for
a mount, it will give us the overview of that mount. Notice how the help
not only contains a description, but also the exact regular expressions
used to match routes for this backend along with a brief description
of what the route is for.
## Path Help
After seeing the overview, we can continue to dive deeper by getting
help for an individual path. For this, just use `vault help` with a path
that would match the regular expression for that path. Note that the path
doesn't need to actually _work_. For example, we'll get the help below
for accessing `aws/operator`, even though we never wrote the `operator`
policy:
```
$ vault help aws/operator
Request: operator
Matching Route: ^(?P<name>\w+)$
Generate an access key pair for a specific policy.
## PARAMETERS
name (string)
Name of the policy
## DESCRIPTION
This path will generate a new, never before used key pair for
accessing AWS. The IAM policy used to back this key pair will be
the "name" parameter. For example, if this backend is mounted at "aws",
then "aws/deploy" would generate access keys for the "deploy" policy.
The access keys will have a lease associated with them. The access keys
can be revoked by using the Vault ID.
```
Within a path, we're given the parameters that this path requires.
Some parameters come from the route itself. In this case, the "name"
parameter is a named capture from the route regular expression.
There is also a description of what that path does.
Go ahead and explore more paths! Mount other backends, traverse their
help systems and learn about what they do. For example, learn about the
generic `secret/` path.
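The API side of this works by adding `help=1` as a query parameter to
any request path. Below is a minimal Go sketch of that call against the
dev server; the `X-Vault-Token` header name is an assumption based on
recent Vault versions (older releases authenticated differently), so
adjust it for your version:

```
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are exported, as earlier in the guide.
	url := os.Getenv("VAULT_ADDR") + "/v1/aws/operator?help=1"
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		panic(err)
	}
	// Assumption: recent Vault versions read the token from this header.
	req.Header.Set("X-Vault-Token", os.Getenv("VAULT_TOKEN"))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	// The response is JSON wrapping the same text `vault help` renders.
	fmt.Println(string(body))
}
```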
## Next
The help system may not be the most exciting feature of Vault, but it
is indispensable in day-to-day usage of Vault. The help system lets you
learn about how to use any backend within Vault without leaving the command
line.
Next, we'll learn about
[authentication](/intro/getting-started/authentication.html).
| website/source/intro/getting-started/help.html.md | 1 | https://github.com/hashicorp/vault/commit/c30d877fa422c9425c5e81bd904f81642b9fae87 | [
0.5689435005187988,
0.0551486611366272,
0.00016632748884148896,
0.0031445457134395838,
0.15537907183170319
] |
{
"id": 1,
"code_window": [
"\n",
"func (c *RevokeCommand) Help() string {\n",
"\thelpText := `\n",
"Usage: vault revoke [options] id\n",
"\n",
" Revoke a secret by its Vault ID.\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
" Revoke a secret by its lease ID.\n"
],
"file_path": "command/revoke.go",
"type": "replace",
"edit_start_line_idx": 60
} | package http
import (
"fmt"
"net"
"net/http"
"net/http/cookiejar"
"testing"
"time"
"github.com/hashicorp/vault/vault"
)
func TestServer(t *testing.T, core *vault.Core) (net.Listener, string) {
fail := func(format string, args ...interface{}) {
panic(fmt.Sprintf(format, args...))
}
if t != nil {
fail = t.Fatalf
}
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
fail("err: %s", err)
}
addr := "http://" + ln.Addr().String()
// Create a muxer to handle our requests so that we can authenticate
// for tests.
mux := http.NewServeMux()
mux.Handle("/_test/auth", http.HandlerFunc(testHandleAuth))
mux.Handle("/", Handler(core))
server := &http.Server{
Addr: ln.Addr().String(),
Handler: mux,
}
go server.Serve(ln)
return ln, addr
}
func TestServerAuth(t *testing.T, addr string, token string) {
// If no cookie jar is set on the default HTTP client, then setup the jar
if http.DefaultClient.Jar == nil {
jar, err := cookiejar.New(&cookiejar.Options{})
if err != nil {
t.Fatalf("err: %s", err)
}
http.DefaultClient.Jar = jar
}
// Get the internal path so that we set the cookie
if _, err := http.Get(addr + "/_test/auth?token=" + token); err != nil {
t.Fatalf("error authenticating: %s", err)
}
}
func testHandleAuth(w http.ResponseWriter, req *http.Request) {
token := req.URL.Query().Get("token")
http.SetCookie(w, &http.Cookie{
Name: AuthCookieName,
Value: token,
Path: "/",
Expires: time.Now().UTC().Add(1 * time.Hour),
})
respondOk(w, nil)
}
| http/testing.go | 0 | https://github.com/hashicorp/vault/commit/c30d877fa422c9425c5e81bd904f81642b9fae87 | [
0.0010981978848576546,
0.0002938345423899591,
0.00016777170822024345,
0.00017464501434005797,
0.00030435086227953434
] |
{
"id": 1,
"code_window": [
"\n",
"func (c *RevokeCommand) Help() string {\n",
"\thelpText := `\n",
"Usage: vault revoke [options] id\n",
"\n",
" Revoke a secret by its Vault ID.\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
" Revoke a secret by its lease ID.\n"
],
"file_path": "command/revoke.go",
"type": "replace",
"edit_start_line_idx": 60
} | ---
layout: "intro"
page_title: "Starting the Server"
sidebar_current: "gettingstarted-devserver"
description: |-
After installing Vault, the next step is to start the server.
---
# Starting the Vault Server
With Vault installed, the next step is to start a Vault server.
Vault operates as a client/server application. The Vault server is the
only piece of the Vault architecture that interacts with the data
storage and backends. All operations done via the Vault CLI interact
with the server over a TLS connection.
On this page, we'll start and interact with the Vault server to understand
how the server is started and to understand the seal/unseal process.
## Starting the Dev Server
To begin, we're going to start the Vault _dev server_. The dev server,
enabled with a built-in flag, is a pre-configured server that is not very
secure but is useful for playing with Vault locally. Later in the getting
started guide we'll configure and start a real server.
To start the Vault dev server, run `vault server -dev`:
```
$ vault server -dev
WARNING: Dev mode is enabled!
In this mode, Vault is completely in-memory and unsealed.
Vault is configured to only have a single unseal key. The root
token has already been authenticated with the CLI, so you can
immediately begin using the Vault CLI.
The only step you need to take is to set the following
environment variable since Vault will be talking without TLS:
export VAULT_ADDR='http://127.0.0.1:8200'
The unseal key and root token are reproduced below in case you
want to seal/unseal the Vault or play with authentication.
Unseal Key: 2252546b1a8551e8411502501719c4b3
Root Token: 79bd8011-af5a-f147-557e-c58be4fedf6c
==> Vault server configuration:
Log Level: info
Backend: inmem
Listener 1: tcp (addr: "127.0.0.1:8200", tls: "disabled")
...
```
You should see output similar to that above. As you can see, when you
start a dev server, Vault warns you loudly. The dev server stores all
its data in-memory (but still encrypted), listens on localhost without TLS, and
automatically unseals and shows you the unseal key and root token.
We'll go over what all this means shortly.
The important thing about the dev server is that it is meant for
development only. **Do not run the dev server in production.** Even if it
was run in production, it wouldn't be very useful since it stores data in-memory
and every restart would clear all your secrets.
With the dev server running, do the following three things before anything
else:
1. Copy and run the `export VAULT_ADDR ...` command from your terminal
output. This will configure the Vault client to talk to our dev server.
2. Save the unseal key somewhere. Don't worry about _how_ to save this
securely. For now, just save it anywhere.
3. Do the same as step 2, but with the root token. We'll use this later.
## Verify the Server is Running
Verify the server is running by running `vault seal-status`. This should
succeed and exit with exit code 0. If you see an error about opening
a connection, make sure you copied and executed the `export VAULT_ADDR...`
command from above properly.
If it ran successfully, the output should look like the following:
```
$ vault seal-status
Sealed: false
Key Shares: 1
Key Threshold: 1
Unseal Progress: 0
```
If the output looks different, especially if the numbers are different
or the Vault is sealed, then restart the dev server and try again. The
only reason these would ever be different is if you're running a dev
server left over from going through this guide previously.
We'll cover what this output means later in the guide.
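You can also check the same status from code. Here's a hedged Go sketch
using the `github.com/hashicorp/vault/api` client; the exact field names
(`N`, `T`, `Progress`) are assumptions based on recent versions of that
package:

```
package main

import (
	"fmt"

	"github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig picks up VAULT_ADDR, so the export above applies here too.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}
	status, err := client.Sys().SealStatus()
	if err != nil {
		panic(err)
	}
	// Mirrors the `vault seal-status` output shown above.
	fmt.Printf("Sealed: %v\nKey Shares: %d\nKey Threshold: %d\nUnseal Progress: %d\n",
		status.Sealed, status.N, status.T, status.Progress)
}
```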
## Next
Congratulations! You've started your first Vault server. We haven't stored
any secrets yet, but we'll do that in the next section.
Next, we're going to
[read and write our first secrets](/intro/getting-started/first-secret.html).
| website/source/intro/getting-started/dev-server.html.md | 0 | https://github.com/hashicorp/vault/commit/c30d877fa422c9425c5e81bd904f81642b9fae87 | [
0.0026926053687930107,
0.0006386758177541196,
0.00016755716933403164,
0.00035034146276302636,
0.0007037385366857052
] |
{
"id": 1,
"code_window": [
"\n",
"func (c *RevokeCommand) Help() string {\n",
"\thelpText := `\n",
"Usage: vault revoke [options] id\n",
"\n",
" Revoke a secret by its Vault ID.\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
" Revoke a secret by its lease ID.\n"
],
"file_path": "command/revoke.go",
"type": "replace",
"edit_start_line_idx": 60
} | package http
import (
"net/http"
"strings"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/vault"
)
func handleSysListPolicies(core *vault.Core) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method != "GET" {
respondError(w, http.StatusMethodNotAllowed, nil)
return
}
resp, ok := request(core, w, requestAuth(r, &logical.Request{
Operation: logical.ReadOperation,
Path: "sys/policy",
}))
if !ok {
return
}
var policies []string
policiesRaw, ok := resp.Data["keys"]
if ok {
policies = policiesRaw.([]string)
}
respondOk(w, &listPolicyResponse{Policies: policies})
})
}
func handleSysPolicy(core *vault.Core) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "GET":
handleSysReadPolicy(core, w, r)
case "PUT":
fallthrough
case "POST":
handleSysWritePolicy(core, w, r)
case "DELETE":
handleSysDeletePolicy(core, w, r)
default:
respondError(w, http.StatusMethodNotAllowed, nil)
return
}
})
}
func handleSysDeletePolicy(core *vault.Core, w http.ResponseWriter, r *http.Request) {
// Determine the path...
prefix := "/v1/sys/policy/"
if !strings.HasPrefix(r.URL.Path, prefix) {
respondError(w, http.StatusNotFound, nil)
return
}
path := r.URL.Path[len(prefix):]
if path == "" {
respondError(w, http.StatusNotFound, nil)
return
}
_, ok := request(core, w, requestAuth(r, &logical.Request{
Operation: logical.DeleteOperation,
Path: "sys/policy/" + path,
}))
if !ok {
return
}
respondOk(w, nil)
}
func handleSysReadPolicy(core *vault.Core, w http.ResponseWriter, r *http.Request) {
// Determine the path...
prefix := "/v1/sys/policy/"
if !strings.HasPrefix(r.URL.Path, prefix) {
respondError(w, http.StatusNotFound, nil)
return
}
path := r.URL.Path[len(prefix):]
if path == "" {
respondError(w, http.StatusNotFound, nil)
return
}
resp, ok := request(core, w, requestAuth(r, &logical.Request{
Operation: logical.ReadOperation,
Path: "sys/policy/" + path,
}))
if !ok {
return
}
respondOk(w, resp.Data)
}
func handleSysWritePolicy(core *vault.Core, w http.ResponseWriter, r *http.Request) {
// Determine the path...
prefix := "/v1/sys/policy/"
if !strings.HasPrefix(r.URL.Path, prefix) {
respondError(w, http.StatusNotFound, nil)
return
}
path := r.URL.Path[len(prefix):]
if path == "" {
respondError(w, http.StatusNotFound, nil)
return
}
// Parse the request if we can
var req writePolicyRequest
if err := parseRequest(r, &req); err != nil {
respondError(w, http.StatusBadRequest, err)
return
}
_, ok := request(core, w, requestAuth(r, &logical.Request{
Operation: logical.WriteOperation,
Path: "sys/policy/" + path,
Data: map[string]interface{}{
"rules": req.Rules,
},
}))
if !ok {
return
}
respondOk(w, nil)
}
type listPolicyResponse struct {
Policies []string `json:"policies"`
}
type writePolicyRequest struct {
Rules string `json:"rules"`
}
| http/sys_policy.go | 0 | https://github.com/hashicorp/vault/commit/c30d877fa422c9425c5e81bd904f81642b9fae87 | [
0.0011041957186535,
0.0003056842542719096,
0.0001665865129325539,
0.00018788140732795,
0.00024263016530312598
] |
{
"id": 2,
"code_window": [
"\n",
" This command revokes a secret by its Vault ID that was returned\n",
" with it. Once the key is revoked, it is no longer valid.\n",
"\n",
" With the -prefix flag, the revoke is done by prefix: any secret prefixed\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
" This command revokes a secret by its lease ID that was returned\n"
],
"file_path": "command/revoke.go",
"type": "replace",
"edit_start_line_idx": 62
} | package command
import (
"fmt"
"strings"
)
// RevokeCommand is a Command that mounts a new mount.
type RevokeCommand struct {
Meta
}
func (c *RevokeCommand) Run(args []string) int {
var prefix bool
flags := c.Meta.FlagSet("revoke", FlagSetDefault)
flags.BoolVar(&prefix, "prefix", false, "")
flags.Usage = func() { c.Ui.Error(c.Help()) }
if err := flags.Parse(args); err != nil {
return 1
}
args = flags.Args()
if len(args) != 1 {
flags.Usage()
c.Ui.Error(fmt.Sprintf(
"\nRevoke expects one argument: the ID to revoke"))
return 1
}
vaultId := args[0]
client, err := c.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error initializing client: %s", err))
return 2
}
if prefix {
err = client.Sys().RevokePrefix(vaultId)
} else {
err = client.Sys().Revoke(vaultId)
}
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Revoke error: %s", err))
return 1
}
c.Ui.Output(fmt.Sprintf("Key revoked with ID '%s'.", vaultId))
return 0
}
func (c *RevokeCommand) Synopsis() string {
return "Revoke a secret."
}
func (c *RevokeCommand) Help() string {
helpText := `
Usage: vault revoke [options] id
Revoke a secret by its Vault ID.
This command revokes a secret by its Vault ID that was returned
with it. Once the key is revoked, it is no longer valid.
With the -prefix flag, the revoke is done by prefix: any secret prefixed
with the given partial ID is revoked. Vault IDs are structured in such
a way to make revocation of prefixes useful.
General Options:
-address=TODO The address of the Vault server.
-ca-cert=path Path to a PEM encoded CA cert file to use to
verify the Vault server SSL certificate.
-ca-path=path Path to a directory of PEM encoded CA cert files
to verify the Vault server SSL certificate. If both
-ca-cert and -ca-path are specified, -ca-path is used.
-insecure Do not verify TLS certificate. This is highly
not recommended. This is especially not recommended
for unsealing a vault.
Revoke Options:
-prefix=true Revoke all secrets with the matching prefix. This
defaults to false: an exact revocation.
`
return strings.TrimSpace(helpText)
}
| command/revoke.go | 1 | https://github.com/hashicorp/vault/commit/c30d877fa422c9425c5e81bd904f81642b9fae87 | [
0.9907880425453186,
0.11381995677947998,
0.00016771686205174774,
0.005546629428863525,
0.2934829890727997
] |
{
"id": 2,
"code_window": [
"\n",
" This command revokes a secret by its Vault ID that was returned\n",
" with it. Once the key is revoked, it is no longer valid.\n",
"\n",
" With the -prefix flag, the revoke is done by prefix: any secret prefixed\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
" This command revokes a secret by its lease ID that was returned\n"
],
"file_path": "command/revoke.go",
"type": "replace",
"edit_start_line_idx": 62
} | package kvFlag
import (
"fmt"
"strings"
)
// Flag is a flag.Value implementation for parsing user variables
// from the command-line in the format of '-var key=value'.
type Flag map[string]string
func (v *Flag) String() string {
return ""
}
func (v *Flag) Set(raw string) error {
idx := strings.Index(raw, "=")
if idx == -1 {
return fmt.Errorf("No '=' value in arg: %s", raw)
}
if *v == nil {
*v = make(map[string]string)
}
key, value := raw[0:idx], raw[idx+1:]
(*v)[key] = value
return nil
}
| helper/flag-kv/flag.go | 0 | https://github.com/hashicorp/vault/commit/c30d877fa422c9425c5e81bd904f81642b9fae87 | [
0.0002764099626801908,
0.00022267608437687159,
0.0001829786633607,
0.0002086396561935544,
0.00003941334944101982
] |
{
"id": 2,
"code_window": [
"\n",
" This command revokes a secret by its Vault ID that was returned\n",
" with it. Once the key is revoked, it is no longer valid.\n",
"\n",
" With the -prefix flag, the revoke is done by prefix: any secret prefixed\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
" This command revokes a secret by its lease ID that was returned\n"
],
"file_path": "command/revoke.go",
"type": "replace",
"edit_start_line_idx": 62
} | package framework
import (
"bufio"
"bytes"
"fmt"
"strings"
"text/template"
)
func executeTemplate(tpl string, data interface{}) (string, error) {
// Define the functions
funcs := map[string]interface{}{
"indent": funcIndent,
}
// Parse the help template
t, err := template.New("root").Funcs(funcs).Parse(tpl)
if err != nil {
return "", fmt.Errorf("error parsing template: %s", err)
}
// Execute the template and store the output
var buf bytes.Buffer
if err := t.Execute(&buf, data); err != nil {
return "", fmt.Errorf("error executing template: %s", err)
}
return strings.TrimSpace(buf.String()), nil
}
func funcIndent(count int, text string) string {
var buf bytes.Buffer
prefix := strings.Repeat(" ", count)
scan := bufio.NewScanner(strings.NewReader(text))
for scan.Scan() {
buf.WriteString(prefix + scan.Text() + "\n")
}
return strings.TrimRight(buf.String(), "\n")
}
| logical/framework/template.go | 0 | https://github.com/hashicorp/vault/commit/c30d877fa422c9425c5e81bd904f81642b9fae87 | [
0.0004050660936627537,
0.00021716192713938653,
0.00016770389629527926,
0.00017183038289658725,
0.00009396841778652743
] |
{
"id": 2,
"code_window": [
"\n",
" This command revokes a secret by its Vault ID that was returned\n",
" with it. Once the key is revoked, it is no longer valid.\n",
"\n",
" With the -prefix flag, the revoke is done by prefix: any secret prefixed\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
" This command revokes a secret by its lease ID that was returned\n"
],
"file_path": "command/revoke.go",
"type": "replace",
"edit_start_line_idx": 62
} | //
// Variables
// --------------------------------------------------
// Global values
// --------------------------------------------------
$header-height: 90px;
$btn-border-radius: 4px;
$el-border-radius: 6px;
// colors
// -------------------------
$white: #fff;
$black: #000;
$gray: #929199;
$light-gray: #C2C1C7;
$faint-gray: #E3E3EA;
$dark-blue: #00ABE0;
$blue: #00BFE0;
$light-black: #242424;
$orange: #e78c5b;
$green: #5be764;
$gray-darker: #555;
$gray: #777;
$gray-light: #939393;
$gray-lighter: #979797;
$red: #dd4e58;
$red-dark: #c5454e;
$purple: #822ff7;
$light-purple: #f7f3f9;
$btn-color: #4592C5;
// Scaffolding
// -------------------------
$body-bg: #fff;
$text-color: $gray;
// Links
// -------------------------
$link-color: $red-dark;
$link-hover-color: darken($link-color, 15%);
// Typography
// -------------------------
$font-family-open-sans: 'Open Sans', "Helvetica Neue", Helvetica, Arial, sans-serif;
$font-family-lato: 'Lato', "Helvetica Neue", Helvetica, Arial, sans-serif;
$font-weight-lato-xl: 300;
$font-weight-lato-reg: 300;
$font-weight-lato-sb: 500;
$font-weight-lato-xb: 700;
$font-weight-open: $font-weight-lato-reg;
$text-shadow: 1px 1px 1px #000;
$shadow: $text-shadow;
//
// ----
$imagePath: "atlas/";
//margin + padding
$xsmall-pad: 20px;
$small-pad: 30px;
$med-pad: 70px;
$large-pad: 120px;
$xl-pad: 150px;
$light-outline: #f7f7f7; | website/source/assets/stylesheets/_variables.scss | 0 | https://github.com/hashicorp/vault/commit/c30d877fa422c9425c5e81bd904f81642b9fae87 | [
0.00018444481247570366,
0.0001737569400575012,
0.00016851435066200793,
0.0001717739214655012,
0.000005369920927478233
] |
{
"id": 3,
"code_window": [
" with it. Once the key is revoked, it is no longer valid.\n",
"\n",
" With the -prefix flag, the revoke is done by prefix: any secret prefixed\n",
" with the given partial ID is revoked. Vault IDs are structured in such\n",
" a way to make revocation of prefixes useful.\n",
"\n",
"General Options:\n",
"\n",
" -address=TODO The address of the Vault server.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" with the given partial ID is revoked. Lease IDs are structured in such\n"
],
"file_path": "command/revoke.go",
"type": "replace",
"edit_start_line_idx": 66
} | ---
layout: "intro"
page_title: "Dynamic Secrets"
sidebar_current: "gettingstarted-dynamicsecrets"
description: |-
On this page we introduce dynamic secrets by showing you how to create AWS access keys with Vault.
---
# Dynamic Secrets
Now that we've written basic secrets to Vault and we have an understanding
of the mount system, we're going to move on to the next core feature of
Vault: _dynamic secrets_.
Dynamic secrets are secrets that are generated when they're accessed,
rather than statically written ahead of time, as we did in
[Your First Secret](/intro/getting-started/first-secret.html).
On this page, we'll use the built-in AWS secret backend to dynamically
generate AWS access keys.
The power of dynamic secrets is that they simply don't exist before
they're read, so there is no risk of someone stealing them or another
client using the same secrets. And because Vault has built-in revocation
mechanisms (covered later), the dynamic secret can be revoked right after
use, minimizing the amount of time the secret existed.
-> **Note:** Before starting this page, please register for an
[AWS account](http://aws.amazon.com). We won't be using any features that
cost money, so you shouldn't be charged for anything. However, we're not
responsible for any charges you may incur.
## Mounting the AWS Backend
Let's generate our first dynamic secret. We'll use the AWS backend to
dynamically generate an AWS access key pair. First, mount the AWS backend:
```
$ vault mount aws
Successfully mounted 'aws' at 'aws'!
```
The AWS backend is now mounted at `aws/`. As we covered in a previous
section: different secret backends allow for different behavior, and in this
case the AWS backend is a dynamic backend for generating AWS access credentials.
## Configuring the AWS Backend
With the AWS backend mounted, the first step is to configure it with
the AWS credentials that will be used to create the other credentials.
For now, use the root keys for your AWS account.
To configure the backend, we use `vault write` to a special path
`aws/root`:
```
$ vault write aws/root \
access_key=AKIAI4SGLQPBX6CSENIQ \
secret_key=z1Pdn06b3TnpG+9Gwj3ppPSOlAsu08Qw99PUW+eB
Success! Data written to: aws/root
```
Remember that secret backends can behave any way they want when
reading/writing a path, so this path stores this configuration for
later. Notice you can't read it back:
```
$ vault read aws/root
TODO
```
To help keep the credentials secure, the AWS backend doesn't let you
read them back even if you're using a root credential.
## Writing a Policy
The next step is to configure the AWS backend with an IAM policy.
IAM is the system AWS uses for creating new credentials with limited
API permissions.
The AWS backend requires an IAM policy to associate created credentials
with. For this example, we'll write just one policy, but you can associate
many policies with the backend. Save a file named "policy.json" with the following contents:
```
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Stmt1426528957000",
"Effect": "Allow",
"Action": [
"ec2:*"
],
"Resource": [
"*"
]
}
]
}
```
This is a basic IAM policy that lets the user perform any action within
Amazon EC2. With the policy saved, write it to Vault:
```
$ vault write aws/policy/deploy [email protected]
Success! Data written to: aws/policy/deploy
```
Again, we're using a special path here `aws/policy/<NAME>` to write
an IAM policy to Vault. We also used the special syntax `@filename` with
`vault write` to write the contents of a file.
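If you're doing this from Go instead of the CLI, the equivalent write
looks roughly like the sketch below. The `policy` data key mirrors what
`[email protected]` sends; treat the details as assumptions for your
Vault version:

```
package main

import (
	"io/ioutil"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	policyJSON, err := ioutil.ReadFile("policy.json")
	if err != nil {
		log.Fatal(err)
	}
	// NewClient typically picks up VAULT_TOKEN from the environment as well.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Equivalent of: vault write aws/policy/deploy [email protected]
	_, err = client.Logical().Write("aws/policy/deploy", map[string]interface{}{
		"policy": string(policyJSON),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```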
## Generating the Secret
Now that we've configured the AWS backend and wrote a policy, we can now
request an access key pair for that policy. To do so, just read the
special path `aws/<NAME>` where `NAME` is the policy name:
```
$ vault read aws/deploy
Key Value
lease_id aws/deploy/0d042c53-aa8a-7ce7-9dfd-310351c465e5
access_key AKIAJFN42DVCQWDHQYHQ
secret_key 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
```
Success! The access and secret key can now be used to perform any EC2
operations within AWS. You can verify they work, if you want. Also notice
that these keys are new; they're not the keys you entered earlier.
The `lease_id` above is a special ID used by Vault for renewal,
revocation, etc. Copy and save your Lease ID now.
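In Go, the lease ID comes back as a field on the secret itself. A
minimal sketch with the `api` client (the `LeaseID` field name is taken
from recent versions of the package):

```
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Equivalent of: vault read aws/deploy
	secret, err := client.Logical().Read("aws/deploy")
	if err != nil {
		log.Fatal(err)
	}
	// Save this: it's what you pass to revoke later.
	fmt.Println("lease_id:", secret.LeaseID)
	fmt.Println("access_key:", secret.Data["access_key"])
}
```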
## Revoking the Secret
Let's complete the loop and revoke this secret now, purging it from
existence. Once the secret is revoked, the access keys will no longer
work.
To revoke the secret, use `vault revoke` with the vault ID that was
outputted from `vault read` when you ran it:
```
$ vault revoke aws/deploy/0d042c53-aa8a-7ce7-9dfd-310351c465e5
Key revoked with ID 'aws/deploy/0d042c53-aa8a-7ce7-9dfd-310351c465e5'.
```
Done! If you look at your AWS account, you'll notice that no IAM users
exist. If you try to use the access keys that were generated, you'll
find that they no longer work.
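The CLI command is a thin wrapper around the API's `Sys().Revoke` call
(you can see exactly that in the `command/revoke.go` source earlier in
this document), so the same revocation from Go is short. A minimal
sketch, reusing the example lease ID from above:

```
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Use the lease ID captured from the earlier read.
	leaseID := "aws/deploy/0d042c53-aa8a-7ce7-9dfd-310351c465e5"
	if err := client.Sys().Revoke(leaseID); err != nil {
		log.Fatalf("revoke error: %s", err)
	}
	log.Printf("revoked %s", leaseID)
}
```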
With such easy dynamic creation and revocation, you can hopefully begin
to see how easy it is to work with dynamic secrets and ensure they only
exist for the duration that they're needed.
## Next
On this page we experienced our first dynamic secret, and we also saw
the revocation system in action. Dynamic secrets are incredibly powerful.
As time goes on, we expect that more systems will support some sort of
API to create access credentials, and Vault will be ready to get the
most value out of this practice.
Before going further, we're going to take a quick detour to learn
about the
[built-in help system](/intro/getting-started/help.html).
| website/source/intro/getting-started/dynamic-secrets.html.md | 1 | https://github.com/hashicorp/vault/commit/c30d877fa422c9425c5e81bd904f81642b9fae87 | [
0.017057601362466812,
0.0026094538625329733,
0.00016478457837365568,
0.00026010480360127985,
0.0053940461948513985
] |
{
"id": 3,
"code_window": [
" with it. Once the key is revoked, it is no longer valid.\n",
"\n",
" With the -prefix flag, the revoke is done by prefix: any secret prefixed\n",
" with the given partial ID is revoked. Vault IDs are structured in such\n",
" a way to make revocation of prefixes useful.\n",
"\n",
"General Options:\n",
"\n",
" -address=TODO The address of the Vault server.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" with the given partial ID is revoked. Lease IDs are structured in such\n"
],
"file_path": "command/revoke.go",
"type": "replace",
"edit_start_line_idx": 66
} | Demo.DemoCrudController = Ember.ObjectController.extend({
needs: ['demo'],
isLoading: Ember.computed.alias('controllers.demo.isLoading'),
currentText: Ember.computed.alias('controllers.demo.currentText'),
currentLog: Ember.computed.alias('controllers.demo.currentLog'),
logPrefix: Ember.computed.alias('controllers.demo.logPrefix'),
currentMarker: Ember.computed.alias('controllers.demo.currentMarker'),
notCleared: Ember.computed.alias('controllers.demo.notCleared'),
sendCommand: function() {
// Request
Ember.run.later(this, function() {
var command = this.getWithDefault('currentText', '');
var currentLogs = this.get('currentLog').toArray();
// Add the last log item
currentLogs.push(command);
// Clean the state
this.set('currentText', '');
// Push the new logs
this.set('currentLog', currentLogs);
switch(command) {
case "clear":
this.set('currentLog', []);
this.set('notCleared', false);
break;
default:
console.log("Submitting: ", command);
}
this.set('isLoading', false);
}, 1000);
},
actions: {
submitText: function() {
this.set('isLoading', true);
// Send the actual request (fake for now)
this.sendCommand();
}
}
});
| website/source/assets/javascripts/demo/controllers/crud.js | 0 | https://github.com/hashicorp/vault/commit/c30d877fa422c9425c5e81bd904f81642b9fae87 | [
0.00016940913337748498,
0.00016714211960788816,
0.0001635128865018487,
0.00016806022904347628,
0.000002044055918304366
] |
{
"id": 3,
"code_window": [
" with it. Once the key is revoked, it is no longer valid.\n",
"\n",
" With the -prefix flag, the revoke is done by prefix: any secret prefixed\n",
" with the given partial ID is revoked. Vault IDs are structured in such\n",
" a way to make revocation of prefixes useful.\n",
"\n",
"General Options:\n",
"\n",
" -address=TODO The address of the Vault server.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" with the given partial ID is revoked. Lease IDs are structured in such\n"
],
"file_path": "command/revoke.go",
"type": "replace",
"edit_start_line_idx": 66
} | package aws
import (
"log"
"os"
"testing"
"time"
"github.com/hashicorp/aws-sdk-go/aws"
"github.com/hashicorp/aws-sdk-go/gen/ec2"
"github.com/hashicorp/vault/logical"
logicaltest "github.com/hashicorp/vault/logical/testing"
"github.com/mitchellh/mapstructure"
)
func TestBackend_basic(t *testing.T) {
logicaltest.Test(t, logicaltest.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Backend: Backend(),
Steps: []logicaltest.TestStep{
testAccStepConfig(t),
testAccStepWritePolicy(t, "test", testPolicy),
testAccStepReadUser(t, "test"),
},
})
}
func testAccPreCheck(t *testing.T) {
if v := os.Getenv("AWS_ACCESS_KEY_ID"); v == "" {
t.Fatal("AWS_ACCESS_KEY_ID must be set for acceptance tests")
}
if v := os.Getenv("AWS_SECRET_ACCESS_KEY"); v == "" {
t.Fatal("AWS_SECRET_ACCESS_KEY must be set for acceptance tests")
}
if v := os.Getenv("AWS_DEFAULT_REGION"); v == "" {
log.Println("[INFO] Test: Using us-west-2 as test region")
os.Setenv("AWS_DEFAULT_REGION", "us-west-2")
}
}
func testAccStepConfig(t *testing.T) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.WriteOperation,
Path: "root",
Data: map[string]interface{}{
"access_key": os.Getenv("AWS_ACCESS_KEY_ID"),
"secret_key": os.Getenv("AWS_SECRET_ACCESS_KEY"),
"region": os.Getenv("AWS_DEFAULT_REGION"),
},
}
}
func testAccStepReadUser(t *testing.T, name string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: name,
Check: func(resp *logical.Response) error {
var d struct {
AccessKey string `mapstructure:"access_key"`
SecretKey string `mapstructure:"secret_key"`
}
if err := mapstructure.Decode(resp.Data, &d); err != nil {
return err
}
log.Printf("[WARN] Generated credentials: %v", d)
// Sleep sometime because AWS is eventually consistent
log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
time.Sleep(10 * time.Second)
// Build a client and verify that the credentials work
creds := aws.Creds(d.AccessKey, d.SecretKey, "")
client := ec2.New(creds, "us-east-1", nil)
log.Printf("[WARN] Verifying that the generated credentials work...")
_, err := client.DescribeInstances(&ec2.DescribeInstancesRequest{})
if err != nil {
return err
}
return nil
},
}
}
func testAccStepWritePolicy(t *testing.T, name string, policy string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.WriteOperation,
Path: "policy/" + name,
Data: map[string]interface{}{
"policy": testPolicy,
},
}
}
const testPolicy = `
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Stmt1426528957000",
"Effect": "Allow",
"Action": [
"ec2:*"
],
"Resource": [
"*"
]
}
]
}
`
| builtin/logical/aws/backend_test.go | 0 | https://github.com/hashicorp/vault/commit/c30d877fa422c9425c5e81bd904f81642b9fae87 | [
0.00022935181914363056,
0.00019110007269773632,
0.00016560476797167212,
0.00019193737534806132,
0.000022288382751867175
] |
{
"id": 3,
"code_window": [
" with it. Once the key is revoked, it is no longer valid.\n",
"\n",
" With the -prefix flag, the revoke is done by prefix: any secret prefixed\n",
" with the given partial ID is revoked. Vault IDs are structured in such\n",
" a way to make revocation of prefixes useful.\n",
"\n",
"General Options:\n",
"\n",
" -address=TODO The address of the Vault server.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" with the given partial ID is revoked. Lease IDs are structured in such\n"
],
"file_path": "command/revoke.go",
"type": "replace",
"edit_start_line_idx": 66
} | package shamir
import (
"bytes"
"testing"
)
func TestSplit_invalid(t *testing.T) {
secret := []byte("test")
if _, err := Split(secret, 0, 0); err == nil {
t.Fatalf("expect error")
}
if _, err := Split(secret, 2, 3); err == nil {
t.Fatalf("expect error")
}
if _, err := Split(secret, 1000, 3); err == nil {
t.Fatalf("expect error")
}
if _, err := Split(secret, 10, 1); err == nil {
t.Fatalf("expect error")
}
if _, err := Split(nil, 3, 2); err == nil {
t.Fatalf("expect error")
}
}
func TestSplit(t *testing.T) {
secret := []byte("test")
out, err := Split(secret, 5, 3)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(out) != 5 {
t.Fatalf("bad: %v", out)
}
for _, share := range out {
if len(share) != len(secret)+1 {
t.Fatalf("bad: %v", out)
}
}
}
func TestCombine_invalid(t *testing.T) {
// Not enough parts
if _, err := Combine(nil); err == nil {
t.Fatalf("should err")
}
// Mis-match in length
parts := [][]byte{
[]byte("foo"),
[]byte("ba"),
}
if _, err := Combine(parts); err == nil {
t.Fatalf("should err")
}
// Too short
parts = [][]byte{
[]byte("f"),
[]byte("b"),
}
if _, err := Combine(parts); err == nil {
t.Fatalf("should err")
}
}
func TestCombine(t *testing.T) {
secret := []byte("test")
out, err := Split(secret, 5, 3)
if err != nil {
t.Fatalf("err: %v", err)
}
// There are 5*4*3 possible choices,
// we will just brute force try them all
for i := 0; i < 5; i++ {
for j := 0; j < 5; j++ {
if j == i {
continue
}
for k := 0; k < 5; k++ {
if k == i || k == j {
continue
}
parts := [][]byte{out[i], out[j], out[k]}
recomb, err := Combine(parts)
if err != nil {
t.Fatalf("err: %v", err)
}
if !bytes.Equal(recomb, secret) {
t.Errorf("parts: (i:%d, j:%d, k:%d) %v", i, j, k, parts)
t.Fatalf("bad: %v %v", recomb, secret)
}
}
}
}
}
func TestField_Add(t *testing.T) {
if out := add(16, 16); out != 0 {
t.Fatalf("Bad: %v 16", out)
}
if out := add(3, 4); out != 7 {
t.Fatalf("Bad: %v 7", out)
}
}
func TestField_Mult(t *testing.T) {
if out := mult(3, 7); out != 9 {
t.Fatalf("Bad: %v 9", out)
}
if out := mult(3, 0); out != 0 {
t.Fatalf("Bad: %v 0", out)
}
if out := mult(0, 3); out != 0 {
t.Fatalf("Bad: %v 0", out)
}
}
func TestField_Divide(t *testing.T) {
if out := div(0, 7); out != 0 {
t.Fatalf("Bad: %v 0", out)
}
if out := div(3, 3); out != 1 {
t.Fatalf("Bad: %v 1", out)
}
if out := div(6, 3); out != 2 {
t.Fatalf("Bad: %v 2", out)
}
}
func TestPolynomial_Random(t *testing.T) {
p, err := makePolynomial(42, 2)
if err != nil {
t.Fatalf("err: %v", err)
}
if p.coefficients[0] != 42 {
t.Fatalf("bad: %v", p.coefficients)
}
if p.coefficients[2] == 0 {
t.Fatalf("bad: %v", p.coefficients)
}
}
func TestPolynomial_Eval(t *testing.T) {
p, err := makePolynomial(42, 1)
if err != nil {
t.Fatalf("err: %v", err)
}
if out := p.evaluate(0); out != 42 {
t.Fatalf("bad: %v", out)
}
out := p.evaluate(1)
exp := add(42, mult(1, p.coefficients[1]))
if out != exp {
t.Fatalf("bad: %v %v %v", out, exp, p.coefficients)
}
}
func TestInterpolate_Rand(t *testing.T) {
for i := 0; i < 256; i++ {
p, err := makePolynomial(uint8(i), 2)
if err != nil {
t.Fatalf("err: %v", err)
}
x_vals := []uint8{1, 2, 3}
y_vals := []uint8{p.evaluate(1), p.evaluate(2), p.evaluate(3)}
out := interpolatePolynomial(x_vals, y_vals, 0)
if out != uint8(i) {
t.Fatalf("Bad: %v %d", out, i)
}
}
}
| shamir/shamir_test.go | 0 | https://github.com/hashicorp/vault/commit/c30d877fa422c9425c5e81bd904f81642b9fae87 | [
0.0007290481007657945,
0.00026212786906398833,
0.00016842165496200323,
0.00019064830848947167,
0.00014536156959366053
] |
{
"id": 4,
"code_window": [
"\n",
"Let's complete the loop and revoke this secret now, purging it from\n",
"existence. Once the secret is revoked, the access keys will no longer\n",
"work.\n",
"\n",
"To revoke the secret, use `vault revoke` with the vault ID that was\n",
"outputted from `vault read` when you ran it:\n",
"\n",
"```\n",
"$ vault revoke aws/deploy/0d042c53-aa8a-7ce7-9dfd-310351c465e5\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"To revoke the secret, use `vault revoke` with the lease ID that was\n"
],
"file_path": "website/source/intro/getting-started/dynamic-secrets.html.md",
"type": "replace",
"edit_start_line_idx": 140
} | ---
layout: "intro"
page_title: "Built-in Help"
sidebar_current: "gettingstarted-help"
description: |-
Vault has a built-in help system to learn about the available paths in Vault and how to use them.
---
# Built-in Help
You've now worked with `vault write` and `vault read` for multiple paths:
generic secret backend with `secret/` and dynamic AWS credentials with the
AWS backend provider at `aws/`. In both cases, the usage of read/write and
the paths to use differed. AWS in particular had special paths like
`aws/root`.
Instead of having to memorize or reference documentation constantly
to determine what paths to use, we built a help system directly into
Vault. This help system can be accessed via the API or the command-line and
generates human-readable help for any mounted backend.
On this page, we'll learn how to use this help system. It is an invaluable
tool as you continue to work with Vault.
## Backend Overview
For this, we'll assume you have the AWS backend mounted. If not, mount
it with `vault mount aws`. Even if you don't have an AWS account, you
can still mount the AWS backend.
With the backend mounted, let's learn about it with `vault help`:
```
$ vault help aws
## DESCRIPTION
The AWS backend dynamically generates AWS access keys for a set of
IAM policies. The AWS access keys have a configurable lease set and
are automatically revoked at the end of the lease.
After mounting this backend, credentials to generate IAM keys must
be configured with the "root" path and policies must be written using
the "policy/" endpoints before any access keys can be generated.
## PATHS
The following paths are supported by this backend. To view help for
any of the paths below, use the help command with any route matching
the path pattern. Note that depending on the policy of your auth token,
you may or may not be able to access certain paths.
^(?P<name>\w+)$
Generate an access key pair for a specific policy.
^policy/(?P<name>\w+)$
Read and write IAM policies that access keys can be made for.
^root$
Configure the root credentials that are used to manage IAM.
```
The `vault help` command takes a path. By specifying the root path for
a mount, it will give us the overview of that mount. Notice how the help
not only contains a description, but also the exact regular expressions
used to match routes for this backend along with a brief description
of what the route is for.
## Path Help
After seeing the overview, we can continue to dive deeper by getting
help for an individual path. For this, just use `vault help` with a path
that would match the regular expression for that path. Note that the path
doesn't need to actually _work_. For example, we'll get the help below
for accessing `aws/operator`, even though we never wrote the `operator`
policy:
```
$ vault help aws/operator
Request: operator
Matching Route: ^(?P<name>\w+)$
Generate an access key pair for a specific policy.
## PARAMETERS
name (string)
Name of the policy
## DESCRIPTION
This path will generate a new, never before used key pair for
accessing AWS. The IAM policy used to back this key pair will be
the "name" parameter. For example, if this backend is mounted at "aws",
then "aws/deploy" would generate access keys for the "deploy" policy.
The access keys will have a lease associated with them. The access keys
can be revoked by using the Vault ID.
```
Within a path, we're given the parameters that this path requires.
Some parameters come from the route itself. In this case, the "name"
parameter is a named capture from the route regular expression.
There is also a description of what that path does.
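To make the named-capture mechanics concrete, here's a small
self-contained Go sketch using the exact route pattern from the help
output above:

```
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The route pattern reported by `vault help aws/operator`.
	route := regexp.MustCompile(`^(?P<name>\w+)$`)
	m := route.FindStringSubmatch("operator")
	if m == nil {
		fmt.Println("no match")
		return
	}
	// SubexpNames lines up index-for-index with the submatches.
	for i, name := range route.SubexpNames() {
		if name != "" {
			fmt.Printf("%s = %s\n", name, m[i]) // prints: name = operator
		}
	}
}
```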
Go ahead and explore more paths! Mount other backends, traverse their
help systems and learn about what they do. For example, learn about the
generic `secret/` path.
## Next
The help system may not be the most exciting feature of Vault, but it
is indispensable in day-to-day usage of Vault. The help system lets you
learn about how to use any backend within Vault without leaving the command
line.
Next, we'll learn about
[authentication](/intro/getting-started/authentication.html).
| website/source/intro/getting-started/help.html.md | 1 | https://github.com/hashicorp/vault/commit/c30d877fa422c9425c5e81bd904f81642b9fae87 | [
0.02321254275739193,
0.004992065019905567,
0.000283111963653937,
0.0009573323186486959,
0.0075257569551467896
] |
{
"id": 4,
"code_window": [
"\n",
"Let's complete the loop and revoke this secret now, purging it from\n",
"existence. Once the secret is revoked, the access keys will no longer\n",
"work.\n",
"\n",
"To revoke the secret, use `vault revoke` with the vault ID that was\n",
"outputted from `vault read` when you ran it:\n",
"\n",
"```\n",
"$ vault revoke aws/deploy/0d042c53-aa8a-7ce7-9dfd-310351c465e5\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"To revoke the secret, use `vault revoke` with the lease ID that was\n"
],
"file_path": "website/source/intro/getting-started/dynamic-secrets.html.md",
"type": "replace",
"edit_start_line_idx": 140
} | .people {
margin-top: 30px;
.person {
margin-bottom: 40px;
h3 {
text-transform: none;
}
img {
width: 125px;
margin: auto auto;
}
.bio {
padding-left: 150px;
}
}
}
| website/source/assets/stylesheets/_community.scss | 0 | https://github.com/hashicorp/vault/commit/c30d877fa422c9425c5e81bd904f81642b9fae87 | [
0.0014757807366549969,
0.0006056847632862628,
0.00016790037625469267,
0.0001733732788125053,
0.0006152548012323678
] |
{
"id": 4,
"code_window": [
"\n",
"Let's complete the loop and revoke this secret now, purging it from\n",
"existence. Once the secret is revoked, the access keys will no longer\n",
"work.\n",
"\n",
"To revoke the secret, use `vault revoke` with the vault ID that was\n",
"outputted from `vault read` when you ran it:\n",
"\n",
"```\n",
"$ vault revoke aws/deploy/0d042c53-aa8a-7ce7-9dfd-310351c465e5\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"To revoke the secret, use `vault revoke` with the lease ID that was\n"
],
"file_path": "website/source/intro/getting-started/dynamic-secrets.html.md",
"type": "replace",
"edit_start_line_idx": 140
} | ---
layout: "docs"
page_title: "Configuration"
sidebar_current: "docs-config"
description: |-
Vault uses text files to describe infrastructure and to set variables. These text files are called Vault _configurations_ and end in `.tf`. This section talks about the format of these files as well as how they're loaded.
---
# Configuration
Vault uses text files to describe infrastructure and to set variables.
These text files are called Vault _configurations_ and end in
`.tf`. This section talks about the format of these files as well as
how they're loaded.
The configuration files can be in one of two formats:
Vault format and JSON. The Vault format is more human-readable,
supports comments, and is the generally recommended format for most
Vault files. The JSON format is meant for machines to create,
modify, and update, but can also be done by Vault operators if
you prefer. Vault format ends in `.tf` and JSON format ends in
`.tf.json`.
Click a sub-section in the navigation to the left to learn more about
Vault configuration.
| website/source/docs/configuration/index.html.md | 0 | https://github.com/hashicorp/vault/commit/c30d877fa422c9425c5e81bd904f81642b9fae87 | [
0.001984367845579982,
0.0016126573318615556,
0.0009586059604771435,
0.0018949981313198805,
0.0004639210528694093
] |
{
"id": 4,
"code_window": [
"\n",
"Let's complete the loop and revoke this secret now, purging it from\n",
"existence. Once the secret is revoked, the access keys will no longer\n",
"work.\n",
"\n",
"To revoke the secret, use `vault revoke` with the vault ID that was\n",
"outputted from `vault read` when you ran it:\n",
"\n",
"```\n",
"$ vault revoke aws/deploy/0d042c53-aa8a-7ce7-9dfd-310351c465e5\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"To revoke the secret, use `vault revoke` with the lease ID that was\n"
],
"file_path": "website/source/intro/getting-started/dynamic-secrets.html.md",
"type": "replace",
"edit_start_line_idx": 140
} | <%= partial "layouts/sidebar" %>
<div id="hero">
<div class="container">
<div class="row">
<div class="col-md-offset-3 col-md-6">
<div id="logo-lockup">
<div class="keys">
<span></span>
<span></span>
<span></span>
<span></span>
<span></span>
<span></span>
<span></span>
<span></span>
<span></span>
<span></span>
</div>
</div>
<h2 id="tag-line">A tool for managing secrets.</h2>
<div>
<a class="v-btn blue lrg" href="/intro">Get Started</a>
</div>
<div id="diagram"></div>
<p><span class="strong">Vault</span> secures, stores, and tightly controls access to tokens, passwords, certificates, API keys, and other secrets in modern computing. Vault handles leasing, key revocation, key rolling, and auditing. Vault presents a unified API to access multiple backends: HSMs, AWS IAM, SQL databases, raw key/value, and more.
</p>
</div>
</div>
</div>
</div>
<div id="demo-app"></div>
<div id="content">
<div class="container">
<div class="row">
<div class="col-md-offset-3 col-md-6">
<h2 class="featuer-header">Features</h2>
<div id="crud" class="feature">
<div class="graphic"></div>
<h3 class="">Secret Storage</h3>
<p>
Vault encrypts and provides access to any secrets. Leases can be associated with secrets, and Vault will automatically revoke secrets after the lease period ends. Access control policies provide strict control over who can access what secrets.
</p>
<div class="feature-footer">
<a class="v-btn black sml" href="/intro">Learn more</a>
</div>
</div> <!-- .feature -->
<div id="key" class="feature">
<div class="graphic"></div>
<h3 class="">Key Rolling</h3>
<p>
Every secret in Vault is associated with a lease. Clients must renew their secret within the lease period, or request a new secret. Key rolling is as simple as storing a new secret and revoking existing secrets or waiting for the lease period to expire.
<div class="feature-footer">
<a class="v-btn black sml" href="/intro">Learn more</a>
<a class="v-btn black sml terminal" href="/#/demo/crud">Launch Interactive Terminal</a>
</div>
</p>
</div> <!-- .feature -->
<div id="audit" class="feature">
<div class="graphic"></div>
<h3 class="">Audit Logs</h3>
<p>
Vault stores a detailed audit log of every interaction: authentication, token creation, secret access, secret revocation, and more. Audit logs can be sent to multiple backends to ensure redundant copies. Paired with Vault's strict leasing policies, operators can easily trace back to the source of any secret.
<div class="feature-footer">
<a class="v-btn black sml" href="/intro">Learn more</a>
</div>
</p>
</div> <!-- .feature -->
</div> <!-- .col -->
</div> <!-- .row -->
<div class="row">
<div class="col-sm-offset-1 col-sm-10 col-xs-12">
<div id="cta">
<a class="v-btn black sml" href="/intro/index.html">Get Started with Vault</a>
<p class="cta-black">Completely free and open source.</p>
<!--<p class="cta-gray">Vivamus venenatis augue.</p>-->
</div><!-- #cta -->
</div> <!-- .col -->
</div> <!-- .row -->
</div> <!-- /container -->
</div> <!-- #features -->
| website/source/index.html.erb | 0 | https://github.com/hashicorp/vault/commit/c30d877fa422c9425c5e81bd904f81642b9fae87 | [
0.01264648512005806,
0.004993882030248642,
0.00020946086442563683,
0.00162314937915653,
0.0048762415535748005
] |